% (dataset-extraction artifacts, not part of the paper:)
% text
% stringlengths
% 56
% 7.94M
\begin{document} \title[Finite abelian group actions on the Razak-Jacelon algebra]{Approximate representability of finite abelian group actions on the Razak-Jacelon algebra} \author{Norio Nawata} \address{Department of Pure and Applied Mathematics, Graduate School of Information Science and Technology, Osaka University, Yamadaoka 1-5, Suita, Osaka 565-0871, Japan} \email{[email protected]} \keywords{Razak-Jacelon algebra; Approximate representability; Rohlin property; Kirchberg's central sequence C$^*$-algebra.} \subjclass[2020]{Primary 46L55, Secondary 46L35; 46L40} \thanks{This work was supported by JSPS KAKENHI Grant Number 20K03630} \begin{abstract} Let $A$ be a simple separable nuclear monotracial C$^*$-algebra, and let $\alpha$ be an outer action of a finite abelian group $\Gamma$ on $A$. In this paper, we show that $\alpha\otimes \mathrm{id}_{\mathcal{W}}$ on $A\otimes\mathcal{W}$ is approximately representable if and only if the characteristic invariant of $\tilde{\alpha}$ is trivial, where $\mathcal{W}$ is the Razak-Jacelon algebra and $\tilde{\alpha}$ is the induced action on the injective II$_1$ factor $\pi_{\tau_{A}}(A)^{''}$. As an application of this result, we classify such actions up to conjugacy and cocycle conjugacy. We also construct the model actions. \end{abstract} \maketitle \section{Introduction} In \cite{Jones}, Jones gave a complete classification of finite group actions on the injective II$_1$ factor up to conjugacy. This can be regarded as a generalization of Connes' classification \cite{C3} of periodic automorphisms of the injective II$_1$ factor. In this paper, we study a C$^*$-analog of these results. There exist some difficulties for the classification of (amenable) group actions on ``classifiable'' C$^*$-algebras because of $K$-theoretical obstructions. We refer the reader to \cite{I} for details. 
In spite of these difficulties, Gabe and Szab\'o classified outer actions of countable discrete amenable groups on Kirchberg algebras up to cocycle conjugacy by equivariant $KK$-theory in \cite{GS}. This classification can be regarded as a C$^*$-analog in Kirchberg algebras of Ocneanu's classification theorem \cite{Oc}. For stably finite classifiable C$^*$-algebras, such a classification is an interesting open problem. The Razak-Jacelon algebra $\mathcal{W}$ (\cite{J} and \cite{Raz}) is the simple separable nuclear monotracial $\mathcal{Z}$-stable C$^*$-algebra which is $KK$-equivalent to $\{0\}$. We regard $\mathcal{W}$ as a monotracial analog of the Cuntz algebra $\mathcal{O}_2$. Indeed, if $A$ is a simple separable nuclear monotracial C$^*$-algebra, then $A\otimes\mathcal{W}$ is isomorphic to $\mathcal{W}$ by classification results in \cite{CE} and \cite{EGLN} (see also \cite{Na4}). Let $\alpha$ be an action on a simple separable nuclear monotracial C$^*$-algebra $A$. Then the action $\alpha\otimes\mathrm{id}_{\mathcal{W}}$ on $A\otimes\mathcal{W}$ is an action on $\mathcal{W}$. We call such a tensor product type action a \textit{$\mathcal{W}$-type action}. Note that there exist no $K$-theoretical obstructions for $\mathcal{W}$-type actions. Therefore we can recognize difficulties due to stable finiteness by studying $\mathcal{W}$-type actions. In \cite{Na5}, the author showed that if $\alpha$ is a strongly outer action of a countable discrete amenable group, then $\mathcal{W}$-type actions are unique up to cocycle conjugacy. (Note that an action $\alpha$ of a discrete countable group on a simple separable monotracial C$^*$-algebra $A$ is \textit{strongly outer} if the induced action $\tilde{\alpha}$ by $\alpha$ on $\pi_{\tau_A}(A)^{''}$ is outer, where $\pi_{\tau_A}$ is the Gelfand-Naimark-Segal (GNS) representation associated with the unique tracial state $\tau_A$ on $A$.) 
This result can be regarded as a monotracial analog of Szab\'o's equivariant Kirchberg-Phillips type absorption theorem for $\mathcal{O}_2$ \cite{Sza4}. In this paper, we study $\mathcal{W}$-type outer actions of finite abelian groups, which include ``weakly inner'' (or non-strongly outer) actions. One of the main results in this paper is the characterization of approximate representability of $\alpha\otimes\mathrm{id}_{\mathcal{W}}$ by using the characteristic invariant of $\tilde{\alpha}$. Note that approximate representability is the dual notion of the Rohlin property \cite{I1} (see also \cite{GHS}, \cite{GSan1}, \cite{Na0} and \cite{San}). Also, we can classify approximately representable actions by using classification results of C$^*$-algebras (see \cite{GSan2}, \cite{I1} and \cite{Na0}). As an application of this result, we can classify such actions up to conjugacy and cocycle conjugacy. For an action $\alpha$ on a simple separable monotracial C$^*$-algebra $A$ of a countable discrete group $\Gamma$, put $N(\tilde{\alpha}):=\{g\in \Gamma \; | \; \tilde{\alpha}_g=\mathrm{Ad}(u)\; \text{for some}\; u\in \pi_{\tau_A}(A)^{''}\}$ and let $i(\tilde{\alpha})$ be the inner invariant of $\tilde{\alpha}$ defined in \cite{Jones}. We show the following classification result in this paper. \begin{mainthm} (Corollary \ref{main:cor}) \ \\ Let $A$ and $B$ be simple separable nuclear monotracial C$^*$-algebras, and let $\alpha$ and $\beta$ be outer actions of a finite abelian group $\Gamma$ on $A$ and $B$, respectively. Assume that the characteristic invariants of $\tilde{\alpha}$ and $\tilde{\beta}$ are trivial. 
Then \ \\ (i) $\alpha\otimes \mathrm{id}_{\mathcal{W}}$ on $A\otimes\mathcal{W}$ and $\beta\otimes \mathrm{id}_{\mathcal{W}}$ on $B\otimes\mathcal{W}$ are cocycle conjugate if and only if $N(\tilde{\alpha})=N(\tilde{\beta})$; \ \\ (ii) $\alpha\otimes \mathrm{id}_{\mathcal{W}}$ on $A\otimes\mathcal{W}$ and $\beta\otimes \mathrm{id}_{\mathcal{W}}$ on $B\otimes\mathcal{W}$ are conjugate if and only if $N(\tilde{\alpha})=N(\tilde{\beta})$ and $i(\tilde{\alpha})=i(\tilde{\beta})$. \end{mainthm} To the author's best knowledge, this classification is the first abstract classification result for ``weakly inner'' (or non-strongly outer) actions on stably finite C$^*$-algebras without inductive limit type structures. Also, we construct the model actions of the actions in the theorem above. This construction might be of independent interest. \section{Preliminaries}\label{sec:pre} \subsection{Group actions} Let $\alpha$ and $\beta$ be actions of a discrete group $\Gamma$ on C$^*$-algebras $A$ and $B$, respectively. We say that $\alpha$ is \textit{conjugate} to $\beta$ if there exists an isomorphism $\theta$ from $A$ onto $B$ such that $\theta\circ \alpha_g= \beta_g\circ \theta$ for any $g\in\Gamma$. An \textit{$\alpha$-cocycle} is a map $u$ from $\Gamma$ to the unitary group of the multiplier algebra $M(A)$ of $A$ such that $u_{gh}=u_g\alpha_g(u_h)$. Note that we denote the induced action on $M(A)$ by the same symbol $\alpha$ for simplicity. We say that $\alpha$ is \textit{cocycle conjugate} to $\beta$ if there exist an isomorphism $\theta$ from $A$ to $B$ and a $\beta$-cocycle $u$ such that $\theta\circ \alpha_g=\mathrm{Ad}(u_g)\circ \beta_g \circ \theta$ for any $g\in \Gamma$. Let $$ N(\alpha):= \{g\in\Gamma \; |\; \alpha_g=\mathrm{Ad}(u)\; \text{for some}\; u\in M(A)\}. $$ It is said to be that $\alpha$ is \textit{outer} if $N(\alpha)=\{\iota\}$ where $\iota$ is the identity element in $\Gamma$. 
We denote by $A^{\alpha}$ and $A\rtimes_{\alpha}\Gamma$ the fixed point subalgebra and the reduced crossed product C$^*$-algebra, respectively. Let $E_{\alpha}$ denote the canonical conditional expectation from $A\rtimes_{\alpha}\Gamma$ onto $A$. If $A$ is simple and $\alpha$ is outer, then $A\rtimes_{\alpha}\Gamma$ is simple by \cite{K}. Assume that $\Gamma$ is a finite abelian group. Let $$ e_{\alpha}:= \frac{1}{|\Gamma|}\sum_{g\in\Gamma}\lambda_g\in M(A\rtimes_{\alpha}\Gamma) $$ where $\lambda_g$ is the implementing unitary of $\alpha_g$ in $M(A\rtimes_{\alpha}\Gamma)$ and $|\cdot|$ denotes cardinality. Then $e_{\alpha}$ is a projection and $e_{\alpha}(A\rtimes_{\alpha}\Gamma)e_{\alpha}$ is isomorphic to $A^{\alpha}$. We denote by $\hat{\alpha}$ the dual action of $\alpha$, that is, $\hat{\alpha}_{\eta}(\sum_{g\in \Gamma}a_g\lambda_g)= \sum_{g\in \Gamma}\eta(g)a_g\lambda_g$ for any $\sum_{g\in \Gamma}a_g\lambda_g\in A\rtimes_{\alpha}\Gamma$ and $\eta\in \hat{\Gamma}$. Note that $\hat{\Gamma}$ is isomorphic to $\Gamma$ since we assume that $\Gamma$ is a finite abelian group. Let $T_1(A)$ denote the tracial state space of $A$. Every tracial state on $A$ can be uniquely extended to a tracial state on $M(A)$. We denote it by the same symbol for simplicity. If $\varphi$ is a nondegenerate homomorphism from $A$ to $B$, then $\varphi$ induces an affine continuous map $T(\varphi)$ from $T_1(B)$ to $T_1(A)$ by $T(\varphi)(\tau)= \tau\circ \varphi$ for any $\tau\in T_1(B)$. Hence every action $\alpha$ on $A$ induces an action $T(\alpha)$ on $T_1(A)$. We denote by $T_1(A)^{\alpha}$ the fixed point set of this induced action. Straightforward arguments show the following proposition. \begin{pro}\label{pro:conjugacy-trace-spaces} Let $\alpha$ and $\beta$ be actions of a finite abelian group $\Gamma$ on C$^*$-algebras $A$ and $B$, respectively. 
Then \ \\ (i) if $\alpha$ and $\beta$ are cocycle conjugate, then there exists an affine homeomorphism $F$ from $T_1(B\rtimes_{\beta}\Gamma)$ onto $T_1(A\rtimes_{\alpha}\Gamma)$ such that $F\circ T(\hat{\beta}_{\eta})=T(\hat{\alpha}_{\eta})\circ F$ for any $\eta\in \hat{\Gamma}$; \ \\ (ii) if $\alpha$ and $\beta$ are conjugate, then there exists an affine homeomorphism $F$ from $T_1(B\rtimes_{\beta}\Gamma)$ onto $T_1(A\rtimes_{\alpha}\Gamma)$ such that $F(\tau)(e_{\alpha})=\tau (e_\beta)$ for any $\tau \in T_1(B\rtimes_{\beta}\Gamma)$ and $F\circ T(\hat{\beta}_{\eta})=T(\hat{\alpha}_{\eta})\circ F$ for any $\eta\in \hat{\Gamma}$. \end{pro} Let $\tau$ be an $\alpha$-invariant tracial state on $A$, that is, $\tau\in T_1(A)^{\alpha}$. Then $\alpha$ induces an action $\tilde{\alpha}$ on $\pi_{\tau}(A)^{''}$ where $\pi_{\tau}$ is the GNS representation associated with $\tau$. \subsection{Finite abelian group actions on the injective II$_1$ factor} We shall recall some results in \cite{Jones}. We refer the reader to \cite{Jones}, \cite{Jones2} and \cite{Oc} for details. Let $M$ be a II$_1$ factor, and let $\delta$ be an action of a finite abelian group $\Gamma$ on $M$. Although we can consider in a more general setting, we assume that $\Gamma$ is a finite abelian group for simplicity. Also, note that the von Neumann algebraic crossed product of $(M, \Gamma, \delta)$ is isomorphic to (the reduced crossed product C$^*$-algebra) $M\rtimes_{\delta}\Gamma$ by this assumption. By definition of $N(\delta)$, there exists a map $v$ from $N(\delta)$ to the unitary group of $M$ such that $\delta_h=\mathrm{Ad}(v_h)$ for any $h\in N(\delta)$. For any $g\in \Gamma$ and $h\in N(\delta)$, there exists a complex number $\lambda_{\delta}(g, h)$ with $|\lambda_{\delta} (g, h)|=1$ such that $\delta_g(v_h)=\lambda_{\delta}(g,h)v_h$. It is easy to see that $\lambda_{\delta}(g, h)$ does not depend on the choice of $v$. 
We say that \textit{the characteristic invariant of $\delta$ is trivial} if $\lambda_{\delta}(g,h)=1$ for any $g\in \Gamma$ and $h\in N(\delta)$. We refer the reader to \cite[Section 1.2]{Jones} for the precise definition of the characteristic invariant. Note that we may assume that $v$ is a unitary representation since $N(\delta)$ is a finite abelian group. Indeed, if $N(\delta)$ is a cyclic group generated by $g$ of order $n$, then there exists a complex number $\gamma$ with $|\gamma| =1$ such that $v_g^n=\gamma 1$. Choose an $n$-th root $\gamma^{\prime}$ of $\gamma$ and define a map $v^{\prime}$ from $N(\delta)$ to the unitary group of $M$ by $v^{\prime}_{g^k}:= \gamma^{\prime k}v_g^k$ for any $1\leq k\leq n$. Then $v^{\prime}$ is a unitary representation such that $\delta_h=\mathrm{Ad}(v_h^{\prime})$ for any $h\in N(\delta)$. Since every finite abelian group is a finite direct sum of cyclic groups of finite order, if $N(\delta)$ is a finite abelian group, then there exists such a unitary representation. \begin{pro}\label{pro:non-trivial-characteristic} Let $M$ be a II$_1$ factor, and let $\delta$ be an action of a finite abelian group $\Gamma$ on $M$. If the characteristic invariant of $\delta$ is not trivial, then the dual action $\hat{\delta}$ is not outer. \end{pro} \begin{proof} Since the characteristic invariant of $\delta$ is not trivial, there exist $g_0\in\Gamma$ and $h_0\in N(\delta)$ such that $\lambda_{\delta}(g_0, h_0)\neq 1$. Define a map $\eta_0$ from $\Gamma$ to $\mathbb{C}$ by $\eta_0(g):=\lambda_{\delta} (g, h_0)$ for any $g\in\Gamma$. Then $\eta_0$ is a nontrivial character on $\Gamma$. Let $v_{h_0}$ be a unitary element in $M$ such that $\delta_{h_0}=\mathrm{Ad}(v_{h_0})$. 
Then we have \begin{align*} \mathrm{Ad}(\lambda_{h_0}v_{h_0}^*)\left(\sum_{g\in \Gamma}a_g\lambda_g\right) &=\sum_{g\in\Gamma}\lambda_{h_0}v_{h_0}^*a_g \lambda_gv_{h_0}\lambda_{h_0}^* \\ &= \sum_{g\in\Gamma}\lambda_{h_0}v_{h_0}^*a_g \delta_{g}( v_{h_0})\lambda_g\lambda_{h_0}^* \\ &=\sum_{g\in\Gamma}\lambda_{\delta}(g,h_0) \lambda_{h_0}v_{h_0}^*a_gv_{h_0}\lambda_{h_0}^* \lambda_{g} \\ &=\sum_{g\in\Gamma}\lambda_{\delta}(g,h_0) \lambda_{h_0}\delta_{h^{-1}_0}(a_g)\lambda_{h_0}^* \lambda_{g} \\ &= \sum_{g\in\Gamma}\eta_0(g)a_g\lambda_{g} \\ &= \hat{\delta}_{\eta_0}\left( \sum_{g\in \Gamma}a_g\lambda_g\right) \end{align*} for any $\sum_{g\in \Gamma}a_g\lambda_g \in M\rtimes_{\delta}\Gamma$. Therefore $\hat{\delta}$ is not outer. \end{proof} In the rest of this section, we assume that the characteristic invariant of $\delta$ is trivial, and $v$ is a unitary representation of $N(\delta)$ on $M$ such that $\delta_h=\mathrm{Ad}(v_h)$ for any $h\in N(\delta)$. Define a homomorphism $\Phi_v$ from the group algebra $\mathbb{C}N(\delta)$ to $M$ by $\Phi_v(\sum_{h\in N(\delta)}c_h h)=\sum_{h\in N(\delta)}c_hv_h$. Let $P_{N(\delta)}$ be the set of minimal projections in $\mathbb{C}N(\delta)$, and let $M(P_{N(\delta)})$ be the set of probability measures on $P_{N(\delta)}$. Note that we have $P_{N(\delta)}=\left\{\frac{1}{|N(\delta)|}\sum_{h\in N(\delta)} \eta (h)h\; |\; \eta\in \hat{N(\delta)}\right\}$. Define a probability measure $m_{v}(\delta)$ on $P_{N(\delta)}$ by $m_{v}(\delta)(p):=\tau_{M}(\Phi_v(p))$ for any $p\in P_{N(\delta)}$ where $\tau_{M}$ is the unique tracial state on $M$. Note that $m_v(\delta)$ depends on the choice of $v$. For any character $\eta$ of $N(\delta)$, define an automorphism $\partial(\eta)$ of $\mathbb{C}N(\delta)$ by $\partial(\eta) (\sum_{h\in N(\delta)}c_h h)= \sum_{h\in N(\delta)}\eta(h)c_h h$. 
Define an equivalence relation $\sim$ on $M(P_{N(\delta)})$ by $m\sim m^{\prime}$ if there exists a character $\eta$ of $N(\delta)$ such that $m = m^{\prime }\circ \partial(\eta)$. Put $i(\delta):= [m_v(\delta)]\in M(P_{N(\delta)})/\sim$. Then $i(\delta)$ does not depend on the choice of $v$ and $i(\delta)$ is a conjugacy invariant. The following theorem is a part of Ocneanu's classification theorem and Jones' classification theorem. \begin{thm}\label{thm:jones} (Cf. \cite[Theorem 2.6]{Oc} and \cite[Theorem 1.4.8]{Jones}) \ \\ Let $\delta$ and $\delta^{\prime}$ be actions of a finite abelian group $\Gamma$ on the injective II$_1$ factor $M$. Assume that the characteristic invariants of $\delta$ and $\delta^{\prime}$ are trivial. Then \ \\ (i) $\delta$ and $\delta^{\prime}$ are cocycle conjugate if and only if $N(\delta)=N(\delta^{\prime})$; \ \\ (ii) $\delta$ and $\delta^{\prime}$ are conjugate if and only if $N(\delta)=N(\delta^{\prime})$ and $i(\delta)=i(\delta^{\prime})$. \end{thm} Define a map $\Pi_v$ from $\mathbb{C}N(\delta)$ to $M\rtimes_{\delta}\Gamma$ by $\Pi_v(\sum_{h\in N(\delta)}c_h h)=\sum_{h\in N(\delta)}c_h v_h\lambda_h^*$. Then $\Pi_v$ is an isomorphism from $\mathbb{C}N(\delta)$ onto the center $Z(M\rtimes_{\delta}\Gamma)$ of $M\rtimes_{\delta}\Gamma$ by \cite[Corollary 2.2.2]{Jones}. This implies that $T_1(M\rtimes_{\delta}\Gamma)$ is an $|N(\delta)|$-simplex. Indeed, for any $p\in P_{N(\delta)}$, define a tracial state $\tau_{p}$ on $M\rtimes_{\delta}\Gamma$ by $\tau_{p}(x):= |N(\delta)|\tau_{M}\circ E_{\delta}(\Pi_v(p)x)$ for any $x\in M\rtimes_{\delta}\Gamma$. Then $\tau_{p}$ is a unique tracial state on $\Pi_v(p)M\rtimes_{\delta}\Gamma$ since $\Pi_v(p)M\rtimes_{\delta}\Gamma$ is a factor. If $\tau$ is a tracial state on $M\rtimes_{\delta}\Gamma$, then $\tau(x)=\sum_{p\in P_{N(\delta)}}\tau (\Pi_v(p)x)=\sum_{p\in P_{N(\delta)}}\tau (\Pi_v(p))\tau_{p}(x)$ for any $x\in M\rtimes_{\delta}\Gamma$. 
Hence $T_1(M\rtimes_{\delta}\Gamma)$ is an $|N(\delta)|$-simplex and the set of extremal tracial states on $M\rtimes_{\delta}\Gamma$ is equal to $\{\tau_{p}\; |\; p\in P_{N(\delta)}\}$. Easy computations show that we have $\tau_{p}(e_{\delta})=\frac{|N(\delta)|}{|\Gamma|}\tau_{M}(\Phi_v(p))$ for any $p\in P_{N(\delta)}$. Therefore we can recover $i(\delta)$ by considering the extremal tracial states on $M\rtimes_{\delta}\Gamma$. \subsection{Finite abelian group actions on monotracial C$^*$-algebras} We say a C$^*$-algebra $A$ is \textit{monotracial} if $A$ has a unique tracial state and no unbounded traces. For a monotracial C$^*$-algebra $A$, we denote by $\tau_{A}$ the unique tracial state on $A$ unless otherwise specified. Let $A$ be a simple separable monotracial C$^*$-algebra, and let $\alpha$ be an outer action of a finite abelian group $\Gamma$ on $A$. Then $\pi_{\tau_{A}}(A)^{''}$ is a II$_1$ factor. Note that $A$ is not of type I since $A$ has an outer action. Of course, $N(\tilde{\alpha})$ and $i(\tilde{\alpha})$ are a cocycle conjugacy invariant and a conjugacy invariant for $\alpha$ on $A$, respectively. Since we assume that $A$ is simple, $\tau_A\circ E_{\alpha}$ is faithful. Hence we can regard $A\rtimes_{\alpha}\Gamma$ and $M(A\rtimes_{\alpha}\Gamma)$ as subalgebras of $\pi_{\tau_A\circ E_{\alpha}}(A\rtimes_{\alpha}\Gamma)^{''}\cong \pi_{\tau_{A}}(A)^{''}\rtimes_{\tilde{\alpha}}\Gamma$. \begin{lem}\label{lem:trace-spaces-crossed-products} Let $B$ be a simple separable C$^*$-algebra with a compact tracial state space $T_1(B)$, and let $\beta$ be an action of a finite group $\Gamma$ on $B$ with $T_1(B)^{\beta}=\{\tau_{0}\}$. Assume that $\pi_{\tau_{0}}(B)^{''}$ has finitely many extremal tracial states (this is equivalent to that every tracial state on $\pi_{\tau_{0}}(B)^{''}$ is normal). Then the restriction map $T_1(\pi_{\tau_{0}}(B)^{''})\ni \tau \mapsto \tau|_B\in T_1(B)$ is an affine homeomorphism. 
\end{lem} \begin{proof} It is obvious that the restriction map is a continuous affine map. Since $\pi_{\tau_{0}}(B)$ is weakly dense in $\pi_{\tau_{0}}(B)^{''}$ and every tracial state on $\pi_{\tau_{0}}(B)^{''}$ is normal, the restriction map is injective. We shall show the surjectivity. Let $\{\tau_1,\tau_2,...,\tau_{k}\}$ be the set of extremal tracial states on $\pi_{\tau_{0}}(B)^{''}$. Note that $\tau_{1}|_B$, $\tau_{2}|_B$,..., $\tau_{k}|_B$ are extremal tracial states on $B$ because $\tau$ is an extremal tracial state if and only if $\tau$ is a factorial tracial state. Since $T_1(B)$ is compact, it is enough to show that the set of extremal tracial states on $B$ is equal to $\{\tau_1|_B,\tau_2|_B,...,\tau_{k}|_B\}$ by the Krein-Milman theorem. On the contrary, suppose that there were an extremal tracial state $\sigma$ on $B$ such that $\sigma\notin \{\tau_1|_B,\tau_2|_B,...,\tau_{k}|_B\}$. Since $\tau_0$ is $\beta$-invariant, $\beta$ induces an action $\tilde{\beta}$ on $\pi_{\tau_{0}}(B)^{''}$. It is easy to see that $\{\tau_1,\tau_2,...,\tau_{k}\}$ is a $\tilde{\beta}$-invariant set. Hence $\{\tau_1|_B,\tau_2|_B,...,\tau_{k}|_B\}$ is a $\beta$-invariant set and $\sigma\circ \beta_g\notin \{\tau_1|_B,\tau_2|_B,...,\tau_{k}|_B\}$ for any $g\in \Gamma$. Since $\tau_0$ is the unique $\beta$-invariant tracial state on $B$, $$ \tau_0=\frac{1}{|\Gamma|}\sum_{g\in\Gamma}\tau_1\circ \beta_g= \frac{1}{|\Gamma|}\sum_{g\in\Gamma}\sigma \circ \beta_g . $$ Since $T_1(B)$ is a Choquet simplex, which we remind the reader requires $\tau_0$ to have a unique representation as a convex combination of the finitely many extremal traces of $T_1(B)$, this is a contradiction. \end{proof} \begin{pro}\label{pro:trace-spaces-crossed-products} Let $A$ be a simple separable monotracial C$^*$-algebra, and let $\alpha$ be an outer action of a finite abelian group $\Gamma$ on $A$. 
Then the restriction map $T_1(\pi_{\tau_A\circ E_{\alpha}}(A\rtimes_{\alpha}\Gamma)^{''})\ni \tau \mapsto \tau|_{A\rtimes_{\alpha}\Gamma}\in T_1(A\rtimes_{\alpha}\Gamma)$ is an affine homeomorphism. \end{pro} \begin{proof} By the outerness of $\alpha$, $A\rtimes_{\alpha}\Gamma$ is simple. \cite[Corollary 2.2.3]{Jones} implies that $\pi_{\tau_A\circ E_{\alpha}}(A\rtimes_{\alpha}\Gamma)^{''}\cong \pi_{\tau_{A}}(A)^{''}\rtimes_{\tilde{\alpha}}\Gamma$ has finitely many extremal tracial states. Since $A$ is monotracial, \cite[Lemma 2.2]{Na0} implies that $T_1(A\rtimes_{\alpha}\Gamma)$ is compact. Furthermore, we see that $T_1(A\rtimes_{\alpha}\Gamma)^{\hat{\alpha}}$ is a one point set by the Takesaki-Takai duality theorem. Applying Lemma \ref{lem:trace-spaces-crossed-products} to $B=A\rtimes_{\alpha}\Gamma$ and $\beta=\hat{\alpha}$, we obtain the conclusion. \end{proof} The following corollary is an immediate consequence of the proposition above and the previous subsection. \begin{cor} Let $A$ be a simple separable monotracial C$^*$-algebra, and let $\alpha$ be an outer action of a finite abelian group $\Gamma$ on $A$. Assume that the characteristic invariant of $\tilde{\alpha}$ is trivial. Then $T_1(A\rtimes_{\alpha}\Gamma)$ is an $|N(\tilde{\alpha})|$-simplex. \end{cor} A probability measure $m$ on a finite set $P$ is said to have \textit{full support} if $m(p)>0$ for any $p\in P$. \begin{pro}\label{pro:realized-invariant} Let $A$ be a simple separable monotracial C$^*$-algebra, and let $\alpha$ be an outer action of a finite abelian group $\Gamma$ on $A$. Assume that the characteristic invariant of $\tilde{\alpha}$ is trivial. If $m$ is a probability measure on $P_{N(\tilde{\alpha})}$ such that $i(\tilde{\alpha})=[m]$, then $m$ has full support. 
\end{pro} \begin{proof} By Proposition \ref{pro:trace-spaces-crossed-products} and the previous subsection, we may assume that $m(p)=\frac{|\Gamma|}{|N(\tilde{\alpha})|}\tau_{p}(e_{\alpha})$ for any $p\in P_{N(\tilde{\alpha})}$ where $\tau_{p}$ is the extremal tracial state on $A\rtimes_{\alpha}\Gamma$ corresponding to $p$. Since $\alpha$ is outer, $A\rtimes_{\alpha}\Gamma$ is simple. Hence we have $\tau_{p}(e_{\alpha})>0$ for any $p\in P_{N(\tilde{\alpha})}$ because $\tau_{p}$ is faithful and $e_{\alpha}$ is a non-zero projection in $M(A\rtimes_{\alpha}\Gamma)$. Therefore we obtain the conclusion. \end{proof} \subsection{Kirchberg's relative central sequence C$^*$-algebras} Fix a free ultrafilter $\omega$ on $\mathbb{N}$. Let $A$ and $B$ be C$^*$-algebras, and let $\Phi$ be a homomorphism from $A$ to $B$. Put $$ B^{\omega}:=\ell^{\infty}(\mathbb{N}, B)/\{\{x_n\}_{n\in\mathbb{N}}\in \ell^{\infty}(\mathbb{N}, B)\; |\; \lim_{n\to\omega} \|x_n\|=0\} $$ and we regard $B$ as a C$^*$-subalgebra of $B^{\omega}$ consisting of equivalence classes of constant sequences. We denote by $(x_n)_n$ a representative of an element in $B^{\omega}$. Set $$ F(\Phi (A), B)=B^{\omega}\cap \Phi(A)^{\prime}/ \{(x_n)_n\in B^{\omega}\cap \Phi(A)^{\prime}\; | \; (x_n \Phi(a))_n=0\; \text{for any}\; a\in A\} $$ and we call it \textit{Kirchberg's relative central sequence C$^*$-algebra}. If $A=B$ and $\Phi=\mathrm{id}_A$, then we denote $F(\Phi(A), B)$ by $F(B)$. Every action $\alpha$ of a discrete group $\Gamma$ on $B$ with $\alpha_g(\Phi(A))=\Phi(A)$ for any $g\in\Gamma$ induces an action on $F(\Phi(A), B)$. We denote it by the same symbol $\alpha$ for simplicity unless otherwise specified. Let $A$ be a simple separable non-type I nuclear monotracial C$^*$-algebra and $B$ a monotracial C$^*$-algebra with strict comparison, and let $\Phi$ be a homomorphism from $A$ to $B$. Assume that $\tau_{B}$ is faithful and $\tau_A=\tau_B\circ \Phi$. 
Then $F(\Phi(A), B)$ has a tracial state $\tau_{B, \omega}$ such that $\tau_{B,\omega}([(a_n)_n])=\lim_{n\to\omega}\tau_B(a_n)$ for any $[(a_n)_n]\in F(\Phi(A), B)$ by \cite[Proposition 2.1]{Na4}. Put $$ \mathcal{M}:= \ell^{\infty}(\mathbb{N}, \pi_{\tau_B}(B)^{''})/ \{\{x_n\}_{n\in\mathbb{N}}\in \ell^{\infty}(\mathbb{N}, \pi_{\tau_B}(B)^{''})\; |\; \lim_{n\to\omega}\tilde{\tau}_B(x_n^*x_n)=0\} $$ where $\tilde{\tau}_B$ is the unique normal extension of $\tau_B$ on $\pi_{\tau_B}(B)^{''}$, and let $$ \mathcal{M}(\Phi(A), B):= \mathcal{M}\cap \pi_{\tau_B}(\Phi(A))^{\prime}. $$ If $A=B$ and $\Phi=\mathrm{id}_A$, then we denote $\mathcal{M}(\Phi(A), B)$ by $\mathcal{M}(B)$. Note that $\mathcal{M}(B)$ is equal to the von Neumann algebraic central sequence algebra of $\pi_{\tau_{B}}(B)^{''}$. For any homomorphism $\Phi$ from $A$ to $B$, $\mathcal{M}(B)$ is a subalgebra of $\mathcal{M}(\Phi(A), B)$. Let $\beta$ be an action of a finite abelian group $\Gamma$ on $B$ such that $\beta_g(\Phi(A))=\Phi(A)$ for any $g\in\Gamma$. Then $\beta$ induces an action $\tilde{\beta}$ on $\mathcal{M}(\Phi(A), B)$. By \cite[Proposition 3.11]{Na5}, we have the following proposition. Note that \cite[Proposition 3.11]{Na5} is based on Matui and Sato's techniques \cite{MS}, \cite{MS2} and \cite{MS3} (with pioneering works \cite{Sa0} and \cite{Sa}). See also \cite{Sa2} and \cite{Sza6}. \begin{pro}\label{pro:strict-comparison} With notation as above, assume that $\mathcal{M}(\Phi(A), B)^{\tilde{\beta}}$ is a factor and $\beta_g|_{\Phi(A)}$ is outer for any $g\in\Gamma\setminus \{\iota\}$. If $a$ and $b$ are positive elements in $F(\Phi(A), B)^{\beta}$ satisfying $d_{\tau_{B},\omega}(a)< d_{\tau_{B}, \omega}(b)$, then there exists an element $r$ in $F(\Phi(A), B)^{\beta}$ such that $r^*br=a$. \end{pro} \section{Approximate representability and classification}\label{sec:app} We shall recall the definition of the Rohlin property and approximate representability for finite abelian group actions. 
\begin{Def} Let $A$ be a separable C$^*$-algebra, and let $\alpha$ be an action of a finite abelian group $\Gamma$ on $A$. \ \\ (i) We say that $\alpha$ has the \textit{Rohlin property} if there exists a partition of unity $\{p_g\}_{g\in\Gamma}$ consisting of projections in $F(A)$ such that $$ \alpha_g(p_h)=p_{gh} $$ for any $g,h\in \Gamma$. \ \\ (ii) We say that $\alpha$ is \textit{approximately representable} if there exists a map $w$ from $\Gamma$ to $(A^{\alpha})^{\omega}$ such that the map $u$ from $\Gamma$ to $F(A^{\alpha})$ given by $u_g=[w_g]$ is a unitary representation of $\Gamma$ and $$ \alpha_g(a)=w_gaw_g^* \quad \text{in} \quad A^{\omega} $$ for any $g\in\Gamma$ and $a\in A$. \end{Def} We refer the reader to \cite{I1}, \cite{GSan1}, \cite{Na0} and \cite{San} for basic properties of the Rohlin property and approximate representability. See \cite{GHS} for some generalization. \begin{pro}\label{pro:rohlin-outer} Let $A$ be a separable C$^*$-algebra, and let $\alpha$ be an action of a finite group $\Gamma$ on $A$. Assume that $\tau$ is an $\alpha$-invariant tracial state on $A$. If $\alpha$ has the Rohlin property, then $\tilde{\alpha}$ on $\pi_{\tau}(A)^{''}$ is outer. \end{pro} \begin{proof} Let $\mathcal{M}_{\omega}$ be a von Neumann algebraic central sequence algebra of $\pi_{\tau}(A)^{''}$. Then there exists a unital homomorphism from $F(A)$ to $\mathcal{M}_{\omega}$ (see, for example, \cite[Proposition 2.2]{Na2}). Hence there exists a partition of unity $\{P_g\}_{g\in\Gamma}$ consisting of projections in $\mathcal{M}_{\omega}$ such that $$ \tilde{\alpha}_g(P_h)=P_{gh} $$ for any $g,h\in \Gamma$ since $\alpha$ has the Rohlin property. This shows that $\tilde{\alpha}$ is outer. Indeed, if $\tilde{\alpha}_g$ is an inner automorphism of $\pi_{\tau}(A)^{''}$, then $\tilde{\alpha}_g(P_{\iota})=P_{\iota}$. Therefore we have $P_{g}=P_{\iota}$, and hence $g=\iota$. 
\end{proof} \begin{pro}\label{pro:unitary} Let $A$ be a separable C$^*$-algebra, and let $\alpha$ be an action of a finite group $\Gamma$ on $A$. For any $v\in (A^{\alpha})^{\omega}\cap (A^{\alpha})^{\prime}$, if $[v]$ is a unitary element in $F(A^{\alpha})$, then $v^*va=vv^*a=av^*v=avv^*=a$ for any $a\in A \subset A^{\omega}$. \end{pro} \begin{proof} Let $\{h_n\}_{n=1}^{\infty}$ be an approximate unit for $A^{\alpha}$. Then $v^*vh_n=h_n$ in $A^{\omega}$ for any $n\in\mathbb{N}$ because $[v]$ is a unitary element in $F(A^{\alpha})$. Since $\Gamma$ is a finite group, $A^{\alpha}\subset A$ is a nondegenerate inclusion. Hence $\{h_n\}_{n=1}^{\infty}$ is an approximate unit for $A$. Therefore, for any $a\in A$, we have $$ \| v^*va-a\| =\|v^*va- v^*vh_na+ v^*vh_na-a\| \leq \|v^*v\| \|a-h_na\|+ \| h_na-a\|\to 0 $$ as $n\to \infty$. Consequently, $v^*va=a$. Similar arguments show $vv^*a=av^*v=avv^*=a$. \end{proof} Let $A$ be a simple separable nuclear monotracial C$^*$-algebra, and let $\alpha$ be an outer action of a finite abelian group $\Gamma$ on $A$. We shall consider the action $\gamma:=\alpha\otimes\mathrm{id}_\mathcal{W}$ on $A\otimes\mathcal{W}$. We denote by $M_{n^{\infty}}$ the uniformly hyperfinite (UHF) algebra of type $n^{\infty}$. The following lemma is based on \cite[Lemma 3.10]{I1} and \cite[Proposition 2.1.3]{sut}. \begin{lem}\label{lem:sutherland} Let $A$ be a separable C$^*$-algebra, and let $\alpha$ be an action of a finite abelian group $\Gamma$ on $A$ and put $\gamma=\alpha\otimes\mathrm{id}_{\mathcal{W}}$. Assume that for any $g\in\Gamma$, there exists an element $v_g$ in $((A\otimes\mathcal{W})^{\gamma})^{\omega}$ such that $$ \gamma_g(a)=v_gav_g^* \quad \text{in} \quad (A\otimes\mathcal{W})^{\omega} $$ for any $a\in A\otimes\mathcal{W}$ and $[v_{g}]$ is a unitary element in $F((A\otimes\mathcal{W})^{\gamma})$. Then $\gamma$ on $A\otimes\mathcal{W}$ is approximately representable. 
\end{lem} \begin{proof} Since $\mathcal{W}$ is isomorphic to $\mathcal{W}\otimes M_{|\Gamma|^{\infty}}$, there exists a unital homomorphism $\psi$ from $M_{|\Gamma|}(\mathbb{C})$ to $F(A\otimes\mathcal{W})^{\gamma}$. For any $g,h\in \Gamma$, let $E_{g,h}\in (A\otimes\mathcal{W})^{\omega}\cap (A\otimes\mathcal{W})^{\prime}$ be a representative of $\psi (e_{g,h})$ where $\{e_{g,h}\}_{g,h\in \Gamma}$ are the matrix units of $M_{|\Gamma|}(\mathbb{C})$. Taking suitable subsequences, we may assume that $E_{g,h}\in \{v_k, v_k^*\; |\; k\in\Gamma\}^{\prime}$ for any $g,h\in\Gamma$. Moreover, we may assume that $E_{g,h}\in ((A\otimes\mathcal{W})^{\gamma})^{\omega}$ for any $g,h\in \Gamma$ by replacing $E_{g,h}$ with $\frac{1}{|\Gamma|}\sum_{k\in \Gamma} \gamma_k(E_{g,h})$. For any $g\in\Gamma$, let $z_g:=\sum_{h\in \Gamma}v_gv_hv_{gh}^*E_{h, gh}$. Note that we have $v_ga=\gamma_g(a)v_g$ and $\gamma_{g^{-1}}(a)v_g^*=v_g^*a$ in $(A\otimes \mathcal{W})^{\omega}$ for any $a\in A\otimes\mathcal{W}$ and $g\in\Gamma$ by Proposition \ref{pro:unitary}. Hence we have $z_g\in ((A\otimes\mathcal{W})^{\gamma})^{\omega}\cap (A\otimes\mathcal{W})^{\prime}$ for any $g\in\Gamma$. Define a map $w$ from $\Gamma$ to $((A\otimes\mathcal{W})^{\gamma})^{\omega}$ by $w_g:= z_g^*v_g$ for any $g\in \Gamma$. 
Note that we have \begin{align*} z_g^*z_ga &= \sum_{h\in \Gamma}E_{gh,h}v_{gh}v_h^*v_g^*\sum_{k\in \Gamma}v_gv_kv_{gk}^*E_{k, gk}a = \sum_{h,k\in \Gamma}v_{gh}v_h^*v_g^*v_gv_kv_{gk}^*E_{gh, h}E_{k, gk}a \\ &= \sum_{h\in \Gamma}v_{gh}v_h^*v_g^*v_gv_hv_{gh}^*E_{gh, gh}a = \sum_{h\in \Gamma}v_{gh}v_h^*v_g^*v_g\gamma_{g^{-1}}(a)v_hv_{gh}^*E_{gh, gh} \\ &= \sum_{h\in \Gamma} v_{gh}v_h^*\gamma_{g^{-1}}(a)v_hv_{gh}^*E_{gh, gh} = \sum_{h\in \Gamma} v_{gh}v_h^*v_h\gamma_{h^{-1}g^{-1}}(a)v_{gh}^*E_{gh, gh} \\ &= \sum_{h\in \Gamma} v_{gh}\gamma_{h^{-1}g^{-1}}(a)v_{gh}^*E_{gh, gh} = \sum_{h\in \Gamma} v_{gh}v_{gh}^*aE_{gh, gh} = \sum_{h\in \Gamma} aE_{gh, gh}=a \end{align*} in $(A\otimes\mathcal{W})^{\omega}$ for any $a\in A\otimes\mathcal{W}$ and $g\in \Gamma$. Hence we have $$ w_gaw_g^*=z_g^*v_gav_g^*z_g=z_g^*\gamma_g(a)z_g =z_g^*z_g\gamma_g(a)=\gamma_g(a) $$ in $(A\otimes\mathcal{W})^{\omega}$ for any $a\in A\otimes\mathcal{W}$ and $g\in\Gamma$. We shall show the map $u$ from $\Gamma$ to $F((A\otimes\mathcal{W})^{\gamma})$ given by $u_g=[w_g]$ is a unitary representation. In a similar way as above, we see that $ z_gz_g^*a=a $ in $(A\otimes\mathcal{W})^{\omega}$ for any $a\in A\otimes\mathcal{W}$ and $g\in \Gamma$. Hence the image of $u$ is contained in the unitary group of $F((A\otimes\mathcal{W})^{\gamma})$. 
Note that we have \begin{align*} v_gz_hv_g^*z_gz_{gh}^*a &=v_gz_hv_g^*\sum_{k\in \Gamma}v_gv_kv_{gk}^*E_{k, gk}\sum_{k^{\prime}\in \Gamma}E_{ghk^{\prime}, k^{\prime}} v_{ghk^{\prime}}v_{k^{\prime}}^*v_{gh}^*a \\ &= v_gz_hv_g^*\sum_{k,k^{\prime}\in \Gamma}v_gv_kv_{gk}^*v_{ghk^{\prime}}v_{k^{\prime}}^*v_{gh}^* E_{k, gk}E_{ghk^{\prime},k^{\prime}}a \\ &= v_gz_h\sum_{k\in \Gamma}v_g^*v_gv_kv_{gk}^*v_{gk}v_{h^{-1}k}^*v_{gh}^*E_{k,h^{-1}k}a \\ &= v_gz_h\sum_{k\in \Gamma}v_kv_{h^{-1}k}^*v_{gh}^*E_{k,h^{-1}k}a \\ &= v_g\sum_{k, k^{\prime}\in \Gamma}v_hv_{k^{\prime}}v_{hk^{\prime}}^* v_kv_{h^{-1}k}^*v_{gh}^*E_{k^{\prime}, hk^{\prime}}E_{k, h^{-1}k}a \\ &= v_g\sum_{k^{\prime}\in \Gamma}v_hv_{k^{\prime}}v_{hk^{\prime}}^* v_{hk^{\prime}}v_{k^{\prime}}^*v_{gh}^*E_{k^{\prime}, k^{\prime}}a \\ &= v_{g}v_{h}v_{gh}^*\sum_{k^{\prime}\in \Gamma}E_{k^{\prime}, k^{\prime}}a =v_{g}v_{h}v_{gh}^*a \end{align*} in $(A\otimes\mathcal{W})^{\omega}$ for any $a\in A\otimes\mathcal{W}$ and $g, h\in \Gamma$. This implies that $$ z_{gh}^*v_{gh}\gamma_{h^{-1}g^{-1}}(a) =z^*_gv_gz_{h}^*v_{h} \gamma_{h^{-1}g^{-1}}(a) \quad \text{in} \quad (A\otimes\mathcal{W})^{\omega} $$ for any $a\in A\otimes\mathcal{W}$ and $g, h\in \Gamma$. Consequently, we have $u_{gh}=u_{g}u_{h}$ for any $g,h\in \Gamma$. \end{proof} We have the following lemma by \cite[Corollary 4.6]{Na5}. \begin{lem}\label{lem:corollary 4.6} With notation as above, let $p$ and $q$ be projections in $F(A\otimes\mathcal{W})^{\gamma}$ such that $0<\tau_{A\otimes\mathcal{W}, \omega}(p)\leq 1$. Then $p$ and $q$ are Murray-von Neumann equivalent if and only if $\tau_{A\otimes\mathcal{W}, \omega}(p)=\tau_{A\otimes\mathcal{W}, \omega}(q)$. \end{lem} Fix $g_0\in \Gamma$. Define a homomorphism $\Phi_{g_0}$ from $A\otimes\mathcal{W}$ to $M_2(A\otimes\mathcal{W})$ by $$ \Phi_{g_0}(a)= \left(\begin{array}{cc} a & 0 \\ 0 & \gamma_{g_0}(a) \end{array} \right). 
$$ Since $\Gamma$ is abelian, we have $\gamma_g \otimes\mathrm{id}_{M_2(\mathbb{C})}(\Phi_{g_0}(A\otimes\mathcal{W}))=\Phi_{g_0}(A\otimes\mathcal{W})$ for any $g\in\Gamma$. Therefore $\gamma\otimes\mathrm{id}_{M_2(\mathbb{C})}$ induces an action on $F(\Phi_{g_0}(A\otimes\mathcal{W}), M_2(A\otimes\mathcal{W}))$. We denote it by $\beta$. Also, let $\tau_{\omega}$ denote the induced tracial state on $F(\Phi_{g_0}(A\otimes\mathcal{W}), M_2(A\otimes\mathcal{W}))$ by $\tau_{M_2(A\otimes\mathcal{W})}$. \begin{lem}\label{lem:strict-comparison} With notation as above, assume that the characteristic invariant of $\tilde{\alpha}$ is trivial. If $a$ and $b$ are positive elements in $F(\Phi_{g_0}(A\otimes\mathcal{W}), M_2(A\otimes\mathcal{W}))^{\beta}$ satisfying $d_{\tau_{\omega}}(a)< d_{\tau_{\omega}}(b)$, then there exists an element $r$ in $F(\Phi_{g_0}(A\otimes\mathcal{W}), M_2(A\otimes\mathcal{W}))^{\beta}$ such that $r^*br=a$. \end{lem} \begin{proof} By Proposition \ref{pro:strict-comparison}, it suffices to show that $\mathcal{M}(\Phi_{g_0}(A\otimes\mathcal{W}), M_2(A\otimes\mathcal{W}))^{\tilde{\beta}}$ is a factor. Since the characteristic invariant of $\tilde{\alpha}$ is trivial, there exists a group homomorphism $v$ from $N(\tilde{\alpha})$ to the unitary group of $(\pi_{\tau_{A}}(A)^{''})^{\tilde{\alpha}}$ such that $\tilde{\alpha}_g=\mathrm{Ad}(v_g)$ for any $g\in N(\tilde{\alpha})$. 
Since we have $$ \tilde{\gamma}_g\otimes\mathrm{id}_{M_2(\mathbb{C})}= \mathrm{Ad}\left( \left(\begin{array}{cc} v_g\otimes 1_{\pi_{\tau_{\mathcal{W}}}(\mathcal{W})^{''}} & 0 \\ 0 & v_g\otimes 1_{\pi_{\tau_{\mathcal{W}}}(\mathcal{W})^{''}} \end{array} \right)\right) $$ and $\tilde{\beta}_g([(x_n)_n])=[(\tilde{\gamma}_g\otimes\mathrm{id}_{M_2(\mathbb{C})}(x_n))_n]$, $$ \tilde{\beta}_g([(x_n)_n])= \left[\left(\mathrm{Ad}\left( \left(\begin{array}{cc} v_g\otimes 1_{\pi_{\tau_{\mathcal{W}}}(\mathcal{W})^{''}} & 0 \\ 0 & v_g\otimes 1_{\pi_{\tau_{\mathcal{W}}}(\mathcal{W})^{''}} \end{array} \right)\right)(x_n)\right)_n\right] $$ for any $g\in N(\tilde{\alpha})$ and $[(x_n)_n]$ in $\mathcal{M}(\Phi_{g_0}(A\otimes\mathcal{W}), M_2(A\otimes\mathcal{W}))^{\tilde{\beta}}$. Note that $$ \left(\begin{array}{cc} v_g\otimes 1_{\pi_{\tau_{\mathcal{W}}}(\mathcal{W})^{''}} & 0 \\ 0 & v_g\otimes 1_{\pi_{\tau_{\mathcal{W}}}(\mathcal{W})^{''}} \end{array} \right) \in \pi_{\tau_{\omega}}(\Phi_{g_0}(A\otimes\mathcal{W}))^{''} $$ because we have $v_g\otimes 1_{\pi_{\tau_{\mathcal{W}}}(\mathcal{W})^{''}}\in \pi_{\tau_{A\otimes\mathcal{W}}}(A\otimes\mathcal{W})^{''}$ and $$ \left(\begin{array}{cc} v_g\otimes 1_{\pi_{\tau_{\mathcal{W}}}(\mathcal{W})^{''}} & 0 \\ 0 & v_g\otimes 1_{\pi_{\tau_{\mathcal{W}}}(\mathcal{W})^{''}} \end{array} \right) =\left(\begin{array}{cc} v_g\otimes 1_{\pi_{\tau_{\mathcal{W}}}(\mathcal{W})^{''}} & 0 \\ 0 & \tilde{\gamma}_{g_0}(v_g\otimes 1_{\pi_{\tau_{\mathcal{W}}}(\mathcal{W})^{''}}) \end{array} \right). $$ Hence $\tilde{\beta}_g$ is the trivial automorphism for any $g\in N(\tilde{\alpha})$. 
Therefore $\gamma\otimes\mathrm{id}_{M_2(\mathbb{C})}$ induces an action $\delta$ of $\Gamma /N(\tilde{\alpha})$ on $\mathcal{M}(\Phi_{g_0}(A\otimes\mathcal{W}), M_2(A\otimes\mathcal{W}))$ such that $$ \mathcal{M}(\Phi_{g_0}(A\otimes\mathcal{W}), M_2(A\otimes\mathcal{W}))^{\delta} = \mathcal{M}(\Phi_{g_0}(A\otimes\mathcal{W}), M_2(A\otimes\mathcal{W}))^{\tilde{\beta}}. $$ Note that the restriction of $\delta$ on $\mathcal{M}(M_2(A\otimes\mathcal{W}))$ is strongly free or $\Gamma /N(\tilde{\alpha})=\{\iota\}$ by \cite[Theorem 3.2]{C3}, \cite[Lemma 5.6]{Oc} and the definition of $N(\tilde{\alpha})$. (Note that every centrally nontrivial automorphism of a factor is properly centrally nontrivial by definition. See \cite[Section 5.2]{Oc}.) Therefore \cite[Proposition 3.14]{Na5}(see also \cite[Remark 3.15]{Na5}) implies that $ \mathcal{M}(\Phi_{g_0}(A\otimes\mathcal{W}), M_2(A\otimes\mathcal{W}))^{\delta} $ is a factor. Consequently, $\mathcal{M}(\Phi_{g_0}(A\otimes\mathcal{W}), M_2(A\otimes\mathcal{W}))^{\tilde{\beta}}$ is a factor. \end{proof} The following theorem is one of the main results in this paper. \begin{thm}\label{thm:main} Let $A$ be a simple separable nuclear monotracial C$^*$-algebra, and let $\alpha$ be an outer action of a finite abelian group $\Gamma$ on $A$. Then $\gamma=\alpha\otimes\mathrm{id}_{\mathcal{W}}$ on $A\otimes\mathcal{W}$ is approximately representable if and only if the characteristic invariant of $\tilde{\alpha}$ is trivial. \end{thm} \begin{proof} First, we shall show the only if part. Assume that the characteristic invariant of $\tilde{\alpha}$ is not trivial. 
By Proposition \ref{pro:non-trivial-characteristic}, the dual action of $\tilde{\alpha}$ on $\pi_{\tau_{A}}(A)^{''}\rtimes_{\tilde{\alpha}}\Gamma$ is not outer, and hence the dual action of $\tilde{\gamma}$ on $\pi_{\tau_{A}\otimes\tau_{\mathcal{W}}}(A\otimes\mathcal{W})^{''}\rtimes_{\tilde{\gamma}}\Gamma \cong\pi_{\tau_A\otimes\tau_{\mathcal{W}}\circ E_{\gamma}}((A\otimes\mathcal{W})\rtimes_{\gamma}\Gamma)^{''}$ is not outer. Proposition \ref{pro:rohlin-outer} implies that $\hat{\gamma}$ does not have the Rohlin property. Therefore $\gamma$ is not approximately representable by \cite[Proposition 4.4]{Na0}. We shall show the if part. Fix $g_0\in \Gamma$. Let $\{h_n\}_{n\in\mathbb{N}}$ be an approximate unit for $(A\otimes\mathcal{W})^{\gamma}$. Note that $\{h_n\}_{n\in\mathbb{N}}$ is also an approximate unit for $A\otimes\mathcal{W}$. Put $$ P:=\left[\left(\left(\begin{array}{cc} h_n & 0 \\ 0 & 0 \end{array} \right)\right)_n\right] \quad \text{and} \quad Q:=\left[\left(\left(\begin{array}{cc} 0 & 0 \\ 0 & h_n \end{array} \right)\right)_n\right] $$ in $F(\Phi_{g_0}(A\otimes\mathcal{W}), M_2(A\otimes\mathcal{W}))$. Then $P$ and $Q$ are projections in $F(\Phi_{g_0}(A\otimes\mathcal{W}), M_2(A\otimes\mathcal{W}))^{\beta}$. Using \cite[Proposition 4.2]{Na5}, Lemma \ref{lem:corollary 4.6} and Lemma \ref{lem:strict-comparison} instead of \cite[Proposition 2.6]{Na3}, \cite[Corollary 5.5]{Na3} and \cite[Lemma 6.1]{Na3}, similar arguments as in the proof of \cite[Lemma 6.2]{Na3} show that $P$ is Murray-von Neumann equivalent to $Q$ in $F(\Phi_{g_0}(A\otimes\mathcal{W}), M_2(A\otimes\mathcal{W}))^{\beta}$. (See also the proof of \cite[Lemma 4.2]{Na4}.) Hence there exists an element $V$ in $F(\Phi_{g_0}(A\otimes\mathcal{W}), M_2(A\otimes\mathcal{W}))^{\beta}$ such that $V^*V=P$ and $VV^*=Q$. 
It is easy to see that there exists an element $v_{g_0}=(v_{g_0, n})_n$ in $(A\otimes\mathcal{W})^{\omega}$ such that $$ V=\left[\left(\left(\begin{array}{cc} 0 & 0 \\ v_{g_0,n} & 0 \end{array} \right)\right)_n\right]. $$ Since we have $\beta_g(V)=V$ for any $g\in\Gamma$, we see that $$ \left[\left(\left(\begin{array}{cc} 0 & 0 \\ v_{g_0,n} & 0 \end{array} \right)\right)_n\right] = \left[\left(\left(\begin{array}{cc} 0 & 0 \\ \frac{1}{|\Gamma|}\sum_{g\in\Gamma}\gamma_g(v_{g_0,n}) & 0 \end{array} \right)\right)_n\right]. $$ Hence we may assume that $v_{g_0}$ is an element in $((A\otimes\mathcal{W})^{\gamma})^{\omega}$. Since we have $V^*V=P$ and $VV^*=Q$, $$ av_{g_0}^*v_{g_0}=av_{g_0}v_{g_0}^*=a $$ for any $a\in A\otimes\mathcal{W}$. Furthermore, we have $$ v_{g_0}a=\gamma_{g_0}(a)v_{g_0} $$ for any $a\in A\otimes\mathcal{W}$ since $$ \left(\left(\begin{array}{cc} 0 & 0 \\ v_{g_0,n} & 0 \end{array} \right)\right)_n \in M_2(A\otimes\mathcal{W})^{\omega}\cap \Phi_{g_0}(A\otimes\mathcal{W})^{\prime}. $$ These imply that $$ \gamma_{g_0}(a)=v_{g_0}av_{g_0}^* $$ for any $a\in A\otimes\mathcal{W}$ and $[v_{g_0}]$ is a unitary element in $F((A\otimes\mathcal{W})^{\gamma})$. Since $g_0\in\Gamma$ is arbitrary, $\gamma$ is approximately representable by Lemma \ref{lem:sutherland}. \end{proof} Since $(A\otimes\mathcal{W})\rtimes_{\alpha\otimes\mathrm{id}_{\mathcal{W}}}\Gamma$ is isomorphic to $(A\rtimes_{\alpha}\Gamma)\otimes \mathcal{W}$, we see that $(A\otimes\mathcal{W})\rtimes_{\alpha\otimes\mathrm{id}_{\mathcal{W}}}\Gamma$ is in the class of Elliott-Gong-Lin-Niu's classification theorem \cite[Theorem 7.5]{EGLN} (see also \cite[Theorem A]{CE}). Furthermore, we see that $(A\otimes\mathcal{W})\rtimes_{\alpha\otimes\mathrm{id}_{\mathcal{W}}}\Gamma$ is in the class of Robert's classification theorem \cite{Rob}. In particular, these C$^*$-algebras and automorphisms can be classified by using trace spaces. 
Note that the map from $T_1(A\rtimes_{\alpha}\Gamma)$ to $T_1( (A\rtimes_{\alpha}\Gamma)\otimes \mathcal{W})$ given by $\tau \mapsto \tau\otimes \tau_{\mathcal{W}}$ is an affine homeomorphism. As an application of the theorem above and these classification theorems, we obtain the following classification result. \begin{thm} Let $A$ and $B$ be simple separable nuclear monotracial C$^*$-algebras, and let $\alpha$ and $\beta$ be outer actions of a finite abelian group $\Gamma$ on $A$ and $B$, respectively. Assume that the characteristic invariants of $\tilde{\alpha}$ and $\tilde{\beta}$ are trivial. Then \ \\ (i) $\alpha\otimes \mathrm{id}_{\mathcal{W}}$ on $A\otimes\mathcal{W}$ and $\beta\otimes \mathrm{id}_{\mathcal{W}}$ on $B\otimes\mathcal{W}$ are cocycle conjugate if and only if $\tilde{\alpha}$ on $\pi_{\tau_A}(A)^{''}$ and $\tilde{\beta}$ on $\pi_{\tau_B}(B)^{''}$ are cocycle conjugate; \ \\ (ii) $\alpha\otimes \mathrm{id}_{\mathcal{W}}$ on $A\otimes\mathcal{W}$ and $\beta\otimes \mathrm{id}_{\mathcal{W}}$ on $B\otimes\mathcal{W}$ are conjugate if and only if $\tilde{\alpha}$ on $\pi_{\tau_A}(A)^{''}$ and $\tilde{\beta}$ on $\pi_{\tau_B}(B)^{''}$ are conjugate. \end{thm} \begin{proof} (i) First, we shall show the only if part. Since $\alpha\otimes \mathrm{id}_{\mathcal{W}}$ and $\beta\otimes \mathrm{id}_{\mathcal{W}}$ are cocycle conjugate, $\tilde{\alpha}\otimes\mathrm{id}_{\pi_{\tau_{\mathcal{W}}}(\mathcal{W})^{''}}$ and $\tilde{\beta}\otimes\mathrm{id}_{\pi_{\tau_{\mathcal{W}}}(\mathcal{W})^{''}}$ are cocycle conjugate. Since $\tilde{\alpha}\otimes\mathrm{id}_{\pi_{\tau_{\mathcal{W}}}(\mathcal{W})^{''}}$ is conjugate to $\tilde{\alpha}$ by \cite[Corollary 5.2.3]{Jones}, we see that $\tilde{\alpha}$ and $\tilde{\beta}$ are cocycle conjugate. We shall show the if part. 
Since $\tilde{\alpha}$ and $\tilde{\beta}$ are cocycle conjugate, there exists an affine homeomorphism $F$ from $T_1(\pi_{\tau_B}(B)^{''}\rtimes_{\tilde{\beta}}\Gamma)$ onto $T_1(\pi_{\tau_A}(A)^{''}\rtimes_{\tilde{\alpha}}\Gamma)$ such that $F\circ T(\hat{\tilde{\beta}}_{\eta})=T(\hat{\tilde{\alpha}}_{\eta})\circ F$ for any $\eta\in \hat{\Gamma}$ by Proposition \ref{pro:conjugacy-trace-spaces}. Proposition \ref{pro:trace-spaces-crossed-products} implies that the restriction map $F|_{T_1(B\rtimes_{\beta}\Gamma)}$ is an affine homeomorphism from $T_1(B\rtimes_{\beta}\Gamma)$ onto $T_1(A\rtimes_{\alpha}\Gamma)$. Define a map $G$ from $T_1((B\otimes\mathcal{W})\rtimes_{\beta\otimes\mathrm{id}_{\mathcal{W}}}\Gamma)$ to $T_1((A\otimes\mathcal{W})\rtimes_{\alpha\otimes\mathrm{id}_{\mathcal{W}}}\Gamma)$ by $G(\tau\otimes\tau_{\mathcal{W}})= F(\tau)\otimes \tau_{\mathcal{W}}$ for any $\tau\in T_1(B\rtimes_{\beta}\Gamma)$. Then $G$ is an affine homeomorphism such that $G\circ T(\hat{\beta}_\eta\otimes\mathrm{id}_{\mathcal{W}}) = T(\hat{\alpha}_{\eta}\otimes\mathrm{id}_{\mathcal{W}})\circ G$ for any $\eta\in\hat{\Gamma}$. By Elliott-Gong-Lin-Niu's classification theorem \cite[Theorem 7.5]{EGLN}, there exists an isomorphism $\theta$ from $(A\otimes\mathcal{W})\rtimes_{\alpha\otimes\mathrm{id}_{\mathcal{W}}}\Gamma$ onto $(B\otimes\mathcal{W})\rtimes_{\beta\otimes\mathrm{id}_{\mathcal{W}}}\Gamma$ such that $T(\theta)=G$. Since we have $$ T(\theta \circ \hat{\alpha}_{\eta}\otimes\mathrm{id}_{\mathcal{W}} \circ \theta^{-1}) =G^{-1}\circ T(\hat{\alpha}_{\eta}\otimes\mathrm{id}_{\mathcal{W}})\circ G =T(\hat{\beta}_{\eta}\otimes\mathrm{id}_{\mathcal{W}}), $$ $\hat{\beta}_{\eta}\otimes\mathrm{id}_{\mathcal{W}}$ is approximately unitarily equivalent to $\theta \circ \hat{\alpha}_{\eta}\otimes\mathrm{id}_{\mathcal{W}} \circ \theta^{-1}$ for any $\eta\in \hat{\Gamma}$ by \cite[Theorem 1.0.1]{Rob} and \cite[Proposition 6.2.3]{Rob}. 
Therefore \cite[Theorem 3.5]{Na0} implies that $\hat{\alpha}\otimes\mathrm{id}_{\mathcal{W}}$ and $\hat{\beta}\otimes\mathrm{id}_{\mathcal{W}}$ are conjugate because $\hat{\alpha}\otimes\mathrm{id}_{\mathcal{W}}$ and $\hat{\beta}\otimes\mathrm{id}_{\mathcal{W}}$ have the Rohlin property by Theorem \ref{thm:main} and \cite[Proposition 4.4]{Na0}. Consequently, \cite[Proposition 5.4]{Na0} implies that $\alpha\otimes\mathrm{id}_{\mathcal{W}}$ and $\beta\otimes\mathrm{id}_{\mathcal{W}}$ are cocycle conjugate. \ \\ (ii) Since we can show the only if part by the same argument as in (i), we shall show the if part. By Proposition \ref{pro:conjugacy-trace-spaces}, there exists an affine homeomorphism $F$ from $T_1(\pi_{\tau_B}(B)^{''}\rtimes_{\tilde{\beta}}\Gamma)$ onto $T_1(\pi_{\tau_A}(A)^{''}\rtimes_{\tilde{\alpha}}\Gamma)$ such that $F(\tau)(e_{\tilde{\alpha}})=\tau (e_{\tilde{\beta}})$ for any $\tau\in T_1(\pi_{\tau_B}(B)^{''}\rtimes_{\tilde{\beta}}\Gamma)$ and $F\circ T(\hat{\tilde{\beta}}_{\eta})=T(\hat{\tilde{\alpha}}_{\eta})\circ F$ for any $\eta\in \hat{\Gamma}$. Note that we have $e_{\alpha}=e_{\tilde{\alpha}}$ and $e_{\beta}=e_{\tilde{\beta}}$ because we regard $M(A\rtimes_{\alpha}\Gamma)$ and $M(B\rtimes_{\beta}\Gamma)$ as subalgebras of $\pi_{\tau_A}(A)^{''}\rtimes_{\tilde{\alpha}}\Gamma$ and $\pi_{\tau_B}(B)^{''}\rtimes_{\tilde{\beta}}\Gamma$, respectively. By the same argument as in (i), we see that there exists an isomorphism $\theta$ from $(A\otimes\mathcal{W})\rtimes_{\alpha\otimes\mathrm{id}_{\mathcal{W}}}\Gamma$ onto $(B\otimes\mathcal{W})\rtimes_{\beta\otimes\mathrm{id}_{\mathcal{W}}}\Gamma$ such that $\hat{\beta}_{\eta}\otimes\mathrm{id}_{\mathcal{W}}$ is approximately unitarily equivalent to $\theta\circ \hat{\alpha}_{\eta}\otimes\mathrm{id}_{\mathcal{W}} \circ \theta^{-1}$ for any $\eta\in \hat{\Gamma}$. 
Since we have $e_{\alpha\otimes\mathrm{id}_{\mathcal{W}}}= e_{\alpha}\otimes 1_{\mathcal{W}^{\sim}}$ and $e_{\beta\otimes\mathrm{id}_{\mathcal{W}}}= e_{\beta}\otimes 1_{\mathcal{W}^{\sim}}$, $$ \tau\otimes\tau_{\mathcal{W}} (\theta (e_{\alpha\otimes\mathrm{id}_{\mathcal{W}}})) =F(\tau)\otimes \tau_{\mathcal{W}}(e_{\alpha}\otimes1_{\mathcal{W}^{\sim}}) =\tau \otimes\tau_{\mathcal{W}}(e_{\beta}\otimes1_{\mathcal{W}^{\sim}}) =\tau \otimes\tau_{\mathcal{W}}(e_{\beta\otimes\mathrm{id}_{\mathcal{W}}}) $$ for any $\tau\in T_1(B\rtimes_{\beta}\Gamma)$. Therefore \cite[Corollary 4.5]{Na0} implies that $\alpha\otimes\mathrm{id}_{\mathcal{W}}$ and $\beta\otimes\mathrm{id}_{\mathcal{W}}$ are conjugate because $\alpha\otimes\mathrm{id}_{\mathcal{W}}$ and $\beta\otimes\mathrm{id}_{\mathcal{W}}$ are approximately representable by Theorem \ref{thm:main}. \end{proof} The following corollary is an immediate consequence of the theorem above and Theorem \ref{thm:jones}. \begin{cor}\label{main:cor} Let $A$ and $B$ be simple separable nuclear monotracial C$^*$-algebras, and let $\alpha$ and $\beta$ be outer actions of a finite abelian group $\Gamma$ on $A$ and $B$, respectively. Assume that the characteristic invariants of $\tilde{\alpha}$ and $\tilde{\beta}$ are trivial. Then \ \\ (i) $\alpha\otimes \mathrm{id}_{\mathcal{W}}$ on $A\otimes\mathcal{W}$ and $\beta\otimes \mathrm{id}_{\mathcal{W}}$ on $B\otimes\mathcal{W}$ are cocycle conjugate if and only if $N(\tilde{\alpha})=N(\tilde{\beta})$; \ \\ (ii) $\alpha\otimes \mathrm{id}_{\mathcal{W}}$ on $A\otimes\mathcal{W}$ and $\beta\otimes \mathrm{id}_{\mathcal{W}}$ on $B\otimes\mathcal{W}$ are conjugate if and only if $N(\tilde{\alpha})=N(\tilde{\beta})$ and $i(\tilde{\alpha})=i(\tilde{\beta})$. \end{cor} If $\delta$ is an action of a finite cyclic group $\Gamma$ with prime order, then $N(\delta)=\Gamma$ or $N(\delta)=\{\iota\}$. Hence we have the following corollary. 
\begin{cor} Let $A$ and $B$ be simple separable nuclear monotracial C$^*$-algebras, and let $\alpha$ and $\beta$ be outer actions of a finite cyclic group $\Gamma$ with prime order on $A$ and $B$, respectively. Then \ \\ (i) $\alpha\otimes \mathrm{id}_{\mathcal{W}}$ on $A\otimes\mathcal{W}$ and $\beta\otimes \mathrm{id}_{\mathcal{W}}$ on $B\otimes\mathcal{W}$ are cocycle conjugate if and only if $N(\tilde{\alpha})=N(\tilde{\beta})$; \ \\ (ii) $\alpha\otimes \mathrm{id}_{\mathcal{W}}$ on $A\otimes\mathcal{W}$ and $\beta\otimes \mathrm{id}_{\mathcal{W}}$ on $B\otimes\mathcal{W}$ are conjugate if and only if $N(\tilde{\alpha})=N(\tilde{\beta})$ and $i(\tilde{\alpha})=i(\tilde{\beta})$. \end{cor} \section{Model actions} In this section, we shall construct simple separable nuclear monotracial C$^*$-algebras $A$ and outer actions $\alpha$ on $A$ with arbitrary invariants $(N(\tilde{\alpha}), i(\tilde{\alpha}))$ in Corollary \ref{main:cor} with the restrictions demanded by Proposition \ref{pro:realized-invariant}. In particular, $A$ can be chosen to be approximately finite-dimensional (AF) algebras. We shall give a summary of the construction. First, we construct ``inner'' actions $\alpha$ on a simple monotracial AF algebra $A$ with arbitrary invariants in Lemma \ref{lem:jones}. Note that if $\beta$ is an action of $\Gamma$ on a simple monotracial C$^*$-algebra $B$ with $N(\beta)=\{\iota\}$ and $N(\tilde{\beta})=\Gamma$, then we have $N(\alpha \otimes \beta)=\{\iota\}$ and $N(\tilde{\alpha}\otimes\tilde{\beta})=N(\tilde{\alpha})$ for any action of $\Gamma$ on a simple monotracial C$^*$-algebra $A$. Hence if we can construct a simple monotracial AF algebra $B$ and an action $\beta$ of $\Gamma$ on $B$ with $N(\beta)=\{\iota\}$ and $N(\tilde{\beta})=\Gamma$, then we obtain actions $\alpha\otimes\beta$ with arbitrary invariant $N(\tilde{\alpha}\otimes\tilde{\beta})$. 
Note that constructing such an action $\beta$ is equivalent to constructing a unitary representation $u$ of $\Gamma$ on $\pi_{\tau_B}(B)^{''}$ such that $u$ induces an action on $B$. We construct such representations in Lemma \ref{lem:outer}. (We construct unitary representations of cyclic groups for simplicity.) On the other hand, the inner invariant $i(\tilde{\alpha}\otimes\mathrm{Ad}(u))$ is not equal to $i(\tilde{\alpha})$ unless $|\tilde{\tau}_{B}(u_g)|=1$ for any $g\in \Gamma$. (Note that $|\tilde{\tau}_{B}(u_g)|=1$ for any $g\in \Gamma$ is equivalent to $\mathrm{Ad}(u_g)= \mathrm{id}_{\pi_{\tau_B}(B)^{''}}$ for any $g\in \Gamma$.) But if $\tilde{\tau}_{B}(u_g)$ is ``near'' $1$ for any $g\in \Gamma$, then $i(\tilde{\alpha}\otimes\mathrm{Ad}(u))$ is ``near'' $i(\tilde{\alpha})$. Hence applying Lemma \ref{lem:jones} to a ``small'' perturbation of the desired inner invariant (we need to assume that $m$ has full support here) and considering the tensor product type action, we obtain the conclusion. Of course, we need to construct unitary representations $u$ in Lemma \ref{lem:outer} such that $\tilde{\tau}_{B}(u_g)$ is ``near'' $1$ for any $g\in \Gamma$. The following lemma is based on \cite[Proposition 1.5.8 and Theorem 1.5.11]{Jones}. Recall that $\Phi_{v}$ is the homomorphism from $\mathbb{C}N$ to $A$ defined by $\Phi_v(\sum_{h\in N}c_hh)=\sum_{h\in N}c_hv_h$ where $v$ is a unitary representation of $N$ on $A$. \begin{lem}\label{lem:jones} Let $\Gamma$ be a finite abelian group, and let $N$ be a subgroup of $\Gamma$. Suppose that $m$ is a probability measure on the set $P_{N}$ of minimal projections in $\mathbb{C}N$. Then there exist a simple unital monotracial AF algebra $A$, an action $\alpha$ on $A$ of $\Gamma$ and a unitary representation $v$ of $N$ on $A$ such that $N(\alpha)=N(\tilde{\alpha})=N$, $\alpha_h=\mathrm{Ad}(v_h)$, $\alpha_g(v_h)=v_h$ for any $h\in N$ and $g\in \Gamma$ and $\tau_{A}(\Phi_{v}(p))=m(p)$ for any $p\in P_N$. 
\end{lem} \begin{proof} Define an action $\mu^{\Gamma}$ of $\Gamma$ on $M_{|\Gamma|^{\infty}} \cong \bigotimes _{n=1}^{\infty} M_{|\Gamma|}(\mathbb{C})$ by $\mu^{\Gamma}:= \bigotimes_{n=1}^{\infty} \mathrm{Ad}(\lambda)$ where $\lambda$ is the left regular representation of $\Gamma$. Then $\mu^{\Gamma}$ is the Rohlin action (see, for example, \cite[Example 3.2]{I1} and \cite[1.5]{Jones}). Let $B:=M_{|\Gamma|^{\infty}}\rtimes_{\mu^{\Gamma}|_N}N$. Then $B$ is a unital AF algebra because $\mu^{\Gamma}|_N$ is an action of product type. Since $N(\tilde{\mu}^{\Gamma}|_N)=\{\iota\}$, $B$ is simple and monotracial. Note that the unique tracial state on $B$ is given by $\tau_{M_{|\Gamma|^{\infty}}}\circ E_{\mu^{\Gamma}|_N}$. Define an action $\beta$ on $B$ of $\Gamma$ by $\beta_g(\sum_{h\in N}a_h\lambda_h)= \sum_{h\in N}\mu^{\Gamma}_g(a_h)\lambda_h$ for any $g\in \Gamma$. By the same argument as in the proof of \cite[Proposition 1.5.8]{Jones}, we see that $N(\tilde{\beta})=N(\beta)=N$. In particular, the map $\lambda$ given by $N \ni h \mapsto \lambda_h\in B$ is a unitary representation of $N$ on $B$ such that $\beta_g(\lambda_h)=\lambda_h$ for any $g\in\Gamma$ and $h\in N$. The Effros-Handelman-Shen theorem \cite{EHS} (or \cite[Theorem 2.2]{Ell-order}) implies that there exists a simple unital monotracial AF algebra $C$ such that $K_0(C)$ is the additive subgroup of $\mathbb{R}$ generated by $\mathbb{Q}$ and $\{m(p)\; |\; p\in P_{N}\}$, $K_0(C)_{+}=K_0(C)\cap \mathbb{R}_{+}$ and $[1]_0=1$. For any $p\in P_N$, there exists a projection $q_p$ in $C$ such that $\tau_{C}(q_p)=m(p)$, and put $e_{p}=\Phi_{\lambda}(p)\otimes q_p\in B\otimes C$. Then we have $e_{p}\in (B\otimes C)^{\beta\otimes\mathrm{id}_{C}} \cap \{\lambda_h\otimes 1\; |\; h\in N\}^{\prime}$ for any $p\in P_N$. Since there exists an element $\eta$ in $\hat{N}$ such that $\Phi_{\lambda}(p)=\frac{1}{|N|}\sum_{h\in N}\eta (h) \lambda_h$, $ \tau_{B} (\Phi_{\lambda}(p))= \frac{1}{|N|} $ for any $p\in P_N$. 
Hence we have $\tau_{B\otimes C}(e_p)= \frac{m(p)}{|N|}$ for any $p\in P_N$. Put $e:= \sum_{p\in P_N}e_p$, and let $A:= e(B\otimes C)e$. Then $A$ is a simple unital monotracial AF algebra. Since we have $e\in (B\otimes C)^{\beta\otimes\mathrm{id}_{C}}$, $\beta\otimes\mathrm{id}_{C}$ induces an action $\alpha$ on $A$ of $\Gamma$. By the same reason as in the proof of \cite[Theorem 1.5.11]{Jones}, we have $N(\tilde{\alpha})=N(\tilde{\beta}\otimes \mathrm{id}_{C})=N$. Define a unitary representation $v$ of $N$ on $A$ by $v_h:= (\lambda_h\otimes 1)e$ for any $h\in N$. It is easy to see that $\alpha_h=\mathrm{Ad}(v_h)$ and $\alpha_g(v_h)=v_h$ for any $h\in N$ and $g\in \Gamma$. This also implies that $N(\alpha)=N$. Since we have $\Phi_{v}(p)=(\Phi_{\lambda}(p)\otimes 1)e =e_{p}$, $$ \tau_{A}(\Phi_{v}(p))=\frac{\tau_{B\otimes C}(e_p)}{\tau_{B\otimes C}(e)} =\frac{\frac{m(p)}{|N|}}{\frac{1}{|N|}}=m(p) $$ for any $p\in P_N$. Therefore the proof is complete. \end{proof} We recall properties of characters of finite abelian groups. If $N$ is a finite abelian group, then we have $$ \sum_{h\in N}\eta (h) = \left\{\begin{array}{cl} |N| & \text{if}\quad \eta=\iota \in \hat{N} \\ 0 & \text{if}\quad \eta\in \hat{N}\setminus \{\iota\} \end{array} \right. \quad\text{and}\quad \sum_{\eta \in \hat{N}}\eta (h) = \left\{\begin{array}{cl} |N| & \text{if}\quad h=\iota \in N \\ 0 & \text{if}\quad h\in N\setminus \{\iota \}. \end{array} \right. $$ We denote by $\mathbb{Z}_k$ the cyclic group of order $k$. For any natural number $k$, let $\zeta_{k}:=e^{\frac{2\pi i}{k}}$. Note that $\hat{\mathbb{Z}}_k$ can be identified with $\{\zeta_k^l\; | \; 1 \leq l \leq k\}$ by the pairing $\mathbb{Z}_k \times \hat{\mathbb{Z}_k}\to \mathbb{T}$ given by $([l], \zeta_k)\mapsto \zeta_k^{l}$. Also, if $\zeta$ is a root of unity and not equal to $1$, then we have $\sum_{j=1}^{n}\zeta^j=0$. \begin{lem}\label{lem:outer} Let $k$ be a natural number with $k\geq 2$ and $r$ a real number with $0<r<1$. 
Then there exist a simple unital monotracial AF algebra $A$ and a unitary element $V$ in $\pi_{\tau_{A}}(A)^{''}$ such that $\mathrm{Ad}(V)$ induces an outer action of $\mathbb{Z}_k$ on $A$, $V^{k}=1$ and $\tilde{\tau}_{A}(V^{l})=r$ for any $1\leq l \leq k-1$. \end{lem} \begin{proof} By the Effros-Handelman-Shen theorem \cite{EHS} (or \cite[Theorem 2.2]{Ell-order}), there exists a simple unital monotracial AF algebra $B$ such that $$ K_0(B)=\left\{a_0+\sum_{n=1}^{m}a_nr^{\frac{1}{2^{n}}}\in\mathbb{R}\; |\; m\in\mathbb{N}, a_0, a_1,..., a_m\in\mathbb{Q}\right\}, $$ $K_0(B)_{+}=K_0(B)\cap \mathbb{R}_{+}$ and $[1]_0=1$. For any $n\in\mathbb{N}$, there exist mutually orthogonal projections $p_{1,n}$,..., $p_{k, n}$ in $B$ such that $\sum_{j=1}^{k}p_{j, n}=1$, $\tau_{B}(p_{k, n})=\frac{1+(k-1)r^{\frac{1}{2^n}}}{k}$ and $\tau_{B}(p_{j, n})=\frac{1-r^{\frac{1}{2^n}}}{k}$ for any $1\leq j \leq k-1$ because we have $\frac{1+(k-1)r^{\frac{1}{2^n}}}{k}\in K_0(B)_{+}\cap (\frac{1}{k},1)$, $\frac{1-r^{\frac{1}{2^n}}}{k}\in K_0(B)_{+}\cap (0,\frac{1}{k})$ and $\frac{1+(k-1)r^{\frac{1}{2^n}}}{k}+(k-1)\times \frac{1-r^{\frac{1}{2^n}}}{k}=1$. For any $n\in\mathbb{N}$, put $$ u_n:= \sum_{j=1}^{k}\zeta_{k}^{j}p_{j,n}\in B. $$ Then $u_n$ is a unitary element such that $u_n^{k}=1$ and we have \begin{align*} \tau_{B}(u_n^l) &= \tau_{B}\left(\sum_{j=1}^{k}\zeta_{k}^{lj}p_{j,n}\right) = \sum_{j=1}^{k}\zeta_k^{lj}\tau_{B}(p_{j,n}) = \sum_{j=1}^{k-1}\zeta_k^{lj} \times \frac{1-r^{\frac{1}{2^n}}}{k}+ \frac{1+(k-1)r^{\frac{1}{2^n}}}{k} \\ &=-\frac{1-r^{\frac{1}{2^n}}}{k} + \frac{1+(k-1)r^{\frac{1}{2^n}}}{k}=r^{\frac{1}{2^n}} \end{align*} for any $1\leq l \leq k-1$. Note that $\zeta_k^{l}$ is a root of unity and not equal to $1$. Let $A:=\bigotimes_{n=1}^{\infty} B$. Then $A$ is a simple unital monotracial AF algebra. Note that the unique tracial state $\tau_{A}$ is a product of traces $\tau_{B}$ in each component. 
For any $n\in\mathbb{N}$, put $$ w_n:= u_1\otimes u_2\otimes \cdots \otimes u_n\otimes 1 \otimes \cdots \in \bigotimes_{n=1}^{\infty} B=A. $$ Then $w_n$ is a unitary element such that $w_n^k=1$. We shall show that $\{w_n\}_{n\in\mathbb{N}}$ is a Cauchy sequence with respect to the 2-norm. Let $\varepsilon>0$. Take a natural number $N$ such that $\sum_{j=n+1}^{m}\frac{1}{2^{j}}<\log_{r}{(1-\varepsilon)}$ for any $m> n\geq N$. Then we have \begin{align*} \tau_{A}((w_{m}-w_{n})^*(w_{m}-w_{n})) &=2-2\mathrm{Re} \tau_{A}(1\otimes \cdots \otimes 1\otimes u_{n+1}\otimes \cdots \otimes u_{m} \otimes 1\cdots) \\ &=2- 2\prod_{j=n+1}^{m}r^{\frac{1}{2^j}} =2- 2r^{\sum_{j=n+1}^{m}\frac{1}{2^j}}< 2\varepsilon \end{align*} for any $m> n\geq N$. Hence there exists a unitary element $V$ in $\pi_{\tau_{A}}(A)^{''}$ such that $\{w_n\}_{n\in\mathbb{N}}$ converges to $V$ in the strong-$^*$ topology. We have $V^{k}=1$ and \begin{align*} \tilde{\tau}_{A}(V^l)= \lim_{n\to\infty}\tau_{A}(w_n^l)=\lim_{n\to \infty}\tau_A(u_1^l)\tau_{A}(u_2^l)\cdots \tau_{A}(u_n^l)=\prod_{n=1}^{\infty}r^{\frac{1}{2^n}}=r \end{align*} for any $1\leq l\leq k-1$. It is easy to see that $\mathrm{Ad}(V)$ induces an action $\alpha$ of $\mathbb{Z}_k$ on $A$. Note that we have $$ \alpha_{[l]} (x_1\otimes x_2\otimes \cdots \otimes x_n\otimes 1 \otimes \cdots) =u_1^{l}x_1u_1^{*l}\otimes u_2^{l}x_2u_2^{*l}\otimes \cdots \otimes u_n^{l}x_nu_n^{*l}\otimes 1 \otimes \cdots $$ for any $1\leq l \leq k$. We shall show that $\alpha$ is outer. Let $l$ be a natural number with $1\leq l \leq k-1$. Since we have $\tau_{B}(p_{k,n})>\tau_{B}(p_{1,n})$ for any $n\in\mathbb{N}$, there exists a partial isometry $s_n$ in $B$ such that $s_ns_n^*=p_{1,n}$ and $s_n^*s_n \leq p_{k,n}$. Put $$ x_n:= \overbrace{1\otimes \cdots \otimes 1}^{n-1}\otimes s_n \otimes 1\cdots $$ Then $(x_n)_n$ is a central sequence in $A$ of norm one. Since we have $u_n^{l}s_nu_n^{*l}=\zeta_k^{l}s_n$, $\alpha_{[l]}(x_n)=\zeta_k^{l}x_n$ for any $n\in\mathbb{N}$. 
Therefore $\alpha_{[l]}$ induces a non-trivial automorphism of $F(A)$. This implies $\alpha_{[l]}$ is not an inner automorphism of $A$. Consequently, $\alpha$ is outer. \end{proof} The following theorem is the main result in this section. \begin{thm} Let $\Gamma$ be a finite abelian group, and let $N$ be a subgroup of $\Gamma$. Suppose that $m$ is a probability measure with full support on the set $P_{N}$ of minimal projections in $\mathbb{C}N$. Then there exist a simple unital monotracial AF algebra $A_{(\Gamma, N, m)}$ and an outer action $\alpha^{(\Gamma, N, m)}$ on $A_{(\Gamma, N, m)}$ such that the characteristic invariant of $\tilde{\alpha}^{(\Gamma, N, m)}$ is trivial, $N(\tilde{\alpha}^{(\Gamma, N, m)})=N$ and $i(\tilde{\alpha}^{(\Gamma, N, m)})=[m]$. \end{thm} \begin{proof} Since $\Gamma$ is a finite abelian group, we may assume that there exist a natural number $n$ and prime powers $k_1$,..., $k_{n}$ such that $\Gamma=\bigoplus_{j=1}^{n} \mathbb{Z}_{k_j}$. For any $0\leq i \leq n$, let $$ N_{i}:=\left\{([l_j])_{j=1}^n\in N\subseteq \bigoplus_{j=1}^{n} \mathbb{Z}_{k_j}\; |\; i=|\{j\in\{1,...,n\}\; |\; [l_j]\neq [0]\}| \right\}. $$ Then we have $N=\bigsqcup_{i=0}^n N_{i}$ (which is a disjoint union) and $N_i^{-1}=N_{i}$ for any $0\leq i \leq n$. We identify $P_{N}$ with $\left\{p_{\eta}=\frac{1}{|N|}\sum_{h\in N} \eta (h)h\; |\; \eta\in \hat{N}\right\}$. Since $m$ has full support, there exists a real number $r$ with $0<r<1$ such that $$ \frac{1}{|N|}\sum_{j=0}^{n}\frac{1}{r^j}\sum_{h\in N_j}\sum_{\eta^{\prime}\in \hat{N}}\eta(h) \overline{\eta^{\prime}(h)}m(p_{\eta^{\prime}})\geq 0 $$ for any $\eta\in \hat{N}$. 
Indeed, we have \begin{align*} \lim_{r\to 1-0}\frac{1}{|N|}\sum_{j=0}^{n}\frac{1}{r^j}\sum_{h\in N_j}\sum_{\eta^{\prime}\in \hat{N}}\eta(h)\overline{\eta^{\prime}(h)}m(p_{\eta^{\prime}}) &= \frac{1}{|N|}\sum_{h\in N}\sum_{\eta^{\prime}\in \hat{N}}\eta(h)\overline{\eta^{\prime}(h)}m(p_{\eta^{\prime}}) \\ &=\frac{1}{|N|}\sum_{\eta^{\prime}\in \hat{N}}m(p_{\eta^{\prime}})\sum_{h\in N}\eta\eta^{\prime-1}(h) \\ &= \frac{1}{|N|}\times m(p_{\eta})\times |N| =m(p_{\eta})>0 \end{align*} and \begin{align*} \overline{\frac{1}{|N|}\sum_{j=0}^{n}\frac{1}{r^j}\sum_{h\in N_j}\sum_{\eta^{\prime}\in \hat{N}}\eta(h) \overline{\eta^{\prime}(h)}m(p_{\eta^{\prime}})} &=\frac{1}{|N|}\sum_{j=0}^{n}\frac{1}{r^j}\sum_{h\in N_j}\sum_{\eta^{\prime}\in \hat{N}} \overline{\eta(h)}\eta^{\prime}(h)m(p_{\eta^{\prime}}) \\ &= \frac{1}{|N|}\sum_{j=0}^{n}\frac{1}{r^j}\sum_{h\in N_j}\sum_{\eta^{\prime}\in \hat{N}} \eta(h^{-1})\overline{\eta^{\prime}(h^{-1})}m(p_{\eta^{\prime}}) \\ &= \frac{1}{|N|}\sum_{j=0}^{n}\frac{1}{r^j}\sum_{h\in N_j}\sum_{\eta^{\prime}\in \hat{N}}\eta(h) \overline{\eta^{\prime}(h)}m(p_{\eta^{\prime}}) \end{align*} for any $\eta\in \hat{N}$. Hence a sufficiently large $r<1$ satisfies the property above. Define a map $m^{\prime}$ from $P_{N}$ to $\mathbb{R}_{+}$ by $$ m^{\prime}(p_{\eta}) =\frac{1}{|N|}\sum_{j=0}^{n}\frac{1}{r^j}\sum_{h\in N_j}\sum_{\eta^{\prime}\in \hat{N}}\eta(h) \overline{\eta^{\prime}(h)}m(p_{\eta^{\prime}}) $$ for any $\eta\in \hat{N}$. 
Since we have \begin{align*} \sum_{\eta\in \hat{N}} m^{\prime}(p_{\eta}) &=\frac{1}{|N|}\sum_{\eta\in \hat{N}}\sum_{j=0}^{n}\frac{1}{r^j} \sum_{h\in N_j}\sum_{\eta^{\prime}\in \hat{N}}\eta(h) \overline{\eta^{\prime}(h)}m(p_{\eta^{\prime}}) \\ &=\frac{1}{|N|}\sum_{j=0}^{n}\frac{1}{r^j} \sum_{h\in N_j}\sum_{\eta^{\prime}\in \hat{N}} \overline{\eta^{\prime}(h)}m(p_{\eta^{\prime}})\sum_{\eta\in \hat{N}}\eta(h) \\ &= \frac{1}{|N|}\sum_{\eta^{\prime}\in \hat{N}} \overline{\eta^{\prime}(\iota)}m(p_{\eta^{\prime}})\times |N|= \sum_{\eta^{\prime}\in \hat{N}}m(p_{\eta^{\prime}}) =1, \end{align*} $m^{\prime}$ is a probability measure on $P_{N}$. Lemma \ref{lem:jones} implies that there exist a simple unital monotracial AF algebra $B_0$, an action $\beta^{(0)}$ on $B_0$ of $\Gamma$ and a unitary representation $u$ of $N$ on $B_0$ such that $N(\beta^{(0)})=N(\tilde{\beta}^{(0)})=N$, $\beta^{(0)}_h=\mathrm{Ad}(u_h)$, $\beta^{(0)}_g(u_h)=u_h$ for any $h\in N$ and $g\in \Gamma$ and $\tau_{B_0}(\Phi_{u}(p_{\eta}))=m^{\prime}(p_{\eta})$ for any $\eta\in \hat{N}$. Note that we have \begin{align*} \sum_{\eta\in\hat{N}}\overline{\eta(h)}\Phi_{u}\left(p_{\eta}\right) = \frac{1}{|N|}\sum_{\eta\in\hat{N}}\sum_{h^{\prime}\in N} \overline{\eta(h)} \eta (h^{\prime})u_{h^{\prime}} = \frac{1}{|N|}\sum_{h^{\prime}\in N}u_{h^{\prime}}\sum_{\eta\in\hat{N}}\eta(h^{-1}h^{\prime})=u_{h} \end{align*} for any $h\in N$. 
Hence if $h$ is an element in $N_{i}$ for some $0\leq i \leq n$, then \begin{align*} \tau_{B_0}(u_{h})&= \tau_{B_0}\left(\sum_{\eta\in\hat{N}}\overline{\eta(h)}\Phi_{u}\left(p_{\eta}\right)\right) =\sum_{\eta\in \hat{N}}\overline{\eta(h)}m^{\prime}(p_{\eta}) \\ &= \frac{1}{|N|}\sum_{\eta\in\hat{N}} \sum_{j=0}^{n}\frac{1}{r^j}\sum_{h^{\prime}\in N_j}\sum_{\eta^{\prime}\in \hat{N}} \overline{\eta(h)}\eta(h^{\prime})\overline{\eta^{\prime}(h^{\prime})}m(p_{\eta^{\prime}}) \\ &= \frac{1}{|N|}\sum_{j=0}^{n}\frac{1}{r^j}\sum_{h^{\prime}\in N_j}\sum_{\eta^{\prime}\in \hat{N}} \overline{\eta^{\prime}(h^{\prime})}m(p_{\eta^{\prime}}) \sum_{\eta\in\hat{N}}\eta(h^{-1}h^{\prime}) \\ &= \frac{1}{|N|} \times \frac{1}{r^i}\sum_{\eta^{\prime}\in \hat{N}} \overline{\eta^{\prime}(h)}m(p_{\eta^{\prime}})\times |N| =\frac{1}{r^i}\sum_{\eta^{\prime}\in \hat{N}}\overline{\eta^{\prime}(h)}m(p_{\eta^{\prime}}). \end{align*} For any natural number $j$ with $1\leq j\leq n$, there exist a simple unital monotracial AF algebra $B_j$ and a unitary element $V_j$ in $\pi_{\tau_{B_j}}(B_j)^{''}$ such that $\mathrm{Ad}(V_j)$ induces an outer action of $\mathbb{Z}_{k_j}$ on $B_j$, $V_{j}^{k_j}=1$ and $\tilde{\tau}_{B_j}(V_j^{l})=r$ for any $1\leq l \leq k_j-1$ by Lemma \ref{lem:outer}. Let $\beta^{(j)}$ be the induced action by $\mathrm{Ad}(V_j)$ on $B_j$ of $\mathbb{Z}_{k_j}$. Put $$ A_{(\Gamma, N, m)}:= \bigotimes_{j=0}^n B_j $$ and define an action $\alpha^{(\Gamma, N, m)}$ on $A_{(\Gamma, N, m)}$ of $\Gamma$ by $$ \alpha^{(\Gamma, N, m)}_{g}:=\beta^{(0)}_{g}\otimes \bigotimes_{j=1}^{n} \beta^{(j)}_{[l_j]} $$ for any $g=([l_j])_{j=1}^{n}\in \Gamma=\bigoplus_{j=1}^{n} \mathbb{Z}_{k_j}$. Then $A_{(\Gamma, N, m)}$ is a simple unital monotracial AF algebra. We denote by $\tau$ the unique tracial state on $A_{(\Gamma, N, m)}$. 
It can be easily checked that $N(\alpha^{(\Gamma, N, m)})=\{\iota\}$ and $N(\tilde{\alpha}^{(\Gamma, N, m)})=N$ since we have $N(\beta^{(j)})=\{\iota \}$ for any $1\leq j\leq n$ and $N(\tilde{\beta}^{(0)})=N$. Define a unitary representation $v$ of $N$ on $\pi_{\tau}( A_{(\Gamma, N, m)})^{''}$ by $$ v_{h}:=\pi_{\tau_{B_0}}(u_{h})\otimes \bigotimes_{j=1}^nV_j^{l_j} \in \pi_{\tau_{B_0}}(B_0)^{''}\otimes \bigotimes_{j=1}^n \pi_{\tau_{B_j}}(B_j)^{''} \cong \pi_{\tau}( A_{(\Gamma, N, m)})^{''} $$ for any $h=([l_j])_{j=1}^n\in N$. Then $\tilde{\alpha}^{(\Gamma, N, m)}_{h}=\mathrm{Ad}(v_{h})$ and $\tilde{\alpha}^{(\Gamma, N, m)}_{g}(v_h)=v_h$ for any $h\in N$ and $g\in \Gamma$. This implies that the characteristic invariant of $\tilde{\alpha}^{(\Gamma, N, m)}$ is trivial. Since we have \begin{align*} \tilde{\tau} (\Phi_{v}(p_{\eta})) &=\frac{1}{|N|}\sum_{h\in N}\eta(h) \tilde{\tau} (v_h) \\ & =\frac{1}{|N|}\sum_{h=([l_j])_{j=1}^n\in N}\eta(h) \tau_{B_0} (u_{h}) \times \prod_{j=1}^{n}\tilde{\tau}_{B_j}(V_j^{l_j}) \\ &= \frac{1}{|N|}\sum_{j=0}^n\sum_{h\in N_{j}}\eta(h)\times \frac{1}{r^j}\sum_{\eta^{\prime}\in \hat{N}}\overline{\eta^{\prime}(h)}m(p_{\eta^{\prime}})\times r^{j} \\ &= \frac{1}{|N|}\sum_{h\in N}\sum_{\eta^{\prime}\in \hat{N}}\eta\eta^{\prime-1}(h)m(p_{\eta^{\prime}}) \\ &= \frac{1}{|N|}\sum_{\eta^{\prime}\in \hat{N}}m(p_{\eta^{\prime}})\sum_{h\in N}\eta\eta^{\prime-1}(h)\\ &=\frac{1}{|N|}\times m(p_{\eta})\times |N| =m(p_{\eta}), \end{align*} $i(\tilde{\alpha}^{(\Gamma, N, m)})=[m]$. Consequently, the proof is complete. \end{proof} The following corollary is an immediate consequence of Proposition \ref{pro:realized-invariant}, Corollary \ref{main:cor} and the theorem above. \begin{cor} Let $A$ be a simple separable nuclear monotracial C$^*$-algebra, and let $\alpha$ be an outer action of a finite abelian group $\Gamma$ on $A$. Assume that the characteristic invariant of $\tilde{\alpha}$ is trivial. 
Then there exists a probability measure $m$ with full support on $P_{N(\tilde{\alpha})}$ such that $\alpha\otimes\mathrm{id}_{\mathcal{W}}$ on $A\otimes \mathcal{W}$ is conjugate to $\alpha^{(\Gamma, N(\tilde{\alpha}), m)}\otimes \mathrm{id}_{\mathcal{W}}$ on $A_{(\Gamma, N(\tilde{\alpha}), m)}\otimes \mathcal{W}$. \end{cor} \section*{Acknowledgments} The author would like to thank Eusebio Gardella for pointing out a misleading terminology. \end{document}
\begin{document} \title[Forests and connected spanning subgraphs]{On the number of forests and connected spanning subgraphs} \author[M. Borb\'enyi]{M\'arton Borb\'enyi} \address{ELTE: E\"{o}tv\"{o}s Lor\'{a}nd University \\ H-1117 Budapest \\ P\'{a}zm\'{a}ny P\'{e}ter s\'{e}t\'{a}ny 1/C} \email{[email protected]} \author[P. Csikv\'ari]{P\'{e}ter Csikv\'{a}ri} \address{Alfr\'ed R\'enyi Institute of Mathematics, H-1053 Budapest Re\'altanoda utca 13/15 \and ELTE: E\"{o}tv\"{o}s Lor\'{a}nd University \\ Mathematics Institute, Department of Computer Science \\ H-1117 Budapest \\ P\'{a}zm\'{a}ny P\'{e}ter s\'{e}t\'{a}ny 1/C} \email{[email protected]} \author[H. Luo]{Haoran Luo} \address{Department of Mathematics, University of Illinois at Urbana-Champaign, Urbana, Illinois 61801, USA} \email{[email protected]} \thanks{The first author was partially supported by the EFOP program (EFOP-3.6.3-VEKOP-16-2017-00002) and the New National Excellence Program (\'UNKP) when the project started. The second author is supported by the Counting in Sparse Graphs Lend\"ulet Research Group. When the project started he was also supported by the Marie Sk\l{}odowska-Curie Individual Fellowship grant no. 747430. } \subjclass[2010]{Primary: 05C30. Secondary: 05C31, 05C70} \keywords{forests, connected spanning subgraphs, acyclic orientations} \begin{abstract} Let $F(G)$ be the number of forests of a graph $G$. Similarly let $C(G)$ be the number of connected spanning subgraphs of a connected graph $G$. We bound $F(G)$ and $C(G)$ for regular graphs and for graphs with a fixed average degree. Among many other things we study $f_d=\sup_{G\in \mathcal{G}_d}F(G)^{1/v(G)}$, where $\mathcal{G}_d$ is the family of $d$-regular graphs, and $v(G)$ denotes the number of vertices of a graph $G$. We show that $f_3=2^{3/2}$, and if $(G_n)_n$ is a sequence of $3$-regular graphs with the length of the shortest cycle tending to infinity, then $\lim_{n\to \infty}F(G_n)^{1/v(G_n)}=2^{3/2}$. 
We also improve on the previous best bounds on $f_d$ for $4\leq d\leq 9$. \end{abstract} \maketitle \section{Introduction} For a graph $G=(V,E)$ let $T_G(x,y)$ denote its Tutte polynomial, that is, $$T_G(x,y)=\sum_{A\subseteq E}(x-1)^{k(A)-k(E)}(y-1)^{k(A)+|A|-v(G)},$$ where $k(A)$ denotes the number of connected components of the graph $(V,A)$, and $v(G)$ denotes the number of vertices of the graph $G$. It is well-known that special evaluations of the Tutte polynomial have various combinatorial meanings. For instance, $T_G(1,1)$ counts the number of spanning trees for a connected graph $G$. Similarly, $T_G(2,1)$ enumerates the number of forests (acyclic edge subsets), and for a connected graph $G$ the evaluation $T_G(1,2)$ is equal to the number of connected spanning subgraphs, and $T_G(2,0)$ is the number of acyclic orientations of $G$. In this paper, we use the notation $F(G)=T_G(2,1)$ for the number of forests, $C(G)=T_G(1,2)$ for the number of connected spanning subgraphs, and $a(G)=T_G(2,0)$ for the number of acyclic orientations. The scope of this paper is to give various upper bounds for $F(G)$ and $C(G)$ in terms of the average degree. A special emphasis is put on the case when $G$ is a regular graph of degree $d$. \subsection{Number of forests} First, we collect our results for the number of forests. The following statement is well-known and serves as a motivation for many of our results. For the sake of completeness, we will give a proof of it. \begin{Prop}[\cite{Thom}] \label{product-forest} Let $G$ be a graph, and let $d_v$ be the degree of a vertex $v$. Then $$F(G)\leq \prod_{v\in V(G)}(d_v+1).$$ \end{Prop} When one applies Proposition~\ref{product-forest} to regular graphs of degree $3$ and $4$, it turns out to be rather poor since the trivial inequality in terms of the number of edges $e(G)$, that is, $F(G)\leq 2^{e(G)}$ gives stronger results. 
Indeed, for a $3$-regular graph this trivial inequality gives $F(G)^{1/v(G)}\leq 2\sqrt{2}$, while for a $4$-regular graph it gives $F(G)^{1/v(G)}\leq 4$. Surprisingly, this inequality cannot be improved for $3$-regular graphs as the following result shows. Let $g(G)$ be the length of the shortest cycle, which is called the girth of the graph $G$. \begin{Th} \label{3-regular-forest} Let $(G_n)_n$ be a sequence of $3$-regular graphs with girth $g(G_n)\to \infty$. Then $$\lim_{n\to \infty}a(G_n)^{1/v(G_n)}=2\sqrt{2},$$ and $$\lim_{n\to \infty}F(G_n)^{1/v(G_n)}=2\sqrt{2}.$$ In particular, $f_3=2\sqrt{2}$. \end{Th} Note that the large girth requirement is necessary in the following sense. Suppose that for a fixed $k$ and $\varepsilon$ the graph $G$ contains at least $\varepsilon v(G)$ edge-disjoint cycles of length at most $k$. Then $$F(G)^{1/v(G)}\leq c(k,\varepsilon)2^{3/2},$$ where $c(k,\varepsilon)<1$. Indeed, if $C_1,C_2,\dots ,C_r$ are edge-disjoint cycles of length at most $k$, then $$F(G)\leq \prod_{i=1}^r(2^{|C_i|}-1)\cdot 2^{e(G)-\sum_{i=1}^r|C_i|}=\prod_{i=1}^r\frac{2^{|C_i|}-1}{2^{|C_i|} }\cdot 2^{e(G)}\leq \left(\frac{2^k-1}{2^k}\right)^{\varepsilon v(G)}\cdot 2^{e(G)}.$$ Hence $$F(G)^{1/v(G)}\leq \left(\frac{2^k-1}{2^k}\right)^{\varepsilon}\cdot 2^{3/2}.$$ We remark that the ratio $\frac{F(G)}{2^{e(G)}}$ can be rather large for a $3$-regular graph. For instance, for the Tutte-Coxeter graph this ratio is roughly $0.728$. Note that this is a $3$-regular graph on $30$ vertices with girth $8$. It seems that for cages, that is, for regular graphs that have minimal size for a given degree and girth, this ratio can be quite large. This motivates the following question. \begin{?} Let $\mathcal{G}_3$ be the family of $3$-regular graphs. Is it true that $$\sup_{G\in \mathcal{G}_3}\frac{F(G)}{2^{e(G)}}=1?$$ \end{?} Before we turn our attention to $4$-regular graphs, let us give one more general upper bound for the number of forests. 
Let us introduce the entropy function $$H(x):=x\ln \left(\frac{1}{x}\right)+(1-x)\ln \left(\frac{1}{1-x}\right)$$ with the usual convention $H(0)=H(1)=0$. In the proofs, we will often use the following inequality. If $n\leq m/2$, then $$\sum_{k=0}^n\binom{m}{k}\leq \exp \left(mH\left(\frac{n}{m}\right)\right).$$ The proof of this inequality can be found in \cite{AlSp}. \begin{Prop} \label{average-forest} Let $G$ be a graph with average degree $\overline{d}$. If $\overline{d}\geq 4$, then $$F(G)^{1/v(G)}\leq \exp\left(\frac{\overline{d}}{2}H\left(2/\overline{d}\right)\right).$$ \end{Prop} This simple inequality is based on the rather trivial observation that a forest can have at most $v(G)-1$ edges, and so $$F(G)\leq \sum_{k=0}^{v(G)-1}\binom{e(G)}{k}.$$ The problem with this bound is that if the average degree is exactly $4$, this is not much different from the trivial upper bound $2^{e(G)}=4^{v(G)}$. Already Merino and Welsh \cite{MeWe} noted that they found rather challenging to improve on this trivial bound even for grids. Nevertheless, $4^{v(G)}$ is definitely not the best answer for $4$-regular graphs as the following theorem shows. \begin{Th} \label{4-regular-forest} Let $\mathcal{G}_4$ be the family of $4$-regular graphs. Then $$\sup_{G\in \mathcal{G}_4}F(G)^{1/v(G)}<3.994.$$ \end{Th} We have seen that for $3$-regular graphs the quantity $F(G)^{1/v(G)}$ is asymptotically maximized by large girth graphs. A similar theorem was proved for the number of spanning trees by McKay \cite{McKay1} for regular graphs of degree $d$ for arbitrary $d$. Here we show that the same holds for $F(G)$ for any $d$ assuming a well-known conjecture about a certain negative correlation. \begin{Conj}[\cite{GW,Pem}]\label{correlation-conjecture} Let $G$ be a graph and let $\textbf{F}$ be a random forest chosen uniformly from all the forests of $G$. 
Let $e,f\in E(G)$, then $$\mathbb{P}(e,f\in \textbf{F})\leq \mathbb{P}(e\in \textbf{F})\mathbb{P}(f\in \textbf{F}).$$ \end{Conj} Assuming Conjecture~\ref{correlation-conjecture} we can prove a result on forests of $2$-covers which then implies our claim about the large girth graphs. Recall that a $k$-cover (or $k$-lift) $H$ of a graph $G$ is defined as follows. The vertex set of $H$ is $V(H)=V(G)\times \{0,1,\dots, k-1\}$, and if $(u,v)\in E(G)$, then we choose a perfect matching between the vertex set $L_u=\{(u,i)\ |\ 0\leq i\leq k-1\}$ and $L_v=\{(v,i)\ |\ 0\leq i\leq k-1\}$. If $(u,v)\notin E(G)$, then there are no edges between $L_u$ and $L_v$. Figure~\ref{2-lift-picture} depicts a 2-lift. \begin{figure} \caption{A $2$-lift.} \label{2-lift-picture} \end{figure} When $k=2$ one can encode the $2$-lift $H$ by putting signs on the edges of the graph $G$: the $+$ sign means that we use the matching $((u,0),(v,0)),((u,1),(v,1))$ at the edge $(u,v)$, the $-$ sign means that we use the matching $((u,0),(v,1)),((u,1),(v,0))$ at the edge $(u,v)$. For instance, if we put $+$ signs to every edge, then we simply get the disjoint union $G\cup G$, and if we put $-$ signs everywhere, then the obtained $2$-cover $H$ is simply the tensor product $G\times K_2$. Observe that if $G$ is bipartite, then $G\cup G=G\times K_2$, but other $2$-covers might differ from $G\cup G$. \begin{Th} \label{forest-cover} Let $G$ be a graph, and let $H$ be a $2$-cover of $G$. If Conjecture~\ref{correlation-conjecture} is true, then we have $$F(G\cup G)\leq F(H).$$ In other words, $F(G)^{1/v(G)}\leq F(H)^{1/v(H)}$. \end{Th} There is a nice property of covers that is related to the girth. For every graph $G$, there is a sequence of graphs $(G_n)_n$ such that $G_0=G$, $G_k$ is a $2$-cover of $G_{k-1}$, and $g(G_k)\to \infty$. This is an observation due to Linial \cite{Lin}, his proof is also given in \cite{Csikv}. 
This observation and Theorem~\ref{forest-cover} (assuming Conjecture~\ref{correlation-conjecture}) together imply the following statement. If one can prove that for any sequence of $d$-regular graphs $(G_n)_n$ with $g(G_n)\to \infty$, the limit $\lim_{n\to \infty}F(G_n)^{1/v(G_n)}$ always exists, and its value is (always) $s_d$, then $\sup_{G\in \mathcal{G}_d}F(G)^{1/v(G)}=s_d$. Large girth $d$-regular graphs locally look like the infinite $d$-regular tree. So the above discussion suggests that it is natural to compare finite graphs with the infinite $d$-regular tree. At first sight, it might not be clear how to do it. Nevertheless, there is already such an argument in the literature. Kahale and Schulman \cite{KaSc} gave an upper bound on the number of acyclic orientations $a(G)$ in this spirit. Note that $a(G)=T_G(2,0)\leq T_G(2,1)=F(G)$. Their proof actually works for $F(G)$ too, and for $d\geq 6$ this upper bound is better than any of these three bounds: the trivial bound $2^{d/2}$ provided by $F(G)\leq 2^{e(G)}$, the bound $d+1$ provided by Proposition~\ref{product-forest}, and the bound $\exp\left(\frac{d}{2}H\left(\frac{2}{d}\right)\right)$ provided by Proposition~\ref{average-forest}. \begin{Th}[Kahale and Schulman \cite{KaSc}] \label{KS-bound} Let $G$ be a $d$-regular graph. Then $$F(G)^{1/v(G)}\leq \frac{d+1}{\eta}\left(\frac{d-1}{d-\eta}\right)^{(d-2)/2},$$ where $$\eta=\frac{(d+1)^2-(d+1)(d^2-2d+5)^{1/2}}{2(d-1)}.$$ \end{Th} \noindent Theorem~\ref{KS-bound} gives the bound $$F(G)^{1/v(G)}\leq d+\frac{1}{2}+\frac{1}{8d}+\frac{13}{48d^2}+O\left(\frac{1}{d^3}\right).$$ In this paper, we will review the proof of Theorem~\ref{KS-bound} and show how to improve on it for certain $d$. The proof is actually a combination of the proof of Theorem~\ref{KS-bound} and Proposition~\ref{average-forest}. In particular, we will prove the following statement. \begin{Th} \label{regular upper bounds} Let $G$ be a $d$-regular graph, where $d\in \{5,6,7,8,9\}$. 
Then $F(G)^{1/v(G)}\leq C_d$, where $C_d$ is a constant strictly better than the one given in Theorem~\ref{KS-bound} and is given in Table~\ref{table forest}. \end{Th} \begin{table}[htbp] \centering \caption{Bounds on the number of forests for small $d$} \label{table forest} \begin{tabular}{|c|c|c|c|c|} \hline $d$ & new bound $C_d$ & Thm.~\ref{KS-bound} & Prop.~\ref{product-forest} & Prop.~\ref{average-forest}\\ \hline $5$ & $5.1965$ & $5.5362$ & 6 & 5.3792\\ \hline $6$ & $6.3367$ & $6.5287$ & 7 & 6.7500 \\ \hline $7$ & $7.4290$ & $7.5236$ & 8 & 8.1169 \\ \hline $8$ & $8.4843$ & $8.5201$ & 9 & 9.4815 \\ \hline $9$ & $9.5116$ & $9.5174$ & 10 & 10.8447 \\ \hline \end{tabular} \end{table} \begin{Rem} One might wish to compare these results with existing bounds on finite and infinite sections of Archimedean lattices, cf. \cite{CMNN,ChSh1,ChSh2,MeWe,Mani}. For these specific graphs one may give more accurate bounds. \end{Rem} \subsection{Number of connected spanning subgraphs} In this section, we collect the results on the number of connected spanning subgraphs. Again the trivial upper bound is $C(G)\leq 2^{e(G)}$ which gives $C(G)^{1/v(G)}\leq 2^{d/2}$ for a graph with average degree $d$. This time this inequality can never be tight, not even for $3$-regular graphs. \begin{Th} \label{regular-connected} Let $\mathcal{G}_d$ be the set of $d$-regular graphs. Then $$\sup_{G\in \mathcal{G}_d}C(G)^{1/v(G)}<2^{d/2}\left(1-\frac{1}{2^d}\right)\exp\left(\frac{d}{2^{d}(2^d-1)}\right).$$ \end{Th} We will again prove another upper bound for graphs with small average degree. \begin{Th} \label{average-connected} Let $G$ be a graph with average degree $\overline{d}$. 
If $2< \overline{d}\leq 4$, then $$C(G)\leq \frac{2}{\overline{d}-2}\exp\left(v(G)\cdot \frac{\overline{d}}{2}H\left(2/{\overline{d}}\right)\right).$$ \end{Th} \subsection{This paper is organized as follows.} Each section of the paper contains a proof of a theorem or proposition that is stated in the introduction. The proofs are in the same order as the results appear in the introduction. \section{Proof of Proposition~\ref{product-forest}} In this section, we give two proofs of Proposition~\ref{product-forest}. In the first proof we will use the recursion $$F(G)=F(G-e)+F(G/e),$$ where $G-e$ is the graph obtained from $G$ by deleting the edge $e$, and $G/e$ is the graph obtained from $G$ by contracting the edge $e$. This latter operation means that we replace the end vertices $u,v$ of $e$ by a new vertex $w$, and for a vertex $s\neq u,v$ we add as many edges between $s$ and $w$ as it goes between $s$ and the set $\{u,v\}$ in the graph $G$, and if there were $k$ edges going between $u$ and $v$ in $G$, then we add $k-1$ loops to the vertex $w$ in $G/e$. The above recursion simply counts the number of forests based on the property that a forest contains the edge $e$ or not. Note that the contraction may produce multiple edges so we necessarily work in the class of graphs with multiple edges. A forest cannot contain a loop so we can even delete them from the contraction. \begin{proof}[Proof of Proposition~\ref{product-forest}] This can easily be proved by induction using the identity \\ $F(G)=F(G-e)+F(G/e)$. If $e=(u,v)$, then \begin{align*} F(G)=F(G-e)+F(G/e)&\leq (d_u-1+1)(d_v-1+1)\prod_{w\neq u,v}(d_w+1)\\ &\ +(d_u+d_v-2+1)\prod_{w\neq u,v}(d_w+1)\\ &\leq (d_u+1)(d_v+1)\prod_{w\neq u,v}(d_w+1). \end{align*} \end{proof} \begin{proof}[Second proof] For a graph $G$ and an orientation $\mathcal{O}$ of the edges, the score vector of $\mathcal{O}$ is simply the out-degree sequence of this orientation. 
It is known that the number of different score vectors is exactly the number of forests of $G$. This is an unpublished result of R. Stanley; a bijective proof can be found in \cite{KlWi}. Since the out-degree of a vertex $u$ is between $0$ and $d_u$, the number of different score vectors is at most $\prod_{u\in V(G)}(d_u+1)$. \end{proof} \section{Proof of Theorem~\ref{3-regular-forest}} In this section, we prove Theorem~\ref{3-regular-forest}. Since $a(G)\leq F(G)\leq 2^{e(G)}$ it is enough to prove that $\lim_{n\to \infty}a(G_n)^{1/v(G_n)}=2\sqrt{2}$. In fact, we will prove a slightly stronger theorem. For this, we need the concepts of weakly induced forest and broken cycle. Figure~\ref{broken cycles picture} may help to understand these concepts. \begin{Def} Let us label the edges of the graph $G$ with numbers from $1$ to $|E(G)|$. A broken cycle is an edge set that we obtain from a cycle by deleting the edge with the largest label. Let $c_k(G)$ be the number of edge sets with exactly $k$ edges that do not contain any broken cycle. (Note that these edge sets must be forests, since they cannot contain cycles.) \end{Def} \begin{Def} A set $S\subseteq E(G)$ is called a weakly induced forest if it contains no cycle, and the connected components determined by $S$ induce exactly the edges of $S$, all other edges are going between the connected components. Note that the vertex set of a weakly induced forest is the vertex set of the original graph $G$, that is, $V(G)$. Let $F_{wi}(G)$ be the number of weakly induced forests. \end{Def} \begin{figure} \caption{In the left graph, the broken cycles are edge sets such as $\{1,2\}$.} \label{broken cycles picture} \end{figure} \begin{Lemma} \label{two inequalities} For any graph $G$, we have $$F_{wi}(G)\leq a(G)\leq F(G).$$ \end{Lemma} \begin{proof} The proof is based on the fact that $a(G)$ is the number of edge subsets of $E(G)$ without a broken cycle. This follows from the following well-known facts. 
Let $\mathrm{ch}(G,q)$ be the chromatic polynomial of the graph $G$, this polynomial counts the number of proper colorings of the graph $G$ when we color the vertices of the graph with $q$ colors (it is allowed that some colors are not used), see for instance \cite{Read}. It is also known \cite{Stan} that $|\mathrm{ch}(G,-1)|=a(G)$. Furthermore, $\mathrm{ch}(G,q)=\sum_{k=0}^{n-1}(-1)^kc_k(G)q^{n-k}$, see \cite{Whit}. So $a(G)=\sum_{k=0}^{n-1}c_k(G)$ is the number of edge subsets of $E(G)$ without a broken cycle. From this it is immediately clear that $a(G)\leq F(G)$. It is also clear that a weakly induced forest does not contain any broken cycle no matter what the labeling is since it does not contain a path that can be obtained by deleting an edge from a cycle. Hence $F_{wi}(G)\leq a(G)\leq F(G)$. \end{proof} \begin{Rem} This remark outlines another proof of Lemma~\ref{two inequalities} via shattering sets. Its sole purpose is to share this unusual proof, the reader should feel free to skip this remark. The following proof of Lemma~\ref{two inequalities} is based on an observation of Kozma and Moran \cite{KoMo}. For a set $X$ and a set system $\mathcal{S}\subseteq 2^{X}$ let $$\mathrm{str}(\mathcal{S})=\{ Y\subseteq X\ |\ \forall A\subseteq Y\ \ \exists S\in \mathcal{S}\ \ A=Y\cap S\}$$ and $$\mathrm{sstr}(\mathcal{S})=\{ Z\subseteq X\ |\ \exists B\subseteq X\setminus Z\ \ \forall A\subseteq Z\ \ \ A\cup B\in \mathcal{S}\}.$$ The elements of the set system $\mathrm{str}(\mathcal{S})$ are the shattered sets of $\mathcal{S}$, and elements of the set system $\mathrm{sstr}(\mathcal{S})$ are the strongly shattered sets of $\mathcal{S}$. It is known that $$|\mathrm{sstr}(\mathcal{S})|\leq |\mathcal{S}|\leq |\mathrm{str}(\mathcal{S})|.$$ Now let $X=E(G)$ and let us fix an orientation of the edges. Then every orientation corresponds to a subset of $E(G)$, namely to the edge set where the orientation differs from the fixed orientation. 
Now following Kozma and Moran let $\mathcal{S}$ be the family of acyclic orientations. Then $Y\subset E(G)$ is shattered if no matter how we orient the edges of $Y$ we can extend it to an acyclic orientation of $G$. It is easy to see that these are exactly the forests of $G$: first of all, it cannot contain a cycle, because then by orienting the cycle we cannot extend it to an acyclic orientation. Secondly, if we orient a forest somehow, then we can orient the rest of the edges according to some topological order that is compatible with the orientation of the forest. A set $Z\subset E(G)$ is strongly shattered if we can orient the rest of the edges in a way that no matter how the edges of $Z$ are oriented it will be an acyclic orientation. Again it is easy to see that these edge sets are exactly the weakly induced forests of $G$: such an edge set cannot contain a cycle or a cycle minus an edge, because otherwise no matter how we orient the rest of the edges we would be able to achieve a cycle by orienting the elements of $Z$. On the other hand, if $Z$ determines a weakly induced forest, then by numbering the connected components of $Z$, and orienting the rest of the edges towards the largest numbers we get an orientation of $E(G)\setminus Z$ that satisfies that no matter how we orient the edges of $Z$ it will yield an acyclic orientation. \end{Rem} As a preparation for the proof of Theorem~\ref{3-regular-forest} we add some remarks. The proof uses probabilistic ideas, in particular, the so-called FKG-inequality \cite{FKG,AlSp}. For each subset $S\subseteq E(G)$ we can associate the indicator vector $\omega_S\in \{0,1\}^{E(G)}$: $$\omega_S(e)=\left\{ \begin{array}{cl} 1 & \mbox{if}\ e\in S, \\ 0 & \mbox{if}\ e\notin S. \end{array} \right.$$ There is a natural partial ordering on the vectors of $\{0,1\}^{E(G)}$: $\omega\leq \omega'$ if for all $e$ we have $\omega(e)\leq \omega'(e)$. 
On the level of sets this simply means that $\omega_S\leq \omega_{S'}$ if and only if $S\subseteq S'$. A function $f:\{0,1\}^{E(G)}\to \mathbb{R}$ is monotone increasing if $f(\omega)\leq f(\omega')$ whenever $\omega\leq \omega'$. A function $g:\{0,1\}^{E(G)}\to \mathbb{R}$ is monotone decreasing if $g(\omega)\geq g(\omega')$ whenever $\omega\leq \omega'$. Next let us consider the uniform measure $\mu$ on $\{0,1\}^{E(G)}$, that is, $\mu(\omega_S)=\frac{1}{2^{e(G)}}$ for all $S\subseteq E(G)$. Then $\mu$ trivially satisfies (with equality) the so-called log-supermodularity inequality: $$\mu(\omega)\mu(\omega')\leq \mu(\omega \vee \omega')\mu(\omega \wedge \omega'),$$ where $\omega \vee \omega'$ is the vector such that $(\omega \vee \omega')(e)=\max(\omega(e),\omega'(e))$, and $\omega \wedge \omega'$ is the vector such that $(\omega \wedge \omega')(e)=\min(\omega(e),\omega'(e))$. The so-called FKG-inequality \cite{FKG} asserts that if the random variables $X,Y:\{0,1\}^{E(G)}\to \mathbb{R}_{\geq 0}$ are both monotone increasing or both monotone decreasing and $\mu$ is log-supermodular, then $$\mathbb{E}(XY)\geq \mathbb{E}(X)\mathbb{E}(Y).$$ Since the product of non-negative monotone decreasing random variables is also monotone decreasing, we get that $$\mathbb{E}\left[\prod_{i=1}^kX_i\right]\geq \prod_{i=1}^k\mathbb{E}[X_i]$$ if every $X_i$ is non-negative monotone decreasing random variable. With this preparation, we can prove Theorem~\ref{3-regular-forest}. \begin{proof}[Proof of Theorem~\ref{3-regular-forest}] We will actually prove that $\lim_{n\to \infty}F_{wi}(G_n)^{1/v(G_n)}=2\sqrt{2}$. By the above lemma it implies that $\lim_{n\to \infty}a(G_n)^{1/v(G_n)}=2\sqrt{2},$ and \\ $\lim_{n\to \infty}F(G_n)^{1/v(G_n)}=2\sqrt{2}$. Let us consider a subset $A\subseteq E$ chosen uniformly at random from all possible $2^{e(G)}$ subsets. 
Then $$\mathbb{P}(A\ \mbox{is a weakly induced forest})=2^{-3n/2}F_{wi}(G).$$ Let $C_1,C_2,\dots ,C_k$ be the connected components of $A$. Note that $C_j$ might be a single vertex, or it might contain a cycle. For a fixed vertex $v$ let $X_v:\{0,1\}^{E(G)}\to \mathbb{R}$ be the indicator variable that the vertex $v$ is in a weakly induced tree, that is, it is a tree whose connected component contains only the edges of the tree. In other words, if $v\in C_j$ for some $j$, then $X_v(\omega_A)=1$ if $C_j$ is a tree that only contains the edges of $G[V(C_j)]$, otherwise it is $0$. In particular, it is $0$ if it contains a cycle, or $G[V(C_j)]$ contains an edge that is not in $A$. The set $A$ is weakly induced forest if and only if $X_v(\omega_A)=1$ for all $v\in V(G)$. Hence $$\mathbb{P}(A\ \mbox{is a weakly induced forest})=\mathbb{E}\left[\prod_{v\in V}X_v\right].$$ Observe that $X_v$ are all monotone decreasing functions (a subset of a weakly induced tree is also weakly induced), and so by the FKG-inequality we get that $$\mathbb{E}\left[\prod_{v\in V}X_v\right]\geq \prod_{v\in V}\mathbb{E}[X_v].$$ Here $\mathbb{E}[X_v]$ is the probability that $v$ is in a weakly induced tree. Suppose that $g(G)\geq 2k+1$, and let us choose $R=k-1$, then the $R$-neighborhood of any vertex in $G$ is an induced tree. The probability that $v$ is in a weakly induced tree is clearly bigger than the probability that in $A$ there is no path between $v$ and a vertex at distance $R$. Next we examine this probability. Before estimating the above probability, let us consider the case when we have a $2$-ary tree of depth $t$, that is the root vertex has degree $2$ and all other non-leaf vertices have degree $3$, and all leaves are of distance $t$ from the root vertex. For a $2$-ary tree of depth $t$ let us consider a random subset of edges. 
The probability $p_t$ that the root vertex is connected to some leaf vertex in this random subset satisfies the recursion $p_t=p_{t-1}-\frac{1}{4}p_{t-1}^2$. Clearly, $p_t$ is a monotone decreasing sequence and the limit $q$ must satisfy $q=q-\frac{1}{4}q^2$, so $q=0$. The $R$-neighborhood of a vertex $v$ in the graph $G$ is not exactly a $2$-ary tree, because $v$ has degree $3$ unlike the root vertex of a $2$-ary tree which has degree $2$. Still, we can upper bound the probability that in $A$ there is a path between $v$ and a vertex at distance $R$ by $3p_{R-1}$. Hence $\mathbb{E}[X_v]\geq 1-3p_{R-1}$. So we have $$\mathbb{P}(A\ \mbox{is a weakly induced forest})\geq (1-3p_{R-1})^{v(G)}.$$ Then using the trivial upper bound and the lower bound obtained now we get that $$2\sqrt{2}\geq F_{wi}(G)^{1/v(G)}\geq 2\sqrt{2}(1-3p_{R-1}).$$ Since $p_{R}\to 0$ as $n\to\infty$ we get that $$\lim_{n\to \infty}F_{wi}(G_n)^{1/v(G_n)}=2\sqrt{2}.$$ \end{proof} \begin{Rem} Since subgraphs free of broken cycles are already decreasing it would have been enough to consider $a(G)$, but from the point of view of the theorem, it was a bit more convenient and natural to use weakly induced forests. \end{Rem} \section{Proof of Proposition~\ref{average-forest}} In this section we prove Proposition~\ref{average-forest}. \begin{proof}[Proof of Proposition~\ref{average-forest}] Let $n$ be the number of vertices and let $m=\overline{d}n/2$ be the number of edges. Since a forest has at most $n-1$ edges we have $$F(G)\leq \sum_{r=0}^{n-1}\binom{m}{r}\leq \exp\left(m H\left(\frac{n-1}{m}\right)\right)\leq \exp\left(m H\left(\frac{n}{m}\right)\right),$$ where we used the fact that $H(x)$ is monotone increasing for $0<x<1/2$. Hence $$\frac{1}{v(G)}\ln F(G)\leq \frac{\overline{d}}{2}H\left(\frac{2}{\overline{d}}\right).$$ \end{proof} \section{Proof of Theorem~\ref{4-regular-forest}} In this section we prove Theorem~\ref{4-regular-forest}. 
The following lemma is not crucial, but will turn out to be useful at some point to avoid certain technical difficulties. \begin{Lemma} \label{special cover} Let $G$ be a graph and $e=(u,v)\in E(G)$. Let us consider the graph $H$ obtained from $G\cup G$ in such a way that we delete the two copies $(u_1,v_1)$ and $(u_2,v_2)$ of the edge $e$ and add the edges $(u_1,v_2)$ and $(u_2,v_1)$. Then $$F(G)^2\leq F(H).$$ \end{Lemma} \begin{Rem} If $G$ is connected and $e=(u,v)\in E(G)$ is not a cut edge, then $H$ is connected too. Note that $H$ is a very special $2$-cover of $G$. \end{Rem} \begin{proof} Let $$F_1=\left\{S\subseteq E(G)\setminus \{e\}\ |\ S\mbox{\ is a forest}\right\}\ \ \mbox{and}\ \ F_2=\left\{S\subseteq E(G)\setminus \{e\}\ |\ S\cup \{e\}\mbox{\ is a forest}\right\}.$$ Then $|F_1|\geq |F_2|$ and $F(G)=|F_1|+|F_2|$. Set $|F_1|=f_1$ and $|F_2|=f_2$. Note that $F(H)=3f_1^2+(f_1^2-(f_1-f_2)^2)=3f_1^2+2f_1f_2-f_2^2$ since if we add at most one of the edges of $(u_1,v_2)$ and $(u_2,v_1)$, then there are $f_1^2$ ways to add a subset of the edges such that it will be a forest, and if we add both edges, then the only bad case is that we add sets $S_1,S_2\in F_1\setminus F_2$ in the two copies of $G$. Since $3f_1^2+2f_1f_2-f_2^2\geq (f_1+f_2)^2$ we are done. \end{proof} Now we are ready to prove Theorem~\ref{4-regular-forest}. \begin{proof}[Proof of Theorem~\ref{4-regular-forest}] First, let us assume that $G$ is connected. The idea is to bound the number of forests according to the number of edges. If the number of edges of the forest is at most $(1-\varepsilon)n$, then the number of forests is at most $$\sum_{k=0}^{(1-\varepsilon)n}\binom{2n}{k}\leq \exp\left(2nH\left(\frac{1-\varepsilon}{2}\right)\right).$$ If the number of edges is at least $(1-\varepsilon)n$, then we can get it from a spanning tree by deleting at most $\varepsilon n$ edges. 
Hence the number of such forests is at most $$\tau(G)\sum_{k=0}^{\varepsilon n}\binom{n-1}{k}\leq \tau(G)\exp(nH(\varepsilon)),$$ where $\tau(G)$ is the number of spanning trees. We will use the theorem of McKay \cite{McKay1} claiming that the number of spanning trees of a $d$-regular graph is at most $$\tau(G)\leq \frac{c_d\ln n}{n}\left(\frac{(d-1)^{d-1}}{(d^2-2d)^{d/2-1}}\right)^n.$$ For us $d=4$, so $$\tau(G)\leq \frac{c_4\ln n}{n}\left(\frac{27}{8}\right)^n.$$ Hence $$F(G)\leq \exp\left(2nH\left(\frac{1-\varepsilon}{2}\right)\right)+\frac{c_4\ln n}{n}\left(\frac{27}{8}\right)^n\exp(nH(\varepsilon)).$$ By choosing $\varepsilon=0.04$ we get that $F(G)\leq C\cdot 3.994^n$, where $C$ is some absolute constant. Next, we show that this statement is true for all $4$-regular connected graphs without $C$, that is, $F(G)\leq 3.994^n$. Let $$M=\sup_{G\in \mathcal{G}^c_4} \frac{F(G)}{3.994^{v(G)}},$$ where the supremum is taken over all $4$-regular connected graphs. We know that $M\leq C$. Let $G$ be an arbitrary $4$-regular connected graph. Now let $H$ be the special $2$-cover described in Lemma~\ref{special cover} such that $H$ is connected too. Then $$F(G)^2\leq F(H)\leq M\cdot 3.994^{v(H)}=M\cdot 3.994^{2v(G)},$$ whence $F(G)\leq \sqrt{M}\cdot 3.994^{v(G)}$. Since $G$ was arbitrary we get that $M\leq \sqrt{M}$, hence $M\leq 1$. Hence $F(G)^{1/v(G)}\leq 3.994$ for all $4$-regular connected graphs. Since $F(\bigcup G_i)=\prod F(G_i)$, the same inequality is true for disconnected graphs. \end{proof} \section{Proof of Theorem~\ref{forest-cover}} In this section we prove Theorem~\ref{forest-cover}. We first need a lemma. \begin{Lemma} \label{equivalent inequalities} Let $S$ be a finite set, and let $\mu$ be a probability distribution on the subsets of $S$. Let $x,y$ be fixed elements of $S$. Let $\textbf{S}$ be a random subset of $S$ according to the distribution $\mu$. 
Then the following inequalities are equivalent $$\mathbb{P}_{\mu}(x,y\in \textbf{S})\leq \mathbb{P}_{\mu}(x\in \textbf{S})\mathbb{P}_{\mu}(y\in \textbf{S}),$$ $$\mathbb{P}_{\mu}(x,y\notin \textbf{S})\leq \mathbb{P}_{\mu}(x\notin \textbf{S})\mathbb{P}_{\mu}(y\notin \textbf{S}),$$ $$\mathbb{P}_{\mu}(x,y\in \textbf{S})\mathbb{P}_{\mu}(x,y\notin \textbf{S})\leq \mathbb{P}_{\mu}(x\in \textbf{S},y\notin \textbf{S})\mathbb{P}_{\mu}(x\notin \textbf{S},y\in \textbf{S}).$$ \end{Lemma} \begin{proof} We prove the equivalence of the first and third inequalities, the rest is similar. $$\mathbb{P}_{\mu}(x\in \textbf{S})=\mathbb{P}_{\mu}(x\in \textbf{S},y\in \textbf{S})+\mathbb{P}_{\mu}(x\in \textbf{S},y\notin \textbf{S})$$ and $$\mathbb{P}_{\mu}(y\in \textbf{S})=\mathbb{P}_{\mu}(x\in \textbf{S},y\in \textbf{S})+\mathbb{P}_{\mu}(x\notin \textbf{S},y\in \textbf{S}).$$ Furthermore, $$\mathbb{P}_{\mu}(x\in \textbf{S},y\in \textbf{S})=\mathbb{P}_{\mu}(x\in \textbf{S},y\in \textbf{S})\cdot 1=$$ $$\mathbb{P}_{\mu}(x\in \textbf{S},y\in \textbf{S})(\mathbb{P}_{\mu}(x\in \textbf{S},y\in \textbf{S})+\mathbb{P}_{\mu}(x\in \textbf{S},y\notin \textbf{S})+\mathbb{P}_{\mu}(x\notin \textbf{S},y\in \textbf{S})+\mathbb{P}_{\mu}(x\notin \textbf{S},y\notin \textbf{S})).$$ Now writing the above identities into $\mathbb{P}_{\mu}(x,y\in \textbf{S})\leq \mathbb{P}_{\mu}(x\in \textbf{S})\mathbb{P}_{\mu}(y\in \textbf{S}),$ and subtracting the identical terms we get the third inequality. \end{proof} In the forthcoming proof, we will apply Lemma~\ref{equivalent inequalities} and Conjecture~\ref{correlation-conjecture} to the set $S=E(H)$ and probability distribution $\mu$ which takes value $0$ on the non-forest subsets, and uniform on the forests. The other tool that we need is the recursion $$F(G)=F(G-e)+F(G/e)$$ that we already used in the proof of Proposition~\ref{product-forest}. 
As we already noted the contraction may produce multiple edges so we necessarily work in the class of graphs with multiple edges. Conjecture~\ref{correlation-conjecture} is expected to be true for graphs with multiple edges, in fact, it is expected to be true even for weighted graphs. Now we are ready to prove Theorem~\ref{forest-cover}. \begin{proof}[Proof of Theorem~\ref{forest-cover}] We prove the statement by induction on the number of edges. When $G$ is the empty graph on $n$ vertices, the claim is trivial. Let $e=(u,v)\in E(G)$, and let $e_1$ and $e_2$ be the $2$-lifts of $e$ in a $2$-cover $H$. For the sake of simplicity we also denote by $e_1$ and $e_2$ the $2$-lifts of $e$ in $G\cup G$. We decompose $F(H)$ according to the cases whether a forest contains $e_1$ and/or $e_2$: $$F(H)=F_{e_1,e_2}(H)+F_{\overline{e}_1,e_2}(H)+F_{e_1,\overline{e}_2}(H)+F_{\overline{e}_1,\overline{e}_2}(H),$$ where the first term means that we count the number of forests containing both $e_1,e_2$, the second term counts the number of forests containing $e_2$, but not $e_1$, etc. Similarly, we have $$F(G\cup G)=F_{e_1,e_2}(G\cup G)+F_{\overline{e}_1,e_2}(G\cup G)+F_{e_1,\overline{e}_2}(G\cup G)+F_{\overline{e}_1,\overline{e}_2}(G\cup G).$$ Now observe that the terms $F_{\overline{e}_1,\overline{e}_2}(H)$ and $F_{\overline{e}_1,\overline{e}_2}(G\cup G)$ count the number of forests in $2$-covers of $G-e$, and by induction $$F_{\overline{e}_1,\overline{e}_2}(H)\geq F_{\overline{e}_1,\overline{e}_2}(G\cup G).$$ Similarly, $F_{e_1,e_2}(H)=F(H/\{e_1,e_2\})$ and $H/\{e_1,e_2\}$ is isomorphic to a $2$-cover of $G/e$. Hence $$F_{e_1,e_2}(H)\geq F_{e_1,e_2}(G\cup G).$$ Observe that by symmetry we have $F_{\overline{e}_1,e_2}(H)=F_{e_1,\overline{e}_2}(H)$. Let $\mathbf{F}$ be a random forest of $H$ chosen uniformly, and $\mathbb{P}_H$ be the corresponding probability distribution. 
Note that with our previous notation we have $$\mathbb{P}_H(e_1\in \textbf{F},e_2\notin \textbf{F})=\frac{F_{e_1,\overline{e}_2}(H)}{F(H)}\ \ \ \mbox{and}\ \ \ \mathbb{P}_H(e_1\notin \textbf{F},e_2\in \textbf{F})=\frac{F_{\overline{e_1},e_2}(H)}{F(H)}.$$ Lemma~\ref{equivalent inequalities} shows that the negative correlation inequality of Conjecture~\ref{correlation-conjecture}, namely, $$\mathbb{P}_H(e,f\in \textbf{F})\leq \mathbb{P}_H(e\in \textbf{F})\mathbb{P}_H(f\in \textbf{F})$$ is equivalent to $$\mathbb{P}_H(e\in \textbf{F},f\notin \textbf{F})\mathbb{P}_H(e\notin \textbf{F},f\in \textbf{F})\geq \mathbb{P}_H(e\in \textbf{F},f\in \textbf{F})\mathbb{P}_H(e\notin \textbf{F},f\notin \textbf{F}).$$ In the following computation, we will apply this inequality to $e=e_1$ and $f=e_2$. Then \begin{align*} F_{\overline{e_1},e_2}(H)^2&=F_{\overline{e}_1,e_2}(H)\cdot F_{e_1,\overline{e}_2}(H)\\ &=F(H)^2\mathbb{P}_H(e_1\in \textbf{F},e_2\notin \textbf{F})\mathbb{P}_H(e_1\notin \textbf{F},e_2\in \textbf{F})\\ &\geq F(H)^2\mathbb{P}_H(e_1\in \textbf{F},e_2\in \textbf{F})\mathbb{P}_H(e_1\notin \textbf{F},e_2\notin \textbf{F})\\ &=F_{e_1,e_2}(H)F_{\overline{e}_1,\overline{e}_2}(H)\\ &\geq F_{e_1,e_2}(G\cup G)F_{\overline{e}_1,\overline{e}_2}(G\cup G)\\ &=F_{e_1}(G)F_{e_2}(G)F_{\overline{e}_1}(G)F_{\overline{e}_2}(G)\\ &=F_{\overline{e}_1,e_2}(G\cup G)\cdot F_{e_1,\overline{e}_2}(G\cup G)\\ &=F_{\overline{e}_1,e_2}(G\cup G)^2. \end{align*} Hence $F_{\overline{e}_1,e_2}(H)\geq F_{\overline{e}_1,e_2}(G\cup G)$. Putting together the $4$ inequalities we get that $F(H)\geq F(G\cup G)$. \end{proof} \section{Proof of Theorem~\ref{regular upper bounds}} In this section, we give a new upper bound on the number of forests in regular graphs. We use some basic results from spectral graph theory such as the matrix-tree theorem and the expression of the number of closed walks as a power sum of the eigenvalues of the adjacency matrix. 
All these results can be found in the books of Brouwer and Haemers \cite{BrHa} and Godsil and Royle \cite{GoRo}. \begin{Def} Let $G$ be a graph with edge weights $w:E(G)\to \mathbb{R}$. Let $L(G,w)$ be the $|V|\times |V|$ matrix defined as follows: $$L(G,w)_{i,j}=\left\{\begin{array} {cl} \sum_{e: i\in e}w_e & \mbox{if}\ i=j, \\ -w_e &\mbox{if}\ e=(i,j)\in E(G),\\ 0 &\mbox{if}\ (i,j)\notin E(G). \end{array} \right.$$ The matrix $L(G,w)$ is the weighted Laplacian matrix of the graph $G$. \end{Def} \begin{Lemma}[Kirchhoff's matrix-tree theorem \cite{Kirc,BrHa,GoRo}] \label{Kirchhoff} Let $G$ be a graph with edge weights $w:E(G)\to \mathbb{R}$. Let $L(G,w)$ be its weighted Laplacian matrix. Let $L_0(G,w)$ be the matrix obtained from $L(G,w)$ by deleting the first row and column. Then $$\det L_0(G,w)=\sum_{T\in \mathcal{T}(G)}\prod_{e\in E(T)}w_e,$$ where $\mathcal{T}(G)$ is the set of spanning trees of $G$. \end{Lemma} Let $G$ be a $d$-regular graph with Laplacian matrix $L(G,\underline{1})$. Let us add one more vertex to $G$, and connect it to all vertices with an edge of weight $\alpha$. Let $G_{\alpha}$ be the new graph. The original edges have weight $1$, and a weight of a spanning tree of the graph $G_{\alpha}$ is the product of the weights of the edges in the spanning tree. Then the weighted sum of the spanning trees of $G_{\alpha}$ can be computed as a weighted sum of the forests of the original graph $G$ as follows. The total weight of spanning trees in $G_{\alpha}$, which correspond to a forest $F$ in $G$ with connected components $F_1,F_2,\dots ,F_k$, is $$w_{\alpha}(F)=\alpha^k\prod_{i=1}^k|F_i|.$$ Indeed, once we have a forest of $G$ we can create $\prod_{i=1}^k|F_i|$ spanning trees of $G_{\alpha}$ by connecting one of the vertices of each component to the new vertex. Each such spanning tree has a weight $\alpha^k$. Let $k(F)$ denote the number of connected components of a forest $F$. 
By the above discussion we get that for each $\alpha>0$ we have $w_{\alpha}(F)(1/\alpha)^{k(F)}\geq 1$. Let us use the matrix-tree theorem (Lemma~\ref{Kirchhoff}): we have $L_0(G_{\alpha},w)=L(G,\underline{1})+\alpha I$, where $I$ is the identity matrix. Then $$S:=\sum_Fw_{\alpha}(F)=\det(L(G,\underline{1})+\alpha I)=\prod_{i=1}^n(\lambda_i+\alpha),$$ where $\lambda_i$ are the eigenvalues of the matrix $L(G,\underline{1})$. If $d=\mu_1\geq \mu_2\geq \dots \geq \mu_n$ are the eigenvalues of the adjacency matrix of the graph $G$, then $\lambda_i=d-\mu_i$. Hence $$\frac{1}{n}\ln S=\frac{1}{n}\sum_{i=1}^n\ln(\lambda_i+\alpha)=\frac{1}{n}\sum_{i=1}^n\ln(d-\mu_i+\alpha).$$ Now we can estimate this sum as follows. In the following computation $\mu_{KM}$ is the Kesten-McKay measure \cite{McKay1,McKay2}. Its explicit form is given by the density function $$\frac{d\sqrt{4(d-1)-x^2}}{2\pi(d^2-x^2)}\cdot 1_{(-2\sqrt{d-1},2\sqrt{d-1})}.$$ Its speciality is that $$W_k(\mathbb{T}_d,o):=\int x^k\, d\mu_{KM}(x)$$ is equal to the number of closed walks of length $k$ in the infinite $d$-regular tree from a fixed root vertex $o$. The quantity $W_k(G)=\sum_{i=1}^n\mu_i^k$ counts the number of closed walks of length $k$ in the graph $G$. The following standard argument shows that $W_k(G)\geq nW_k(\mathbb{T}_d,o)$ since $\mathbb{T}_d$ is the universal cover of any $d$-regular graph $G$. We show that for any vertex $v$, the number of closed walks $W_{k}(G,v)$ of length $k$ starting and ending at vertex $v$ is at least as large as the number of closed walks starting and ending at some root vertex of the infinite $d$-regular tree $\mathbb{T}_d$. Let us consider the following infinite $d$-regular tree, its vertices are labeled by the walks starting at the vertex $v$ which never steps immediately back to a vertex, where it came from. Such walks are called non-backtracking walks. 
For instance, in the depicted graph below $149831$ is such a walk, but $1494$ is not a non-backtracking walk since after $9$ we immediately stepped back to $4$. We connect two non-backtracking walks in the tree if one of them is a one-step extension of the other. \begin{figure}\label{universal cover picture} \end{figure} Note that every closed walk in the tree corresponds to a closed walk in the graph $G$: for instance, for the depicted graph the walk $1,14,149,14,1$ in the tree corresponds to the walk $1,4,9,4,1$ in the graph. On the other hand, there are closed walks in the graph $G$, like $149831$, which are not closed anymore in the tree. This argument shows that $W_k(G,v)\geq W_k(\mathbb{T}_d,o)$ for all $v\in V(G)$. Consequently, $W_k(G)\geq nW_k(\mathbb{T}_d,o)$. Then \begin{align*} \frac{1}{n}\sum_{i=1}^n\ln(d-\mu_i+\alpha)&=\ln(d+\alpha)+\frac{1}{n}\sum_{i=1}^n\ln\left(1-\frac{\mu_i}{d+\alpha}\right)\\ &=\ln(d+\alpha)+\frac{1}{n}\sum_{i=1}^n\sum_{k=1}^{\infty}-\frac{1}{k}\left(\frac{\mu_i}{d+\alpha}\right)^k\\ &=\ln(d+\alpha)-\sum_{k=1}^{\infty}\frac{1}{k(d+\alpha)^k}\frac{1}{n}\sum_{i=1}^n\mu_i^k\\ &=\ln(d+\alpha)-\sum_{k=1}^{\infty}\frac{1}{k(d+\alpha)^k}\frac{W_k(G)}{n}\\ &\leq \ln(d+\alpha)-\sum_{k=1}^{\infty}\frac{1}{k(d+\alpha)^k}W_k(\mathbb{T}_d,o)\\ &=\ln(d+\alpha)-\sum_{k=1}^{\infty}\frac{1}{k(d+\alpha)^k}\int x^k\, d\mu_{KM}(x)\\ &=\ln(d+\alpha)+\int \ln\left(1-\frac{x}{d+\alpha}\right)\, d\mu_{KM}(x)\\ &=\int \ln(d+\alpha-x)\, d\mu_{KM}(x). \end{align*} Hence $$S\leq \exp\left(n \int \ln(d+\alpha-x)\, d\mu_{KM}(x)\right).$$ When $\alpha=1$, then $w_1(F)\geq 1$, and we get back the result of Kahale and Schulman (actually they used almost the same argument for acyclic orientations instead of forests). 
When $\alpha =1/2$, then we get that the number of forests without isolated vertices, denoted by $F_1(G)$, satisfies $$F_1(G)\leq \exp\left(n \int \ln\left(d+\frac{1}{2}-x\right)\, d\mu_{KM}(x)\right)<d^n,$$ since $w_{1/2}(F)\geq 1$ for forests without isolated vertices. \begin{Rem} One can explicitly compute the above integrals using a theorem of McKay \cite{McKay1}. For $|\gamma|<\frac{1}{2\sqrt{d-1}}$ let $$J_d(\gamma)=\int \ln(1-\gamma x) \, d\mu_{KM}(x).$$ Let $$\eta=\frac{1-(1-4(d-1)\gamma^2)^{1/2}}{2(d-1)\gamma^2}.$$ Then $$J_d(\gamma)=-\ln \left(\eta \left(\frac{d-\eta}{d-1}\right)^{(d-2)/2}\right).$$ Clearly, one needs to use that $$\int \ln(d+\alpha-x)\, d\mu_{KM}(x)=\ln(d+\alpha)+J_d\left(\frac{1}{d+\alpha}\right).$$ This is how we got the explicit bound in Theorem~\ref{KS-bound} that does not appear in the original paper of Kahale and Schulman \cite{KaSc}. \end{Rem} Now we are ready to improve the result of Kahale and Schulman \cite{KaSc}. \begin{proof} Let $G$ be a $d$-regular graph on $n$ vertices. Then the number of edges is $m=dn/2$. Let $F_1$ be the number of forests of $G$ with at most $cn$ connected components, where $c$ is some constant that we will choose later. Let $F_2$ be the number of forests of $G$ with more than $cn$ connected components. In this latter case the number of edges of the forest is at most $e(F)=n-k(F)\leq (1-c)n$. Then $$F_1\leq \exp\left(n \int \ln(d+\alpha-x)\, d\mu_{KM}(x)\right) \cdot \left(\frac{1}{\alpha}\right)^{cn}$$ since $F_1\leq \sum_{F}w_{\alpha}(F)\left(\frac{1}{\alpha}\right)^{k(F)}\leq \sum_{F}w_{\alpha}(F)\left(\frac{1}{\alpha}\right)^{cn}$. For $F_2$ we use the trivial bound based on the fact that such a forest has at most $(1-c)n$ edges. 
$$F_2\leq \sum_{k=0}^{(1-c)n}\binom{m}{k}\leq \exp\left(n\frac{d}{2}H\left(\frac{2(1-c)}{d}\right)\right).$$ Hence $$F(G)\leq \exp\left(n \int \ln(d+\alpha-x)\, d\mu_{KM}(x)\right) \cdot \left(\frac{1}{\alpha}\right)^{cn}+ \exp\left(n\frac{d}{2}H\left(\frac{2(1-c)}{d}\right)\right).$$ Next, we choose $\alpha$ and $c$ to make the two terms (approximately) the same, then we arrive at the bound $F(G)\leq 2C_d^n$. If we cannot find such $c$, then we can still use $c=1$ to recover the bound of Kahale and Schulman (KS-bound in the table below) that corresponds to the choice $\alpha=1, c=1$. Besides, one can get rid of the constant $2$ by the trick used in the proof of Theorem~\ref{4-regular-forest}. The value of $C_d$ is given in the table below. As we see we get a much better bound for $d=5,6,7,8$, and a slightly better bound for $d=9$. \begin{center} \begin{table}[h!] \caption{Bounds on the number of forests for small $d$} \label{table forest2} \begin{tabular}{|c|c|c|c|c|} \hline $d$ & new bound $C_d$ & KS-bound & $\alpha$ & $c$ \\ \hline $5$ & $5.1965$ & $5.5361$ & $0.3084$ & $0.0739$ \\ \hline $6$ & $6.3367$ & $6.5286$ & $0.4482$ & $0.0835$ \\ \hline $7$ & $7.4290$ & $7.5235$ & $0.5917$ & $0.0903$ \\ \hline $8$ & $8.4843$ & $8.5200$ & $0.7374$ & $0.0955$ \\ \hline $9$ & $9.5116$ & $9.5173$ & $0.8844$ & $0.0995$ \\ \hline \end{tabular} \end{table} \end{center} \end{proof} \section{Proof of Theorem~\ref{regular-connected}} In this section we prove Theorem~\ref{regular-connected}. Note that for a bipartite graph $G=(A,B,E)$ there is a strikingly simple argument giving $$C(G)^{1/v(G)}\leq 2^{d/2}\left(1-\frac{1}{2^d}\right)^{1/2}.$$ Indeed, if we consider the probability that a random edge subset spans a connected graph, then this probability is clearly smaller than the probability that on one side of the bipartition none of the vertices are isolated. Let $S$ be a random subset of the edge set. 
If $B_v$ is the bad event that the vertex $v\in A$ is isolated, then $$\mathbb{P}(S\ \mbox{spans a connected subgraph})\leq \mathbb{P}(\bigcap_{v\in A} \overline{B_v})=\prod_{v\in A}\mathbb{P}(\overline{B_v})=\left(1-\frac{1}{2^d}\right)^{v(G)/2}.$$ We used the fact that the events $B_v$ for $v\in A$ are independent. This gives the above inequality. In the general case, there is no independence for all vertices. (Though we can take a large independent set of the vertex set instead of the set $A$.) Nevertheless, we can easily overcome it by using one of Janson's inequalities. This needs some preparation. \textbf{Setup of Janson's inequalities.} Let $\Omega$ be a fixed set and let $R$ be a random subset of $\Omega$ by choosing $r\in R$ with probability $p_r$ mutually independently of each other. Let $(A_i)_{i\in I}$ be subsets of $\Omega$ for some index set $I$. Let $B_i$ be the event that $A_i\subseteq R$. Let $X_i$ be the indicator random variable for the event $B_i$. Let $$X:=\sum_{i\in I}X_i.$$ It is, of course, the number of $A_i\subseteq R$. So the events $\bigcap_{i\in I}\overline{B_i}$ and $X=0$ are identical. For $i,j\in I$ we say that $i\sim j$ if $A_i\cap A_j\neq \emptyset$. Note that if $i\not\sim j$ then this is consistent with our previous notation that $B_i$ and $B_j$ are independent. Let $$\Delta=\sum_{i\sim j}\mathbb{P} \left(B_i\cap B_j\right),$$ where the sum is over all ordered pairs, so $\Delta/2$ is the same sum for unordered pairs. Set $$M=\prod_{i\in I}\mathbb{P}\left(\overline{B_i}\right).$$ This would be the probability of $\bigcap_{i\in I}\overline{B_i}$ if the events $B_i$ were independent. Finally, set $$\mu=\mathbb{E} X=\sum_{i\in I}\mathbb{P}(B_i).$$ Now we are ready to state Janson's inequalities. \begin{Th}[Janson inequality \cite{JLR},\cite{AlSp}] \label{janson1} Let $(B_i)_{i\in I},M,\Delta,\mu$ be as above, and assume that $\mathbb{P}(B_i)\leq \varepsilon$ for all $i\in I$. 
Then $$M\leq \mathbb{P} \left(\bigcap_{i\in I}\overline{B_i}\right)\leq M \exp \left(\frac{1}{1-\varepsilon}\cdot \frac{\Delta}{2}\right).$$ \end{Th} \begin{proof}[Proof of Theorem~\ref{regular-connected}] Now let $R\subseteq E(G)$ be a set chosen uniformly at random. For a vertex $v$ let $A_v$ be the set of edges incident to the vertex $v$. Let $B_v$ be the bad event that $A_v\subseteq R$. If one of $B_v$ occurs, then $E(G)\setminus R$ cannot be connected since the vertex $v$ would be an isolated vertex. We have $$\mathbb{P}(B_v)=\frac{1}{2^d},$$ and $$\Delta=\sum_{u\sim v}\mathbb{P} \left(B_u\cap B_v\right)=\frac{nd}{2^{2d-1}}.$$ Then using Janson's inequality with $\varepsilon=\frac{1}{2^d}$ we get that $$C(G)\leq 2^{nd/2}\mathbb{P} \left(\bigcap_{v\in V}\overline{B_v}\right)\leq 2^{nd/2}\left(1-\frac{1}{2^d}\right)^n\exp\left(\frac{nd}{2^{d}(2^d-1)}\right).$$ Hence $$C(G)^{1/n}\leq 2^{d/2}\left(1-\frac{1}{2^d}\right)\exp\left(\frac{d}{2^{d}(2^d-1)}\right).$$ \end{proof} \section{Proof of Theorem~\ref{average-connected}} In this section we prove Theorem~\ref{average-connected}. \begin{proof}[Proof of Theorem~\ref{average-connected}] Let us consider $$Z_{\mathrm{RC}}(G,q,w)=\sum_{A\subseteq E(G)}q^{k(A)}w^{|A|}.$$ We show that if $0<q\leq 1$ and $w\geq 0$, then $$Z_{\mathrm{RC}}(G,q,w)\leq q^{v(G)}\left(1+\frac{w}{q}\right)^{e(G)}.$$ Indeed, $$q^{v(G)}\left(1+\frac{w}{q}\right)^{e(G)}=\sum_{A\subseteq E(G)}q^{v(G)}\left(\frac{w}{q}\right)^{|A|}.$$ So it is enough to show that $$q^{k(A)}w^{|A|}\leq q^{v(G)}\left(\frac{w}{q}\right)^{|A|},$$ equivalently $1\leq q^{v(G)-k(A)-|A|}$ which is indeed true since $|A|+k(A)\geq v(G)$ and $0<q\leq 1$. Let us apply this inequality to $q=\frac{\overline{d}-2}{2}$ and $w=1$. Note that $0<q\leq 1$ since $2<\overline{d}\leq 4$. 
Observe also that just by keeping the connected spanning subgraphs $A$ we have $$qC(G)\leq Z_{\mathrm{RC}}(G,q,1)\leq q^{v(G)}\left(1+\frac{1}{q}\right)^{e(G)}.$$ By substituting $q=\frac{\overline{d}-2}{2}$ and evaluating the right-hand side we get that $$C(G)\leq \frac{2}{\overline{d}-2}\exp\left(v(G)\frac{\overline{d}}{2}H\left(\frac{2}{\overline{d}}\right)\right).$$ \end{proof} \noindent \textbf{Acknowledgment.} The second author thanks Ferenc Bencs for the useful discussions. The authors are very grateful to the referees for their comments and suggestions. \end{document}
\begin{document} \title{Average number of flips in pancake sorting \\[20pt]} \author{Josef Cibulka\\ \small{Department of Applied Mathematics, Charles University,} \\ \small{Malostransk\'e n\'am.~25, 118~00 Prague, Czech Republic. }\\ \small{\it [email protected]} \thanks{Work on this paper was supported by the project 1M0545 of the Ministry of Education of the Czech Republic and by the Czech Science Foundation under the contract no.\ 201/09/H057. The access to the METACentrum computing facilities provided under the research intent MSM6383917201 is highly appreciated.} } \date{} \maketitle \begin{abstract} We are given a stack of pancakes of different sizes and the only allowed operation is to take several pancakes from top and flip them. The unburnt version requires the pancakes to be sorted by their sizes at the end, while in the burnt version they additionally need to be oriented burnt-side down. We present an algorithm with the average number of flips, needed to sort a stack of $n$ burnt pancakes, equal to $7n/4 + O(1)$ and a randomized algorithm for the unburnt version with at most $17n/12 + O(1)$ flips on average. In addition, we show that in the burnt version, the average number of flips of any algorithm is at least $n+\Omega(n/\log n)$ and conjecture that some algorithm can reach $n+\Theta(n/\log n)$. We also slightly increase the lower bound on $g(n)$, the minimum number of flips needed to sort the worst stack of $n$ burnt pancakes. This bound together with the upper bound found by Heydari and Sudborough in 1997 gives the exact number of flips to sort the previously conjectured worst stack $-I_n$ for $n \equiv 3 \pmod 4$ and $n \geq 15$. Finally we present exact values of $f(n)$ up to $n=19$ and of $g(n)$ up to $n=17$ and disprove a conjecture of Cohen and Blum by showing that the burnt stack $-I_{15}$ is not the worst one for $n=15$. 
\end{abstract} \emph{Keywords\/}: Pancake problem, Burnt pancake problem, Permutations, Prefix reversals, Average-case analysis \section{Introduction} The pancake problem was first posed in \cite{Dweighter}. We are given a stack of pancakes each two of which have different sizes and our aim is to sort them in as few operations as possible to obtain a stack of pancakes with sizes increasing from top to bottom. The only allowed sorting operation is a ``spatula flip'', in which a spatula is inserted beneath an arbitrary pancake, all pancakes above the spatula are lifted and replaced in reverse order. We can see the stack as a permutation $\pi$. A flip is then a prefix reversal of the permutation. The set of all permutations on $n$ elements is denoted by $S_n$, $f(\pi)$ is the minimum number of flips needed to obtain $(1,2,3,\dots,n)$ from $\pi$ and \[ f(n) := \max_{\pi \in S_n}f(\pi). \] The exact values of $f(n)$ are known for all $n\leq 19$, see Table~\ref{table:val} for their list and references. In general $15\lfloor n/14\rfloor \leq f(n) \leq 18n/11 + O(1)$. The upper bound is due to Chitturi et al.~\cite{Chitturi+2008} and the lower bound was proved by Heydari and Sudborough~\cite{HeydariSudb}. These bounds improved the previous bounds $17n/16 \leq f(n) \leq (5n+5)/3$ due to Gates and Papadimitriou~\cite{GatesPapad}, where the upper bound was also independently found by Gy\"{o}ri and Tur\'{a}n~\cite{GyoriTuran}. A related problem in which the reversals are not restricted to intervals containing the first element received considerable attention in computational biology; see e.\ g.~\cite{Hayes2007}. A variation on the pancake problem is the burnt pancake problem in which pancakes are burnt on one of their sides. This time, the aim is not only to sort them by their sizes, but we also require that at the end, they all have their burnt sides down. 
Let $C=(\pi,v)$ denote a stack of $n$ burnt pancakes, where $\pi \in S_n$ is the permutation of the pancakes and $v \in \{0,1\}^n$ is the vector of their orientations ($v_i=0$ if the $i$-th pancake from top is oriented burnt side down). Pancake $i$ will be represented by $\bsd i$ if its burnt side is down and $\bsu i$ if up. Let \[ I_n= \left( \begin{array}{c} \bsd{1} \\ \bsd{2} \\ \vdots \\ \bsd{n} \end{array} \right) \qquad \text{and} \qquad -I_n= \left( \begin{array}{c} \bsu{1} \\ \bsu{2} \\ \vdots \\ \bsu{n} \end{array} \right). \] Let $g(C)$ be the minimum number of flips needed to obtain $I_n$ from $C$ and let \[ g(n) := \max_{\pi \in S_n, v \in \{0,1\}^n}g((\pi,v)). \] Exact values of $g(n)$ are known for all $n\leq 17$, see Table~\ref{table:val}. In 1979 Gates and Papadimitriou~\cite{GatesPapad} provided the bounds $3n/2-1 \leq g(n) \leq 2n+3$. Since then these were improved only slightly by Cohen and Blum~\cite{CohenBlum} to $3n/2 \leq g(n) \leq 2n-2$, where the upper bound holds for $n \geq 10$. The result $g(16)=26$ further improves the upper bound to $2n-6$ for $n\geq 16$. Cohen and Blum also conjectured that the maximum number of flips is always achieved for the stack $-I_n$. But we present two counterexamples with $n=15$ in Section~\ref{sec:comp}. The stack $-I_n$ can be sorted in $(3(n+1))/2$ flips for $n \equiv 3 \pmod 4$ and $n \geq 23$~\cite{HeydariSudb}. In Section~\ref{sec:lb} we present a new formula for determining a lower bound on the number of flips needed to sort a given stack of burnt pancakes. The highest value that this formula gives for a stack of $n$ pancakes is $\lfloor (3(n+1))/2 \rfloor$ for the stack $-I_n$. These bounds together with the known values of $g(-I_{15})$ and $g(-I_{19})$ give $g(-I_n)=(3(n+1))/2$ if $n \equiv 3\pmod 4$ and $n \geq 15$. 
\begin{table}[ht] \centering \begin{tabular}{r rl rl rl} $n$ & $f(n)$ & & $g(n)$ & & $g(-I_n)$ & \\ \hline 2 & 1 & \cite{Garey+1977} & 4 & \cite{CohenBlum} & 4 & \cite{CohenBlum} \\ 3 & 3 & \cite{Garey+1977} & 6 & \cite{CohenBlum} & 6 & \cite{CohenBlum} \\ 4 & 4 & \cite{Garey+1977} & 8 & \cite{CohenBlum} & 8 & \cite{CohenBlum} \\ 5 & 5 & \cite{Garey+1977} & 10 & \cite{CohenBlum} & 10 & \cite{CohenBlum} \\ 6 & 7 & \cite{Garey+1977} & 12 & \cite{CohenBlum} & 12 & \cite{CohenBlum} \\ 7 & 8 & \cite{Garey+1977} & 14 & \cite{CohenBlum} & 14 & \cite{CohenBlum} \\ 8 & 9 & \cite{Robbins1979} & 15 & \cite{CohenBlum} & 15 & \cite{CohenBlum} \\ 9 & 10 & \cite{Robbins1979} & 17 & \cite{CohenBlum} & 17 & \cite{CohenBlum} \\ 10& 11 & \cite{CohenBlum} & 18 & \cite{CohenBlum} & 18 & \cite{CohenBlum} \\ 11& 13 & \cite{CohenBlum} & 19 & \cite{Korf2008} & 19 & \cite{CohenBlum} \\ 12& 14 & \cite{HeydariSudb} & 21 & \cite{Korf2008} & 21 & \cite{CohenBlum} \\ 13& 15 & \cite{HeydariSudb} & 22 & Section~\ref{sec:comp} & 22 & \cite{CohenBlum} \\ 14& 16 & \cite{Kounoike+2005}& 23 & Section~\ref{sec:comp} & 23 & \cite{CohenBlum} \\ 15& 17 & \cite{Kounoike+2005}& 25 & Section~\ref{sec:comp} & 24 & \cite{CohenBlum} \\ 16& 18 & \cite{Asai+2006} & 26 & Section~\ref{sec:comp} & 26 & \cite{CohenBlum} \\ 17& 19 & \cite{Asai+2006} & 28 & Section~\ref{sec:comp} & 28 & \cite{CohenBlum} \\ 18& 20 & Section~\ref{sec:comp} & & & 29 & \cite{CohenBlum} \\ 19& 22 & Section~\ref{sec:comp} & & & 30 & Section~\ref{sec:comp} \\ 20& & & & & 32 & Section~\ref{sec:comp} \\ $n\equiv 3 \pmod 4$ & & & & & $\lfloor\frac{3n+3}{2}\rfloor$ & Corollary~\ref{cor:cbexact} \\ \end{tabular} \caption{Known values of $f(n)$, $g(n)$ and $g(-I_n)$} \label{table:val} \end{table} We present an algorithm that needs on average $7n/4 + O(1)$ flips to sort a stack of $n$ burnt pancakes and a randomized algorithm for sorting $n$ unburnt pancakes with $17n/12 + O(1)$ flips on average. 
We also show that any algorithm for the unburnt version requires on average at least $n-O(1)$ flips and in the burnt version $n+\Omega(n/\log n)$ flips are needed on average. Section~\ref{sec_concl} introduces a conjecture that the average number of flips of the optimal algorithm for sorting burnt pancakes is $n+\Theta(n/\log n)$. \section{Terminology and notation} The stack obtained by flipping the whole stack $C$ is $\flipped C$. The stack $-C$ is obtained from $C$ by changing the orientation of each pancake while keeping the order of pancakes. If two unburnt pancakes of consecutive sizes are located next to each other, they are \emph{adjacent}. Two burnt pancakes located next to each other are \emph{adjacent} if they form a substack of $I_n$ or of $\flipped{I_n}$. Two burnt pancakes located next to each other are \emph{anti-adjacent} if they form a substack of $-I_n$ or of $\flipped{-I_n}$. In both versions a \emph{block} in a stack $C$ is an inclusion-wise maximal substack $S$ of $C$ such that each two pancakes of $S$ on consecutive positions are adjacent. A substack $S$ of a stack $C$ with burnt pancakes is called a \emph{clan}, if $-S$ is a block in $-C$. A pancake not taking part in a block or a clan is \emph{free}. If the top $i$ pancakes are flipped, the flip is an \emph{$i$-flip}. \section{Lower bound in the burnt version} \label{sec:lb} \begin{theorem} \label{thm:blb} For each $n$ \[ g(-I_n) \geq \left\lfloor \frac{3(n + 1)}{2}\right\rfloor. \] \end{theorem} \begin{proof} {\ }\par The claim is easy to verify for $n \leq 2$, so we can assume $n \geq 3$. A block (clan) is called a \emph{surface block (clan)} if the topmost pancake is part of it, otherwise it is \emph{deep}. 
We will assign to each stack $C$ the value $v(C)$: \[ v(C) := a(C)-a^-(C) - \frac13 (b(C)-b^-(C)) + \frac13 (o(C)-o^-(C)) + l(C)-l^-(C) + \frac13 (ll(C)-ll^-(C)), \] where \begin{align*} a(C) &:= \text{number of adjacencies} \\ b(C) &:= \text{number of deep blocks} \\ o(C) &:= \left\{ \begin{array}{ll} 1 & \text{if the pancake on top of the stack is the free $\overline{1}$ or} \\ & \text{if $1$ is in a block (necessarily with $2$)} \\ 0 & \text{otherwise} \\ \end{array}\right. \\ l(C) &:= \left\{ \begin{array}{ll} 1 & \text{if the lowest pancake is $\underline{n}$} \\ 0 & \text{otherwise}\\ \end{array}\right. \\ ll(C) &:= \left\{ \begin{array}{ll} 1 & \text{if the lowest pancake is $\bsd n$ and the second lowest is $\bsd{n-1}$} \\ 0 & \text{otherwise}\\ \end{array}\right. \\ a^-(C) &:= a(-C) = \text{number of anti-adjacencies in $C$}\\ b^-(C) &:= b(-C) = \text{number of deep clans in $C$}\\ o^-(C) &:= o(-C) \\ l^-(C) &:= l(-C) \\ ll^-(C) &:= ll(-C). \\ \end{align*} \begin{lemma} \label{lem:blb} If $C$ and $C'$ are stacks of at least two pancakes and $C'$ can be obtained from $C$ by a single flip, then \[\Delta v := v(C') - v(C) \leq \frac43.\] Therefore the minimum number of flips needed to sort a stack $C$ is at least \[ \left\lceil \frac34 (v(I_n) - v(C)) \right\rceil .\] \end{lemma} \begin{proof} {\ }\par First we introduce notation for contributions of each of the functions to $\Delta v$: \begin{align*} \Delta a &:= a(C') - a(C) & \Delta a^- &:= -(a^-(C') - a^-(C)) \\ \Delta b &:= -\frac13 (b(C') - b(C)) & \Delta b^- &:= \frac13 (b^-(C') - b^-(C)) \\ \Delta o &:= \frac13 (o(C') - o(C)) & \Delta o^- &:= -\frac13 (o^-(C') - o^-(C)) \\ \Delta l &:= l(C') - l(C) & \Delta l^- &:= -(l^-(C') - l^-(C)) \\ \Delta ll &:= \frac13 (ll(C') - ll(C)) & \Delta ll^- &:= -\frac13 (ll^-(C') - ll^-(C)) \end{align*} \begin{observation} Values of $\Delta a$, $\Delta a^-$, $\Delta l$ and $\Delta l^-$ are among $\{0,1,-1\}$.
Values of $\Delta b$, $\Delta b^-$, $\Delta o$, $\Delta o^-$, $\Delta ll$ and $\Delta ll^-$ are among $\{0, 1/3, -1/3\}$. \end{observation} \begin{proof} The only nontrivial part is $\Delta b \leq 1/3$ and symmetrically $\Delta b^- \leq 1/3$. For contradiction suppose $\Delta b > 1/3$, which can only happen when one block was split to two free pancakes and another block became surface in a single flip. But the higher of the two pancakes that formed the split block will end on top of the stack after the flip. Therefore no block became surface. To show $\Delta b^- \leq 1/3$ we consider the flip $\phi: -C' \rightarrow -C$, for which \[ \frac13 \geq \Delta_{\phi}b = -\frac13 (b(-C)-b(-C')) = -\frac13 (b^-(C)-b^-(C')) = \frac13 (b^-(C')-b^-(C)) = \Delta b^-. \] \end{proof} {\ }\par The proof of the lemma is based on restricting possible combinations of values of the above defined functions. \begin{itemize} \item Both $\Delta l$ and $\Delta l^-$ are positive. This would require the pancake $n$ to be before and after the flip at the bottom of the stack each time with a different orientation. But this is not possible when $n > 1$. \item Exactly one of $\Delta l$ and $\Delta l^-$ is positive. The case $\Delta l^- > 0$ can be transformed to the case $\Delta l > 0$ by considering the flip $\phi: -C' \rightarrow -C$, for which \begin{align*} \Delta_{\phi} v &:= v(-C)-v(-C') = -v(C)-(-v(C')) = v(C')-v(C) = \Delta v, \\ \Delta_{\phi} l &:= l(-C)-l(-C') = l^-(C)-l^-(C') = -(l^-(C')-l^-(C)) = \Delta l^- ,\\ \Delta_{\phi} l^- &:= l^-(-C)-l^-(-C') = \Delta l. \end{align*} The equality $v(-C) = -v(C)$ follows from the definition of $v(C)$. If the value of $l$ changes, the flip must be an $n$-flip. Therefore $\Delta a = \Delta a^- = 0$. Because $\Delta l = 1$, the pancake $\bsd n$ has to be at the bottom of the stack after the flip, so $\Delta ll^- = 0$.
Moreover neither a clan nor the pancake $\bsd 1$ could be on top of the stack before the flip so $\Delta b^- \leq 0$ and $\Delta o^- \leq 0$. Because $\Delta ll = 1/3$ implies a block on top of the stack before the flip and $\Delta o = 1/3$ implies no block on top of the stack after the flip, we obtain \begin{align*} \Delta ll = \frac13 ~\&~ \Delta o \leq 0 &\Rightarrow \Delta b \leq 0, \\ \Delta ll \leq 0 ~\&~ \Delta o = \frac13 &\Rightarrow \Delta b \leq 0, \\ \Delta ll = \frac13 ~\&~ \Delta o =\frac13 &\Rightarrow \Delta b \leq -\frac13. \end{align*} In any of the cases $\Delta ll + \Delta o + \Delta b \leq 1/3$ and $\Delta v \leq 4/3$. From now on, we can assume $\Delta l, \Delta l^- \leq 0$. \item At least one of $\Delta ll$ and $\Delta ll^-$ is positive. If both of them were positive then again the pancake $n$ would be at the bottom of the stack before and after the flip, each time with a different orientation. Similarly to the previous case, we can choose $\Delta ll^-=0$ and $\Delta ll = 1/3$. Because $\Delta l \leq 0$, the last flip was an $(n-1)$-flip, the pancake at the bottom of the stack is $\bsd n$ and the pancake on top of the stack before the flip was $\bsu{(n-1)}$. Therefore $\Delta a = 1$, $\Delta a^- = 0$, $\Delta o^- \leq 0$ and $\Delta b^- \leq 0$. If pancake $n-1$ was part of a block before the flip, then this block became deep, otherwise pancakes $n-1$ and $n$ created a new deep block. Thus $\Delta b \leq 0$. No block was destroyed and if $\Delta o = 1/3$, then no block became surface and thus $\Delta b = -1/3$. All in all $\Delta v \leq 4/3$. In the remaining cases we have $\Delta l,~\Delta l^-,~\Delta ll,~\Delta ll^- \leq 0$. \item Both $\Delta o$ and $\Delta o^-$ are positive. Because $\Delta o^- > 0$, either 1 was in a clan or on top of the stack with burnt side down before the flip.
If 1 was in a clan, then a single flip would not make it either a part of a block or a free $\bsu 1$ on top of the stack and thus $\Delta o$ would not be positive. Using a similar reasoning for $\Delta o$, we obtain that the flip was a 1-flip, the topmost pancake before the flip was $\bsd 1$ and the second pancake from top is different from $2$. Thus $\Delta a = \Delta a^- = \Delta b = \Delta b^- = 0$ and $\Delta v \leq 2/3$. \item Exactly one of $\Delta o$ and $\Delta o^-$ is positive; without loss of generality it is $\Delta o$. This can happen only in two ways. \begin{itemize} \item We did an $i$-flip, the topmost pancake before the flip was $\bsd 2$ and the $(i+1)$-st pancake is $\bsu 1$. Then $\Delta a = 1$, $\Delta a^- = 0$, $\Delta b \leq 0$ and $\Delta b^- \leq 0$ and so $\Delta v \leq 4/3$. \item We did an $i$-flip, the $i$-th pancake before the flip was $\bsd 1$ and neither the $(i-1)$-st nor the $(i+1)$-st pancake was $\bsd 2$. Then $\Delta b \leq 0$ and $\Delta a^- \leq 0$. If $\Delta a \leq 0$, then $\Delta v \leq 2/3$, otherwise $\Delta b^- \leq 0$ and $\Delta v \leq 4/3$. \end{itemize} Now only $\Delta a, \Delta a^-, \Delta b$ and $\Delta b^-$ can be positive. \item If $\Delta a = \Delta a^- = 1$, then the flip was either \[ \left( \begin{array}{c} \bsu{i-1} \\ \vdots \\ \bsd{i+1} \\ \bsd{i} \\ \vdots \end{array} \right) \rightarrow \left( \begin{array}{c} \bsu{i+1} \\ \vdots \\ \bsd{i-1} \\ \bsd{i} \\ \vdots \end{array} \right) \text{\qquad, or \qquad} \left( \begin{array}{c} \bsd{i+1} \\ \vdots \\ \bsu{i-1} \\ \bsu{i} \\ \vdots \end{array} \right) \rightarrow \left( \begin{array}{c} \bsd{i-1} \\ \vdots \\ \bsu{i+1} \\ \bsu{i} \\ \vdots \end{array} \right). \] In both cases the topmost pancake before the flip was not part of a clan and the topmost pancake after the flip is not part of a block, so the number of deep blocks increased and the number of deep clans decreased and $\Delta v \leq 4/3$.
\item Exactly one of $\Delta a$ and $\Delta a^-$ is positive; without loss of generality $\Delta a = 1$, $\Delta a^- \leq 0$. Neither a new clan was created, nor became deep, so $\Delta b^- \leq 0$ and $\Delta v \leq 4/3$. \item None of $\Delta a$ and $\Delta a^-$ is positive, so $\Delta v \leq 2/3$. \end{itemize} \end{proof} It is easy to compute that $v(I_n)=n+2/3$ and $v(-I_n)=-n-2/3$ and thus the number of flips needed to transform $-I_n$ to $I_n$ is at least \[ \left\lceil \frac34 \left(v(I_n) - v(-I_n)\right) \right\rceil = \left\lceil \frac34 \left(2n+\frac43\right)\right\rceil = \left\lceil \frac32n + 1\right\rceil = \left\lfloor \frac{3(n + 1)}{2}\right\rfloor . \] \end{proof} \begin{corollary} \label{cor:cbexact} For all integers $n\geq 15$ with $n \equiv 3 \pmod 4$, \[ g(-I_n) = \left\lfloor \frac{3(n + 1)}{2}\right\rfloor. \] \end{corollary} \begin{proof} The lower bound comes from Theorem~\ref{thm:blb}. For all $n\geq 23$ with $n \equiv 3 \pmod 4$, the upper bound was proved by Heydari and Sudborough~\cite{HeydariSudb}. The exact value for $n=15$ was computed by Cohen and Blum~\cite{CohenBlum} and the exact value for $n=19$ is computed in Section~\ref{sec:comp}. \end{proof} \section{Algorithm for the burnt version} \label{sec:avb} In this section we will design an algorithm that sorts burnt pancakes with a small average number of flips. First we will show a lower bound on the average number of flips of any algorithm that sorts a stack of $n$ burnt pancakes. \begin{theorem} \label{thm:avgblb} Let $av_{opt}(n)$ be the average number of flips of the optimal algorithm for sorting a stack of $n$ burnt pancakes. For any $n\geq 16$ \[ av_{opt}(n) \geq n + \frac{n}{16\log_2 n} - \frac32. \] \end{theorem} \begin{proof} We will first count the expected number of adjacencies in a stack of $n$ burnt pancakes. A stack has $n-1$ pairs of pancakes on consecutive positions.
For each such pair of pancakes, there are $4 n (n-1)$ equally probable combinations of their values and orientations and the pancakes form an adjacency in exactly $2(n-1)$ of them. From the linearity of expectation \[\mathbb E[adj] = (n-1)\frac{1}{2n} = \frac12 \frac{n-1}{n}. \] Therefore at least half of the stacks have no adjacency. \begin{itemize} \item First we take a half of the stacks such that it contains all the stacks which have some adjacency. The stacks of this half have less than 1 adjacency on average. Each flip creates at most one adjacency, therefore when we want to obtain the stack $I_n$ with $n-1$ adjacencies, we need at least $n-2$ flips on average. \item The other half contains $n! \cdot 2^{n-1}$ stacks each with no adjacency, thus requiring at least $n-1$ flips. For each stack we take one of the shortest sequences of flips that create the stack from $I_n$ and call it the \emph{creating sequence} of the stack. Note that creating sequences of two different stacks are different. We will now count the number of different creating sequences of length at most $n-1+n/(4\log_2 n)$, which will give an upper bound on the number of stacks with no adjacency that can be sorted in $n-1+n/(4\log_2 n)$ flips. Shorter creating sequences will be followed by several 0-flips, therefore we will consider $n+1$ possible flips. A \emph{split-flip} is a flip in a creating sequence that decreases the number of adjacencies to a value smaller than the lowest value obtained before the flip. Therefore there are exactly $n-1$ split-flips in each of our creating sequences. In a creating sequence, the $i$-th split-flip removes one of $n-i$ existing adjacencies and therefore there are $n-i$ possibilities how to make the $i$-th split-flip. The number of different creating sequences of the above given length is at most \begin{align*} & \binom{n-1+\frac{n}{4\log_2 n}}{\frac{n}{4\log_2 n}}\cdot(n-1)!
\cdot (n+1)^{n/(4\log_2 n)} \\ & \leq \left({n-1+\frac{n}{4\log_2 n}}\right)^{n/(4\log_2 n)} \cdot (n-1)! \cdot (2n)^{n/(4\log_2 n)} \\ & \leq (n-1)! \cdot (2n)^{n/(4\log_2 n)} \cdot (2n)^{n/(4\log_2 n)} \\ & \leq (n-1)! \cdot \left(n^{5/4}\right)^{2n/(4\log_2 n)} \\ & \leq (n-1)! \cdot 2^{5n/8} \\ & < \frac14 n! \cdot 2^n. \end{align*} Thus at least half of the stacks with no adjacency need more than $n-1+n/(4\log_2 n)$ flips while the rest needs at least $n-1$ flips. Therefore in this case the average number of flips is at least \[ n-1+\frac{n}{8\log_2 n}. \] \end{itemize} The overall average number of flips is then \[ av_{opt}(n) \geq n - \frac32 + \frac{n}{16\log_2 n}. \] \end{proof} \begin{theorem} \label{thm:balgo} There exists an algorithm that sorts a stack of $n$ burnt pancakes with the average number of flips at most \[ \frac74 n + 5. \] \end{theorem} \begin{proof} Let $\mathbb C_n$ denote the set of all stacks of $n$ burnt pancakes, $h(C)$ will be the number of flips used by the algorithm to sort the stack $C$ and let \begin{align*} H(n) &:= \sum_{C \in \mathbb C_n} h(C), \\ av(n) &:= \frac{H(n)}{|\mathbb C_n|} = \frac{H(n)}{2n|\mathbb C_{n-1}|}. \end{align*} The algorithm will never break previously created adjacencies. This allows us to consider the adjacent pancakes as a single burnt pancake. In each iteration of the algorithm one adjacency is created, the two adjacent pancakes are contracted and the size of the stack decreases by one. We stop when the number of pancakes is two and the algorithm can transform the stack to the stack $(\bsd 1)$ in at most four flips. However for the simplicity of the discussion, we will not do such a contraction for adjacencies already existing in the input stack (as can be seen in the proof of Theorem~\ref{thm:avgblb}, there are very few such adjacencies, so the benefit would be negligible). One more simplification is used.
Before each iteration, the algorithm looks at the topmost pancake and cyclically renumbers the pancakes so as to have the topmost pancake numbered $2$ --- pancake number $j$ will become $j+s+kn$, where $s=(2-\pi(1))$ and $k$ is an integer chosen so as to have the result inside the interval $\{1,\dots,n\}$. Let $\mathbb C^2_{n}$ be the set of stacks with $n$ burnt pancakes and the pancake number 2 on top. When we end up with the stack $(\bsd 1)$, we in fact have \[ \left( \begin{array}{c} \bsd{i} \\ \bsd{i+1} \\ \vdots \\ \bsd{n} \\ \bsd{1}\\ \bsd{2}\\ \vdots \\ \bsd{i-1} \end{array} \right), \] for some $i \in \{1,2, \dots n \}$. This stack needs at most four more flips to become $I_n$. Therefore $av(2) \leq 8$. We will do four flips at the end even if they are not necessary. Then the number of flips will not be changed by a cyclic renumbering of pancakes and $H(n) = n \cdot \sum_{C \in \mathbb C^2_n} h(C)$. \begin{itemize} \item If the stack from $\mathbb C^2_{n}$ can be flipped so that the topmost pancake will form an adjacency, we will do it: \[ \left( \begin{array}{c} \bsd{2} \\ X \\ \bsu{1} \\ Y \end{array} \right) \rightarrow \left( \begin{array}{c} \flipped X \\ \bsu{2} \\ \bsu{1} \\ Y \end{array} \right) \Leftrightarrow \left( \begin{array}{c} X' \\ \bsu{1} \\ Y' \end{array} \right) \in \mathbb C_{n-1}, \] or \[ \left( \begin{array}{c} \bsu{2} \\ X \\ \bsd{3} \\ Y \end{array} \right) \rightarrow \left( \begin{array}{c} \flipped X \\ \bsd{2} \\ \bsd{3} \\ Y \end{array} \right) \Leftrightarrow \left( \begin{array}{c} X' \\ \bsd{2} \\ Y' \end{array} \right) \Leftrightarrow \left( \begin{array}{c} X'' \\ \bsd{1} \\ Y'' \end{array} \right) \in \mathbb C_{n-1}. \] Each stack from $\mathbb C_{n-1}$ appears as a result of the above described process for exactly one stack from $\mathbb C^2_{n}$. \item If no adjacency can be created in a single flip, we will look at both pancakes $1$ and $3$ and analyze all possible cases.
Note that this time when $2$ has its burnt side up, then $3$ has its burnt side up and similarly $\bsd 2$ implies $\bsd 1$. \begin{enumerate} \item \[ \left( \begin{array}{c} \bsd{2} \\ X \\ \bsd{1} \\ Y \\ \bsd{3} \\ Z \end{array} \right) \rightarrow \left( \begin{array}{c} \bsu{2} \\ X \\ \bsd{1} \\ Y \\ \bsd{3} \\ Z \end{array} \right) \rightarrow \left( \begin{array}{c} \flipped Y \\ \bsu{1} \\ \flipped X \\ \bsd{2} \\ \bsd{3} \\ Z \end{array} \right) \Leftrightarrow \left( \begin{array}{c} Y' \\ \bsu{1} \\ X' \\ \bsd{2} \\ Z' \end{array} \right) \in \mathbb C_{n-1} \] \item \[ \left( \begin{array}{c} \bsd{2} \\ X \\ \bsd{3} \\ Y \\ \bsd{1} \\ Z \end{array} \right) \rightarrow \left( \begin{array}{c} \bsu{2} \\ X \\ \bsd{3} \\ Y \\ \bsd{1} \\ Z \end{array} \right) \rightarrow \left( \begin{array}{c} \flipped X \\ \bsd{2}\\ \bsd{3} \\ Y \\ \bsd{1} \\ Z \end{array} \right) \Leftrightarrow \left( \begin{array}{c} X' \\ \bsd{2} \\ Y' \\ \bsd{1} \\ Z' \end{array} \right) \in \mathbb C_{n-1} \] \item \[ \left( \begin{array}{c} \bsd{2} \\ X \\ \bsd{1} \\ Y \\ \bsu{3} \\ Z \end{array} \right) \rightarrow \left( \begin{array}{c} \bsd{3} \\ \flipped Y \\ \bsu{1} \\ \flipped X \\ \bsu{2} \\ Z \end{array} \right) \rightarrow \left( \begin{array}{c} X \\ \bsd{1}\\ Y \\ \bsu{3} \\ \bsu{2} \\ Z \end{array} \right) \Leftrightarrow \left( \begin{array}{c} X' \\ \bsd{1} \\ Y' \\ \bsu{2} \\ Z' \end{array} \right) \in \mathbb C_{n-1} \] \item \[ \left( \begin{array}{c} \bsd{2} \\ X \\ \bsu{3} \\ Y \\ \bsd{1} \\ Z \end{array} \right) \rightarrow \left( \begin{array}{c} \bsd{3} \\ \flipped X \\ \bsu{2} \\ Y \\ \bsd{1} \\ Z \end{array} \right) \rightarrow \left( \begin{array}{c} X \\ \bsu{3}\\ \bsu{2} \\ Y \\ \bsd{1} \\ Z \end{array} \right) \Leftrightarrow \left( \begin{array}{c} X' \\ \bsu{2} \\ Y' \\ \bsd{1} \\ Z' \end{array} \right) \in \mathbb C_{n-1} \] \item \[ \left( \begin{array}{c} \bsu{2} \\ X \\ \bsu{3} \\ Y \\ \bsd{1} \\ Z \end{array}
\right) \rightarrow \left( \begin{array}{c} \bsu{1} \\ \flipped Y \\ \bsd{3} \\ \flipped X \\ \bsd{2} \\ Z \end{array} \right) \rightarrow \left( \begin{array}{c} X \\ \bsu{3} \\ Y \\ \bsd{1} \\ \bsd{2} \\ Z \end{array} \right) \Leftrightarrow \left( \begin{array}{c} X' \\ \bsu{2} \\ Y' \\ \bsd{1} \\ Z' \end{array} \right) \rightarrow \left( \begin{array}{c} \flipped Z' \\ \bsu{1} \\ \flipped Y' \\ \bsd{2} \\ \flipped X' \end{array} \right) \rightarrow \left( \begin{array}{c} Y' \\ \bsd{1} \\ Z' \\ \bsd{2} \\ \flipped X' \end{array} \right) \in \mathbb C_{n-1} \] \item \[ \left( \begin{array}{c} \bsu{2} \\ X \\ \bsd{1} \\ Y \\ \bsu{3} \\ Z \end{array} \right) \rightarrow \left( \begin{array}{c} \bsu{1} \\ \flipped X \\ \bsd{2} \\ Y \\ \bsu{3} \\ Z \end{array} \right) \rightarrow \left( \begin{array}{c} X \\ \bsd{1} \\ \bsd{2} \\ Y \\ \bsu{3} \\ Z \end{array} \right) \Leftrightarrow \left( \begin{array}{c} X' \\ \bsd{1} \\ Y' \\ \bsu{2} \\ Z' \end{array} \right) \rightarrow \left( \begin{array}{c} \flipped Z' \\ \bsd{2} \\ \flipped Y' \\ \bsu{1} \\ \flipped X' \end{array} \right) \rightarrow \left( \begin{array}{c} Y' \\ \bsu{2} \\ Z' \\ \bsu{1} \\ \flipped X' \end{array} \right) \in \mathbb C_{n-1} \] \item \[ \left( \begin{array}{c} \bsu{2} \\ X \\ \bsu{3} \\ Y \\ \bsu{1} \\ Z \end{array} \right) \rightarrow \left( \begin{array}{c} \bsd{2} \\ X \\ \bsu{3} \\ Y \\ \bsu{1} \\ Z \end{array} \right) \rightarrow \left( \begin{array}{c} \flipped Y \\ \bsd{3} \\ \flipped X \\ \bsu{2} \\ \bsu{1} \\ Z \end{array} \right) \Leftrightarrow \left( \begin{array}{c} Y' \\ \bsd{2} \\ X' \\ \bsu{1} \\ Z' \end{array} \right) \in \mathbb C_{n-1} \] \item \[ \left( \begin{array}{c} \bsu{2} \\ X \\ \bsu{1} \\ Y \\ \bsu{3} \\ Z \end{array} \right) \rightarrow \left( \begin{array}{c} \bsd{2} \\ X \\ \bsu{1} \\ Y \\ \bsu{3} \\ Z \end{array} \right) \rightarrow \left( \begin{array}{c} \flipped X \\ \bsu{2} \\ \bsu{1} \\ Y \\ \bsu{3} \\ Z
\end{array} \right) \Leftrightarrow \left( \begin{array}{c} X' \\ \bsu{1} \\ Y' \\ \bsu{2} \\ Z' \end{array} \right) \in \mathbb C_{n-1} \] \end{enumerate} Again each stack from $\mathbb C_{n-1}$ appears as a result of the process for exactly one stack from $\mathbb C^2_{n}$, but we needed two additional flips in two of the cases to ensure this. We did four flips in a quarter of the cases and two flips in all other cases. Each case has the same probability and hence the average number of flips is $5/2$. \end{itemize} All in all \begin{align*} H(n) &= n \cdot \left(\sum_{C \in \mathbb C_{n-1}} (h(C)+1) + \sum_{C \in \mathbb C_{n-1}} \left(h(C)+\frac52 \right) \right) = 2n H(n-1) + \frac72 n |\mathbb C_{n-1}|, \\ av(n) &= \frac{2nH(n-1) + \frac72 n |\mathbb C_{n-1}|}{2n|\mathbb C_{n-1}|} = av(n-1) + \frac74 = av(2) + \frac74(n-2) \leq \frac74 n + 5. \end{align*} \end{proof} \section{Randomized algorithm for the unburnt version} \label{sec:avu} \begin{observation} Let $av'_{opt}(n,0)$ be the average number of flips of the optimal algorithm for sorting a stack of $n$ unburnt pancakes. For any positive $n$ \[ av'_{opt}(n,0) \geq n-2. \] \end{observation} \begin{proof} We will now count the expected number of adjacencies in a stack of $n$ pancakes. For the purpose of this proof we will consider the pancake number $n$ at the bottom of the stack as an additional adjacency; this has probability $1/n$. Pancakes on consecutive positions form an adjacency if their values differ by $1$; the probability of this is $2/n$. Therefore the expected number of adjacencies is \[\mathbb E[adj] = \frac{1}{n} + (n-1)\frac{2}{n} < 2. \] Each flip creates at most one adjacency, therefore when we want to obtain the stack $I_n$ with $n$ adjacencies, the average number of flips is at least $n-2$.
\end{proof} \begin{theorem} \label{thm:ualgo} There exists a randomized algorithm that sorts a stack of $n$ unburnt pancakes with the average number of flips at most \[ \frac{17}{12}n + 9, \] where the average is taken both over the stacks and the random bits. \end{theorem} \begin{proof} If two pancakes become adjacent, we contract them to a single burnt pancake; its burnt side will be the one where the pancake with higher number was. Therefore in the course of the algorithm, some of the pancakes will be burnt and some unburnt. For this reason we say that two pancakes are \emph{adjacent} if the unburnt ones of them can be oriented so that the two resulting pancakes satisfy the definition of adjacency for burnt pancakes. Let $\mathbb U_{n,b}$ denote the set of all stacks of $n$ pancakes $b$ of which are burnt and let $\mathbb U^{2}_{n,b}$ be the stacks from $\mathbb U_{n,b}$ with the pancake number 2 on top. Let $k(C)$ be the number of flips needed by the algorithm to sort the stack $C$ and let \begin{align*} K(n,b) &:= \sum_{C \in \mathbb U_{n,b}} k(C), \\ av'(n,b) &:= \frac{K(n,b)}{|\mathbb U_{n,b}|}. \end{align*} When there are only two pancakes left, we can sort the stack in at most 4 flips. Similarly to the burnt version, we will sometimes cyclically renumber the pancakes. After renumbering them back at the end, we will do 4 flips to get the sorted stack. Therefore $av'(1,0) = av'(1,1) = 4$, $av'(2,b) \leq 8$ for any $b \in \{0,1,2\}$ and $K(n,b) = n \cdot \sum_{C \in \mathbb U^{2}_{n,b}} k(C)$. The algorithm first cyclically renumbers the pancakes so as to have the topmost pancake numbered 2 thus obtaining a stack from $\mathbb U^{2}_{n,b}$. Then we look at the topmost pancake. If it is unburnt, we uniformly at random select whether to look at 1 or 3; if it is burnt and the burnt side is down, we look at 1 and in the case when the burnt side is up, we look at 3. Notice that we could also look at both pancakes 1 and 3.
But if we joined only two of the pancakes 1, 2 and 3 we would have to count the average number of flips for each combination not only of the number of pancakes and the number of burnt pancakes, but also of the number of pairs of pancakes of consecutive sizes exactly one of which is burnt. This would make the calculations too complicated. We could also join all three of them, but this would lead to a worse result. \begin{enumerate}[I.] \item \label{case1} Both the pancakes we looked at are unburnt. The set of such stacks is $\mathbb U^{2,\ref{case1}}_{n,b}$. Note that stacks with pancake 2 unburnt and exactly one of pancakes 1 and 3 unburnt belong to this set with weight $50\%$ --- with $50\%$ probability, we choose to look at the unburnt pancake. Let $av'_{\ref{case1}}(n,b)$ be the weighted average number of flips used by the algorithm to sort a stack from $\mathbb U^{2,\ref{case1}}_{n,b}$, where the weight is the ratio with which the stack belongs to $\mathbb U^{2,\ref{case1}}_{n,b}$. \[ \left( \begin{array}{c} 2 \\ X \\ 1 \\ Y \end{array} \right) \rightarrow \left( \begin{array}{c} \flipped X \\ 2 \\ 1 \\ Y \end{array} \right) \Leftrightarrow \left( \begin{array}{c} X' \\ \bsu{1} \\ Y' \end{array} \right) \in \mathbb U_{n-1,b+1} \] \[ \left( \begin{array}{c} 2 \\ X \\ 3 \\ Y \end{array} \right) \rightarrow \left( \begin{array}{c} \flipped X \\ 2 \\ 3 \\ Y \end{array} \right) \Leftrightarrow \left( \begin{array}{c} X' \\ \bsd{2} \\ Y' \end{array} \right) \Leftrightarrow \left( \begin{array}{c} X'' \\ \bsd{1} \\ Y'' \end{array} \right) \in \mathbb U_{n-1,b+1} \] For each stack from $\mathbb U_{n-1,b+1}$ there are exactly $b+1$ its cyclic renumberings each appearing as a result with a $50\%$ probability. Thus we can compute the average number of flips in this case: \[ av'_{\ref{case1}}(n,b) = av'(n-1,b+1)+1 . \] \item \label{case2} The topmost pancake is unburnt, while the other pancake we looked at is burnt.
\[ \left( \begin{array}{c} 2 \\ X \\ \bsu{1} \\ Y \end{array} \right) \rightarrow \left( \begin{array}{c} \flipped X \\ 2 \\ \bsu{1} \\ Y \end{array} \right) \Leftrightarrow \left( \begin{array}{c} X' \\ \bsu{1} \\ Y' \end{array} \right) \in \mathbb U_{n-1,b} \] \[ \left( \begin{array}{c} 2 \\ X \\ \bsd{1} \\ Y \end{array} \right) \rightarrow \left( \begin{array}{c} \bsu{1} \\ \flipped X \\ 2 \\ Y \end{array} \right) \rightarrow \left( \begin{array}{c} X \\ \bsd{1} \\ 2 \\ Y \end{array} \right) \Leftrightarrow \left( \begin{array}{c} X' \\ \bsd{1} \\ Y' \end{array} \right) \in \mathbb U_{n-1,b} \] The case when we looked at pancake $3$ is similar, so we can conclude that \[ av'_{\ref{case2}}(n,b) = av'(n-1,b) + \frac32 . \] \item \label{case3} The topmost pancake is burnt, while the other one we looked at is unburnt. \[ \left( \begin{array}{c} \bsu{2} \\ X \\ 3 \\ Y \end{array} \right) \rightarrow \left( \begin{array}{c} \flipped X \\ \bsd{2} \\ 3 \\ Y \end{array} \right) \Leftrightarrow \left( \begin{array}{c} X' \\ \bsd{2} \\ Y' \end{array} \right) \Leftrightarrow \left( \begin{array}{c} X'' \\ \bsd{1} \\ Y'' \end{array} \right) \in \mathbb U_{n-1,b} \] \[ \left( \begin{array}{c} \bsd{2} \\ X \\ 1 \\ Y \end{array} \right) \rightarrow \left( \begin{array}{c} \flipped X \\ \bsu{2} \\ 1 \\ Y \end{array} \right) \Leftrightarrow \left( \begin{array}{c} X'' \\ \bsu{1} \\ Y'' \end{array} \right) \in \mathbb U_{n-1,b} \] Each stack from $\mathbb U_{n-1,b}$ appears as a result exactly once for $b$ its cyclic renumberings. Therefore \[ av'_{\ref{case3}}(n,b) = av'(n-1,b) + 1 . \] \item \label{case4} Both the pancakes we looked at are burnt.
In half of the cases the two pancakes can be joined in a single flip: \[ \left( \begin{array}{c} \bsu{2} \\ X \\ \bsd{3} \\ Y \end{array} \right) \rightarrow \left( \begin{array}{c} \flipped X \\ \bsd{2} \\ \bsd{3} \\ Y \end{array} \right) \Leftrightarrow \left( \begin{array}{c} X' \\ \bsd{2} \\ Y' \end{array} \right) \Leftrightarrow \left( \begin{array}{c} X'' \\ \bsd{1} \\ Y'' \end{array} \right) \in \mathbb U_{n-1,b-1} \] \[ \left( \begin{array}{c} \bsd{2} \\ X \\ \bsu{1} \\ Y \end{array} \right) \rightarrow \left( \begin{array}{c} \flipped X \\ \bsu{2} \\ \bsu{1} \\ Y \end{array} \right) \Leftrightarrow \left( \begin{array}{c} X'' \\ \bsu{1} \\ Y'' \end{array} \right) \in \mathbb U_{n-1,b-1} \] Otherwise we need three flips to join the two pancakes: \[ \left( \begin{array}{c} \bsu{2} \\ X \\ \bsu{3} \\ Y \end{array} \right) \rightarrow \left( \begin{array}{c} \bsd{2} \\ X \\ \bsu{3} \\ Y \end{array} \right) \rightarrow \left( \begin{array}{c} \bsd{3} \\ \flipped X \\ \bsu{2} \\ Y \end{array} \right) \rightarrow \left( \begin{array}{c} X \\ \bsu{3} \\ \bsu{2} \\ Y \end{array} \right) \Leftrightarrow \left( \begin{array}{c} X' \\ \bsu{2} \\ Y' \end{array} \right) \Leftrightarrow \left( \begin{array}{c} X'' \\ \bsu{1} \\ Y'' \end{array} \right) \in \mathbb U_{n-1,b-1} \] \[ \left( \begin{array}{c} \bsd{2} \\ X \\ \bsd{1} \\ Y \end{array} \right) \rightarrow \left( \begin{array}{c} \bsu{2} \\ X \\ \bsd{1} \\ Y \end{array} \right) \rightarrow \left( \begin{array}{c} \bsu{1} \\ \flipped X \\ \bsd{2} \\ Y \end{array} \right) \rightarrow \left( \begin{array}{c} X \\ \bsd{1} \\ \bsd{2} \\ Y \end{array} \right) \Leftrightarrow \left( \begin{array}{c} X'' \\ \bsd{1} \\ Y'' \end{array} \right) \in \mathbb U_{n-1,b-1} \] Altogether \[ av'_{\ref{case4}}(n,b) = av'(n-1,b-1) + 2.
\] \end{enumerate} After summing up all the above average numbers of flips multiplied by their probabilities, we obtain: \begin{itemize} \item For $1\leq b < n$ \begin{align*} av'(n,b) =& \frac{(n-b)(n-b-1)}{n(n-1)} av'_{\ref{case1}}(n,b) + \frac{(n-b)b} {n(n-1)} \left( av'_{\ref{case2}}(n,b) + av'_{\ref{case3}}(n,b) \right) + \\ &+ \frac {b(b-1)}{n(n-1)}av'_{\ref{case4}}(n,b) = \\ =& \frac{(n-b)(n-b-1)}{n(n-1)} (1 + av'(n-1,b+1)) + \\ &+ 2\frac{(n-b)b} {n(n-1)} \left( \frac54 + av'(n-1,b) \right) + \frac {b(b-1)}{n(n-1)} \left( 2+av'(n-1,b-1) \right) . \end{align*} \item For $b=0$ \[ av'(n,0) = \frac{n(n-1)}{n(n-1)} av'_{\ref{case1}}(n,0) = 1 + av'(n-1,1). \] \item For $b=n$ \[ av'(n,n) = \frac {n(n-1)}{n(n-1)}av'_{\ref{case4}}(n,n) = 2 + av'(n-1,n-1). \] \end{itemize} Instead of solving these recurrence formulas, we will use them to bound $av'(n,b)$ from above by the following function: \[ av^+(n,b) := \frac{17}{12}n + \frac{7}{12}b - \frac16 \frac{(n-b+1)b}{n} + 9. \] \begin{lemma} For any nonnegative $n$ and $b$, such that $b$ is not greater than $n$ \[av^+(n,b)\geq av'(n,b).\] \end{lemma} \begin{proof} We will use induction on the number of pancakes. \begin{itemize} \item For $n=1$ we have $av'(1,b)=4$ and it is easy to verify that the lemma holds. \item If $b=0$, then the induction hypothesis gives \begin{align*} av'(n,0) &= 1 + av'(n-1,1) \leq 1 + av^+(n-1,1) = \\ &= 1 + \frac{17}{12}(n-1) + \frac{7}{12} - \frac16 \frac{n-1}{n-1} + 9 = \frac{17}{12}n + 9 = av^+(n,0). \end{align*} \item For $b=n$ we get \begin{align*} av'(n,n) &= 2 + av'(n-1,n-1) \leq 2 + av^+(n-1,n-1) = \\ &= 2 + \frac{17}{12}(n-1) + \frac{7}{12}(n-1) - \frac16 + 9 = \frac{17}{12}n + \frac{7}{12}n - \frac16 + 9 = av^+(n,n).
\end{align*} \item In the case $1 \leq b < n$ \begin{align*} n (n-1)&(av^+(n,b) - av'(n,b)) \\ \geq &n(n-1)av^+(n,b) - (n-b)(n-b-1) (1 + av^+(n-1,b+1)) \\ &- 2(n-b)b \left( \frac54 + av^+(n-1,b) \right) - b(b-1) \left( 2+av^+(n-1,b-1) \right) \\ = & \frac{b}{n-1}\left(\frac13 n - \frac13 b\right) > 0. \end{align*} \end{itemize} \end{proof} Therefore $av^+(n,b) \geq av'(n,b)$ and thus \[ av'(n,0) \leq av^+(n,0) = \frac{17}{12}n + 9. \] \end{proof} \section{Computational results} \label{sec:comp} Computer search found the following sequence of 30 flips that sorts the stack $-I_{19}$: (19, 14, 7, 4, 10, 18, 6, 4, 10, 19, 14, 4, 9, 11, 8, 18, 8, 11, 9, 4, 14, 19, 10, 4, 6, 18, 10, 4, 7, 14). Thus, using Theorem~\ref{thm:blb}, $g(-I_{19}) = 30$. We also computed $g(-I_{20}) = 32$: From~\cite[Theorem 7]{CohenBlum}: $g(-I_{20}) \leq g(-I_{19}) + 2 = 32$. From Theorem~\ref{thm:blb}: $g(-I_{20}) \geq 31$ and from Lemma~\ref{lem:blb} it follows that if $g(-I_{20}) = 31$, then each flip of the optimal sorting sequence increases the value of the function $v$ by $4/3$. But computer search revealed that starting at $-I_{20}$ we can make a sequence of only at most 29 such flips. The values $f(18)=20$ and $f(19)=22$ were computed by the method of Kounoike et al.~\cite{Kounoike+2005} and Asai et al.~\cite{Asai+2006}. It is an improvement of the method of Heydari and Sudborough~\cite{HeydariSudb}. Let $\mathbb U_{n}^{m}$ be the set of stacks of $n$ unburnt pancakes requiring $m$ flips to sort. For every stack $U \in \mathbb U_{n}^{m}$, $2$ flips always suffice to move the largest pancake to the bottom of the stack, obtaining stack $U'$. From then on, it never helps to move the largest pancake. Therefore $U'$ requires exactly the same number of flips as $U''$ obtained from $U'$ by removing the largest pancake and thus $U''$ requires at least $m-2$ flips.
To determine $\mathbb U_{n}^{i}$ for all $i \in \{m, m+1,\dots, f(n)\}$, it is thus enough to consider the set $\cup_{m'=m-2}^{f(n-1)}\mathbb U_{n-1}^{m'}$. In each stack from this set, we try adding the pancake number $n$ to the bottom, flipping the whole stack and trying every possible flip. The candidate set composed of the resulting and the intermediate stacks contains all the stacks from $\cup_{i=m}^{f(n)}\mathbb U_{n}^{i}$. Now it remains to determine the value of $f(U)$ for each stack $U$ in the candidate set. As in~\cite{Kounoike+2005} and~\cite{Asai+2006}, this is done using the A* search. During the A* search, we need to compute a lower bound on the number of flips needed to sort a stack. It is counted differently than in~\cite{Kounoike+2005} and~\cite{Asai+2006}: We try all possible sequences of flips that create an adjacency in every flip. If some such sequence sorts the stack, it is optimal and we are done. Otherwise, we obtain a lower bound equal to the number of adjacencies that are needed to be made plus 1 (here we count pancake $n$ at the bottom of the stack as an adjacency). In addition, we also use a heuristic to compute an upper bound. If the upper bound is equal to the lower bound they give the exact number of flips. \begin{table}[ht] \centering \begin{tabular}{|rrr|rrr|rrr|} \hline \hline $n$ & $m$ & $ |\mathbb U_{n}^{m}| $ & $n$ & $m$ & $ |\mathbb U_{n}^{m}| $ & $n$ & $m$ & $ |\mathbb U_{n}^{m}| $ \\ \hline 14 & 13 & 30,330,792,508 & 15 & 15 & 310,592,646,490 & 16 & 17 & 756,129,138,051 \\ 14 & 14 & 20,584,311,501 & 15 & 16 & 45,016,055,055 & 16 & 18 & 4,646,117 \\ 14 & 15 & 2,824,234,896 & 15 & 17 & 339,220 & 17 & 19 & 65,758,725 \\ 14 & 16 & 24,974 & & & & & & \\ \hline \end{tabular} \caption{numbers of stacks of $n$ unburnt pancakes requiring $m$ flips to sort} \label{tab:fnsizes} \end{table} Sizes of the computed sets $\mathbb U_{n}^{m}$ can be found in Table~\ref{tab:fnsizes}.
It was previously known~\cite{HeydariSudb} that $f(18)\geq 20$ and $f(19)\geq 22$. No candidate stack of $18$ pancakes needed $21$ flips, thus $f(18)=20$. Then $f(19)=22$ because $f(19)\leq f(18)+2 = 22$. The following modification of this method was also used to compute the values of $g(n)$ up to $n=17$. Again, $\mathbb C_{n}^{m}$, the set of stacks of $n$ burnt pancakes requiring $m$ flips, is determined from the set $\cup_{m'=m-2}^{g(n-1)}\mathbb C_{n-1}^{m'}$, but in a slightly different way. In every stack of $n$ burnt pancakes other than $-I_n$ (which must be treated separately), some two pancakes can be joined in two flips~\cite[Theorem 1]{CohenBlum}. We will now show that the two adjacent pancakes can be contracted to a single pancake, which decreases the size of the stack. The reverse process is again used to determine the stacks of the candidate set, which are then processed by the A* search. \begin{lemma} \label{lemma:bcontr} Let $C$ be a stack of burnt pancakes with a pair $(p_1, p_2)$ of adjacent pancakes and let $C'$ be obtained from $C$ by contracting the two adjacent pancakes to a single pancake $p$. Then $C$ can be sorted in exactly the same number of flips as $C'$. \end{lemma} \begin{proof} If we can sort $C'$ in $m$ steps, we can sort $C$ in $m$ steps as well --- we do the flips below the same pancakes as in an optimal sorting sequence for $C'$. Flips in $C'$ below $p$ are performed below the lower of $p_1, p_2$ in $C$. The stack $C'$ can be also obtained from $C$ by removing one of the two adjacent pancakes. Then we can sort $C'$ by doing the flips below the same pancakes as in a sorting sequence for $C$. Flips in $C$ below the removed pancake are performed in $C'$ below the pancake above it. \end{proof} During the A* search, we compute two lower bounds and take the larger one. One lower bound is computed from the formula in Lemma~\ref{lem:blb}.
To compute the other lower bound, we try all possible sequences of flips that create an adjacency in all but at most two flips. If no such sequence sorts the stack, we obtain a lower bound equal to the number of adjacencies that are needed to be made plus 3. In the stacks visited during the A* search, we can contract a block to a single burnt pancake thanks to Lemma~\ref{lemma:bcontr}. If, after the contraction of blocks, the stack has at most nine pancakes, we look up the exact number of flips in a table previously computed by a breadth-first search starting at $I_9$. \begin{table}[ht] \centering \begin{tabular}{|rrr|rrr|rrr|rrr|} \hline \hline $n$ & $m$ & $ |\mathbb C_{n}^{m}| $ & $n$ & $m$ & $ |\mathbb C_{n}^{m}| $ & $n$ & $m$ & $ |\mathbb C_{n}^{m}| $ & $n$ & $m$ & $ |\mathbb C_{n}^{m}| $\\ \hline 10 & 15 & 22,703,532 & 11 & 17 & 5,928,175 & 12 & 19 & 344,884 & 13 & 21 & 15,675 \\ 10 & 16 & 179,828 & 11 & 18 & 10,480 & 12 & 20 & 265 & 13 & 22 & 4 \\ 10 & 17 & 523 & 11 & 19 & 36 & 12 & 21 & 1 & 14 & 23 & 122 \\ 10 & 18 & 1 & & & & & & & 15 & 25 & 2 \\ \hline \end{tabular} \caption{numbers of stacks of $n$ burnt pancakes requiring $m$ flips to sort} \label{tab:gnsizes} \end{table} Sizes of the computed sets $\mathbb C_{n}^{m}$ can be found in Table~\ref{tab:gnsizes}. No stack of 16 pancakes needs 27 flips, thus $g(16)=26$ because $g(-I_{16})=26$. Then $g(17)=28$ because $g(-I_{17})=28$ and $g(17)\leq g(16)+2 = 28$~\cite[Theorem 8]{CohenBlum}. The stack obtained from $-I_n$ by flipping the topmost pancake is known as $J_n$~\cite{CohenBlum}. Let $Y_n$ be the stack obtained from $-I_n$ by changing the orientation of the second pancake from the bottom. The two found stacks of 15 pancakes requiring 25 flips are $J_{15}$ and $Y_{15}$ and they are the first known counterexamples to the Cohen-Blum conjecture, which claimed that for every $n$, $-I_n$ requires the largest number of flips among all stacks of $n$ pancakes.
However, no other $J_n$ or $Y_n$ with $n\leq 20$ is a counterexample to the conjecture. The majority of the computations were done on computers of the CESNET METACentrum grid. Some of the computations also took place on computers at the Department of Applied Mathematics of Charles University in Prague. Data and source codes of programs mentioned above can be downloaded from the following webpage: \url{http://kam.mff.cuni.cz/~cibulka/pancakes}. \section{Conclusions} \label{sec_concl} Although the two algorithms presented in Sections~\ref{sec:avb}~and~\ref{sec:avu} have a good guaranteed average number of flips, experimental results show that both of them are often outperformed by the corresponding algorithms of Gates and Papadimitriou. The average numbers of flips of the two new algorithms are very near to their upper bounds calculated in Theorems~\ref{thm:balgo}~and~\ref{thm:ualgo} and the averages for the algorithms of Gates and Papadimitriou are in Table~\ref{tab:experiment}. We will now design one more polynomial-time algorithm for the burnt version, for which no guarantee of the average number of flips will be given, but its experimental results are close to the lower bound from Theorem~\ref{thm:avgblb}. Call a sequence of flips, each of which creates an adjacency, a \emph{greedy sequence}. Note that since we are in the burnt version, there is always at most one possible flip that creates a new adjacency. In a random stack the probability that we can join the pancake on top in a single flip is $50\%$, therefore starting from a random stack, we can perform a greedy sequence of length $\log_2 n$ with probability roughly $1/n$. The idea of the algorithm is that whenever we cannot create an adjacency in a single flip, we try all $n$ possible flips and do the one that can be followed by the longest greedy sequence. As in the previous algorithms, two adjacent pancakes are contracted to a single pancake.
Pancakes $1$ and $n$ can create an adjacency ($1$ is viewed as $(n+1)\bmod n$). Therefore when the algorithm obtains the stack $(\bsd 1)$ we need at most four more flips. In Table~\ref{tab:experiment}, $n$ is the size of a stack, $s_{GP}$ is the average number of flips used by the algorithm of Gates and Papadimitriou to sort a randomly generated stack of $n$ unburnt pancakes, $s_{GPB}$ is the average number of flips used by the algorithm of Gates and Papadimitriou for the burnt version and $s_N$ is the average number of flips of the algorithm described in this section. \begin{table}[ht] \centering \begin{tabular}{|rrrrrr|} \hline \hline $n$ & $s_{GP}$ & $s_{GPB}$ & $s_N$ & $n+n/\log_2 n$ & stacks generated\\ \hline 10 & 11.129 & 15.383 & 14.935 & 13.010 & 1000000\\ 100 & 122.925 & 150.887 & 123.463 & 115.051 & 100000\\ 1000 & 1240.949 & 1502.926 & 1127.901 & 1100.343 & 10000\\ 10000 & 12408.686 & 15002.212 & 10863.502 & 10752.570 & 1000\\ 100000 & 124115.000 & 150063.000 & 106608.900 & 106220.600 & 10\\ 1000000& 1241263.600 & 1499875.600 & 1053866.000& 1050171.666 & 5\\ \hline \end{tabular} \caption{experimental results of algorithms} \label{tab:experiment} \end{table} The experimental results together with Theorem~\ref{thm:avgblb} support the following conjecture. \begin{conjecture} The average number of flips of the optimal algorithm for sorting burnt pancakes satisfies \[ av_{opt}(n) = n+\Theta\left(\frac{n}{\log n}\right). \] \end{conjecture} \end{document}
\begin{document} \title{Stabilities of one-dimensional stationary states of Bose-Einstein condensates} \begin{abstract} We explore the dynamical stabilities of a quasi-one dimensional (1D) Bose-Einstein condensate (BEC) consisting of fixed $N$ atoms with time-independent external potential. For the stationary states with zero flow density the general solution of the perturbed time evolution equation is constructed, and the stability criterions concerning the initial conditions and system parameters are established. Taking the lattice potential case as an example, the stability and instability regions on the parameter space are found. The results suggest a method for selecting experimental parameters and adjusting initial conditions to suppress the instabilities. \textbf{Keywords}: Bose-Einstein condensate, Lyapunov stability, stability criterion, stability region, lattice potential PACS numbers: 03.75.Kk, 32.80.Pj, 03.75.Lm, 05.45.-a \end{abstract} \section{Introduction} Experimental observation of atomic gas Bose-Einstein condensates (BECs) has caused significant stimulation to the study of macroscopic quantum phenomena with nonlinearity. In the mean field regime where the BECs are governed by the Gross-Pitaevskii equations (GPE), the BEC of a stationary state can be observed carefully in experiments only for the stable solutions of GPE. For the purpose of applications, the studies on the stability and instability of the solutions of GPE are necessary and important \cite{Chin}-\cite{Saito}. Recently, the instabilities of BECs have attracted much interest and the corresponding experimental \cite{Chin, Fallani, Burger} and theoretical \cite{Zheng}-\cite{FAbdullaev} works were reported for various BEC systems. Several different definitions such as the Landau instability \cite{Wu, Machholm}, dynamical instability \cite{Bronski}, quantum instability \cite{Shchesnovich}, parametric instability \cite{Genkin} and modulational instability \cite{Konotop} were employed. 
The used research methods concerned the characteristic exponent technique \cite{Zheng}, Gaussian variational approach \cite{Abdullaev}, and the numerical simulations to the partial differential equations \cite{Wu, Bronski, Deconinck}. The reported results showed that the instabilities are associated with the BEC collapse \cite{Konotop, Kagan}, implosion and chaos \cite{Saito2} - \cite{Xia}, dynamical superfluid-insulator transition \cite{Smerzi}, and the formation and evolution of the matter-wave bright solitons \cite{Strecker, Carr, Salasnich}. In order to stabilize the BECs \cite{Saito}, some stability criteria \cite{Berge} and parameter regions \cite{Zheng, Wu, Luo, Montina} were demonstrated. Most of the works focus in the stabilities under random perturbations. Experimentally \cite{Burger} and theoretically \cite{Wu} investigating the stabilities under the controllable perturbations has also become a challenging problem. In the sense of Lyapunov, the instability entails that the initially small deviations from the unperturbed state grow without upper limit. We shall restrict the dynamical instability to the particular case of nonzero characteristic exponents such that the minor deviations from the unperturbed state grow exponentially fast \cite{Wu, Bronski}. All of the above-mentioned investigations on the dynamical stabilities and instabilities are based on such a type of instability. By the control of instability we mean to induce the transitions from unstable states to stable ones. Realization of the control needs selecting the system parameters to enter the stability regions, or initially using a controllable perturbation as a control signal to suppress the growth of perturbed solutions. Any experiment always contains a degree of noise, that leads to the random perturbations to the system. Therefore, in order to suppress the known unstable motions, we have to initially adjust the system by using the control signal being stronger than the noise. 
In the previous work, we have investigated the stabilities of BECs for the time-dependent chaotic states \cite{whai, Chong} and dissipative cases \cite{Luo}. In this paper, we shall consider the dynamical stability of the stationary states for a quasi-1D BEC consisting of fixed $N$ atoms with time-independent external potential and atomic scattering length. It will be demonstrated that for the case of zero flow density the bounded perturbed solutions depend on the external potential, condensed atom number, and the initial disturbances. The dependence implies that the stationary state of BEC is certainly stable only for the given parameter region and the possible instability can be suppressed by some initial adjustments. We take the BECs held in an optical lattice as an exemplification to illustrate the results on the stability, instability and undetermined stability. The results contain the known analytical assertions for the optical potential case \cite{Wu, Bronski} and supply a method for selecting experimental parameters and adjusting initial conditions to establish the stable motions of BEC. \section{Linearized equations and their solutions in the case of zero flow density} We start with the dimensionless quasi-1D GPE \cite{Bronski,Dalfovo, Leggett} \begin{eqnarray} i \psi_t=- \frac 1 2 \psi_{xx} + [V(x) +g_{1}|\psi|^2]\psi, \end{eqnarray} where the suitable units with $\hbar=m=1$ have been considered, $V(x)$ denotes the external potential, the quasi-1D interaction intensity $g_{1}$ is related to the $s$-wave scattering length $a_s$, atomic mass $m$ and the transverse trap frequency $\omega_r$ \cite{Gardiner, Hai2} for the normalized wave-function $\psi$ with norm $|\psi|^2$ being the linear density of atomic number \cite{Bronski, Leggett}. It is well known that different solutions of a nonlinear equation may possess different stabilities. 
Here we study stability only for the stationary state solution of the form \begin{eqnarray} \psi_0=R(x)\exp [i\theta(x)-i\mu t], \end{eqnarray} where $\mu$ is the chemical potential, $R(x)$ and $\theta(x)$ represent the module and phase, which are both the real functions. In the considered units, the phase gradient $\theta_x$ is equal to the flow velocity field. Given the module, we define the useful Hermitian operators \cite{Bronski} \begin{eqnarray} L_n=-\frac 1 2 \frac{\partial ^2}{\partial x ^2}+n g_1 R^2+V(x)-\mu, \ \ \ for \ \ n=1,3. \end{eqnarray} Then inserting Eq. (2) into Eq. (1) gives the equations \begin{eqnarray} L_1 R(x)=0. \end{eqnarray} In the equation we have assumed the flow velocity field and current density being zero. We now investigate the stability of stationary state Eq. (2) by using the linear stability analysis, which is associated with boundedness of the perturbed solution \cite{Bronski, Berge} \begin{eqnarray} \psi=[R(x)+\varepsilon \phi_1(x,t)+i \varepsilon \phi_2(x,t)]\exp [i\theta(x)-i\mu t], \end{eqnarray} where the perturbed correction $\varepsilon\phi_i(x,t)$ is real function with constant $|\varepsilon|\ll 1$. Substituting Eqs. (5) and (4) into Eq. (1) yields the linearized equations \cite{Bronski} \begin{eqnarray} \phi_{1t}=L_1 \phi_2, \ \ \ \ \ \phi_{2t}=-L_3\phi_1. \end{eqnarray} For most of external potentials $V(x)$ we cannot derive the exact solutions from Eq. (1) or Eq. (4) such that the operators $L_n$ cannot be determined exactly. In the case of optic lattice potential, some specially exact solutions have been found \cite{Bronski, Deconinck, Hai2}, however, solving Eq. (6) for the general solution is still difficult. Therefore, we have to focus our attentions to the dynamical stability which is associated with the perturbed solutions of space-time separation, \begin{eqnarray} \phi_i(x,t)=T_i(t)\varphi_i(x), \ \ \ for \ \ \ i=1,2. 
\end{eqnarray} Note that the real function $\phi_i$ limits $T_i$ and $\varphi_i$ to real or imaginary simultaneously, the difference between both is only a sign $``-"$ of $\phi_i$. We take real $T_1, \varphi_1, T_2$, and $\varphi_2$ without loss of generality, since the changes of the signs of $\phi_i$ do not affect the stability analysis. We shall discuss how to establish the sufficient conditions of stability as follows. Combining Eq. (6) with Eq. (7), we get the coupled ordinary differential equations \begin{eqnarray} \dot T_1(t)&=&\lambda_1 T_2(t), \ \ \dot T_2(t)=-\lambda_2 T_1(t); \\ L_3 \varphi_1(x)&=&\lambda_2 \varphi_2(x), \ \ L_1 \varphi_2(x)=\lambda_1 \varphi_1(x). \end{eqnarray} Here $\lambda_i$ is the real eigenvalue determined by the initial perturbations $\dot T_i(0),T_i(0)$. The corresponding decoupled equations are derived easily from the coupled ones as \begin{eqnarray} \ddot T_i(t)=&-& \lambda_1 \lambda_2 T_i(t), \lambda_1=\frac{\dot T_1(0)}{T_2(0)}, \lambda_2=-\frac{\dot T_2(0)}{T_1(0)}; \\ L_1 L_3 \varphi_1&=&\lambda_1 \lambda_2 \varphi_1, \ \ \ \ L_3 L_1 \varphi_2=\lambda_1 \lambda_2 \varphi_2. \end{eqnarray} Obviously, the general solutions of Eq. (10) can be written as the exponential functions \begin{eqnarray} T_i=A_ie^{\lambda t}+B_i e^{-\lambda t}, \ \ \ \lambda=\sqrt{-\lambda_1 \lambda_2}, \ \ \ \ \ \ \ \ \ \ \ \nonumber \\ A_i=\frac 1 2 \Big[T_i(0)+\frac{1}{\lambda}\dot T_i(0)\Big], \ B_i=\frac 1 2 \Big[T_i(0)-\frac{1}{\lambda}\dot T_i(0)\Big], \end{eqnarray} where $A_i, B_i$ are real or complex constants, which make $T_i(t)$ the real functions. Based on the existence of bounded eigenstates $\varphi_i(x)$, the results are classified as the three cases: (i) {\bf Stability criterion}: The eigenstates of Eq. (11) are bounded if and only if their eigenvalues are positive, $\lambda_1 \lambda_2=-\lambda^2>0$, that makes $\lambda$ the imaginary constant and $T_i$ the periodic functions. 
(ii) {\bf Instability criterion}: One can find a negative eigenvalue $\lambda_1 \lambda_2=-\lambda^2<0$ associated with a set of bounded eigenstates of Eq. (11) that makes $T_i$ the real exponential function. (iii) {\bf Undetermined stability}: One cannot determine whether all eigenvalues of the bounded eigenstates of Eq. (11) are positive. In this case, we can use criterion (i) to control the possible instability of case (ii). \section{Stability regions on the parameter space and control of instability} It is interesting to note that if the initial perturbations can be determined, the dynamical instability of the real $\lambda$ case can be controlled by adjusting the initial disturbances to obey $A_i=0$ that will suppress the exponentially rapid growth of $T_i$ in Eq. (12). From Eqs. (12) and (10) we establish the controlling criteria for the instability as $ \dot T_i(0)=- \lambda T_i(0).$ However, for the random initial perturbations such a control is difficult to do, since we cannot determine the initial values $ \dot T_i(0)$ and $ T_i(0).$ Therefore, in the case of random perturbation we are interested in determining the same eigenvalue $\lambda_1 \lambda_2$ of operators $L_1 L_3$ and $L_3 L_1$, since the stability can be established if and only if the eigenvalue is positive such that $\lambda^2=-\lambda_1 \lambda_2<0$. Let $\alpha\ge \alpha_g$ and $\beta\ge \beta_g$ be the eigenvalues of operators $L_1$ and $L_3$, which are determined by the eigenequations $L_1 u(x)=\alpha u(x), \ \ L_3v(x)=\beta v(x)$ with $u$ and $v$ being their eigenfunctions, where $\alpha_g$ and $\beta_g$ express the corresponding ground state eigenvalues respectively. From Eq. (3) we know the relation $L_3= L_1+2g_1 R^2$ that means $\alpha_g <\beta_g$ for $g_1>0$ and $\alpha_g>\beta_g$ for $g_1<0$. It is clear that Eq. (4) is one of the eigenequations of $L_1$ for the eigenvalue $\alpha=0$ so that the ground state eigenvalue obeys $\alpha_g\le 0$ for any $g_1$.
Then $\beta_g$ can be positive or negative for $g_1>0$ and $\beta_g<0$ for $g_1<0$. From the above-mentioned results we establish the stability and instability conditions: Case $g_1>0$: The sufficient condition of stability is $\alpha_g = 0$, since such a ground state eigenvalue implies $\alpha \ge 0$ and $\beta> 0$ for all of the eigenstates such that the well known spectral theorem gives \cite{Bronski, Deconinck, Courant} $\lambda_1 \lambda_2\ge 0$. The corresponding sufficient conditions of instability reads $\alpha_g < 0$ and $\beta_g> 0$. Case $g_1<0$: The ground state eigenvalues satisfy the inequality $\beta_g<\alpha_g\le 0$. So the sufficient condition of instability is $\alpha_g= 0$. In all the other cases, we don't know whether $\lambda_1 \lambda_2$ is certainly positive or negative, so the linear stabilities are analytically undetermined. It is worth noting that Eq. (4) infers $R(x)$ to be one of the eigenstates of $L_1$ with eigenvalue $\alpha_R=0$. Therefore, if $R(x)$ is a ground state, the above stability and instability conditions indicate that this state is stable for $g_1>0$, and unstable (or metastable) for $g_1<0$. \bf Note that all the above-mentioned results are valid for arbitrary time-independent potential. \rm We will take the BEC held in an optical lattice as a concrete physical example to evidence these results. \bf In the lattice potential case, \rm the above-mentioned sufficient conditions agree with the stability and instability criterions established by the authors of Ref. \cite{Bronski}. We shall apply the sufficient stability and instability conditions to find the corresponding stability and instability regions on the parameter space, and apply these results to study the stabilization of the considered BEC system. 
For an arbitrary time-independent potential, the eigenequation $L_1 u=\alpha u$ can be rewritten as the integral form \cite{whai2} \begin{eqnarray} u&=&u_1+u_2, \nonumber \\ u_1&=&q^{-1}e^{-qx}\int e^{qx}f u dx, \ u_2=-q^{-1}e^{qx}\int e^{-qx}f u dx, \nonumber \\ f&=&q^2/2+\alpha+\mu-V(x)-g_1R^2(x). \end{eqnarray} where $q>0$ is a real constant. This integral equation can be directly proved by taking the second derivative of both of its sides. The integrals in Eq. (13) are indefinite, which means that the solutions are defined up to two additive constants. Meanwhile, the eigenequation $L_1 u=\alpha u$ is just a second order equation which also implies two arbitrary constants determined by the boundary conditions. It is the two additive constants that make the integral equation (13) completely equivalent to the eigenequation. The stability requires the eigenstate to be bounded and the possible bounded solution $u$ must satisfy the boundedness condition $\lim_{x\rightarrow\pm\infty}\int e^{\mp qx}f udx=0$. Under this condition and for the \textbf{lattice potential case}, we can apply l'H\^{o}pital's rule to get the limit superior \cite{Chong} \begin{eqnarray} \overline {\lim_{x\rightarrow\pm\infty}}u\le \overline{\lim_{x\rightarrow\pm\infty}}u_1+\overline{\lim_{x\rightarrow\pm\infty}}u_2 =2q^{-2}\overline{\lim_{x\rightarrow\pm\infty}}(f u). \end{eqnarray} Note that the usual limit does not exist here, because of the periodicity of the lattice potential. It is clear that the solution of the linear equation $L_1 u=\alpha u$ can be taken as $u(x)=Au'(x)$ with arbitrary constant $A$ and any solution $u'(x)$ such that one can always select $u$ to obey $\overline{\lim}_{x\rightarrow\pm\infty}u>0$. Thus Eq. (14) implies $2q^{-2}\overline{\lim}_{x\rightarrow\pm\infty}f\ge 1$, namely \begin{eqnarray} \alpha\ge -\{\mu+\overline{\lim_{x\rightarrow\pm\infty}}[-V(x)-g_1R^2(x)]\}=\alpha_g.
\end{eqnarray} For the eigenequation $L_3 v=\beta v$ after using $3g_1$ instead of $g_1$, the same calculations give \begin{eqnarray} \beta\ge -\{\mu+\overline{\lim_{x\rightarrow\pm\infty}}[-V(x)-3g_1R^2(x)]\}=\beta_g. \end{eqnarray} Combining Eq. (15) with the stability sufficient condition \cite{Bronski} $\alpha_g= 0$ for $g_1>0$, we get the parameter region of stability \begin{eqnarray} \mu=\mu_s = -\overline{\lim_{x\rightarrow\pm\infty}}\ [-V(x)-g_1R^2(x)] \ \ \ for \ \ g_1>0, \end{eqnarray} which contains the relation among $\mu, g_1$ and the potential parameters. Applying Eqs. (15) and (16) to the instability sufficient conditions \cite{Bronski} $\alpha_g< 0,\ \beta_g> 0$ for $g_1>0$ and $\alpha_g= 0$ for $g_1<0$, we get the parameter regions of instability \begin{eqnarray} &-&\overline{\lim_{x\rightarrow\pm\infty}}\ [-V(x)-g_1R^2(x)]< \mu=\mu_{in} < -\overline{\lim_{x\rightarrow\pm\infty}}\ [-V(x)-3g_1R^2(x)] \ \ for \ g_1>0;\nonumber \\ & & \mu_{in} = -\overline{\lim_{x\rightarrow\pm\infty}}\ [-V(x)-g_1R^2(x)] \ \ \ for \ \ g_1<0. \end{eqnarray} By the sufficient conditions we mean that the stationary state $R(x)e^{-i\mu t}$ of Eq. (1) is certainly stable for the $\mu$ values in the parameter region fixed by Eq. (17), and the stationary states are certainly unstable for the $\mu$ values in any region of Eq. (18). The dynamical stabilities are undetermined outside. We now see the physical meaning of the stability relation in Eq. (17) for the stationary states of BEC with zero current density. Setting the sum of external potential and internal interaction as $U(x)=V(x)+g_1R^2(x)$ with periodic $V(x)$ and bounded $R(x)$, when $U(x)\ge B$ is satisfied for all $x$ values and a fixed constant $B$, Eq. (17) implies $\mu_s= B\le U(x)$. Namely the sufficient stability condition means that if the chemical potential is equal to the minimum of $U(x)$, the considered states are certainly stable. For a known state the stability can be easily examined by using Eq. (17). 
We have tested the exact solutions given in Ref. \cite{Bronski} for the potential $V(x)=-V_0 sn^2(x,k)$ and found that some of them have the instabilities and undetermined stabilities, where $|V_0|$ is the potential depth and $sn(x,k)$ the Jacobian elliptic sine function with $k$ being the modulus. Substituting one of the exact solutions, $g_1R^2(x)=-(1+V_0/k^2)[1-k^2 sn^2(x,k)]$ with the potential depth $-V_0\ge k^2$ and chemical potential $\mu=-1-V_0/k^2+k^2/2$ [see Eq. (12) of Phys. Rev. E63, 036612(2001)], into Eq. (17) yields the stability parameter relation $\mu_s=-1-V_0/k^2$. A difference of $k^2/2$ exists between the $\mu_s$ value required by the stability condition and the chemical potential $\mu$ in the exact solution, namely the stability criterion (17) is not met here. This assertion differs from the result of Ref. \cite{Bronski}, where this solution fits their stability criterion and the stability is independent of the parameters $k$ and $V_0$. However, when the potential depth $|V_0|$ is much greater than the modulus $k$ (e.g. $V_0=-1$ and $k=0.2$), we have the chemical potential near the stability relation (17) ($ \mu=24.02=\mu_s+0.02\approx \mu_s$). This infers the higher stability being associated with a smaller value of the modulus $k$ and a relatively greater $|V_0|$ value. Thus our stability parameter criterion suggests that for a known solution with instability or undetermined stability one can raise the practical stability by adjusting the system parameter (e.g. the above $k$ and $|V_0|$) to approach the values of the stability region in Eq. (17). Generally, constructing a stable exact solution of GPE is not easy, because of the non-integrability of Eq. (4) with periodic potential. However, in the large-$N$ limit, we can fulfil the criterion (17) for the case of a repulsive nonlinearity, since the Thomas-Fermi (TF) approximation \cite{Dalfovo} $U(x)=\mu_{TF}$ just fits the stability relation. 
Therefore, it is practically relevant to prepare such a stable TF state $R(x,\mu_{TF})$ by increasing the condensed atom number $N$. Given the number $N$ and the periodic boundary condition experimentally, from the normalization condition $N=n\int_0^{\pi} R^2(x,\mu_{TF})dx=n\int_0^{\pi} [\mu_{TF}-V(x)]dx/g_1$ we derive the chemical potential of the stable TF state \begin{eqnarray} \mu_{TF}=\mu_s=\frac{Ng_1}{n\pi}+\frac {1}{\pi}\int_0^{\pi} V(x)dx \end{eqnarray} which is related to the atom number $N$ and the potential strength $V_0$ and period $K (k)$, where $n\sim 100$ is the lattice number. In fact, noticing the dependence of $R=R(x,\mu)$ on $\mu$ in Eq. (4), the normalization condition of any known state can also lead to $\mu=\mu (N)$ and $R=R(x, N)$. Applying them to eliminate $\mu$ in Eqs. (17) and (18) will give the corresponding relationships among the experimental parameters $N, g_1,V_0$ and $K(k)$. So we can control the instability of the known state by selecting the experimental parameters to fit or to approach the stability region of Eq. (17). In many practical cases, we cannot obtain the exact solution of Eq. (4) for some periodic potentials, which necessitates the numerical investigation. In order to fit (or approach) the stability region in Eq. (17) and to avoid the instability regions of Eq. (18), we could use Eq. (19) to estimate and adjust the chemical potential in the region $\mu\approx \mu_{TF}$ such that the stability of the numerical solutions of Eq. (4) can also be established or improved. On the other hand, in the case of arbitrary time-independent potential, for some known unstable solutions $R=R(x,\mu_{in})$ from Eqs. (10) and (12) we can experimentally set and adjust the initially controllable perturbation as a control signal \cite{Burger} to suppress the exponentially fast growth of $T_i(t)$.
Although the phase $\theta$ and amplitude $R$ are time-independent in the considered case, the initial perturbations can result in the nontrivial and time-dependent corrections to the phase and atomic-number density. From Eq. (5) we find their first corrections as \begin{eqnarray} &&\triangle\theta(x,t) \approx \arctan [\varepsilon T_2(t)\varphi_2(x)/R(x)]\approx \varepsilon T_2(t)\varphi_2(x)/R(x), \nonumber \\ && \triangle |\psi|^2(x,t)\approx 2\varepsilon T_1(t)\varphi_1(x)R(x), \end{eqnarray} which are initially proportional to $T_1(0)$ and $T_2(0)$ respectively. Making use of Eq. (20), the adjustments to the initially controllable perturbations can be performed by trimming the number density $|\psi|^2$, velocity field $(\triangle\theta)_x$ and their time derivatives which are proportional to the corresponding trimming velocities. Given Eqs. (10) and (12), we know the stability initial criterion \begin{eqnarray} \lambda^2=-\lambda_1 \lambda_2=\dot T_1(0)\dot T_2(0)/[T_1(0)T_2(0)]<0. \end{eqnarray} Once Eq. (21) is satisfied in the adjustments to the initial perturbations, Eq. (12) becomes the periodic solution which implies the stability. Although we cannot determine the initial values $\dot T_i(0)$ and $ T_i(0)$, experimentally, the number density can be adjusted by varying the condensed atom number, and the adjustments to superfluid velocity may be related to a displacement $\triangle x$ of a magnetic potential \cite{Burger}. According to Eqs. (20) and (21), if we initially increases (or decreases) both the relative derivative $\dot T_2(0)/ T_2(0)=\frac{\partial \triangle\theta_x(x,t)}{\partial t}|_{t=0}/\triangle\theta_x(x,0)$ of flow velocity and the relative derivative $\dot T_1(0)/ T_1(0)=\frac{\partial \triangle |\psi|^2(x,t)}{\partial t}|_{t=0}/\triangle |\psi|^2(x,0)$ of atomic number density, the stability initial criterion (21) is destroyed and the system will become unstable. 
But when one of them is increased and the other is decreased simultaneously, the stability criterion (21) is satisfied and the possible instability is suppressed. These assertions may be tested experimentally. \section{Conclusions and discussions} In conclusion, we have investigated the dynamical stability, instability and undetermined stability of a quasi-1D BEC in the stationary states for time-independent external potential and atomic scattering length, and fixed atomic number. After space-time separation of variables, we derive the general solutions of the linearized time-evolution equations for the trivial phase case and give a stability criterion related to the initial conditions. As an important example, we verify the stability criterion analytically for the BEC held in an optical lattice potential. By using the known sufficient conditions of stability and instability \cite{Bronski}, several parameter regions of stability and instability are shown. Our results contain some new stability predictions which can be tested with current experimental setups. Finally, we stress that applying our stability initial criterion and parameter region one can stabilize the considered BEC system by adjusting the system parameters experimentally to enter or near the stability region of Eq. (17) on the parameter space. For the parameters out of the stability region we can also establish or improve the stability by adjusting the initial flow velocity and atomic number density to fit or approach the stability initial criterion. \textbf{Acknowledgment} This work was supported by the National Natural Science Foundation of China under Grant No. 10575034 and by the Key Laboratory of Magnetic Resonance and Atomic and Molecular Physics of China under Grant No. T152504. {} \end{document}
\begin{document} \title[Asymptotic stability of the compressible Euler-Maxwell equations ] {Asymptotic stability of stationary solutions to the compressible Euler-Maxwell equations } \author{Qingqing Liu} \address{(QQL) The Hubei Key Laboratory of Mathematical Physics, School of Mathematics and Statistics, Central China Normal University, Wuhan, 430079, P. R. China} \email{[email protected]} \author{Changjiang Zhu*} \address{(CJZ) The Hubei Key Laboratory of Mathematical Physics, School of Mathematics and Statistics, Central China Normal University, Wuhan, 430079, P. R. China} \email{[email protected]} \thanks{*Corresponding author. Email: [email protected] } \date{\today} \keywords{Compressible Euler-Maxwell equations, stationary solutions, asymptotic stability} \subjclass[2000]{35Q35, 35P20} \begin{abstract} In this paper, we are concerned with the compressible Euler-Maxwell equations with a nonconstant background density (e.g. of ions) in three dimensional space. There exist stationary solutions when the background density is a small perturbation of a positive constant state. We first show the asymptotic stability of solutions to the Cauchy problem near the stationary state provided that the initial perturbation is sufficiently small. Moreover the convergence rates are obtained by combining the $L^p$-$L^q$ estimates for the linearized equations with time-weighted estimate. \end{abstract} \maketitle \tableofcontents \section{Introduction} The dynamics of two separate compressible fluids of ions and electrons interacting with their self-consistent electromagnetic field in plasma physics can be described by the compressible 2-fluid Euler-Maxwell equations \cite{Besse,Rishbeth}. 
In this paper, we consider the following one-fluid compressible Euler-Maxwell system when the background density $n_{b}$ is a function of spatial variable and the electron flow is isentropic (see \cite{Duan,UK,USK} when $n_{b}=const.$), taking the form of \begin{eqnarray}\label{1.1} &&\left\{\begin{aligned} &\partial_t n+\nabla\cdot(nu)=0,\\ &\partial_t u+u \cdot \nabla u+\frac{1}{n}\nabla p(n)=-(E+u\times B)-\nu u,\\ &\partial_t E-\nabla\times B=nu,\\ &\partial_t B+\nabla \times E=0,\\ &\nabla \cdot E=n_{b}(x)-n, \ \ \nabla \cdot B=0. \end{aligned}\right. \end{eqnarray} Here, $n=n(t,x)\geq 0 $ is the electron density, $ u=u(t,x)\in \mathbb{R}^{3}$ is the electron velocity, $ E=E(t,x)\in \mathbb{R}^{3}$, $ B=B(t,x)\in \mathbb{R}^{3}$, for $ t>0, \ x \in \mathbb{R}^{3} $, denote the electric and magnetic fields, respectively. Initial data is given as \begin{eqnarray}\label{1.2} [n,u,E,B]|_{t=0}=[n_{0},u_{ 0},E_{0},B_0],\ \ \ x\in\mathbb{R}^{3}, \end{eqnarray} with the compatibility conditions \begin{eqnarray}\label{1.3} \nabla \cdot E_0=n_{b}(x)-n_{0}, \ \ \nabla \cdot B_0=0, \ \ \ x\in\mathbb{R}^{3}. \end{eqnarray} The pressure function $ p(\cdot)$ of the flow depending only on the density satisfies the power law $p(n)=A n^{\gamma}$ with constants $A>0$ and the adiabatic exponent $\gamma >1 $. Constant $\nu>0$ is the velocity relaxation frequency. In this paper, we set $ A=1,\ \nu=1$ without loss of generality. $n_{b}(x)$ denotes the stationary background ion density satisfying \begin{eqnarray*} n_{b}(x)\rightarrow n_{\infty}, \ \ \ \ \textrm{as}\ \ \ \ |x|\rightarrow \infty, \end{eqnarray*} for a positive constant state $n_{\infty}>0$. Throughout this paper, we take $n_{\infty}=1$ for simplicity. In comparison with the Euler-Maxwell system studied in \cite{Duan}, where the background density is a uniform constant, the naturally existing steady states of system \eqref{1.1} are no longer constants $[1,0,0,0]$. 
The stationary equations to the Cauchy problem \eqref{1.1}-\eqref{1.2} are given as \begin{eqnarray}\label{sta.eq0} \left\{\begin{aligned} &\frac{1}{n_{st}}\nabla p(n_{st})=-E_{st},\\ &\nabla \times E_{st}=0,\\ &\nabla \cdot E_{st}=n_{b}(x)-n_{st}. \end{aligned}\right. \end{eqnarray} First, in this paper, we prove the existence of the stationary solutions to the Cauchy problem \eqref{1.1}-\eqref{1.2} under some conditions on the background density $n_{b}(x)$. For this purpose, let us define the weighted norm $\|\cdot\|_{W_{k}^{m,2}}$ by \begin{eqnarray}\label{def.norm} \|g\|_{W_{k}^{m,2}}=\left(\sum_{|\alpha|\leq m}\int_{\mathbb{R}^{3}}(1+|x|)^{k}|\partial^{\alpha}_{x}g(x)|^2dx\right)^{\frac{1}{2}}, \end{eqnarray} for suitable $g=g(x)$ and integers $m\geq0$, $k\geq0$. Actually, one has the following theorem. \begin{theorem}\label{sta.existence} For integers $m\geq 2$ and $k\geq 0$, suppose that $\|n_{b}-1\|_{W_{k}^{m,2}}$ is small enough. Then the stationary problem \eqref{sta.eq0} admits a unique solution $(n_{st},E_{st})\in L^\infty(0, T; W_{k}^{m,2})$ satisfying \begin{eqnarray}\label{sta.pro} \|n_{st}-1\|_{{W_{k}^{m,2}}}\leq C \|n_{b}-1\|_{W_{k}^{m,2}},\ \ \ \|E_{st}\|_{{W_{k}^{m-1,2}}}\leq C \|n_{b}-1\|_{W_{k}^{m,2}}, \end{eqnarray} for some constant $C$. \end{theorem} There have been extensive investigations into the simplified Euler-Maxwell system where all the physical parameters are set to unity. For the one-fluid Euler-Maxwell system, by using the fractional Godunov scheme as well as the compensated compactness argument, Chen-Jerome-Wang in \cite{Chen} proved global existence of weak solutions to the initial-boundary value problem in one space dimension for arbitrarily large initial data in $L^{\infty}$. Jerome in \cite{Jerome} established a local smooth solution theory for the Cauchy problem over $\mathbb{R}^3$ by adapting the classical semigroup-resolvent approach of Kato in \cite{Kato}. 
Recently, Duan in \cite{Duan} proved the existence and uniqueness of global solutions in the framework of smooth solutions with small amplitude; moreover, the detailed analysis of Green's function to the linearized system was made to derive the optimal time-decay rates of perturbed solutions. Similar results were independently given by Ueda-Wang-Kawashima in \cite{USK} and Ueda-Kawashima in \cite{UK} by using the pure time-weighted energy method. For the original two-fluid Euler-Maxwell systems with various parameters, the limits as some parameters go to zero have been studied recently. Peng-Wang in \cite{Peng,PW1,PW2} justified the convergence of the one-fluid compressible Euler-Maxwell system to the incompressible Euler system, compressible Euler-Poisson system and an electron magnetohydrodynamics system for well-prepared smooth initial data. These asymptotic limits are respectively called the non-relativistic limit, the quasi-neutral limit and the limit of their combination. Recently, Hajjej and Peng in \cite{HP} considered the zero-relaxation limits for periodic smooth solutions of Euler-Maxwell systems. For the 2-fluid Euler-Maxwell system, depending on the choice of physical parameters, especially when the coefficients of $u_{\pm}$ were assumed to satisfy $\nu_{+}=\nu_{-}$, Duan-Liu-Zhu in \cite{DLZ} obtained the existence and the time-decay rates of the solutions. Many more studies have been made for the Euler-Poisson system when the magnetic field is absent; see \cite{Guo,GuoPausader,Luo,Deng,Smoller,Chae} and references therein for discussion and analysis of the different issues such as the existence of global smooth irrotational flow \cite{Guo} for an electron fluid and \cite{GuoPausader} for the ion dynamics, large time behavior of solutions \cite{Luo}, stability of star solutions \cite{Deng,Smoller} and finite time blow-up \cite{Chae}. 
However, there are few results on the global existence of solutions to the Euler-Maxwell system when the non-moving ions provide a nonconstant background $n_{b}(x)$, whereas in many papers related to one-fluid Euler-Maxwell system $n_{b}=1$. In this paper, we prove that there exists a stationary solution when the background density is a small perturbation of a positive constant state and we show the asymptotic stability of the stationary solution and then obtain the convergence rate of the global solution towards the stationary solution. The main result is stated as follows. Notations will be explained at the end of this section. \begin{theorem}\label{Corolary} Let $ N\geq 3$ and $ \eqref{1.3}$ hold. Suppose $\|n_{b}-1\|_{W_{0}^{N+1,2}}$ is small enough. Then there are $ \delta_{0}>0$, $ C_{0}>0$ such that if \begin{eqnarray*} \|[n_{0}-n_{st},u_{0},E_{0}-E_{st},B_{0}]\|_{N} \leq \delta_{0}, \end{eqnarray*} then, the Cauchy problem $\eqref{1.1}$-$\eqref{1.2}$ admits a unique global solution $[n(t,x),u(t,x),E(t,x),B(t,x)] $ satisfying \begin{eqnarray*} [n-n_{st},u,E-E_{st},B]\in C([0,\infty);H^{N}(\mathbb{R}^{3}))\cap {\rm Lip}([0,\infty);H^{N-1}(\mathbb{R}^{3})), \end{eqnarray*} and \begin{eqnarray*} \sup_{t \geq 0}\|[n(t)-n_{st},u(t),E(t)-E_{st},B(t)]\|_{N}\leq C_{0} \|[n_{0}-n_{st},u_{0},E_{0}-E_{st},B_{0}]\|_{N}. \end{eqnarray*} Moreover, there are $\delta_{1}>0$, $ C_{1}>0$ such that if \begin{eqnarray*} \|[n_{0}-n_{st},u_{0},E_{0}-E_{st},B_{0}]\|_{N+3}+\|[u_{0},E_{0}-E_{st},B_{0}]\|_{L^{1}}\leq \delta_{1}, \end{eqnarray*} and $\|n_{b}-1\|_{W_{0}^{N+4,2}}$ is small enough, then the solution $[n(t,x),u(t,x),E(t,x),B(t,x)] $ satisfies that for any $ t \geq 0$, \begin{eqnarray}\label{UN.decay} \|[n(t)-n_{st},u(t),B(t),E(t)-E_{st}]\|_{N} \leq C_{1} (1+t)^{-\frac{3}{4}}, \end{eqnarray} \begin{eqnarray}\label{UhN.decay} \|\nabla[n(t)-n_{st},u(t),B(t),E(t)-E_{st}]\|_{N-1} \leq C_{1} (1+t)^{-\frac{5}{4}}. 
\end{eqnarray} More precisely, if \begin{eqnarray*} \|[n_{0}-n_{st},u_{0},E_{0}-E_{st},B_{0}]\|_{6}+\|[u_{0},E_{0}-E_{st},B_{0}]\|_{L^{1}}\leq \delta_{1}, \end{eqnarray*} and $\|n_{b}-1\|_{W_{0}^{7,2}}$ is small enough, we have \begin{eqnarray}\label{sigmau.decay} \|[n(t)-n_{st},u(t)]\| \leq C_{1} (1+t)^{-\frac{5}{4}}, \end{eqnarray} \begin{eqnarray}\label{EB.decay} \|[E(t)-E_{st},B(t)]\|\leq C_{1}(1+t)^{-\frac{3}{4}}. \end{eqnarray} If \begin{eqnarray*} \|[n_{0}-n_{st},u_{0},E_{0}-E_{st},B_{0}]\|_{7}+\|[u_{0},E_{0}-E_{st},B_{0}]\|_{L^{1}}\leq \delta_{1}, \end{eqnarray*} and $\|n_{b}-1\|_{W_{0}^{8,2}}$ is small enough, then $E(t)$ satisfies \begin{eqnarray}\label{E.decay} \|E(t)-E_{st}\|\leq C_{1}(1+t)^{-\frac{5}{4}}. \end{eqnarray} \end{theorem} The proof of existence in Theorem \ref{Corolary} is based on the classical energy method. As in \cite{Duan}, the key point is to obtain the uniform-in-time {\it a priori} estimates in the form of $$ \mathcal{E}_N(\bar{V}(t))+\lambda \int_0^t\mathcal{D}_N(\bar{V}(s))\,ds\leq \mathcal{E}_N(\bar{V}_0), $$ where $\bar{V}(t)$ is the perturbation of solutions, and $\mathcal{E}_N(\cdot)$, $\mathcal{D}_N(\cdot)$ denote the energy functional and energy dissipation rate functional. Here if we make the energy estimates like what Duan did in \cite{Duan}, it is difficult to control the highest-order derivative of $\bar{E}$ because of the regularity-loss type in the sense that $[\bar{E},\bar{B}]$ is time-space integrable up to $N-1$ order only. In this paper, we modify the energy estimates by choosing a weighted function $1+\sigma_{st}+\Phi(\sigma_{st})$ which plays a vital role in closing the energy estimates. Furthermore, for the convergence rates of perturbed solutions in Theorem 1.1, we can not analyze the corresponding linearized system of \eqref{1.1} around the steady state $[n_{st},0,E_{st},0]$ directly. In this case, the Fourier analysis fails due to the difficulty of variant coefficients. 
Here, the main idea follows from \cite{Duan} for combining energy estimates with the linearized results in \cite{Duan}. In the process of obtaining the fastest decay rates of the perturbed solution, the great difficulty is to deal with these linear nonhomogeneous sources including $\rho_{st}$, which cannot bring enough decay rates. Whereas in \cite{Duan}, the nonhomogeneous sources are at least quadratically nonlinear. To overcome this difficulty, we make iteration for the inequalities \eqref{sec5.ENV0} and $\eqref{sec5.high}$ together. In Theorem \ref{Corolary}, we only capture the same time-decay properties of $u,\ E-E_{st}$ and $B$ as \cite{Duan} except $n-n_{st}$. $\|n-n_{st}\|$ decays as $(1+t)^{-\frac{5}{4}}$ in the fastest way, because the nonhomogeneous sources containing $\rho_{st}$ decay at most the same as $\sqrt{\mathcal{E}^h_N(\cdot)}$. Similar work was done for the Vlasov-Poisson-Boltzmann system, where the background density is also a function of spatial variable. Duan and Yang in \cite{RY} considered the stability of the stationary states which were given by an elliptic equation with the exponential nonlinearity. The optimal time-decay of the Vlasov-Poisson-Boltzmann system in $\mathbb{R}^{3}$ was obtained by Duan and Strain in \cite{DS}. We also mention the work of Duan-Ukai-Yang-Zhao in \cite{RSYZ}, Duan-Liu-Ukai-Yang in \cite{DLUY} for the study of optimal convergence rates of the compressible Navier-Stokes equations with potential forces. Their proofs were based on the combination of spectral analysis and energy estimates. Recently, Duan-Ukai-Yang in \cite{RSY} developed a method of the combination of the spectral analysis and energy estimates to deal with the optimal time decay for study of equations of gas motion. 
We further remark the result in \cite{RY}, the existence of solution to the elliptic equation $\Delta \phi=e^{\phi}-\bar{\rho}(x)$ has been proved when $\|\bar{\rho}-1\|_{W_{k}^{m,\infty}}$ is sufficiently small, where the weighted norm $\|\cdot\|_{W_{k}^{m,\infty}}$ is defined by \begin{eqnarray}\label{def.norm1} \|g\|_{W_{k}^{m,\infty}}=\sup_{x\in\mathbb{R}^{3}}(1+|x|)^{k}\sum_{|\alpha|\leq m}|\partial^{\alpha}_{x}g(x)| \end{eqnarray} for suitable $g=g(x)$ and integers $m\geq0$, $k\geq0$, the stability of the perturbed solutions can be proved when $\|\bar{\rho}-1\|_{W_{2}^{N+1,\infty}}$ is sufficiently small. We can also prove the stability of stationary solutions in the framework of \cite{RY} if $\|n_{b}-1\|_{W_{0}^{N+1,\infty}}$ is sufficiently small. In order to obtain the same convergence rates, $\|n_{b}-1\|_{W_{2}^{N+4,\infty}}$ should be sufficiently small in the process of dealing with $\rho_{st}\bar{u}$ as in Section \ref{sec4}, $$ \|\rho_{st} \bar{u}\|_{L^1} \leq \|\rho_{st}\|\left\|\bar{u} \right\| \leq C \|\rho_{st}\|_{W_{2}^{N+4,\infty}}\| \bar{u}\|. $$ Notice that $W_{2}^{N+4,\infty}\subseteq W_{0}^{N+4,2}$, it seems to be better to consider the existence of steady states in the weighted Sobolev space $W_{k}^{m,2}$. Let us introduce some notations for the use throughout this paper. $C$ denotes some positive (generally large) constant and $ \lambda$ denotes some positive (generally small) constant, where both $C$ and $ \lambda$ may take different values in different places. For two quantities $a$ and $b$, $a\sim b$ means $\lambda a \leq b \leq \frac{1}{\lambda} a $ for a generic constant $0<\lambda<1$. For any integer $m\geq 0$, we use $H^{m}$, $\dot{H}^{m}$ to denote the usual Sobolev space $H^{m}(\mathbb{R}^{3})$ and the corresponding $m$-order homogeneous Sobolev space, respectively. Set $L^{2}=H^{m}$ when $m = 0$. For simplicity, the norm of $ H^{m}$ is denoted by $\|\cdot\|_{m} $ with $\|\cdot \|=\|\cdot\|_{0}$. 
We use $ \langle\cdot, \cdot \rangle$ to denote the inner product over the Hilbert space $ L^{2}(\mathbb{R}^{3})$, i.e. \begin{eqnarray*} \langle f,g \rangle=\int_{\mathbb{R}^{3}} f(x)g(x)dx,\ \ \ \ f = f(x),\ \ g = g(x)\in L^2(\mathbb{R}^{3}). \end{eqnarray*} For a multi-index $\alpha = [\alpha_1, \alpha_2, \alpha_3]$, we denote $\partial^{\alpha} = \partial^{\alpha_{1}}_ {x_1}\partial^{\alpha_{2}}_ {x_2} \partial^{\alpha_{3}}_ {x_3} $. The length of $ \alpha$ is $|\alpha| = \alpha_1 + \alpha_2 + \alpha_3$. For simplicity, we also set $\partial_{j}=\partial_{x_{j}}$ for $j = 1, 2, 3$. We conclude this section by stating the arrangement of the rest of this paper. In Section 2, we prove the existence of the stationary solution. In Section 3, we reformulate the Cauchy problem under consideration and obtain asymptotic stability of solutions near the stationary state provided that the initial perturbation is sufficiently small. In Section 4, we study the time-decay rates of solutions to the stationary solutions by combining the $L^p$-$L^q$ time-decay property of the linearized homogeneous system with time-weighted estimate. \section{Existence of stationary solution}\label{sec2} In this section, we will prove the existence of stationary solutions to $\eqref{sta.eq0}$ by using the contraction mapping theorem. From $\eqref{sta.eq0}_2$, there exists $\phi_{st}$ such that $E_{st}=\nabla \phi_{st}$, it turns equation $\eqref{sta.eq0}$ into \begin{eqnarray}\label{sta.eq1} \left\{\begin{aligned} &\frac{1}{n_{st}}\nabla p(n_{st})=-\nabla\phi_{st},\\ &\Delta\phi_{st}=n_{b}(x)-n_{st}. \end{aligned}\right. \end{eqnarray} We introduce the nonlinear transformation (cf. \cite{Deng}) \begin{eqnarray}\label{sta.tra} Q_{st}=\frac{\gamma}{\gamma-1}(n_{st}^{\gamma-1}-1). 
\end{eqnarray} From $\eqref{sta.eq1}$ and $\eqref{sta.tra}$, we derive the following elliptic equation \begin{eqnarray}\label{sta.ellip} \Delta Q_{st}=\left(\frac{\gamma-1}{\gamma}Q_{st}+1\right)^{\frac{1}{\gamma-1}}-n_{b}(x). \end{eqnarray} For convenience, we replace $Q_{st}$ by $\phi$ in the following. Equation $\eqref{sta.ellip}$ can be rewritten in the integral form \begin{eqnarray*} \phi=T(\phi)=G*\left(\left(\frac{\gamma-1}{\gamma}\phi+1\right)^{\frac{1}{\gamma-1}}-\frac{1}{\gamma}\phi -n_{b}(x)\right), \end{eqnarray*} where $G=G(x)$, given by \begin{eqnarray*} G(x)=-\frac{1}{4\pi|x|}e^{-\tfrac{1}{\sqrt{\gamma}}|x|} \end{eqnarray*} is the fundamental solution to the linear elliptic equation $ \Delta_{x}G-\frac{1}{\gamma}G=0$. Thus \eqref{sta.ellip} admits a solution if and only if the nonlinear mapping $T$ has a fixed point. Define \begin{eqnarray*} \mathscr{B}_{m,k}(B)=\{\phi\in W^{m,2}_{k}(\mathbb{R}^{3});\|\phi\|_{W^{m,2}_{k}}\leq B\|n_{b}-1\|_{W^{m,2}_{k}},\ m\geq2\} \end{eqnarray*} for some constant $B$ to be determined later. Next, we prove that if $\|n_{b}-1\|_{W^{m,2}_{k}} $ is small enough, there exists a constant $B$ such that $T:\mathscr{B}_{m,k}(B)\rightarrow \mathscr{B}_{m,k}(B) $ is a contraction mapping. In fact, for simplicity, let us denote \begin{eqnarray*} g(x)=\left(\frac{\gamma-1}{\gamma}x+1\right)^{\frac{1}{\gamma-1}}-\frac{1}{\gamma}x-1. \end{eqnarray*} Then it holds that \begin{eqnarray}\label{T.phi} T(\phi)(x)=-\int_{\mathbb{R}^{3}}\frac{1}{4\pi|x-y|}e^{-\tfrac{1}{\sqrt{\gamma}}|x-y|} [g(\phi(y))-(n_{b}(y)-1)]dy. 
\end{eqnarray} Taking derivatives $ \partial_{x}^{\alpha}$ on both sides of $ \eqref{T.phi}$, one has \begin{eqnarray}\label{T.estimate} \arraycolsep=1.5pt \begin{array}[b]{rl} \partial_{x}^{\alpha}T(\phi)(x)=&\displaystyle-(-1)^{|\alpha|} \int_{\mathbb{R}^{3}}\frac{1}{4\pi|x-y|}e^{-\tfrac{1}{\sqrt{\gamma}}|x-y|} [\partial^{\alpha}_{y} g(\phi(y))-\partial^{\alpha}_{y}(n_{b}(y)-1)]dy\\[5mm] =&-(-1)^{|\alpha|}G*(\partial^{\alpha}g(\phi)-\partial^{\alpha}(n_{b}-1)). \end{array} \end{eqnarray} Here let's list some properties of the operator $G*$. \begin{lemma}\label{Pro.OG} For any $k\geq 0$, it holds that \begin{eqnarray}\label{pro.Gdecay} \int_{\mathbb{R}^{3}}\frac{1}{|y|}e^{-\tfrac{1}{\sqrt{\gamma}}|y|}\frac{1}{(1+|x-y|)^{k}}dy\leq \frac{C_{k}}{(1+|x|)^{k}}, \end{eqnarray} and for any $f\in W_{k}^{m,2}$, \begin{eqnarray}\label{pro.G} \|(1+|x|)^{\frac{k}{2}}(G*f)\|\leq C_{k}^{\frac{1}{2}}\|G\|_{L^{1}}^{\frac{1}{2}}\|(1+|x|)^{\frac{k}{2}}f\|. \end{eqnarray} \end{lemma} \textit{Proof.} \eqref{pro.Gdecay} has been proved in \cite{RY}. We only prove \eqref{pro.G} by using $\eqref{pro.Gdecay}$. \begin{eqnarray*} \arraycolsep=1.5pt \begin{array}[b]{rl} \displaystyle\left| \int_{\mathbb{R}^{3}}G(x-y)f(y)dy \right| \leq & \displaystyle \int_{\mathbb{R}^{3}}\frac{|G(x-y)|^{\frac{1}{2}}}{(1+|y|)^{\frac{k}{2}}} |G(x-y)|^{\frac{1}{2}}(1+|y|)^{\frac{k}{2}}|f(y)|dy\\[5mm] \leq & \displaystyle \left(\int_{\mathbb{R}^{3}}\frac{|G(x-y)|}{(1+|y|)^{k}}dy\right)^{\frac{1}{2}} \left(\int_{\mathbb{R}^{3}}|G(x-y)|(1+|y|)^{k}|f(y)|^2dy\right)^{\frac{1}{2}}\\[5mm] \leq & \displaystyle \frac{C_{k}^{\frac{1}{2}}}{(1+|x|)^{\frac{k}{2}}} \left(\int_{\mathbb{R}^{3}}|G(x-y)|(1+|y|)^{k}|f(y)|^2dy\right)^{\frac{1}{2}}. 
\end{array} \end{eqnarray*} Then \begin{eqnarray*} \arraycolsep=1.5pt \begin{array}[b]{rl} & \displaystyle \int_{\mathbb{R}^{3}}(1+|x|)^{k}\left| \int_{\mathbb{R}^{3}}G(x-y)f(y)dy \right|^2dx\\[5mm] \leq & \displaystyle C_{k} \int_{\mathbb{R}^{3}}\int_{\mathbb{R}^{3}}|G(x-y)|(1+|y|)^{k}|f(y)|^2dydx\\[5mm] = & \displaystyle C_{k} \int_{\mathbb{R}^{3}}\int_{\mathbb{R}^{3}}|G(x-y)|(1+|y|)^{k}|f(y)|^2dxdy\\[5mm] = & \displaystyle C_{k} \int_{\mathbb{R}^{3}}(1+|y|)^{k}|f(y)|^2 dy \int_{\mathbb{R}^{3}}|G(x-y)|dx\\[5mm] =& C_{k}\|G\|_{L^{1}}\|(1+|x|)^{\frac{k}{2}}f\|^2. \end{array} \end{eqnarray*} \textbf{Remark:} When $k=0$, $C_{k}=\|G\|_{L^{1}}$, \eqref{pro.G} is in accordance with Young inequality. By \eqref{pro.G} and \eqref{T.estimate}, one has \begin{eqnarray*} \arraycolsep=1.5pt \begin{array}[b]{rl} & \|(1+|x|)^{\frac{k}{2}}\partial_{x}^{\alpha}T(\phi)(x)\|\\[3mm] \leq & C\|(1+|x|)^{\frac{k}{2}}\partial^{\alpha}g(\phi)\| +C\|(1+|x|)^{\frac{k}{2}}\partial^{\alpha}(n_{b}-1))\|. \end{array} \end{eqnarray*} By the definition $\eqref{def.norm}$ of the norm $ \|\cdot\|_{W^{m,2}_{k}}$, one has \begin{eqnarray}\label{T.nb} \arraycolsep=1.5pt \begin{array}[b]{rl} \|T(\phi)(x)\|_{W^{m,2}_{k}} = & \displaystyle \left(\sum_{|\alpha|\leq m}\|(1+|x|)^{\frac{k}{2}}\partial_{x}^{\alpha}T(\phi)(x)\|^2\right)^{\frac{1}{2}}\\[5mm] \leq & \displaystyle C\left(\sum_{|\alpha|\leq m}\|(1+|x|)^{\frac{k}{2}}\partial^{\alpha}g(\phi)\|^2\right)^{\frac{1}{2}} +C\left(\sum_{|\alpha|\leq m}\|(1+|x|)^{\frac{k}{2}}\partial^{\alpha}(n_{b}-1)\|^2\right)^{\frac{1}{2}}\\[5mm] \leq & \displaystyle C\left(\sum_{|\alpha|\leq m}\|(1+|x|)^{\frac{k}{2}}\partial^{\alpha}g(\phi)\|^2\right)^{\frac{1}{2}} +C\|n_{b}-1\|_{W^{m,2}_{k}}. 
\end{array} \end{eqnarray} On the other hand, note \begin{eqnarray*} g(\phi)=\int_{0}^{1}\int_{0}^{\theta}g''(\tau\phi)d\tau d\theta \phi^{2}\triangleq h(\phi)\phi^{2}, \end{eqnarray*} where $g''(x)=\frac{2-\gamma}{\gamma^{2}}\left(\frac{\gamma-1}{\gamma}x+1\right)^{\frac{3-2\gamma}{\gamma-1}}$. It is straightforward to check that \begin{eqnarray*} \|(1+|x|)^{\frac{k}{2}}\partial^{\alpha}(h(\phi)\phi^{2})\|\leq \sum_{\beta_{1}+\beta_{2}+\beta_{3}=\alpha}C_{\beta_1,\beta_2,\beta_{3}}^{\alpha} \|(1+|x|)^{\frac{k}{2}}\partial^{\beta_{1}}h(\phi)\partial^{\beta_2}\phi\partial^{\beta_3}\phi\|. \end{eqnarray*} In addition, one has the following claim. \textbf{Claim}: \begin{eqnarray}\label{T.gphi} \|(1+|x|)^{\frac{k}{2}}\partial^{\beta_{1}}h(\phi)\partial^{\beta_2}\phi\partial^{\beta_3}\phi\|\leq C \|\phi\|^2_{W^{m,2}_{k}}. \end{eqnarray} \textit{Proof of claim:} We prove \eqref{T.gphi} by two cases. \textbf{Case 1.} $\beta_{1}=0$. In this case, $|\beta_{2}|+|\beta_{3}|\leq m$, thus one can suppose $|\beta_{2}|\leq [\frac{m}{2}]$ by the symmetry of $\beta_{2}$ and $\beta_{3}$. This deduces \begin{eqnarray*} \arraycolsep=1.5pt \begin{array}[b]{rl} \|(1+|x|)^{\frac{k}{2}}h(\phi)\partial^{\beta_2}\phi\partial^{\beta_3}\phi\| \leq & \|h(\phi)\|_{L^{\infty}}\|\partial^{\beta_2}\phi\|_{L^{6}}\|(1+|x|)^{\frac{k}{2}}\partial^{\beta_3}\phi\|_{L^{3}}\\[3mm] \leq & C \|\nabla \partial^{\beta_2}\phi\|\|(1+|x|)^{\frac{k}{2}}\partial^{\beta_3}\phi\|_{1}\\[3mm] \leq & C \|\phi\|^2_{W^{m,2}_{k}}. \end{array} \end{eqnarray*} Here, we have used that $h(\cdot)$ is a continuous function in the argument, \begin{eqnarray*} \|\phi\|_{L^{\infty}}\leq C\|\nabla \phi\|_{H^{1}}\leq C\|\phi\|_{W^{m,2}_{k}}\leq C \|n_{b}-1\|_{W^{m,2}_{k}}\ll 1, \end{eqnarray*} and $m\geq 2$. \textbf{Case 2}. $|\beta_{1}|\geq 1$. 
Noticing that \begin{eqnarray*} \partial^{\beta_{1}}h(\phi)=\sum_{l=1}^{|\beta_{1}|}h^{(l)}(\phi) \sum_{\gamma_{1}+\gamma_{2}+\cdots\gamma_{l}=\beta_{1}} C_{\gamma_{1},\gamma_{2},\cdots\gamma_{l}}\Pi_{i=1}^{l}\partial^{\gamma_{i}}\phi, \end{eqnarray*} \eqref{T.gphi} can be similarly obtained because $h^{(m)}(\phi)$ is also bounded. Putting $\eqref{T.gphi}$ into \eqref{T.nb}, and using the above estimates, one has \begin{eqnarray}\label{T.phi.est} \|T(\phi)(x)\|_{W^{m,2}_{k}}\leq C B^2 \|n_{b}-1\|_{W^{m,2}_{k}}^2+C\|n_{b}-1\|_{W^{m,2}_{k}}. \end{eqnarray} Finally, for any $\phi_{1}=\phi_{1}(x)$ and $\phi_{2}=\phi_{2}(x)$, it holds that \begin{eqnarray*} T(\phi_{1})-T(\phi_{2})=G*(g(\phi_{1})-g(\phi_{2})) \end{eqnarray*} with \begin{eqnarray*} g(\phi_{1})-g(\phi_{2})=\int_{0}^{1}g'(\theta\phi_{1}+(1-\theta)\phi_{2})d\theta(\phi_{1}-\phi_{2}). \end{eqnarray*} Notice that for any $\phi=\phi(x)$, \begin{eqnarray*} \arraycolsep=1.5pt \begin{array}{rcl} g'(\phi)&=&\displaystyle\frac{1}{\gamma}\left(\frac{\gamma-1}{\gamma}\phi+1\right)^{\frac{2-\gamma}{\gamma-1}} -\frac{1}{\gamma}\\[3mm] &=&\displaystyle\int_{0}^{1}\frac{2-\gamma}{\gamma^{2}} \left(\frac{\gamma-1}{\gamma}\theta\phi+1\right)^{\frac{3-2\gamma}{\gamma-1}}d\theta\phi. \end{array} \end{eqnarray*} Then the same computations as for $\eqref{T.phi.est}$ yield \begin{eqnarray}\label{T.contract} \arraycolsep=1.5pt \begin{array}{rl} &\|T(\phi_{1})-T(\phi_{2})\|_{W_{k}^{m,2}}\\[3mm] \leq & C (\|\phi_{1}\|_{W^{m,2}_{k}}+\|\phi_{2}\|_{W^{m,2}_{k}})\|\phi_{1}-\phi_{2}\|_{W^{m,2}_{k}}. \end{array} \end{eqnarray} Combining $\eqref{T.phi.est}$ with $ \eqref{T.contract}$, the standard argument implies that $T$ has a unique fixed point $\phi$ in $ \mathscr{B}_{m,k}(B)$ for a proper constant $B$ provided that $\|n_{b}-1\|_{W^{m,2}_{k}}$ is small enough. This completes the proof of Theorem \ref{sta.existence}. Let us conclude this section with a remark. 
The existence of solutions to the elliptic equation \eqref{sta.ellip} can also be proved in the framework of \cite{RY} when $\|n_{b}-1\|_{W^{m,\infty}_{k}}$ is sufficiently small. We consider the existence when $\|n_{b}-1\|_{W^{m,2}_{k}}$ is sufficiently small in order to derive the more general conclusion. In fact, in the process of dealing with the stability and convergence rates, only the smallness of $\|n_{b}-1\|_{W^{m,2}_{0}}$ is assumed, and the space decay at infinity of $n_{b}(x)-1$ is not needed. \section{Stability of stationary solution} \subsection{Reformulation of the problem} Let $[n,u,E,B]$ be a smooth solution to the Cauchy problem of the Euler-Maxwell system (\ref{1.1}) with given initial data (\ref{1.2}) satisfying (\ref{1.3}). Set \begin{eqnarray}\label{2.1} &&\left\{ \begin{aligned} &\sigma(t,x)=\frac{2}{\gamma-1}\left\{\left[n\left(\frac{t}{\sqrt{\gamma}},x\right)\right] ^{\frac{\gamma-1}{2}}-1\right\}, \ \ \ v=\frac{1}{\sqrt{\gamma}}u\left(\frac{t}{\sqrt{\gamma}},x\right), \\[5mm] &\ \ \tilde{E}=\frac{1}{\sqrt{\gamma}}E\left(\frac{t}{\sqrt{\gamma}},x\right),\ \ \ \tilde{B}=\frac{1}{\sqrt{\gamma}}B\left(\frac{t}{\sqrt{\gamma}},x\right). \end{aligned}\right. \end{eqnarray} Then, $V:=[\sigma,v,\tilde{E},\tilde{B}]$ satisfies \begin{equation}\label{2.2} \left\{ \begin{aligned} &\partial_t \sigma+\left(\frac{\gamma-1}{2}\sigma+1\right)\nabla\cdot v+v\cdot \nabla \sigma=0,\\ &\partial_t v+v \cdot \nabla v+\left(\frac{\gamma-1}{2}\sigma+1\right)\nabla \sigma=-\left(\frac{1}{\sqrt{\gamma}}\tilde{E}+v\times \tilde{B}\right) -\frac{1}{\sqrt{\gamma}}v,\\ &\partial_t\tilde{E}-\frac{1}{\sqrt{\gamma}}\nabla\times\tilde{B} =\frac{1}{\sqrt{\gamma}}v+\frac{1}{\sqrt{\gamma}}[\Phi(\sigma)+\sigma]v,\\ &\partial_t \tilde{B}+\frac{1}{\sqrt{\gamma}}\nabla \times \tilde{E}=0,\\ &\nabla \cdot \tilde{E}=-\frac{1}{\sqrt{\gamma}}[\Phi(\sigma)+\sigma] +\frac{1}{\sqrt{\gamma}}(n_{b}(x)-1), \ \ \nabla \cdot \tilde{B}=0, \ \ \ t>0,\ x\in\mathbb{R}^{3}, \end{aligned}\right. 
\end{equation} with initial data \begin{eqnarray}\label{2.3} V|_{t=0}=V_{0}:=[\sigma_{0},v_{0},\tilde{E}_{0},\tilde{B}_{0}],\ \ x\in\mathbb{R}^{3}. \end{eqnarray} Here, $\Phi(\cdot)$ is defined by \begin{eqnarray}\label{def.phi} \Phi(\sigma)=\left(\frac{\gamma-1}{2}\sigma+1\right)^{\frac{2}{\gamma-1}}-\sigma-1, \end{eqnarray} and $V_{0}=[\sigma_{0},v_{0},\tilde{E}_{0},\tilde{B}_{0}]$ is given from $[n_{0},u_{0},E_{0},B_0]$ according to the transform (\ref{2.1}), and hence $V_{0}$ satisfies \begin{eqnarray}\label{2.4} \nabla\cdot\tilde{E}_0=-\frac{1}{\sqrt{\gamma}}[\Phi(\sigma_{0})+\sigma_{0}] +\frac{1}{\sqrt{\gamma}}(n_{b}(x)-1),\ \ \ \ \nabla \cdot \tilde{B}_0=0,\ \ \ x\in\mathbb{R}^{3}. \end{eqnarray} On the other hand, set \begin{eqnarray}\label{sta.tran} \sigma_{st}(x)=\frac{2}{\gamma-1}\left\{n_{st}(x)^{\frac{\gamma-1}{2}}-1\right\}, \ \ \ \ \tilde{E}_{st}=\frac{1}{\sqrt{\gamma}}E_{st}(x). \end{eqnarray} Then, $[\sigma_{st},\tilde{E}_{st}]$ satisfies \begin{eqnarray}\label{sta.eq} \left\{\begin{aligned} & \left(\frac{\gamma-1}{2}\sigma_{st}+1\right)\nabla\sigma_{st}=-\frac{1}{\sqrt{\gamma}}\tilde{E}_{st},\\ &\frac{1}{\sqrt{\gamma}}\nabla\times \tilde{E}_{st}=0,\\ &\nabla \cdot \tilde{E}_{st}=\frac{1}{\sqrt{\gamma}}(n_{b}(x)-1)-\frac{1}{\sqrt{\gamma}}(\Phi(\sigma_{st})+\sigma_{st}). \end{aligned}\right. \end{eqnarray} Based on the existence result proved in Section 2, we will study the stability of the stationary state $[\sigma_{st},0,\tilde{E}_{st},0]$. Set the perturbations $[\bar{\sigma},\bar{v},\bar{E},\bar{B}]$ by \begin{eqnarray*} \bar{\sigma}=\sigma-\sigma_{st},\ \ \bar{v}=v,\ \ \bar{E}=\tilde{E}-\tilde{E}_{st},\ \ \bar{B}=\tilde{B}. 
\end{eqnarray*} Combining \eqref{2.2} with \eqref{sta.eq}, then $\bar{V}:=[\bar{\sigma},\bar{v},\bar{E},\bar{B}]$ satisfies \begin{equation}\label{sta.equ} \left\{ \begin{aligned} &\partial_t \bar{\sigma}+(\frac{\gamma-1}{2}\bar{\sigma}+1)\nabla\cdot \bar{v}+\bar{v}\cdot \nabla \bar{\sigma} +\bar{v}\cdot \nabla \sigma_{st}+\frac{\gamma-1}{2}\sigma_{st}\nabla\cdot \bar{v}=0,\\ &\partial_t \bar{v}+\bar{v} \cdot \nabla \bar{v}+(\frac{\gamma-1}{2}\bar{\sigma}+1)\nabla \bar{\sigma} +\frac{\gamma-1}{2}\bar{\sigma}\nabla \sigma_{st}+\frac{\gamma-1}{2}\sigma_{st}\nabla\bar{\sigma}= -(\frac{1}{\sqrt{\gamma}}\bar{E}+\bar{v}\times \bar{B}) -\frac{1}{\sqrt{\gamma}}\bar{v},\\ &\partial_t\bar{E}-\frac{1}{\sqrt{\gamma}}\nabla\times \bar{B} =\frac{1}{\sqrt{\gamma}}\bar{v}+ \frac{1}{\sqrt{\gamma}}[\Phi(\bar{\sigma}+\sigma_{st})+\bar{\sigma}+\sigma_{st}]\bar{v},\\ &\partial_t \bar{B}+\frac{1}{\sqrt{\gamma}}\nabla \times \bar{E}=0,\\ &\nabla \cdot \bar{E}=-\frac{1}{\sqrt{\gamma}}[\Phi(\bar{\sigma}+\sigma_{st})-\Phi(\sigma_{st})] -\frac{1}{\sqrt{\gamma}}\bar{\sigma}, \ \ \nabla \cdot \bar{B}=0, \ \ t>0,\ x\in\mathbb{R}^{3}, \end{aligned}\right. \end{equation} with initial data \begin{eqnarray}\label{sta.equi} \bar{V}|_{t=0}=\bar{V}_{0}:=[\sigma_{0}-\sigma_{st},v_{0},\tilde{E}_{0}-\tilde{E}_{st},\tilde{B}_{0}],\ \ x\in\mathbb{R}^{3}. \end{eqnarray} Here, $\Phi(\cdot)$ is defined by \eqref{def.phi}, and $\bar{V}_{0}$ satisfies \begin{eqnarray}\label{sta.equC} \nabla \cdot \bar{E}_{0}=-\frac{1}{\sqrt{\gamma}}[\Phi(\bar{\sigma}_{0}+\sigma_{st})-\Phi(\sigma_{st})] -\frac{1}{\sqrt{\gamma}}\bar{\sigma}_{0}, \ \ \nabla \cdot \bar{B}_{0}=0, \ \ t>0,\ x\in\mathbb{R}^{3}. \end{eqnarray} In what follows, we suppose the integer $N \geq 3$. 
Besides, for $\bar{V}=[\bar{\sigma},\bar{v},\bar{E},\bar{B}]$, we define the full instant energy functional $\mathcal {E}_{N}(\bar{V}(t))$ and the high-order instant energy functional $\mathcal {E}_{N}^{h}(\bar{V}(t))$ by \begin{equation}\label{de.E} \arraycolsep=1.5pt \begin{array}{rl} \mathcal{E}_{N}(\bar{V}(t))=&\displaystyle\sum_{|\alpha|\leq N}\int_{\mathbb{R}^3}(1+\sigma_{st}+\Phi(\sigma_{st})) (|\partial^{\alpha}\bar{\sigma}|^2+|\partial^{\alpha}\bar{v}|^2)dx+\|[\bar{E},\bar{B}]\|_{N}^{2}\\[5mm] &\displaystyle+\kappa_{1}\sum_{|\alpha|\leq N-1} \langle \partial^{\alpha}\bar{v},\nabla\partial^{\alpha}\bar{\sigma}\rangle+\kappa_{2}\sum_{|\alpha|\leq N-1}\langle \partial^{\alpha}\bar{v},\partial^{\alpha}\bar{E}\rangle\\[5mm] &\displaystyle-\kappa_{3}\sum_{|\alpha|\leq N-2}\langle \nabla \times \partial^{\alpha}\bar{E},\partial^{\alpha}\bar{B}\rangle, \end{array} \end{equation} and \begin{equation}\label{de.Eh} \begin{aligned} \mathcal{E}_{N}^{h}(\bar{V}(t))&=\sum_{1\leq|\alpha|\leq N}\int_{\mathbb{R}^3}(1+\sigma_{st}+\Phi(\sigma_{st})) (|\partial^{\alpha}\bar{\sigma}|^2+|\partial^{\alpha}\bar{v}|^2)dx+\|\nabla[\bar{E},\bar{B}]\|_{N-1}^{2}\\ &+\kappa_{1}\sum_{1\leq|\alpha|\leq N-1}\langle \partial^{\alpha}\bar{v},\nabla\partial^{\alpha}\bar{\sigma}\rangle+\kappa_{2}\sum_{1\leq|\alpha|\leq N-1}\langle \partial^{\alpha}\bar{v},\partial^{\alpha}\bar{E}\rangle\\[3mm] &-\kappa_{3}\sum_{1\leq |\alpha|\leq N-2}\langle \nabla \times\partial^{\alpha}\bar{E},\partial^{\alpha}\bar{B}\rangle, \end{aligned} \end{equation} respectively, where $0<\kappa_{3}\ll\kappa_{2}\ll\kappa_{1}\ll 1$ are constants to be properly chosen in the later proof. 
Notice that since all constants $\kappa_i$ $(i=1,2,3)$ are small enough, one has \begin{equation*} \mathcal {E}_{N}(\bar{V}(t))\sim \|[\bar{\sigma},\bar{v},\bar{E},\bar{B}] \|_{N}^{2},\quad \mathcal {E}_{N}^{h}(\bar{V}(t))\sim \|\nabla [\bar{\sigma},\bar{v},\bar{E},\bar{B}] \|_{N-1}^{2}. \end{equation*} We further define the dissipation rates $\mathcal {D}_{N}(\bar{V}(t))$, $\mathcal {D}_{N}^{h}(\bar{V}(t))$ by \begin{eqnarray}\label{de.D} \arraycolsep=1.5pt \begin{array}{rl} \mathcal {D}_{N}(\bar{V}(t))=\displaystyle \sum_{|\alpha|\leq N}\int_{\mathbb{R}^3}(1+\sigma_{st}&+\Phi(\sigma_{st}))|\partial^{\alpha}\bar{v}|^{2}dx\\[3mm] &+\|\bar{\sigma}\|_{N}^{2}+\|\nabla[\bar{E},\bar{B}]\|_{N-2}^{2}+\|\bar{E}\|^{2}, \end{array} \end{eqnarray} and \begin{eqnarray}\label{de.Dh} \arraycolsep=1.5pt \begin{array}{rl} \mathcal {D}_{N}^{h}(\bar{V}(t))=\displaystyle \sum_{1\leq|\alpha|\leq N}\int_{\mathbb{R}^3}(1+\sigma_{st}&+\Phi(\sigma_{st}))|\partial^{\alpha}\bar{v}|^{2}dx\\[3mm] &+\|\nabla\bar{\sigma}\|_{N-1}^{2}+\|\nabla^2[\bar{E},\bar{B}]\|_{N-3}^{2}+\|\nabla\bar{E}\|^{2}. \end{array} \end{eqnarray} Then, concerning the reformulated Cauchy problem $\eqref{sta.equ}$-$\eqref{sta.equi}$, one has the following global existence result. \begin{proposition}\label{pro.2.1} Suppose that $\|n_{b}-1\|_{W_{0}^{N+1,2}}$ is small enough and $\eqref{sta.equC}$ holds for given initial data $\bar{V}_{0}=[\sigma_{0}-\sigma_{st},v_{0},\tilde{E}_0-\tilde{E}_{st},\tilde{B}_{0}]$. 
Then, there are $\mathcal {E}_{N}(\cdot) $ and $\mathcal {D}_{N}(\cdot)$ in the form $\eqref{de.E} $ and $\eqref{de.D}$ such that the following holds true: If $\mathcal {E}_{N}(\bar{V}_{0})>0$ is small enough, the Cauchy problem $\eqref{sta.equ}$-$\eqref{sta.equi}$ admits a unique global nonzero solution $\bar{V}=[\sigma-\sigma_{st},v,\tilde{E}-\tilde{E}_{st},\tilde{B}] $ satisfying \begin{eqnarray}\label{V.satisfy} \bar{V} \in C([0,\infty);H^{N}(\mathbb{R}^{3}))\cap {\rm Lip}([0,\infty);H^{N-1}(\mathbb{R}^{3})), \end{eqnarray} and \begin{eqnarray}\label{pro.2.1j} \mathcal {E}_{N}(\bar{V}(t))+\lambda\int_{0}^{t}\mathcal {D}_{N}(\bar{V}(s))ds\leq \mathcal {E}_{N}(\bar{V}_{0}) \end{eqnarray} for any $t\geq 0$. \end{proposition} Moreover, solutions obtained in Proposition $ \ref{pro.2.1}$ indeed decay in time with some rates under some extra regularity and integrability conditions on initial data. For that, given $\bar{V}_{0}=[\sigma_{0}-\sigma_{st},v_{0},\tilde{E}_0-\tilde{E}_{st},\tilde{B}_{0}]$, set $\epsilon_{m}(\bar{V}_0)$ as \begin{eqnarray}\label{def.epsi} \epsilon_{m}(\bar{V}_0)=\|\bar{V}_{0}\|_{m}+\|[v_{0},\tilde{E}_0-\tilde{E}_{st},\tilde{B}_{0}]\|_{L^{1}}, \end{eqnarray} for the integer $m \geq 6$. Then one has the following proposition. \begin{proposition}\label{pro.2.2} Suppose that $\|n_{b}-1\|_{W_{0}^{N+4,2}}$ is small enough and $\eqref{sta.equC}$ holds for given initial data $\bar{V}_{0}=[\sigma_{0}-\sigma_{st},v_{0},\tilde{E}_0-\tilde{E}_{st},\tilde{B}_{0}]$. If $\epsilon_{N+3}(\bar{V}_{0})>0$ is small enough, then the solution $\bar{V}=[\sigma-\sigma_{st},v,\tilde{E}-\tilde{E}_{st},\tilde{B}] $ satisfies \begin{eqnarray}\label{V.decay} \|\bar{V}(t)\|_{N} \leq C \epsilon_{N+3}(\bar{V}_{0})(1+t)^{-\frac{3}{4}}, \end{eqnarray} and \begin{eqnarray}\label{nablaV.decay} \|\nabla \bar{V}(t)\|_{N-1} \leq C \epsilon_{N+3}(\bar{V}_{0})(1+t)^{-\frac{5}{4}} \end{eqnarray} for any $t\geq 0$. 
\end{proposition} \subsection{A priori estimates} In this subsection, we prove that the stationary solution obtained in Section \ref{sec2} is stable under small initial perturbation. We begin to use the refined energy method to obtain some uniform-in-time \emph{a priori} estimates for smooth solutions to the Cauchy problem (\ref{sta.equ})-(\ref{sta.equi}). To this end, let us denote \begin{equation}\label{def.delta} \delta=\|\sigma_{st}\|_{W_{0}^{N+1,2}}=\left(\sum_{|\alpha|\leq N+1}\int_{\mathbb{R}^3}|\partial^{\alpha}_{x}\sigma_{st}|^2dx\right)^{\frac{1}{2}} \end{equation} for simplicity of presentation. A careful look at the proof of Theorem \ref{sta.existence} shows that \begin{eqnarray*} \sigma_{st}&=&\frac{2}{\gamma-1}\left\{n_{st}^{\frac{\gamma-1}{2}}-1\right\}\\ &=&\frac{2}{\gamma-1}\left\{\left(\frac{\gamma-1}{\gamma}Q_{st}+1\right)^{\frac{1}{2}}-1\right\}\\ &=&\frac{2}{\gamma}\dfrac{Q_{st}}{\left(\frac{\gamma-1}{\gamma}Q_{st}+1\right)^{\frac{1}{2}}+1}\sim Q_{st}. \end{eqnarray*} It follows that $\delta \leq C\|Q_{st}\|_{W_{0}^{N+1,2}}\leq C\|n_{b}-1\|_{W_{0}^{N+1,2}}$ is small enough. Notice that (\ref{sta.equ}) is a quasi-linear symmetric hyperbolic system. The main goal of this subsection is to prove \begin{theorem}\label{estimate}(\textrm{a priori estimates}). Let $0<T\leq \infty$ be given. Suppose $\bar{V}:=[\bar{\sigma},\bar{v},\bar{E},\bar{B}]\in C([0,T);H^{N}(\mathbb{R}^{3}))$ is smooth for $T>0$ with \begin{eqnarray}\label{3.1} \sup_{0\leq t<T}\|\bar{V}(t)\|_{N}\leq 1, \end{eqnarray} and assume that $\bar{V}$ solves the system (\ref{sta.equ}) for $t\in(0,T)$. Then, there are $\mathcal {E}_{N}(\cdot) $ and $\mathcal {D}_{N}(\cdot)$ in the form $\eqref{de.E} $ and $\eqref{de.D}$ such that \begin{eqnarray}\label{3.2} && \frac{d}{dt}\mathcal {E}_{N}(\bar{V}(t))+\lambda\mathcal {D}_{N}(\bar{V}(t))\leq C[\mathcal {E}_{N}(\bar{V}(t))^{\frac{1}{2}}+\mathcal {E}_{N}(\bar{V}(t))+\delta]\mathcal {D}_{N}(\bar{V}(t)) \end{eqnarray} for any $0\leq t<T$. 
\end{theorem} \begin{proof} The proof is divided into five steps. \textbf{ Step 1.} It holds that \begin{equation}\label{3.3} \begin{aligned} &\frac{1}{2}\frac{d}{dt}\left(\sum_{|\alpha|\leq N}\int_{\mathbb{R}^3}(1+\sigma_{st}+\Phi(\sigma_{st})) (|\partial^{\alpha}\bar{\sigma}|^2+|\partial^{\alpha}\bar{v}|^2)dx+\|[\bar{E},\bar{B}]\|_{N}^{2}\right)\\ &+\frac{1}{\sqrt{\gamma}}\sum_{|\alpha|\leq N}\int_{\mathbb{R}^3}(1+\sigma_{st}+\Phi(\sigma_{st}))|\partial^{\alpha}\bar{v}|^{2}dx\\ \leq & C(\|\bar{V}\|_{N}+\delta)(\|[\bar{\sigma},\bar{v}]\|^{2}+\|\nabla[\bar{\sigma},\bar{v}]\|_{N-1}^{2} +\|\nabla \bar{E}\|_{N-2}^2). \end{aligned} \end{equation} In fact, applying $\partial^{\alpha}$ to the first two equations of (\ref{sta.equ}) for $|\alpha|\leq N$ and multiplying them by $(1+\sigma_{st}+\Phi(\sigma_{st}))\partial^{\alpha}\bar{\sigma}$ and $(1+\sigma_{st}+\Phi(\sigma_{st}))\partial^{\alpha}\bar{v}$ respectively, taking integrations in $x$ and then using integration by parts give \begin{eqnarray}\label{3.4} && \begin{aligned} &\frac{1}{2}\frac{d}{dt}\int_{\mathbb{R}^3}(1+\sigma_{st}+\Phi(\sigma_{st})) (|\partial^{\alpha}\bar{\sigma}|^2+|\partial^{\alpha}\bar{v}|^2)dx+\frac{1}{\sqrt{\gamma}} \langle \partial^{\alpha}\bar{E},(1+\sigma_{st}+\Phi(\sigma_{st}))\partial^{\alpha}\bar{v}\rangle\\ &+\frac{1}{\sqrt{\gamma}}\int_{\mathbb{R}^3}(1+\sigma_{st}+\Phi(\sigma_{st}))|\partial^{\alpha}\bar{v}|^{2}dx =-\sum_{\beta<\alpha}C^{\alpha}_{\beta}I_{\alpha,\beta}(t)+I_{1}(t). 
\end{aligned} \end{eqnarray} Here, $I_{\alpha,\beta}(t)=I_{\alpha,\beta}^{(\sigma)}(t)+I_{\alpha,\beta}^{(v)}(t)$ with \begin{eqnarray*} \arraycolsep=1.5pt \begin{array}{rl} \displaystyle I_{\alpha,\beta}^{(\sigma)}(t)=& \displaystyle\langle \partial^{\alpha-\beta}\bar{v} \cdot \nabla \partial^{\beta}\bar{\sigma}, (1+\sigma_{st}+\Phi(\sigma_{st}))\partial^{\alpha}\bar{\sigma}\rangle\\[3mm] &\displaystyle+\frac{\gamma-1}{2}\langle\partial^{\alpha-\beta}\bar{\sigma} \partial^{\beta}\nabla\cdot\bar{v} ,(1+\sigma_{st}+\Phi(\sigma_{st}))\partial^{\alpha}\bar{\sigma}\rangle\\[3mm] & \displaystyle+\frac{\gamma-1}{2}\langle \partial^{\alpha-\beta}\sigma_{st} \partial^{\beta}\nabla\cdot\bar{v} , (1+\sigma_{st}+\Phi(\sigma_{st}))\partial^{\alpha}\bar{\sigma} \rangle\\[3mm] & \displaystyle +\langle \partial^{\alpha-\beta}\bar{v}\cdot \partial^{\beta}\nabla \sigma_{st} ,(1+\sigma_{st}+\Phi(\sigma_{st}))\partial^{\alpha}\bar{\sigma}\rangle, \end{array} \end{eqnarray*} \begin{eqnarray*} \arraycolsep=1.5pt \begin{array}{rl} \displaystyle I_{\alpha,\beta}^{(v)}(t)=& \displaystyle\langle \partial^{\alpha-\beta}\bar{v} \cdot \nabla \partial^{\beta}\bar{v} , (1+\sigma_{st}+\Phi(\sigma_{st}))\partial^{\alpha}\bar{v}\rangle\\[3mm] &\displaystyle+\frac{\gamma-1}{2}\langle\partial^{\alpha-\beta}\bar{\sigma} \nabla\partial^{\beta}\bar{\sigma},(1+\sigma_{st}+\Phi(\sigma_{st}))\partial^{\alpha}\bar{v}\rangle\\[3mm] &\displaystyle+\frac{\gamma-1}{2}\langle \partial^{\alpha-\beta}\sigma_{st} \nabla \partial^{\beta}\bar{\sigma} ,(1+\sigma_{st}+\Phi(\sigma_{st}))\partial^{\alpha}\bar{v}\rangle\\[3mm] &\displaystyle+\langle \partial^{\alpha-\beta}\bar{v}\times \partial^{\beta}\bar{B} ,(1+\sigma_{st}+\Phi(\sigma_{st}))\partial^{\alpha}\bar{v}\rangle\\[3mm] &\displaystyle+\frac{\gamma-1}{2}\langle \partial^{\alpha-\beta}\bar{\sigma} \nabla \partial^{\beta}\sigma_{st} ,(1+\sigma_{st}+\Phi(\sigma_{st})) \partial^{\alpha}\bar{v} \rangle \end{array} \end{eqnarray*} and \begin{eqnarray*} 
\arraycolsep=1.5pt \begin{array}{rl} I_{1}(t)=& \displaystyle\frac{1}{2}\langle \nabla \cdot \bar{v}, (1+\sigma_{st}+\Phi(\sigma_{st}))(|\partial^{\alpha}\bar{\sigma}|^{2}+|\partial^{\alpha}\bar{v}|^{2}) \rangle\\[3mm] & \displaystyle+ \frac{\gamma-1}{2}\langle \nabla \bar{\sigma}\cdot\partial^{\alpha}\bar{v} ,(1+\sigma_{st}+\Phi(\sigma_{st}))\partial^{\alpha}\bar{\sigma} \rangle-\langle \bar{v}\times \partial^{\alpha}\bar{B},(1+\sigma_{st}+\Phi(\sigma_{st}))\partial^{\alpha}\bar{v}\rangle\\[3mm] & \displaystyle+\frac{\gamma-1}{2}\langle \nabla\sigma_{st} \partial^{\alpha}\bar{v},(1+\sigma_{st}+\Phi(\sigma_{st}))\partial^{\alpha}\bar{\sigma} \rangle-\frac{\gamma-1}{2}\langle \bar{\sigma}\partial^{\alpha} \nabla\sigma_{st},(1+\sigma_{st}+\Phi(\sigma_{st}))\partial^{\alpha}\bar{v} \rangle\\[3mm] &- \displaystyle \langle \bar{v}\cdot\partial^{\alpha} \nabla\sigma_{st},(1+\sigma_{st}+\Phi(\sigma_{st}))\partial^{\alpha}\bar{\sigma} \rangle\\[3mm] &\displaystyle+\left\langle \left(\frac{\gamma-1}{2}\bar{\sigma}+1\right)\partial^{\alpha}\bar{v}, \nabla(1+\sigma_{st}+\Phi(\sigma_{st}))\partial^{\alpha}\bar{\sigma} \right\rangle\\[3mm] & \displaystyle +\frac{\gamma-1}{2}\langle \sigma_{st} \partial^{\alpha}\bar{v},\nabla(1+\sigma_{st}+\Phi(\sigma_{st}))\partial^{\alpha}\bar{\sigma} \rangle\\[3mm] &\displaystyle+\frac{1}{2}\langle\bar{v},\nabla(1+\sigma_{st}+\Phi(\sigma_{st})) (|\partial^{\alpha}\bar{\sigma}|^2+|\partial^{\alpha}\bar{v}|^{2})\rangle\triangleq\sum_{j=1}^{9}I_{1,j}(t). 
\end{array} \end{eqnarray*} When $|\alpha|=0$, it suffices to estimate $I_{1}(t)$ by \begin{eqnarray*} \begin{aligned} I_{1}(t)\leq &C \|\nabla \cdot \bar{v}\|(\|\bar{v}\|_{L^{6}}\|\bar{v}\|_{L^{3}} +\|\bar{\sigma}\|_{L^{6}}\|\bar{\sigma}\|_{L^{3}}) +C \|\nabla \bar{\sigma}\|\|\bar{v}\|_{L^{6}}\|\bar{\sigma}\|_{L^{3}} +C \|\bar{B}\|_{L^{\infty}}\|\bar{v}\|^{2}\\ &+C \|\nabla\sigma_{st}\|\left\|\bar{\sigma}\right\|_{L^{6}} \|\bar{v}\|_{L^3}+C\|\sigma_{st}\|_{L^{\infty}}\|\nabla\sigma_{st}\|\left\|\bar{\sigma}\right\|_{L^{6}} \|\bar{v}\|_{L^3}\\ &+\|\bar{v}\|_{L^{\infty}} \|\nabla\sigma_{st}\|(\|\bar{\sigma}\|_{L^{6}}\|\bar{\sigma}\|_{L^{3}} +\|\bar{v}\|_{L^{6}}\|\bar{v}\|_{L^{3}}) \\ \leq & C (\|[\bar{\sigma},\bar{v}]\|_{H^{1}}+\delta+\delta\|\nabla\bar{v}\|_{H^1})(\|\nabla [\bar{\sigma},\bar{v}]\|^{2}+\|[\bar{\sigma},\bar{v}]\|^{2})+ C \|\nabla \bar{B}\|_{H^{1}}\|\bar{v}\|^{2}, \end{aligned} \end{eqnarray*} which is further bounded by the r.h.s. term of (\ref{3.3}). When $|\alpha|\geq 1$, for $I_{1}(t)$, the similarity of $I_{1,1}(t)$ and $I_{1,2}(t)$ shows that we can estimate them together as follows \begin{eqnarray*} \begin{aligned} I_{1,1}(t)+I_{1,2}(t)\leq & C \|\nabla\cdot\bar{v}\|_{L^{\infty}}\|(1+\sigma_{st}+\Phi(\sigma_{st}))\|_{L^{\infty}} \|\nabla[\bar{\sigma},\bar{v}]\|_{N-1}^2\\ &+C\|\nabla\bar{\sigma}\|_{L^{\infty}}\|(1+\sigma_{st}+\Phi(\sigma_{st}))\|_{L^{\infty}} \|\nabla[\bar{\sigma},\bar{v}]\|_{N-1}^2\\ \leq & C \|[\bar{\sigma},\bar{v}]\|_{N}\|\nabla[\bar{\sigma},\bar{v}]\|_{N-1}^2. 
\end{aligned} \end{eqnarray*} For $I_{1,3}(t)$, $I_{1,5}(t)$ and $I_{1,6}(t)$, there is no derivative of $\bar{\sigma}$ or $\bar{v}$, so we use $L^{\infty}$ of $\bar{v}$ or $\bar{\sigma}$, \begin{eqnarray*} \begin{aligned} I_{1,3}(t)+I_{1,5}(t)+I_{1,6}(t)\leq & C \|\bar{v}\|_{L^{\infty}}\|\partial^{\alpha} \bar{B}\|\|(1+\sigma_{st}+\Phi(\sigma_{st}))\|_{L^{\infty}} \|\nabla\bar{v}\|_{N-1}\\ &+C \|\bar{\sigma}\|_{L^{\infty}}\|\partial^{\alpha} \nabla\sigma_{st}\|\|(1+\sigma_{st}+\Phi(\sigma_{st}))\|_{L^{\infty}} \|\nabla\bar{v}\|_{N-1}\\ &+C \|\bar{v}\|_{L^{\infty}}\|\partial^{\alpha} \nabla\sigma_{st}\|\|(1+\sigma_{st}+\Phi(\sigma_{st}))\|_{L^{\infty}} \|\nabla\bar{\sigma}\|_{N-1}\\ \leq & C(\delta+\|\bar{B}\|_{N})\|\nabla[\bar{\sigma},\bar{v}]\|_{N-1}^2. \end{aligned} \end{eqnarray*} For other terms of $I_{1}(t)$, both $\bar{\sigma}$ and $\bar{v}$ contain derivatives, so one can use the $L^{2}$ of these terms and $L^{\infty}$ of others. Combining the above two estimates, one has \begin{eqnarray*} I_{1}(t)\leq C (\|[\bar{\sigma},\bar{v},\bar{B}]\|_{N} +\delta+\delta\|\nabla \bar{v}\|_{H^1})\|\nabla [\bar{\sigma},\bar{v}]\|_{N-1}^{2}, \end{eqnarray*} which is bounded by the r.h.s. term of (\ref{3.3}). On the other hand, since each term in $I_{\alpha,\beta}(t)$ is the integration of the four-term product in which there is at least one term containing the derivative, one has \begin{eqnarray*} I_{\alpha,\beta}(t)\leq C (\|[\bar{\sigma},\bar{v},\bar{B}]\|_{N} +\delta+\delta\|\nabla \bar{v}\|_{H^1})\|\nabla [\bar{\sigma},\bar{v}]\|_{N-1}^{2}, \end{eqnarray*} which is also bounded by the r.h.s. term of (\ref{3.3}). 
From (\ref{sta.equ}), energy estimates on $\partial^{\alpha}\bar{E}$ and $\partial^{\alpha}\bar{B}$ with $|\alpha| \leq N$ give \begin{eqnarray}\label{3.5} && \begin{aligned} &\frac{1}{2}\frac{d}{dt}\|\partial^{\alpha}[\bar{E},\bar{B}]\|^{2} -\frac{1}{\sqrt{\gamma}}\langle (1+\sigma_{st}+\Phi(\sigma_{st}))\partial ^{\alpha}\bar{v},\partial^{\alpha}\bar{E}\rangle\\ =&\frac{1}{\sqrt{\gamma}}\langle \partial ^{\alpha}[(\Phi(\bar{\sigma}+\sigma_{st})-\Phi(\sigma_{st}))\bar{v}],\partial^{\alpha}\bar{E}\rangle+ \frac{1}{\sqrt{\gamma}}\langle \partial ^{\alpha}[\bar{\sigma}\bar{v}],\partial^{\alpha}\bar{E}\rangle\\ &+\frac{1}{\sqrt{\gamma}}\sum_{\beta<\alpha}C_{\beta}^{\alpha}\langle \partial^{\alpha-\beta}(1+\sigma_{st}+\Phi(\sigma_{st})) \partial^{\beta}\bar{v},\partial^{\alpha}\bar{E}\rangle\\ =&I_{2,1}(t)+I_{2,2}(t)+\sum_{\beta<\alpha}C_{\beta}^{\alpha}I_{2,\beta}(t). \end{aligned} \end{eqnarray} In a similar way as before, when $|\alpha|=0$, it suffices to estimate $I_{2,1}(t)+ I_{2,2}(t)$ by \begin{eqnarray*} I_{2,1}(t)+ I_{2,2}(t)\leq C \|\nabla \bar{\sigma}\|\cdot\|\bar{v}\|_{1}\|\bar{E}\|. \end{eqnarray*} When $|\alpha|>0$, $I_{2,1}(t)$ and $I_{2,2}(t)$ can be estimated in a similar way as in \cite{Duan}, \begin{eqnarray*} I_{2,1}(t)+ I_{2,2}(t)\leq C \|\nabla \bar{\sigma}\|_{N-1}\|\nabla \bar{v}\|_{N-1}\|\bar{E}\|_{N}. \end{eqnarray*} When $|\alpha|>0$, for each $\beta$ with $\beta<\alpha$, $I_{2,\beta}$ is estimated by three cases. \textsl{Case 1.} $|\alpha|=N$. In this case, integration by parts shows that \begin{eqnarray*} && \begin{aligned} I_{2,\beta}(t) \leq & C \delta \|\nabla \bar{v}\|_{N-1}\|\nabla\bar{E}\|_{N-2}\\ \leq & C \delta \|\nabla \bar{v}\|_{N-1}^2+C \delta \|\nabla \bar{E}\|_{N-2}^2. \end{aligned} \end{eqnarray*} \textsl{Case 2.} $|\alpha|<N $ and $|\beta|\geq 1$ which imply $|\alpha-\beta|\leq N-2$. 
It holds that \begin{eqnarray*} && \begin{aligned} I_{2,\beta}(t) \leq & C\|\partial^{\alpha-\beta}(1+\sigma_{st}+\Phi(\sigma_{st}))\|_{L^{\infty}} \|\partial^{\beta}\bar{v}\|\|\partial^{\alpha}\bar{E}\|\\ \leq & C\|\nabla\partial^{\alpha-\beta}(1+\sigma_{st}+\Phi(\sigma_{st}))\|_{H^{1}}\|\nabla \bar{v}\|_{N-1}\|\nabla\bar{E}\|_{N-2}\\ \leq & C \delta \|\nabla \bar{v}\|_{N-1}^2+C \delta \|\nabla \bar{E}\|_{N-2}^2. \end{aligned} \end{eqnarray*} \textsl{Case 3.} $|\alpha|<N $ and $|\beta|=0$. In this case, there is no derivative of $\bar{v}$, so one can use $L^{\infty}$ of $\bar{v}$ to estimate $I_{2,\beta}(t)$, \begin{eqnarray*} && \begin{aligned} I_{2,\beta}(t) \leq & C\|\partial^{\alpha-\beta}(1+\sigma_{st}+\Phi(\sigma_{st}))\| \|\bar{v}\|_{L^{\infty}}\|\partial^{\alpha}\bar{E}\|\\ \leq & C \delta \|\nabla \bar{v}\|_{N-1}^2+C \delta \|\nabla \bar{E}\|_{N-2}^2, \end{aligned} \end{eqnarray*} which is bounded by the r.h.s. term of (\ref{3.3}). Then (\ref{3.3}) follows by taking summation of (\ref{3.4}) and (\ref{3.5}) over $|\alpha| \leq N$. Thus the time evolution of the full instant energy $\|\bar{V}(t)\|_{N}^{2}$ has been obtained but its dissipation rate only contains the contribution from the explicit relaxation variable $\bar{v}$. In a parallel way as in \cite{Duan}, by introducing some interactive functionals, the dissipation from contributions of the rest components $\bar{\sigma}$, $\bar{E}$, and $\bar{B}$ can be recovered in turn. \textbf{Step 2.} It holds that \begin{eqnarray}\label{step2} &&\begin{aligned} &\frac{d}{dt}\mathcal {E}_{N,1}^{int}(\bar{V})+\lambda\|\bar{\sigma}\|^{2}_{N} \\ \leq & C\|\nabla\bar{v}\|_{N-1}^{2}+C(\|[\bar{\sigma}, \bar{v},\bar{B}]\|_{N}^{2}+\delta) \|\nabla[\bar{\sigma},\bar{v}]\|_{N-1}^{2}, \end{aligned} \end{eqnarray} where $\mathcal {E}_{N,1}^{int}(\cdot)$ is defined by \begin{eqnarray*} \mathcal {E}_{N,1}^{int}(\bar{V})=\sum_{|\alpha|\leq N-1}\langle \partial^{\alpha}\bar{v},\nabla\partial^{\alpha}\bar{\sigma}\rangle. 
\end{eqnarray*} In fact, the first two equations of $ \eqref{sta.equ}$ can be rewritten as \begin{eqnarray}\label{3.7} &&\partial_t \bar{\sigma}+\nabla \cdot \bar{v}=f_{1}, \end{eqnarray} \begin{eqnarray}\label{3.9} \partial_t \bar{v}+\nabla \bar{\sigma}+\frac{1}{\sqrt{\gamma}}\bar{E}=f_{2}-\frac{1}{\sqrt{\gamma}}\bar{v}, \end{eqnarray} where \begin{eqnarray}\label{f1f2} && \left\{ \begin{aligned} & f_{1}:=-\bar{v}\cdot \nabla\bar{\sigma}-\frac{\gamma-1}{2}\bar{\sigma}\nabla \cdot \bar{v} -\bar{v}\cdot \nabla \sigma_{st}-\frac{\gamma-1}{2}\sigma_{st}\nabla \cdot \bar{v},\\ & f_{2}:=-\bar{v}\cdot \nabla \bar{v}-\frac{\gamma-1}{2}\bar{\sigma}\nabla \bar{\sigma}-\bar{v}\times \bar{B} -\frac{\gamma-1}{2}\sigma_{st}\nabla \bar{\sigma}-\frac{\gamma-1}{2}\bar{\sigma}\nabla\sigma_{st}. \end{aligned}\right. \end{eqnarray} Let $|\alpha|\leq N-1$. Applying $\partial^{\alpha}$ to (\ref{3.9}), multiplying it by $\partial^{\alpha}\nabla \bar{\sigma}$, taking integrations in $x$ and then using integration by parts and also the final equation of (\ref{sta.equ}), replacing $\partial_{t}\bar{\sigma}$ from (\ref{3.7}) give \begin{eqnarray*} \arraycolsep=1.5pt \begin{array}{rl} &\displaystyle \frac{d}{dt}\langle \partial^{\alpha}\bar{v},\nabla \partial^{\alpha}\bar{\sigma}\rangle +\|\nabla\partial^{\alpha}\bar{\sigma}\|^{2}+\frac{1}{\gamma}\| \partial^{\alpha}\bar{\sigma}\|^2\\[3mm] =&\displaystyle -\frac{1}{\gamma}\langle \partial^{\alpha}\left(\Phi(\bar{\sigma}+\sigma_{st})-\Phi(\sigma_{st})\right), \partial^{\alpha}\bar{\sigma}\rangle+\langle\partial^{\alpha}f_{2},\nabla\partial^{\alpha}\bar{\sigma}\rangle\\[3mm] &-\displaystyle\frac{1}{\sqrt{\gamma}}\langle\partial^{\alpha}\bar{v},\nabla\partial^{\alpha}\bar{\sigma}\rangle +\|\nabla \cdot \partial^{\alpha}\bar{v}\|^{2}-\langle\partial^{\alpha}f_{1},\nabla \cdot \partial^{\alpha}\bar{v}\rangle. 
\end{array} \end{eqnarray*} Then, it follows from the Cauchy-Schwarz inequality that \begin{eqnarray}\label{3.11} \arraycolsep=1.5pt \begin{array}{rl} &\displaystyle \frac{d}{dt}\langle \partial^{\alpha}\bar{v},\nabla \partial^{\alpha}\bar{\sigma}\rangle +\lambda(\|\nabla\partial^{\alpha}\bar{\sigma}\|^{2}+\| \partial^{\alpha}\bar{\sigma}\|^2)\\[3mm] \leq & C \displaystyle \|\nabla \cdot \partial^{\alpha}\bar{v}\|^{2}+C(\|\partial^{\alpha} \left(\Phi(\bar{\sigma}+\sigma_{st})-\Phi(\sigma_{st})\right) \|^{2} +\|\partial^{\alpha}f_{1}\|^{2}+\|\partial^{\alpha}f_{2}\|^{2}). \end{array} \end{eqnarray} Noticing that $\Phi(\sigma)$ is smooth in $\sigma$ with $\Phi'(0)=0$, one has from (\ref{f1f2}) that \begin{eqnarray*} \arraycolsep=1.5pt \begin{array}{rl} &\|\partial^{\alpha} \left(\Phi(\bar{\sigma}+\sigma_{st})-\Phi(\sigma_{st})\right) \|^{2} +\|\partial^{\alpha}f_{1}\|^{2}+\|\partial^{\alpha}f_{2}\|^{2}\\[3mm] \leq &C(\|[\bar{\sigma},\bar{v},\bar{B}]\|^{2}_{N}+\delta)\| \nabla[\bar{\sigma},\bar{v}]\|_{N-1}^{2}. \end{array} \end{eqnarray*} Here, if there is no derivative on $\bar{\sigma}$ or $\bar{v}$, then use the $L^{\infty}$ of $\bar{\sigma}$ or $\bar{v}$. Plugging this into (\ref{3.11}) and taking summation over $|\alpha|\leq N-1$ yields (\ref{step2}). \textbf{Step 3.} It holds that \begin{equation}\label{step3} \begin{aligned} \dfrac{d}{dt}\mathcal {E}_{N,2}^{int}(\bar{V})+\lambda\|\bar{E}\|^{2}_{N-1} \leq C&\|[\bar{\sigma},\bar{v}]\|_{N}^{2}+C\|\bar{v}\|_{N}\|\nabla \bar{B}\|_{N-2}\\ &+C(\|[\bar{\sigma},\bar{v},\bar{B}]\|_{N}^{2}+\delta) \|\nabla[\bar{\sigma},\bar{v}]\|_{N-1}^{2}, \end{aligned} \end{equation} where $\mathcal {E}_{N,2}^{int}(\cdot)$ is defined by \begin{eqnarray*} \mathcal {E}_{N,2}^{int}(\bar{V})=\sum_{|\alpha|\leq N-1}\langle \partial^{\alpha}\bar{v},\partial^{\alpha}\bar{E}\rangle. 
\end{eqnarray*} Applying $\partial^{\alpha}$ to (\ref{3.9}), multiplying it by $\partial^{\alpha}\bar{E}$, taking integrations in $x$ and using integration by parts and replacing $ \partial_{t}\bar{E}$ from the third equation of (\ref{sta.equ}) give \begin{equation*} \arraycolsep=1.5pt \begin{array}{rl} &\dfrac{d}{dt}\langle \partial^{\alpha}\bar{v},\partial^{\alpha}\bar{E}\rangle+ \dfrac{1}{\sqrt{\gamma}}\|\partial^{\alpha}\bar{E}\|^{2}\\[3mm] =& \dfrac{1}{\sqrt{\gamma}}\|\partial^{\alpha}\bar{v}\|^{2}+ \dfrac{1}{\sqrt{\gamma}}\langle \partial^{\alpha}\bar{v},\nabla \times \partial^{\alpha}\bar{B}\rangle+\dfrac{1}{\sqrt{\gamma}}\langle \partial^{\alpha}\bar{v}, \partial^{\alpha}[\Phi(\bar{\sigma}+\sigma_{st})\bar{v}+(\bar{\sigma}+\sigma_{st})\bar{v}]\rangle\\[3mm] &-\langle\partial^{\alpha}\nabla \bar{\sigma}+\dfrac{1}{\sqrt{\gamma}}\partial^{\alpha}\bar{v},\partial^{\alpha}\bar{E}\rangle +\langle\partial^{\alpha}f_{2}, \partial^{\alpha}\bar{E}\rangle, \end{array} \end{equation*} which from the Cauchy-Schwarz inequality further implies \begin{equation*} \arraycolsep=1.5pt \begin{array}{rl} &\dfrac{d}{dt}\langle \partial^{\alpha}\bar{v},\partial^{\alpha}\bar{E}\rangle+ \lambda\|\partial^{\alpha}\bar{E}\|^{2}\\[3mm] \leq & C\|[\bar{\sigma},\bar{v}]\|_{N}^{2}+C\|\bar{v}\|_{N}\|\nabla \bar{B}\|_{N-2}+C(\|[\bar{\sigma},\bar{v},\bar{B}]\|_{N}^{2}+\delta) \|\nabla[\bar{\sigma},\bar{v}]\|_{N-1}^{2}. \end{array} \end{equation*} Thus $\eqref{step3}$ follows from taking summation of the above estimate over $|\alpha|\leq N-1$. 
\textbf{Step 4.} It holds that \begin{equation}\label{step4} \begin{aligned} \frac{d}{dt}\mathcal {E}_{N,3}^{int}(\bar{V})+\lambda\|\nabla\bar{B}\|^{2}_{N-2} \leq & C\|[\bar{v},\bar{E}]\|_{N-1}^{2}\\ &+C(\|\bar{\sigma}\|_{N}^{2}+\delta)\|\nabla \bar{v}\|_{N-1}^{2}, \end{aligned} \end{equation} where $\mathcal {E}_{N,3}^{int}(\cdot)$ is defined by \begin{eqnarray*} \mathcal {E}_{N,3}^{int}(\bar{V})=-\sum_{|\alpha|\leq N-2}\langle \nabla \times \partial^{\alpha}\bar{E},\partial^{\alpha}\bar{B}\rangle. \end{eqnarray*} In fact, for $|\alpha|\leq N-2$, applying $\partial^{\alpha}$ to the third equation of $\eqref{sta.equ}$, multiplying it by $-\partial^{\alpha}\nabla \times \bar{B}$, taking integrations in $x$ and using integration by parts and replacing $ \partial_{t}\bar{B}$ from the fourth equation of $\eqref{sta.equ}$ implies \begin{equation*} \arraycolsep=1.5pt \begin{array}{rl} & -\dfrac{d}{dt}\langle \partial^{\alpha}\bar{E},\nabla \times \partial^{\alpha}\bar{B}\rangle+ \dfrac{1}{\sqrt{\gamma}}\|\nabla\times \partial^{\alpha}\bar{B}\|^{2} \\[3mm] =&\dfrac{1}{\sqrt{\gamma}}\|\nabla\times\partial^{\alpha}\bar{E}\|^{2}-\dfrac{1}{\sqrt{\gamma}} \langle \partial^{\alpha}\bar{v},\nabla \times \partial^{\alpha}\bar{B}\rangle -\dfrac{1}{\sqrt{\gamma}}\langle \partial^{\alpha}[\Phi(\bar{\sigma}+\sigma_{st})\bar{v}+(\bar{\sigma}+\sigma_{st})\bar{v}],\nabla \times \partial^{\alpha}\bar{B}\rangle, \end{array} \end{equation*} which gives $\eqref{step4}$ by further using the Cauchy-Schwarz inequality and taking summation over $|\alpha|\leq N-2$, where we also used \begin{eqnarray*} \|\partial^{\alpha}\partial_{i}\bar{B}\|=\|\partial_{i}\Delta^{-1}\nabla \times(\nabla\times\partial^{\alpha}\bar{B}) \|\leq\|\nabla\times \partial^{\alpha}\bar{B}\| \end{eqnarray*} for each $1\leq i\leq 3$, due to the fact that $\partial_{i}\Delta^{-1}\nabla$ is bounded from $L^{p}$ to itself for $1<p<\infty$, cf. \cite{Stein}. 
\textbf{Step 5.} Now, following the four steps above, we are ready to prove $\eqref{3.2}$. Let us define \begin{eqnarray*} \mathcal {E}_{N}(\bar{V}(t))=\sum_{|\alpha|\leq N}\int_{\mathbb{R}^3}(1+\sigma_{st}+\Phi(\sigma_{st})) (|\partial^{\alpha}\bar{\sigma}|^2+|\partial^{\alpha}\bar{v}|^2)dx+\|[\bar{E},\bar{B}]\|_{N}^{2} +\sum_{i=1}^{3}\kappa_{i}\mathcal {E}^{int}_{N,i}(\bar{V}(t)), \end{eqnarray*} that is, \begin{equation}\label{3.12} \arraycolsep=1.5pt \begin{array}{rl} \mathcal{E}_{N}(\bar{V}(t))=&\displaystyle\sum_{|\alpha|\leq N}\int_{\mathbb{R}^3}(1+\sigma_{st}+\Phi(\sigma_{st})) (|\partial^{\alpha}\bar{\sigma}|^2+|\partial^{\alpha}\bar{v}|^2)dx+\|[\bar{E},\bar{B}]\|_{N}^{2}\\[3mm] &\displaystyle+\kappa_{1}\sum_{|\alpha|\leq N-1} \langle \partial^{\alpha}\bar{v},\nabla\partial^{\alpha}\bar{\sigma}\rangle+\kappa_{2}\sum_{|\alpha|\leq N-1}\langle \partial^{\alpha}\bar{v},\partial^{\alpha}\bar{E}\rangle\\[3mm] &\displaystyle-\kappa_{3}\sum_{|\alpha|\leq N-2}\langle \nabla \times \partial^{\alpha}\bar{E},\partial^{\alpha}\bar{B}\rangle \end{array} \end{equation} for constants $0<\kappa_{3}\ll\kappa_{2}\ll\kappa_{1}\ll 1$ to be determined. Notice that as long as $ 0<\kappa_{i}\ll 1$ is small enough for $i=1,2,3$, and $\sigma_{st}+\Phi(\sigma_{st}) $ depending only on $ x$ is sufficiently small compared with $1$, then $\mathcal{E}_{N}(\bar{V}(t))\sim \|\bar{V}(t)\|^{2}_{N}$ holds true. Moreover, letting $0<\kappa_{3}\ll\kappa_{2}\ll\kappa_{1}\ll 1$ with $\kappa_{2}^{3/2}\ll\kappa_{3}$, the sum of $\eqref{3.3}\times \kappa_{1}$, $\eqref{step2}\times \kappa_{2}$, $\eqref{step4}\times \kappa_{3}$ implies that there are $\lambda>0$, $C>0$ such that $\eqref{3.2}$ holds true with $\mathcal {D}_{N}(\cdot)$ defined in $\eqref{de.D}$. Here, we have used the following Cauchy-Schwarz inequality: \begin{eqnarray*} 2 \kappa_{2} \|\bar{v}\|_{N}\|\nabla \bar {B}\|_{N-2}\leq \kappa_{2}^{1/2}\|\bar{v}\|_{N}^{2}+\kappa_{2}^{3/2}\|\nabla\bar{B}\|^{2}_{N-2}. 
\end{eqnarray*} Due to $\kappa_{2}^{3/2}\ll \kappa_{3}$, both terms on the r.h.s. of the above inequality can be absorbed. This completes the proof of Theorem $\ref{estimate}$. \end{proof} Since $\eqref{sta.equ}$ is a quasi-linear symmetric hyperbolic system, the short-time existence can be proved in a much more general case as in \cite{Kato}; see also (Theorem 1.2, Proposition 1.3, and Proposition 1.4 in Chapter 16 of \cite{Taylor}). From Theorem \ref{estimate} and the continuity argument, it is easy to see that $ \mathcal {E}_{N}(\bar{V}(t)) $ is bounded uniformly in time under the assumptions that $\mathcal {E}_{N}(\bar{V}_{0})>0$ and $\|n_{b}-1\|_{W_{0}^{N+1,2}}$ are small enough. Therefore, the global existence of solutions satisfying \eqref{V.satisfy} and \eqref{pro.2.1j} follows in the standard way; see also \cite{Duan}. This completes the proof of Proposition \ref{pro.2.1}.\qed \section{Decay in time for the non-linear system}\label{sec4} In this section, we are devoted to the rate of convergence of the solution to the equilibrium $[n_{st},0,E_{st},0]$ for the system \eqref{1.1} over $\mathbb{R}^3$. In fact, by setting \begin{eqnarray*} \bar{\rho}=n-n_{st},\ \ \bar{u}=u,\ \ E_{1}=E-E_{st},\ \ B_{1}=B, \end{eqnarray*} and \begin{eqnarray*} \rho_{st}=n_{st}-1, \end{eqnarray*} then $\bar{U}:=[\bar{\rho},\bar{u},E_{1},B_{1}]$ satisfies \begin{equation}\label{rhost} \left\{ \begin{aligned} &\partial_t \bar{\rho}+\nabla\cdot \bar{u}=g_{1} ,\\ &\partial_t \bar{u}+\bar{u} + E_{1} +\gamma \nabla\bar{\rho}=g_{2}, \\ &\partial_t E_{1}-\nabla\times B_{1}-\bar{u}=g_{3},\\ &\partial_t B_{1}+\nabla \times E_{1}=0,\\ &\nabla \cdot E_{1}=-\bar{\rho}, \ \ \nabla \cdot B_{1}=0, \ \ \ t>0,\ x\in\mathbb{R}^{3},\\ \end{aligned}\right. 
\end{equation} with initial data \begin{eqnarray}\label{rhosti} \begin{aligned} \bar{U}|_{t=0}=\bar{U}_{0}:=&[\bar{\rho}_{0},\bar{u}_{0},E_{1,0},B_{1,0}]\\ =&[n_0-n_{st},u_0,E_{0}-E_{st},B_0], \ \ \ x\in\mathbb{R}^{3}, \end{aligned} \end{eqnarray} satisfying the compatibility conditions \begin{eqnarray}\label{rhostC} \nabla \cdot E_{1,0}=-\bar{\rho}_{0}, \ \ \nabla \cdot B_{1,0}=0. \end{eqnarray} Here the nonlinear source terms take the form of \begin{equation}\label{sec5.ggg} \arraycolsep=1.5pt \left\{ \begin{aligned} & g_{1}=-\nabla\cdot[(\bar{\rho}+\rho_{st}) \bar{u}],\\ &\begin{array}[b]{rcl} g_{2}&=&-\bar{u} \cdot \nabla \bar{u}-\bar{u}\times B_{1} -\gamma [(\bar{\rho}+1+\rho_{st})^{\gamma-2}-1]\nabla\bar{\rho}\\ &&-\gamma [(1+\bar{\rho}+\rho_{st})^{\gamma-2}-(1+\rho_{st})^{\gamma-2}]\nabla\rho_{st}, \end{array}\\ & g_{3}=(\bar{\rho}+\rho_{st}) \bar{u}. \end{aligned}\right. \end{equation} In what follows, we will denote by $[\rho,u,E,B]$ the solution to the following linearized equation of \eqref{rhost}: \begin{equation}\label{DJ} \left\{ \begin{aligned} &\partial_t \rho+\nabla\cdot u=0,\\ &\partial_t u+u+ E +\gamma \nabla\rho=0,\\ &\partial_t E-\nabla\times B-u=0,\\ &\partial_t B+\nabla \times E=0,\\ &\nabla \cdot E=-\rho, \ \ \nabla \cdot B=0, \ \ \ t>0, \ \ x\in\mathbb{R}^{3},\\ \end{aligned}\right. \end{equation} with given initial data \begin{eqnarray}\label{2.61} U|_{t=0}=\bar{U}_{0}:=[\bar{\rho}_{0},\bar{u}_{0},E_{1,0},B_{1,0}], \ \ \ x\in\mathbb{R}^{3}, \end{eqnarray} satisfying the compatibility conditions \eqref{rhostC}. For the above linearized equations, the $L^{p}$-$L^{q}$ time-decay property was proved by Duan in \cite{Duan}. We list only some special $L^{p}$-$L^{q}$ time decay properties in the following proposition. 
\begin{proposition}\label{thm.decay} Suppose $U(t)=e^{tL}\bar{U}_{0}$ is the solution to the Cauchy problem \eqref{DJ}-\eqref{2.61} with the initial data $\bar{U}_{0}=[\bar{\rho}_{0},\bar{u}_{0},E_{1,0},B_{1,0}] $ satisfying \eqref{rhostC}. Then, $U=[\rho,u,E,B]$ satisfies the following time-decay property: \begin{eqnarray}\label{col.decay1} && \left\{ \begin{aligned} & \|\rho(t)\|\leq C e^{-\frac{t}{2}}\|[\bar{\rho}_{0},\bar{u}_{0}]\|,\\ & \|u(t)\| \leq C e^{-\frac{t}{2}}\|\bar{\rho}_{0}\|+C(1+t)^{-\frac{5}{4}} \|[\bar{u}_{0}, E_{1,0},B_{1,0}]\|_{L^1\cap \dot{H}^{2}},\\ &\|E(t)\|\leq C (1+t)^{-\frac{5}{4}} \|[\bar{u}_{0}, E_{1,0},B_{1,0}]\|_{L^1\cap \dot{H}^{3}},\\ &\|B(t)\|\leq C (1+t)^{-\frac{3}{4}} \|[\bar{u}_{0}, E_{1,0},B_{1,0}]\|_{L^1\cap \dot{H}^{2}}, \end{aligned}\right. \end{eqnarray} and \begin{eqnarray}\label{col.decayinfty1} && \left\{ \begin{aligned} & \|\rho(t)\|_{\infty}\leq C e^{-\frac{t}{2}}\|[\bar{\rho}_{0},\bar{u}_{0}]\|_{L^{2}\cap\dot{H}^{2}},\\ & \|u(t)\|_{\infty} \leq C e^{-\frac{t}{2}}\|\bar{\rho}_{0}\|_{L^{2}\cap\dot{H}^{2}}+C(1+t)^{-2} \|[\bar{u}_{0}, E_{1,0},B_{1,0}]\|_{L^1\cap \dot{H}^{5}},\\ &\|E(t)\|_{\infty}\leq C (1+t)^{-2} \|[\bar{u}_{0}, E_{1,0},B_{1,0}]\|_{L^1\cap \dot{H}^{6}},\\ &\|B(t)\|_{\infty}\leq C (1+t)^{-\frac{3}{2}} \|[\bar{u}_{0}, E_{1,0},B_{1,0}]\|_{L^1\cap \dot{H}^{5}}, \end{aligned}\right. \end{eqnarray} and, moreover, \begin{eqnarray}\label{col.EB} && \left\{ \begin{aligned} &\|\nabla B(t)\|\leq C (1+t)^{-\frac{5}{4}} \|[\bar{u}_{0}, E_{1,0},B_{1,0}]\|_{ L^1 \cap \dot{H}^{4}},\\ & \|\nabla^{N}[E(t),B(t)]\|\leq C(1+t)^{-\frac{5}{4}} \|[\bar{u}_{0}, E_{1,0},B_{1,0}]\|_{ L^1 \cap \dot{H}^{N+3}}. \end{aligned}\right. \end{eqnarray} \end{proposition} In what follows, since we shall apply the linear $L^{p}$-$L^{q}$ time-decay property of the homogeneous system \eqref{DJ}, we need the mild form of the non-linear Cauchy problem \eqref{rhost}-\eqref{rhosti}. 
From now on, we always denote $\bar{U}=[\bar{\rho},\bar{u},E_{1},B_{1}]$ to the non-linear Cauchy problem $\eqref{rhost}$-$\eqref{rhosti}$. Then, by Duhamel's principle, the solution $\bar{U}$ can be formally written as \begin{eqnarray}\label{sec5.U} \bar{U}(t)=e^{tL}\bar{U}_{0}+\int_{0}^{t}e^{(t-s)L}[g_{1}(s),g_{2}(s),g_{3}(s),0]d s, \end{eqnarray} where $e^{tL}\bar{U}_{0}$ denotes the solution to the Cauchy problem $\eqref{DJ}$-$\eqref{2.61}$ without nonlinear sources. The following two lemmas give the full and high-order energy estimates. \begin{lemma}\label{lem.V} Let $\bar{V}=[\bar{\sigma},\bar{v},\bar{E},\bar{B}]$ be the solution to the Cauchy problem $\eqref{sta.equ}$--$ \eqref{sta.equi}$ with initial data $\bar{V}_{0}=[\bar{\sigma}_{0},\bar{v}_{0},\bar{E}_{0},\bar{B}_{0}]$ satisfying $\eqref{sta.equC}$. Then, if $\mathcal {E}_{N}(\bar{V}_{0})$ and $\|n_{b}-1\|_{W_{0}^{N+1,2}}$ are sufficiently small, \begin{eqnarray}\label{sec5.ENV0} \dfrac{d}{dt}\mathcal {E}_{N}(\bar{V}(t))+\lambda \mathcal {D}_{N}(\bar{V}(t)) \leq 0 \end{eqnarray} holds for any $t>0$, where $\mathcal {E}_{N}(\bar{V}(t))$, $\mathcal {D}_{N}(\bar{V}(t))$ are defined in the form of $\eqref{de.E}$ and $\eqref{de.D}$, respectively. \end{lemma} \begin{proof} It can be seen directly from the proof of Theorem \ref{estimate}. \end{proof} \begin{lemma}\label{estimate2} Let $\bar{V}=[\bar{\sigma},\bar{v},\bar{E},\bar{B}]$ be the solution to the Cauchy problem $\eqref{sta.equ}$-$\eqref{sta.equi}$ with initial data $\bar{V}_{0}=[\bar{\sigma}_{0},\bar{v}_{0},\bar{E}_{0},\bar{B}_{0}]$ satisfying $\eqref{sta.equC}$ in the sense of Proposition $\ref{pro.2.1}$. 
Then if $ \mathcal {E}_{N}(\bar{V}_{0})$ and $\|n_{b}-1\|_{W_{0}^{N+1,2}}$ are sufficiently small, there are the high-order instant energy functional $\mathcal {E}_{N}^{h}(\cdot)$ and the corresponding dissipation rate $\mathcal {D}_{N}^{h}(\cdot)$ such that \begin{eqnarray}\label{sec5.high} && \frac{d}{dt}\mathcal {E}_{N}^{h}(\bar{V}(t))+\lambda\mathcal {D}^{h}_{N}(\bar{V}(t))\leq 0, \end{eqnarray} holds for any $ t \geq 0$. \end{lemma} \begin{proof} The proof can be done by modifying the proof of Theorem $\ref{estimate}$ a little. In fact, by letting the energy estimates made only on the high-order derivatives, then corresponding to $\eqref{3.3}$, $\eqref{step2}$, $\eqref{step3}$ and $\eqref{step4}$, it can be re-verified that \begin{equation*} \arraycolsep=1.5pt \begin{array}{rl} &\displaystyle \frac{1}{2}\frac{d}{dt}\left(\sum_{1\leq|\alpha|\leq N}\int_{\mathbb{R}^3}(1+\sigma_{st}+\Phi(\sigma_{st})) (|\partial^{\alpha}\bar{\sigma}|^2+|\partial^{\alpha}\bar{v}|^2)dx+\|\nabla[\bar{E},\bar{B}]\|_{N-1}^{2}\right)\\[5mm] &\displaystyle+\frac{1}{\sqrt{\gamma}}\sum_{1\leq|\alpha|\leq N}\int_{\mathbb{R}^3}(1+\sigma_{st}+\Phi(\sigma_{st}))|\partial^{\alpha}\bar{v}|^{2}dx\\[5mm] \leq & \displaystyle C(\|\bar{V}\|_{N}+\delta)(\|\nabla[\bar{\sigma},\bar{v}]\|_{N-1}^{2} +\|\nabla \bar{E}\|_{N-2}^2), \end{array} \end{equation*} \begin{eqnarray*} \frac{d}{dt}\sum_{1\leq|\alpha|\leq N-1}\langle \partial^{\alpha}\bar{v},\nabla\partial^{\alpha}\bar{\sigma}\rangle+\lambda\|\nabla\bar{\sigma}\|^{2}_{N-1} \leq C\|\nabla^2\bar{v}\|_{N-2}^{2}+C(\|[\bar{\sigma}, \bar{v},\bar{B}]\|_{N}^{2}+\delta) \|\nabla[\bar{\sigma},\bar{v}]\|_{N-1}^{2}, \end{eqnarray*} \begin{eqnarray*} \begin{aligned} \dfrac{d}{dt}\sum_{1\leq|\alpha|\leq N-1}\langle \partial^{\alpha}\bar{v},\partial^{\alpha}\bar{E}\rangle+\lambda\|\nabla\bar{E}\|^{2}_{N-2} \leq C&\|\nabla[\bar{\sigma},\bar{v}]\|_{N-1}^{2}+C\|\nabla\bar{v}\|_{N-1}\|\nabla^2 \bar{B}\|_{N-3}\\ &+C(\|[\bar{\sigma},\bar{v},\bar{B}]\|_{N}^{2}+\delta) 
\|\nabla[\bar{\sigma},\bar{v}]\|_{N-1}^{2}, \end{aligned} \end{eqnarray*} and \begin{eqnarray*} &&\begin{aligned} & -\frac{d}{dt}\sum_{1\leq |\alpha|\leq N-2}\langle \nabla \times\partial^{\alpha}\bar{E},\partial^{\alpha}\bar{B}\rangle+\lambda\|\nabla^{2}\bar{B}\|^{2}_{N-3}\\ \leq & C\|\nabla^2\bar{E}\|_{N-3}^{2} +C\|\nabla\bar{v}\|_{N-3}^2+C(\|\bar{\sigma}\|_{N}^{2}+\delta)\|\nabla \bar{v}\|_{N-1}^{2}. \end{aligned} \end{eqnarray*} Here, the details of the proof are omitted for simplicity. Now, similar to $\eqref{3.12}$, let us define \begin{equation}\label{def.high} \begin{aligned} \mathcal{E}_{N}^{h}(\bar{V}(t))&=\sum_{1\leq|\alpha|\leq N}\int_{\mathbb{R}^3}(1+\sigma_{st}+\Phi(\sigma_{st})) (|\partial^{\alpha}\bar{\sigma}|^2+|\partial^{\alpha}\bar{v}|^2)dx+\|\nabla[\bar{E},\bar{B}]\|_{N-1}^{2}\\ &+\kappa_{1}\sum_{1\leq|\alpha|\leq N-1}\langle \partial^{\alpha}\bar{v},\nabla\partial^{\alpha}\bar{\sigma}\rangle+\kappa_{2}\sum_{1\leq|\alpha|\leq N-1}\langle \partial^{\alpha}\bar{v},\partial^{\alpha}\bar{E}\rangle\\[3mm] &-\kappa_{3}\sum_{1\leq |\alpha|\leq N-2}\langle \nabla \times\partial^{\alpha}\bar{E},\partial^{\alpha}\bar{B}\rangle. \end{aligned} \end{equation} Similarly, one can choose $0<\kappa_{3}\ll\kappa_{2}\ll\kappa_{1}\ll 1$ with $\kappa_{2}^{3/2}\ll\kappa_{3}$ such that $\mathcal {E}_{N}^{h}(\bar{V}(t))\sim \|\nabla \bar{V}(t)\|_{N-1}^{2}$, because $\sigma_{st}+\Phi(\sigma_{st})$, which depends only on $x$, is sufficiently small compared with $1$. Furthermore, the linear combination of the four previously obtained estimates with coefficients corresponding to $\eqref{def.high}$ yields $\eqref{sec5.high}$ with $\mathcal {D}_{N}^{h}(\cdot)$ defined in $\eqref{de.Dh}$. This completes the proof of Lemma \ref{estimate2}. \end{proof} Now, we begin with the time-weighted estimate and iteration for the Lyapunov inequality $\eqref{sec5.ENV0}$. Let $\ell \geq 0$. 
Multiplying $\eqref{sec5.ENV0}$ by $(1+t)^{\ell}$ and taking integration over $[0,t]$ give \begin{eqnarray*} \begin{aligned} & (1+t)^{\ell}\mathcal {E}_{N}(\bar{V}(t))+\lambda \int_{0}^{t}(1+s)^{\ell}\mathcal {D}_{N}(\bar{V}(s))d s \\ \leq & \mathcal {E}_{N}(\bar{V}_{0})+ \ell \int_{0}^{t}(1+s)^{\ell-1}\mathcal {E}_{N}(\bar{V}(s))d s. \end{aligned} \end{eqnarray*} Noticing \begin{eqnarray*} \mathcal {E}_{N}(\bar{V}(t)) \leq C (D_{N+1}(\bar{V}(t))+\| \bar{B}\|^{2}), \end{eqnarray*} it follows that \begin{eqnarray*} \begin{aligned} & (1+t)^{\ell}\mathcal {E}_{N}(\bar{V}(t))+\lambda \int_{0}^{t}(1+s)^{\ell}\mathcal {D}_{N}(\bar{V}(s))d s \\ \leq & \mathcal {E}_{N}(\bar{V}_{0})+ C \ell \int_{0}^{t}(1+s)^{\ell-1}\| \bar{B}(s)\|^{2}d s+ C\ell\int_{0}^{t}(1+s)^{\ell-1}\mathcal {D}_{N+1}(\bar{V}(s))d s. \end{aligned} \end{eqnarray*} Similarly, it holds that \begin{eqnarray*} \begin{aligned} & (1+t)^{\ell-1}\mathcal {E}_{N+1}(\bar{V}(t))+\lambda \int_{0}^{t}(1+s)^{\ell-1}\mathcal {D}_{N+1}(\bar{V}(s))d s \\ \leq & \mathcal {E}_{N+1}(\bar{V}_{0})+ C (\ell-1) \int_{0}^{t}(1+s)^{\ell-2}\| \bar{B}(s)\|^{2}ds + C(\ell-1)\int_{0}^{t}(1+s)^{\ell-2}\mathcal {D}_{N+2}(\bar{V}(s))d s, \end{aligned} \end{eqnarray*} and \begin{eqnarray*} \mathcal {E}_{N+2}(\bar{V}(t))+\lambda \int_{0}^{t}\mathcal {D}_{N+2}(\bar{V}(s))d s \leq \mathcal {E}_{N+2}(\bar{V}_{0}). \end{eqnarray*} Then, for $1<\ell<2$, it follows by iterating the above estimates that \begin{eqnarray}\label{sec5.ED} \begin{aligned} & (1+t)^{\ell}\mathcal {E}_{N}(\bar{V}(t))+\lambda \int_{0}^{t}(1+s)^{\ell}\mathcal {D}_{N}(\bar{V}(s))d s \\ \leq & C \mathcal {E}_{N+2}(\bar{V}_{0})+ C \int_{0}^{t}(1+s)^{\ell-1}\| \bar{B}(s)\|^{2}d s. 
\end{aligned} \end{eqnarray} Similarly, for $2<\kappa<3$, the time-weighted estimate and iteration for the Lyapunov inequality $\eqref{sec5.high}$ give \begin{eqnarray*} \begin{aligned} & (1+t)^{\kappa}\mathcal {E}_{N}^h(\bar{V}(t))+\lambda \int_{0}^{t}(1+s)^{\kappa}\mathcal {D}_{N}^h(\bar{V}(s))d s \\ \leq & C \mathcal {E}_{N+3}^h(\bar{V}_{0})+ C \int_{0}^{t}(1+s)^{\kappa-1}\| \nabla\bar{B}(s)\|^{2}d s. \end{aligned} \end{eqnarray*} Here the smallness of $\|n_{b}-1\|_{W_{0}^{N+4,2}}$ has been used in the process of iteration for the Lyapunov inequalities $\eqref{sec5.ENV0}$ and $\eqref{sec5.high}$. Taking $\kappa=l+1$, it holds that \begin{multline}\label{sec5.EhD} (1+t)^{l+1}\mathcal {E}_{N}^h(\bar{V}(t))+\lambda \int_{0}^{t}(1+s)^{l+1}\mathcal {D}_{N}^h(\bar{V}(s))d s \\ \leq C \mathcal {E}_{N+3}^h(\bar{V}_{0})+ C \int_{0}^{t}(1+s)^{l}\| \nabla\bar{B}(s)\|^{2}d s\\ \leq C \mathcal {E}_{N+3}^h(\bar{V}_{0})+ C \int_{0}^{t}(1+s)^{\ell}\mathcal {D}_{N}(\bar{V}(s))d s. \end{multline} Combining $\eqref{sec5.ED}$ with $\eqref{sec5.EhD}$, we have \begin{multline}\label{sec5.EDEhD} (1+t)^{\ell}\mathcal {E}_{N}(\bar{V}(t))+ \int_{0}^{t}(1+s)^{\ell}\mathcal {D}_{N}(\bar{V}(s))d s\\ +(1+t)^{l+1}\mathcal {E}_{N}^h(\bar{V}(t))+ \int_{0}^{t}(1+s)^{l+1}\mathcal {D}_{N}^h(\bar{V}(s))d s \\ \leq C \mathcal {E}_{N+3}(\bar{V}_{0})+ C \int_{0}^{t}(1+s)^{\ell-1}\| \bar{B}(s)\|^{2}d s. \end{multline} For this time, to estimate the integral term on the r.h.s. of $\eqref{sec5.EDEhD}$, let's define \begin{eqnarray}\label{sec5.def} \mathcal {E}_{N,\infty}(\bar{V}(t))=\sup\limits_{0\leq s \leq t} \ \left\{(1+s)^{\frac{3}{2}}\mathcal {E}_{N}(\bar{V}(s))+(1+s)^{\frac{5}{2}}\mathcal {E}_{N}^h(\bar{V}(s))\right\}, \end{eqnarray} \begin{eqnarray}\label{sec5.defL} L_{0}(t)=\sup\limits_{0\leq s \leq t} (1+s)^{\frac{5}{2}}\|[\bar{\rho},\bar{u}]\|^{2}. 
\end{eqnarray} Then, we have the following \begin{lemma}\label{lem.Bsigma} For any $t\geq0$, it holds that: \begin{eqnarray}\label{lem.tildeB} &&\begin{aligned} \|\bar{B}(t)\|^2\leq C (1+t)^{-\frac{3}{2}}\left(\|[\bar{\sigma}_{0},\bar{v}_{0}]\|^{2}+ \|[\bar{v}_{0},\right.& \bar{E}_{0},\bar{B}_{0}]\|^2_{L^1\cap \dot{H}^{2}}\\ &\left.+[\mathcal {E}_{N,\infty}(\bar{V}(t))]^2+\delta^2 \mathcal {E}_{N,\infty}(\bar{V}(t))\right). \end{aligned} \end{eqnarray} \end{lemma} \begin{proof} Applying the fourth linear estimate on $B$ in $\eqref{col.decay1}$ to the mild form \eqref{sec5.U} gives \begin{eqnarray}\label{sec5.decayB} &&\begin{aligned} \|B_{1}(t)\|\leq C (1+t)^{-\frac{3}{4}} \|[\bar{u}_{0},& E_{1,0},B_{1,0}]\|_{L^1\cap \dot{H}^{2}}\\ &+C \int_{0}^{t}(1+t-s)^{-\frac{3}{4}}\|[g_{2}(s),g_{3}(s)]\|_{L^{1}\cap\dot{H}^{2}}ds. \end{aligned} \end{eqnarray} Applying the $L^{2}$ linear estimate on $u$ in $\eqref{col.decay1}$ to the mild form $\eqref{sec5.U}$, \begin{eqnarray}\label{baruL2} &&\begin{aligned} \|\bar{u}(t)\| \leq C(1+t)^{-\frac{5}{4}}( &\|\bar{\rho}_{0}\|+\|[\bar{u}_{0}, E_{1,0},B_{1,0}]\|_{L^{1}\cap\dot{H}^{2}})\\ &+C \int_{0}^{t}(1+t-s)^{-\frac{5}{4}}\left(\|g_{1}(s)\|+\|[g_{2}(s),g_{3}(s)]\|_{L^{1}\cap \dot{H}^{2}}\right)ds. \end{aligned} \end{eqnarray} Applying the $L^{2}$ linear estimate on $\rho$ in $\eqref{col.decay1}$ to $\eqref{sec5.U}$, one has \begin{eqnarray}\label{rhoL2} \|\bar{\rho}(t)\|\leq C e^{-\frac{t}{2}}\|[\bar{\rho}_{0},\bar{u}_{0}]\|+ C \int_{0}^{t}e^{-\frac{t-s}{2}}\|[g_{1}(s),g_{2}(s)]\|d s. 
\end{eqnarray} Recall the definition $\eqref{sec5.ggg}$ of $g_{1}$, $g_{2}$ and $g_{3}$, \begin{eqnarray*} \begin{aligned} & g_{1}(s)=-\rho_{st} \nabla \cdot \bar{u}-\bar{\rho} \nabla \cdot \bar{u}-\bar{u} \cdot \nabla \rho_{st}-\bar{u} \cdot \nabla \bar{\rho},\\ & g_{2}(s)\sim \bar{u} \cdot \nabla \bar{u} + \bar{u}\times B_{1} +\bar{\rho}\nabla \bar{\rho}+ \rho_{st}\nabla \bar{\rho} +\bar{\rho}\nabla \rho_{st},\\ & g_{3}(s)=\bar{\rho} \bar{u}+\rho_{st} \bar{u}. \end{aligned} \end{eqnarray*} Firstly, we estimate those terms including $\rho_{st}$. It follows that \begin{eqnarray*} \begin{aligned} &\|\rho_{st}\nabla \cdot \bar{u}\|\leq \|\rho_{st}\|_{L^{\infty}}\|\nabla \bar{u}\|,\ \ \ \ \ \|\bar{u} \cdot \nabla \rho_{st}\|\leq \|\nabla\rho_{st}\|\|\bar{u}\|_{L^{\infty}}\leq \|\nabla\rho_{st}\|\|\nabla\bar{u}\|_{H^{1}},\\ &\|\rho_{st}\nabla \bar{\rho}\|_{L^1}\leq \|\rho_{st}\|\|\nabla \bar{\rho}\|,\ \ \ \ \ \|\rho_{st}\nabla \bar{\rho}\|\leq \|\rho_{st}\|_{L^{\infty}}\|\nabla\bar{\rho}\|\leq \|\nabla\rho_{st}\|_{H^{1}}\|\nabla\bar{\rho}\|,\\ & \|\bar{\rho} \nabla \rho_{st}\|_{L^1}\leq \|\nabla\rho_{st}\|\|\bar{\rho}\|,\ \ \ \ \|\nabla\rho_{st}\bar{\rho}\|\leq \|\bar{\rho}\|_{L^{\infty}}\|\rho_{st}\|\leq \|\rho_{st}\|\|\nabla\bar{\rho}\|_{H^{1}},\\ &\|\rho_{st} \bar{u}\|_{L^1} \leq \|\rho_{st}\|\|\bar{u}\|,\\ \end{aligned} \end{eqnarray*} and for $|\alpha|=2$, one has \begin{eqnarray*} &&\begin{aligned} \|\partial^{\alpha}(\rho_{st}\nabla \bar{\rho})\|\leq & \|\rho_{st}\partial^{\alpha}\nabla\bar{\rho}\| +\|\partial^{\alpha}(\rho_{st}\nabla \bar{\rho})-\rho_{st}\partial^{\alpha}\nabla\bar{\rho}\|\\[3mm] \leq & \|\rho_{st}\|_{L^{\infty}}\|\partial^{\alpha}\nabla\bar{\rho}\| +C\|\nabla\rho_{st}\|_{H^{|\alpha|-1}}\|\nabla\bar{\rho}\|_{L^{\infty}}+C\|\nabla \rho_{st}\|_{L^{\infty}}\|\nabla \bar{\rho}\|_{H^{|\alpha|-1}}\\[3mm] \leq & C\delta\|\nabla \bar{\rho}\|_{H^{2}}, \end{aligned} \end{eqnarray*} where we have used the estimate $\|\partial^{\alpha}(f g)-f 
\partial^{\alpha}g\|\leq C\|\nabla f\|_{H^{k-1}}\|g\|_{L^{\infty}}+C\|\nabla f\|_{L^{\infty}}\|g\|_{H^{k-1}}$, for any $|\alpha|=k$. Similarly, it holds that \begin{eqnarray*} \begin{aligned} \|\partial^{\alpha}(\rho_{st} \bar{u})\|\leq & \|\bar{u} \partial^{\alpha}\rho_{st}\| +\|\partial^{\alpha}(\rho_{st}\bar{u})-\bar{u} \partial^{\alpha}\rho_{st}\| \leq C \delta \|\nabla \bar{u}\|_{H^{2}}, \end{aligned} \end{eqnarray*} \begin{eqnarray*} \begin{aligned} & \|\partial^{\alpha}(\bar{\rho} \nabla \rho_{st})\|\leq C \delta \|\nabla \bar{\rho}\|_{H^{2}}. \end{aligned} \end{eqnarray*} It is straightforward to verify that for any $0\leq s\leq t$, \begin{eqnarray}\label{dec.g2g3} \begin{aligned} \|[g_{2}(s),g_{3}(s)]\|_{L^{1}}\leq & C \|\bar{u}\|\|\nabla \bar{u}\|+\|\bar{u}\|\|B_{1}\|+\|\bar{\rho}\|\|\bar{u}\|+\|\bar{\rho}\|\|\nabla \bar{\rho}\|\\ & +C(\|\rho_{st}\nabla \bar{\rho}\|_{L^1}+\|\rho_{st} \bar{u}\|_{L^1}+\|\bar{\rho} \nabla \rho_{st}\|_{L^1} )\\ &\leq C \mathcal {E}_{N}(\bar{U}(s))+ C \delta \sqrt{\mathcal {E}_{N}^h(\bar{U}(s))}+C \delta \|[\bar{\rho},\bar{u}]\|, \end{aligned} \end{eqnarray} \begin{eqnarray}\label{dec.g2g3H} \begin{aligned} \|[g_{2}(s),g_{3}(s)]\|_{\dot{H}^{2}}\leq & C\mathcal {E}_{N}(\bar{U}(s))+ C \delta \sqrt{\mathcal {E}_{N}^h(\bar{U}(s))}, \end{aligned} \end{eqnarray} and \begin{eqnarray}\label{dec.g1g2} \begin{aligned} \|[g_{1}(s),g_{2}(s)]\|\leq & C\mathcal {E}_{N}(\bar{U}(s))+ C \delta \sqrt{\mathcal {E}_{N}^h(\bar{U}(s))}. \end{aligned} \end{eqnarray} Notice that $ \mathcal {E}_{N}(\bar{U}(s))\leq C \mathcal {E}_{N}(\bar{V}(\sqrt{\gamma}s))$. 
From $\eqref{sec5.def}$ and $\eqref{sec5.defL}$, for any $0\leq s\leq t$, \begin{eqnarray*} \mathcal {E}_{N}(\bar{V}(\sqrt{\gamma}s))\leq (1+\sqrt{\gamma}s)^{-\frac{3}{2}}\mathcal {E}_{N,\infty}(\bar{V}(\sqrt{\gamma}t)), \end{eqnarray*} \begin{eqnarray*} \mathcal {E}_{N}^h(\bar{V}(\sqrt{\gamma}s))\leq (1+\sqrt{\gamma}s)^{-\frac{5}{2}}\mathcal {E}_{N,\infty}(\bar{V}(\sqrt{\gamma}t)), \end{eqnarray*} \begin{eqnarray*} \|[\bar{\rho},\bar{u}](s)\|\leq \sqrt{L_{0}(t)}(1+s)^{-\frac{5}{4}}. \end{eqnarray*} Then, it follows that for $0\leq s \leq t$, \begin{multline*} \|[g_{2}(s),g_{3}(s)]\|_{L^{1}}\leq C(1+\sqrt{\gamma}s)^{-\frac{3}{2}}\mathcal {E}_{N,\infty}(\bar{V}(\sqrt{\gamma}t))\\ +C \delta (1+\sqrt{\gamma}s)^{-\frac{5}{4}}\sqrt{\mathcal {E}_{N,\infty}(\bar{V}(\sqrt{\gamma}t))}+C\delta \sqrt{L_{0}(t)}(1+s)^{-\frac{5}{4}}, \end{multline*} \begin{eqnarray*} \begin{aligned} \|[g_{2}(s),g_{3}(s)]\|_{\dot{H}^{2}}\leq & C(1+\sqrt{\gamma}s)^{-\frac{3}{2}}\mathcal {E}_{N,\infty}(V(\sqrt{\gamma}t))\\ &+C \delta (1+\sqrt{\gamma}s)^{-\frac{5}{4}}\sqrt{\mathcal {E}_{N,\infty}(\bar{V}(\sqrt{\gamma}t))}, \end{aligned} \end{eqnarray*} \begin{eqnarray*} \begin{aligned} \|[g_{1}(s),g_{2}(s)]\|\leq & C(1+\sqrt{\gamma}s)^{-\frac{3}{2}}\mathcal {E}_{N,\infty}(V(\sqrt{\gamma}t))\\ &+C \delta (1+\sqrt{\gamma}s)^{-\frac{5}{4}}\sqrt{\mathcal {E}_{N,\infty}(\bar{V}(\sqrt{\gamma}t))}. 
\end{aligned} \end{eqnarray*} Putting the above inequalities into \eqref{sec5.decayB}, $\eqref{baruL2}$ and \eqref{rhoL2} respectively gives \begin{multline}\label{sec5.decayB1} \|B_{1}(t)\|\leq C (1+t)^{-\frac{3}{4}}\Big\{ \|[\bar{u}_{0}, E_{1,0},B_{1,0}]\|_{L^1\cap \dot{H}^{2}}+\mathcal {E}_{N,\infty}(\bar{V}(\sqrt{\gamma}t))\\ +\delta\sqrt{L_{0}(t)}+\delta \sqrt{\mathcal {E}_{N,\infty}(\bar{V}(\sqrt{\gamma}t))}\Big\}, \end{multline} \begin{multline}\label{sec5.decayu} \|\bar{u}(t)\|\leq C (1+t)^{-\frac{5}{4}}\Big\{ \|\bar{\rho}_{0}\|+\|[\bar{u}_{0}, E_{1,0},B_{1,0}]\|_{L^{1}\cap\dot{H}^{2}}+\mathcal {E}_{N,\infty}(\bar{V}(\sqrt{\gamma}t))\\ +\delta\sqrt{L_{0}(t)}+\delta \sqrt{\mathcal {E}_{N,\infty}(\bar{V}(\sqrt{\gamma}t))}\Big\}, \end{multline} \begin{eqnarray}\label{sec5.decayrho} \begin{aligned} \|\bar{\rho}(t)\|\leq C (1+t)^{-\frac{5}{4}}\Big\{ \|[\bar{\rho}_{0},u_{0}]\|+\mathcal {E}_{N,\infty}(\bar{V}&(\sqrt{\gamma}t))\\ &+\delta \sqrt{\mathcal {E}_{N,\infty}(\bar{V}(\sqrt{\gamma}t))}\Big\}. \end{aligned} \end{eqnarray} The definition of $L_{0}(t)$, \eqref{sec5.decayu} and \eqref{sec5.decayrho} further imply that \begin{multline}\label{bou.L} L_{0}(t)\leq C\|[\bar{\rho}_{0},u_{0}]\|^{2}+C\|[\bar{u}_{0}, E_{1,0},B_{1,0}]\|_{L^{1}\cap\dot{H}^{2}}^{2}\\ +C\left[\mathcal {E}_{N,\infty}(\bar{V}(\sqrt{\gamma}t))\right]^{2}+C\delta^{2}\mathcal {E}_{N,\infty}(\bar{V}(\sqrt{\gamma}t)), \end{multline} where we have used that $\delta$ is small enough. Plugging the above estimate into \eqref{sec5.decayB1} implies $\eqref{lem.tildeB}$, since $\|\bar{ B}(t)\|\leq C \| B_{1}(t/\sqrt{\gamma})\|$ and $[\bar{\rho},\bar{u},E_{1},B_{1}]$ is equivalent with $[\bar{\sigma},\bar{v},\bar{E},\bar{B}]$ up to a positive constant. This completes the proof of Lemma $\ref{lem.Bsigma}$. 
\end{proof} Now, the rest is to prove the uniform-in-time bound of $\mathcal {E}_{N,\infty}(\bar{V}(t))$ which yields the time-decay rates of the Lyapunov functionals $\mathcal {E}_{N}(\bar{V}(t))$ and $\mathcal {E}_{N}^h(\bar{V}(t))$ thus $\|\bar{V}(t)\|_{N}^{2}$, $\|\nabla\bar{V}(t)\|_{N-1}^{2}$. In fact, by taking $\ell =\frac{3}{2}+\epsilon$ in $\eqref{sec5.EDEhD}$ with $\epsilon>0$ small enough, one has \begin{multline*} (1+t)^{\frac{3}{2}+\epsilon}\mathcal {E}_{N}(\bar{V}(t))+ \int_{0}^{t}(1+s)^{\frac{3}{2}+\epsilon}\mathcal {D}_{N}(\bar{V}(s))d s\\ +(1+t)^{\frac{5}{2}+\epsilon}\mathcal {E}_{N}^h(\bar{V}(t))+ \int_{0}^{t}(1+s)^{\frac{5}{2}+\epsilon}\mathcal {D}_{N}^h(\bar{V}(s))d s \\ \leq C \mathcal {E}_{N+3}(\bar{V}_{0})+ C \int_{0}^{t}(1+s)^{\frac{1}{2}+\epsilon}\| \bar{B}(s)\|^{2}d s. \end{multline*} Here, using $\eqref{lem.tildeB}$ and the fact $\mathcal {E}_{N,\infty}(\bar{V}(t))$ is non-decreasing in $t$, it further holds that \begin{eqnarray*} \begin{aligned} \int_{0}^{t}(1+s)^{\frac{1}{2}+\epsilon}\| \bar{B}(s)\|^{2}d s\leq C(1+t)^{\epsilon}\Big\{\|[\bar{\sigma}_{0},&\bar{v}_{0}]\|^{2}+ \|[\bar{v}_{0},\bar{E}_{0},\bar{B}_{0}]\|^2_{L^1\cap \dot{H}^{2}}\\ &+[\mathcal {E}_{N,\infty}(\bar{V}(t))]^2 +\delta ^2 \mathcal {E}_{N,\infty}(\bar{V}(t))\Big\}. 
\end{aligned} \end{eqnarray*} Therefore, it follows that \begin{multline*} (1+t)^{\frac{3}{2}+\epsilon}\mathcal {E}_{N}(\bar{V}(t))+(1+t)^{\frac{5}{2}+\epsilon}\mathcal {E}_{N}^h(\bar{V}(t))\\ + \int_{0}^{t}(1+s)^{\frac{3}{2}+\epsilon}\mathcal {D}_{N}(\bar{V}(s))d s + \int_{0}^{t}(1+s)^{\frac{5}{2}+\epsilon}\mathcal {D}_{N}^h(\bar{V}(s))d s \\ \leq C \mathcal {E}_{N+3}(\bar{V}_{0})+ C (1+t)^{\epsilon}\left(\|[\bar{\sigma}_{0},\bar{v}_{0}]\|^{2}+ \|[\bar{v}_{0},\bar{E}_{0},\bar{B}_{0}]\|^2_{L^1\cap \dot{H}^{2}}\right.\\ \left.+[\mathcal {E}_{N,\infty}(\bar{V}(t))]^2 +\delta^2 \mathcal {E}_{N,\infty}(\bar{V}(t))\right), \end{multline*} which implies \begin{multline*} (1+t)^{\frac{3}{2}}\mathcal {E}_{N}(\bar{V}(t))+(1+t)^{\frac{5}{2}}\mathcal {E}_{N}^h(\bar{V}(t)) \leq C \Big\{ \mathcal {E}_{N+3}(\bar{V}_{0})+ \|[\bar{v}_{0},\bar{E}_{0},\bar{B}_{0}]\|^2_{L^1}\\ +[\mathcal {E}_{N,\infty}(\bar{V}(t))]^2 +\delta ^2 \mathcal {E}_{N,\infty}(\bar{V}(t))\Big\}, \end{multline*} and thus \begin{eqnarray}\label{ENb} \mathcal {E}_{N,\infty}(\bar{V}(t)) \leq C \left( \epsilon_{N+3}(\bar{V}_{0})^{2}+ \mathcal {E}_{N,\infty}(\bar{V}(t))^{2}\right). \end{eqnarray} Here, we have used that $\delta$ is small enough. Recall the definition of $\epsilon_{N+3}(\bar{V}_{0})$, since $\epsilon_{N+3}(\bar{V}_{0})>0$ is sufficiently small, $\mathcal {E}_{N,\infty}(\bar{V}(t)) \leq C \epsilon_{N+3}(\bar{V}_{0})^{2}$ holds true for any $t\geq 0$, which implies \begin{eqnarray}\label{UN} \|\bar{V}(t)\|_{N} \leq C \mathcal {E}_{N}(\bar{V}(t))^{1/2} \leq C \epsilon_{N+3}(\bar{V}_{0})(1+t)^{-\frac{3}{4}}, \end{eqnarray} \begin{eqnarray}\label{nablaUN} \|\nabla\bar{V}(t)\|_{N-1} \leq C \mathcal {E}_{N}^{h}(\bar{V}(t))^{1/2} \leq C \epsilon_{N+3}(\bar{V}_{0})(1+t)^{-\frac{5}{4}}. 
\end{eqnarray} The definition of $L_{0}(t)$, the uniform-in-time bound of $\mathcal {E}_{N,\infty}(\bar{V}(t))$ and $\eqref{bou.L}$ show that \begin{eqnarray*} \|[\bar{\rho},\bar{u}](t)\| \leq C \epsilon_{N+3}(\bar{V}_{0})(1+t)^{-\frac{5}{4}}. \end{eqnarray*} In addition, applying the $L^{2}$ linear estimate on $E$ in $\eqref{col.decay1}$ to the mild form $\eqref{sec5.U}$, \begin{eqnarray*} &&\begin{aligned} \|E_{1}(t)\|\leq C (1+t)^{-\frac{5}{4}} \|[\bar{u}_{0},& E_{1,0},B_{1,0}]\|_{L^1\cap \dot{H}^{3}}\\ &+C \int_{0}^{t}(1+t-s)^{-\frac{5}{4}}\|[g_{2}(s),g_{3}(s)]\|_{L^{1}\cap \dot{H}^{3}}ds. \end{aligned} \end{eqnarray*} By $\eqref{UN}$ and $\eqref{nablaUN}$, similar to obtaining $\eqref{dec.g2g3}$ and $\eqref{dec.g2g3H}$, we have \begin{eqnarray*} \begin{aligned} &\|[g_{2}(s),g_{3}(s)]\|_{L^{1}\cap \dot{H}^{3}}\leq C\|\bar{U}(t)\|^{2}_{4}+ C\delta\|\nabla \bar{U}(t)\|_{3}+C\delta\|[\bar{\rho},\bar{u}]\|\leq C\epsilon_{7}(\bar{V}_{0})(1+t)^{-\frac{5}{4}}, \end{aligned} \end{eqnarray*} and it follows that \begin{eqnarray}\label{uL2} \|E_{1}(t)\| \leq C\epsilon_{7}(\bar{V}_{0})(1+t)^{-\frac{5}{4}}. \end{eqnarray} This completes the proof of Theorem \ref{Corolary}. \noindent{\bf Acknowledgements:}\ \ The first author Qingqing Liu would like to thank Dr. Renjun Duan for his guidance and continuous help. The research was supported by the National Natural Science Foundation of China $\#$11071093, the PhD specialized grant of the Ministry of Education of China $\#$20100144110001, and the Special Fund for Basic Scientific Research of Central Colleges $\#$CCNU10C01001, $\#$CCNU12C01001. \bigbreak \end{document}
\begin{document} \title[Carath\'eodory extremal functions] {Carath\'eodory extremal functions on the symmetrized bidisc} \author{Jim Agler} \address{Department of Mathematics, University of California at San Diego, CA \textup{92103}, USA} \author{Zinaida A. Lykova} \address{School of Mathematics, Statistics and Physics, Newcastle University, Newcastle upon Tyne NE\textup{1} \textup{7}RU, U.K.} \email{[email protected]} \author{N. J. Young} \address{School of Mathematics, Statistics and Physics, Newcastle University, Newcastle upon Tyne NE1 7RU, U.K. {\em and} School of Mathematics, Leeds University, Leeds LS2 9JT, U.K.} \email{[email protected]} \date{5th May 2018} \dedicatory{To Rien Kaashoek in esteem and friendship} \subjclass[2010]{32A07, 53C22, 54C15, 47A57, 32F45, 47A25, 30E05} \keywords{Carath\'eodory extremal functions, symmetrized bidisc, model formulae, realization formulae} \thanks{Partially supported by National Science Foundation Grants DMS 1361720 and 1665260, the UK Engineering and Physical Sciences Research Council grant EP/N03242X/1, the London Mathematical Society Grant 41730 and Newcastle University} \begin{abstract} We show how realization theory can be used to find the solutions of the Carath\'eodory extremal problem on the symmetrized bidisc \[ G \stackrel{\mathrm{def}}{=} \{(z+w,zw):|z|<1, \, |w|<1\}. \] We show that, generically, solutions are unique up to composition with automorphisms of the disc. We also obtain formulae for large classes of extremal functions for the Carath\'eodory problems for tangents of non-generic types. \end{abstract} \maketitle \sloppy \fussy \pagenumbering{arabic} \setcounter{page}{1} \section*{Introduction}\label{intro} A constant thread in the research of Marinus Kaashoek over several decades has been the power of realization theory applied to a wide variety of problems in analysis. 
Among his many contributions in this area we mention his monograph \cite{bgk}, written with his longstanding collaborators Israel Gohberg and Harm Bart, which was an early and influential work in the area, and his more recent papers \cite{KaavS2014,FHK2014}. Realization theory uses explicit formulae for functions in terms of operators on Hilbert space to prove function-theoretic results. In this paper we continue along the Bart-Gohberg-Kaashoek path by using realization theory to prove results in complex geometry. Specifically, we are interested in the geometry of the {\em symmetrized bidisc} \[ G \stackrel{\mathrm{def}}{=} \{(z+w,zw):|z|<1, \, |w|<1\}, \] a domain in $\mathbb{C}^2$ that has been much studied in the last two decades: see \cite{cos04,ez05,jp04,bhatta,sarkar,tryb,aly2016}, along with many other papers. We shall use realization theory to prove detailed results about the {\em Carath\'eodory extremal problem} on $G$, defined as follows (see \cite{kob98, jp}). Consider a domain (that is, a connected open set) $\Omega$ in $\mathbb{C}^n$. For domains $\Omega_1,\ \Omega_2$, we denote by $\Omega_2(\Omega_1)$ the set of holomorphic maps from $\Omega_1$ to $\Omega_2$. A point in the complex tangent bundle $T\Omega$ of $\Omega$ will be called a {\em tangent} (to $\Omega$). Thus if $\de\df (\la,v)$ is a tangent to $\Omega$ then $\la\in \Omega$ and $v$ is a point in the complex tangent space $T_\la \Omega \sim \mathbb{C}^n$ of $\Omega$ at $\la$. We say that $\de$ is a {\em \nd} tangent if $v\neq 0$. We write $|\cdot|$ for the Poincar\'e metric on $T\mathbb{D}$: \[ |(z, v)| \df \frac{|v|}{1-|z|^2} \quad \mbox{ for } z\in\mathbb{D}, \, v\in\mathbb{C}. 
\] The {\em Carath\'eodory} or {\em Carath\'eodory-Reiffen pseudometric} \cite{jp} on $\Omega$ is the Finsler pseudometric $\car{\cdot}$ on $T\Omega$ defined for $\de=(\la,v)\in T\Omega$ by \begin{align}\label{carprob} \car{\de} & \df \sup_{F\in \mathbb{D}(\Omega)} |F_*(\de)| \notag \\ &= \sup_{F\in \mathbb{D}(\Omega)} \frac{ |D_vF(\la)|}{1-|F(\la)|^2}. \end{align} Here $F_*$ is the standard notation for the pushforward of $\de$ by the map $F$ to an element of $T\mathbb{D}$, given by \[ \ip{g}{F_*(\de)}=\ip{g\circ F}{\de} \] for any analytic function $g$ in a neighbourhood of $F(\la)$. The {\em Carath\'eodory extremal problem} $\Car \de$ on $\Omega$ is to calculate $\car{\de}$ for a given $\de\in T\Omega$, and to find the corresponding extremal functions, which is to say, the functions $F\in\mathbb{D}(\Omega)$ for which the supremum in equation \eqref{carprob} is attained. We shall also say that $F$ {\em solves} $\Car\de$ to mean that $F$ is an extremal function for $\Car\de$. For a general domain $\Omega$ one cannot expect to find either $\car{\cdot}$ or the corresponding extremal functions explicitly. In a few cases, however, there are more or less explicit formulae for $\car{\de}$. In particular, when $\Omega=G$, $\car{\cdot}$ is a metric on $TG$ (it is positive for \nd tangents) and the following result obtains \cite[Theorem 1.1 and Corollary 4.3]{ay2004}. We use the co-ordinates $(s^1,s^2)$ for a point of $G$. \begin{theorem}\label{extthm10} Let $\de$ be a \nd tangent vector in $TG$. 
There exists $\omega\in\mathbb{T}$ such that the function in $\mathbb{D}(G)$ given by \begin{equation}\label{defPhi} \Phi_\omega(s^1,s^2) \df \frac{2\omega s^2-s^1}{2-\omega s^1} \end{equation} is extremal for the Carath\'eodory problem $\Car{\de}$ in $G$. \end{theorem} It follows that $\car{\de}$ can be obtained as the maximum modulus of a fractional quadratic function over the unit circle \cite[Corollary 4.4]{ay2004}\footnote{Unfortunately there is an $\omega$ missing in equation (4.7) of \cite{ay2004}. The derivation given there shows that the correct formula is the present one.}: if $\de=\left((s^1,s^2), v\right) \in TG$ then \begin{align*} \car{\de}&= \sup_{\omega\in\mathbb{T}} |(\Phi_\omega)_*(\de)| \\ &=\sup_{\omega\in\mathbb{T}}\left|\frac{v_1(1-\omega^2 s^2)-v_2\omega(2-\omega s^1)}{(s^1-\overline {s^1} s^2)\omega^2-2(1-|s^2|^2)\omega +\overline{s^1}-\overline {s^2} s^1}\right|. \end{align*} Hence $\car{\de}$ can easily be calculated numerically to any desired accuracy. In the latter equation we use superscripts (in $s^1, s^2$) and squares (of $\omega$, $|s^2|$). The question arises: what are the extremal functions for the problem $\Car{\de}$? By Theorem \ref{extthm10}, there is an extremal function for $\Car{\de}$ of the form $\Phi_\omega$ for some $\omega$ in $\mathbb{T}$, but are there others? It is clear that if $F$ is an extremal function for $\Car\de$ then so is $m\circ F$ for any automorphism $m$ of $\mathbb{D}$, by the invariance of the Poincar\'e metric on $\mathbb{D}$. 
We shall say that the solution of $\Car\de$ is {\em essentially unique} if, for every pair of extremal functions $F_1,F_2$ for $\Car\de$, there exists an automorphism $m$ of $\mathbb{D}$ such that $F_2=m\circ F_1$. We show in Theorem \ref{ess1} that, for any \nd tangent $\de\in TG$, if there is a {\em unique} $\omega$ in $\mathbb{T}$ such that $\Phi_\omega$ solves $\Car\de$, then the solution of $\Car\de$ {\em is} essentially unique. Indeed, for any point $\la\in G$, the solution of $\Car(\la,v)$ is essentially unique for generic directions $v$ (Corollary \ref{genuniq}). We also derive (in Section \ref{royal}) a parametrization of all solutions of $\Car\de$ in the special case that $\de$ is tangent to the `royal variety' $(s^1)^2=4s^2$ in $G$, and in Sections \ref{flat} and \ref{purelybalanced} we obtain large classes of Carath\'eodory extremals for two other classes of tangents, called {\em flat} and {\em purely balanced} tangents. The question of the essential uniqueness of solutions of $\Car\de$ in domains including $G$ was studied by L. Kosi\'nski and W. Zwonek in \cite{kos}. Their terminology and methods differ from ours; we explain the relation of their Theorem 5.3 to our Theorem \ref{ess1} in Section \ref{relation}. Incidentally, the authors comment that very little is known about the set of all Carath\'eodory extremals for a given tangent in a domain. As far as the domain $G$ goes, in this paper we derive a substantial amount of information, even though we do not achieve a complete description of all Carath\'eodory extremals on $G$. 
The main tool we use is a model formula for analytic functions from $G$ to the closed unit disc $\mathbb{D}^-$ proved in \cite{AY2017} and stated below as Definition \ref{defGmodel} and Theorem \ref{modelGthm}. Model formulae and realization formulae for a class of functions are essentially equivalent: one can pass back and forth between them by standard methods (algebraic manipulation in one direction, lurking isometry arguments in the other). \section{Five types of tangent}\label{5types} There are certainly \nd tangents $\de\in TG$ for which the solution of $\Car\de$ is not essentially unique. Consider, for example, $\de$ of the form \[ \de=\left( (2z,z^2), 2c(1,z)\right) \] for some $z\in\mathbb{D}$ and nonzero complex $c$. We call such a tangent {\em royal}: it is tangent to the `royal variety' \[ \calr\df \{(2z,z^2):z\in\mathbb{D}\} \] in $G$. By a simple calculation, for any $\omega\in\mathbb{T}$, \[ \Phi_\omega(2z,z^2)= -z, \qquad D_v\Phi_\omega(2z,z^2)=- c, \] where $v=2c(1,z)$, so that $\Phi_\omega(2z,z^2)$ and $ D_v\Phi_\omega(2z,z^2)$ are independent of $\omega$. It follows from Theorem \ref{extthm10} that $\Phi_\omega$ solves $\Car\de$ for {\em all} $\omega\in\mathbb{T}$ and that \begin{equation}\label{carroyal}\ \car{\de}=\frac{|D_v\Phi_\omega(2z,z^2)|}{1-|\Phi_\omega(2z,z^2)|^2}= \frac{|c|}{1-|z|^2}. \end{equation} Now if $\omega_1,\ \omega_2$ are distinct points of $\mathbb{T}$, there is no automorphism $m$ of $\mathbb{D}$ such that $\Phi_{\omega_1}=m\circ\Phi_{\omega_2}$; this is a consequence of the fact that $(2\bar\omega,\bar\omega^2)$ is the unique singularity of $\Phi_\omega$ in the closure $\Ga$ of $G$. Hence the solution of $\Car\de$ is not essentially unique. 
Similar conclusions hold for another interesting class of tangents, which we call {\em flat}. These are the tangents of the form \[ (\la,v)=\left( (\beta+\bar\beta z,z), c(\bar\beta,1)\right) \] for some $\beta\in\mathbb{D}$, $z\in\mathbb{D}$ and $c\in\mathbb{C}\setminus \{0\}$. It is an entertaining calculation to show that \begin{equation}\label{entertain} |(\la,v)|_{\mathrm{car}}=\frac{|D_v\Phi_\omega(\la)|}{1-|\Phi_\omega(\la)|^2}=\frac{|c|}{1-|z|^2} \end{equation} for all $\omega\in\mathbb{T}$. Again, the solution to $\mathrm{Car}\,(\la,v)$ is far from being essentially unique. There are also tangents $\delta\in TG$ such that $\Phi_\omega$ solves $\mathrm{Car}\,\delta$ for exactly two values of $\omega$ in $\mathbb{T}$; we call these {\em purely balanced} tangents. They can be described concretely as follows. For any hyperbolic automorphism $m$ of $\mathbb{D}$ (that is, one that has two fixed points $\omega_1$ and $\omega_2$ in $\mathbb{T}$) let $h_m$ in $G(\mathbb{D})$ be given by \[ h_m(z)=(z+m(z),zm(z)) \] for $z\in\mathbb{D}$. A purely balanced tangent has the form \begin{equation}\label{pbexpress} \delta=(h_m(z),c h_m'(z)) \end{equation} for some hyperbolic automorphism $m$ of $\mathbb{D}$, some $z\in\mathbb{D}$ and some $c\in \mathbb{C}\setminus\{0\}$. It is easy to see that, for $\omega\in\mathbb{T}$, the composition $\Phi_\omega\circ h_m$ is a rational inner function of degree at most $2$ and that the degree reduces to $1$ precisely when $\omega$ is either $\bar\omega_1$ or $\bar\omega_2$. Thus, for these two values of $\omega$ (and only these), $\Phi_\omega\circ h_m$ is an automorphism of $\mathbb{D}$. It follows that $\Phi_\omega$ solves $\mathrm{Car}\,\delta$ if and only if $\omega=\bar\omega_1$ or $\bar\omega_2$. 
A fourth type of tangent, which we call {\em exceptional}, is similar to the purely balanced type, but differs in that the hyperbolic automorphism $m$ of $\mathbb{D}$ is replaced by a {\em parabolic} automorphism, that is, an automorphism $m$ of $\mathbb{D}$ which has a single fixed point $\omega_1$ in $\mathbb{T}$, which has multiplicity $2$. The same argument as in the previous paragraph shows that $\Phi_\omega$ solves the Carath\'eodory problem if and only if $\omega=\bar\omega_1$. The fifth and final type of tangent is called {\em purely unbalanced}. It consists of the tangents $\delta=(\la,v)\in TG$ such that $\Phi_\omega$ solves $\mathrm{Car}\,\delta$ for a unique value $\e^{\ii t_0}$ of $\omega$ in $\mathbb{T}$ and \begin{equation}\label{disting} \left. \frac{d^2}{dt^2} \frac{|D_v\Phi_{\e^{\ii t}}(\la)|}{1-|\Phi_{\e^{\ii t}}(\la)|^2}\right|_{t=t_0} < 0. \end{equation} The last inequality distinguishes purely unbalanced from exceptional tangents -- the left hand side of equation \eqref{disting} is equal to zero for exceptional tangents. The five types of tangent are discussed at length in our paper \cite{aly2016}. We proved \cite[Theorem 3.6]{aly2016} a `pentachotomy theorem', which states that every \nd tangent in $TG$ is of exactly one of the above five types. We also give, for a representative tangent of each type, a cartoon showing the unique complex geodesic in $G$ touched by the tangent \cite[Appendix B]{aly2016}. It follows trivially from Theorem~\ref{extthm10} that, for every \nd tangent $\delta\in TG$, either \begin{enumerate} \item[\textup{(1)}] there exists a unique $\omega\in\mathbb{T}$ such that $\Phi_\omega$ solves $\mathrm{Car}\,\delta$, or \item[\textup{(2)}] there exist at least two values of $\omega$ in $\mathbb{T}$ such that $\Phi_\omega$ solves $\mathrm{Car}\,\delta$. 
\end{enumerate} The above discussion shows that Case (1) obtains for purely unbalanced and exceptional tangents, while Case (2) holds for royal, flat and purely balanced tangents. For the purpose of this paper, the message to be drawn is that Case (1) is generic in the following sense. Consider a point $\la\in G$. Each tangent $v$ in $T_\la G$ has a `complex direction' $\mathbb{C} v$, which is a one-dimensional subspace of $\mathbb{C}^2$, or in other words, a point of the projective space $\mathrm{CP}^2$. The directions corresponding to the royal (if any) and flat tangents at $\la$ are just single points in $\mathrm{CP}^2$, while, from the constructive nature of the expression \eqref{pbexpress} for a purely balanced tangent, it is easy to show that there is a smooth one-real-parameter curve of purely balanced directions (see \cite[Section 1]{aly2017}). It follows that the set of directions $\mathbb{C} v\in \mathrm{CP}^2$ for which a unique $\Phi_\omega$ solves $\mathrm{Car}\,\delta$ contains a dense open set in $\mathrm{CP}^2$. To summarise: \begin{proposition}\label{generic} For every $\la\in G$ there exists a dense open set $V_\la$ in $\mathrm{CP}^2$ such that whenever $\mathbb{C} v \in V_\la$, there exists a unique $\omega\in\mathbb{T}$ such that $\Phi_\omega$ solves $\mathrm{Car}\,(\la,v)$. \end{proposition} \section{Tangents with a unique extremal $\Phi_\omega$}\label{Uniq} In Section~\ref{5types} we discussed extremal functions of the special form $\Phi_\omega, \ \omega \in\mathbb{T}$, for the Carath\'eodory problem in $G$. However, there is no reason to expect that the $\Phi_\omega$ will be the only extremal functions. 
For example, if $\delta=(\la,v)$ is a \nd tangent and $\Phi_{\omega_1}, \dots,\Phi_{\omega_k}$ all solve $\mathrm{Car}\,\delta$, then one can generate a large class of other extremal functions as follows. Choose an automorphism $m_j$ of $\mathbb{D}$ such that $m_j\circ \Phi_{\omega_j}(\la)=0$ and $D_v(m_j\circ\Phi_{\omega_j})(\la)> 0$ for $ j=1,\dots,k$. Then each $m_j\circ \Phi_{\omega_j}$ solves $\mathrm{Car}\,\delta$, and so does any convex combination of them. Nevertheless, if there is a {\em unique} $\omega\in\mathbb{T}$ such that $\Phi_\omega$ is extremal for $\mathrm{Car}\,\delta$ then the solution of $\mathrm{Car}\,\delta$ {\em is} essentially unique. \begin{theorem}\label{ess1} Let $\delta$ be a \nd tangent in $G$ such that $\Phi_\omega$ solves $\mathrm{Car}\,\delta$ for a unique value of $\omega$ in $\mathbb{T}$. If $\psi$ solves $\mathrm{Car}\,\delta$ then there exists an automorphism $m$ of $\mathbb{D}$ such that $\psi=m\circ \Phi_\omega$. \end{theorem} For the proof recall the following model formula \cite[Definition 2.1 and Theorem 2.2]{AY2017}. \begin{definition}\label{defGmodel} A $G$-\emph{model} for a function $\varphi$ on $G$ is a triple $(\mathcal{M},T,u)$ where $\mathcal{M}$ is a separable Hilbert space, $T$ is a contraction acting on $\mathcal{M}$ and $u:G \to \mathcal{M}$ is an analytic map such that, for all $s,t\in G$, \begin{equation}\label{modelform} 1-\overline{\varphi(t)}\varphi(s)= \ip{ (1-t_T^* s_T) u(s)}{u(t)}_{\mathcal{M}} \end{equation} where, for $s\in G$, \[ s_T \stackrel{\mathrm{def}}{=} (2s^2T-s^1)(2-s^1T)^{-1}. \] A $G$-model $(\mathcal{M},T,u)$ is {\em unitary} if $T$ is a unitary operator on $\mathcal{M}$. 
\end{definition} For any domain $\Omega$ we define the {\em Schur class} $\ess(\Omega)$ to be the set of holomorphic maps from $\Omega$ to the closed unit disc $\mathbb{D}^-$. \begin{theorem}\label{modelGthm} Let $\varphi$ be a function on $G$. The following three statements are equivalent. \begin{enumerate} \item[\textup{(1)}] $\varphi\in\ess(G)$; \item[\textup{(2)}] $\varphi$ has a $G$-model; \item[\textup{(3)}] $\varphi$ has a unitary $G$-model $(\mathcal{M}, T, u)$. \end{enumerate} \end{theorem} From a $G$-model of a function $\varphi\in\ess(G)$ one may easily proceed by means of a standard lurking isometry argument to a realization formula \[ \varphi(s)=A+Bs_T(1-Ds_T)^{-1} C, \quad \mbox{ all } s\in G, \] for $\varphi$, where $\bbm A & B \\ C & D \ebm$ is a contractive or unitary colligation on $\mathbb{C}\oplus\mathcal{M}$. However, for the present purpose it is convenient to work directly from the $G$-model. We also require a long-established fact about $G$ \cite{ay2004}, related to the fact that the Carath\'eodory and Kobayashi metrics on $TG$ coincide. \begin{lemma}\label{k=c} If $\delta$ is a \nd tangent to $G$ and $\varphi$ solves $\mathrm{Car}\,\delta$ then there exists $k$ in $G(\mathbb{D})$ such that $\varphi\circ k=\idd$. Moreover, if $\psi$ is any solution of $\mathrm{Car}\,\delta$ then $\psi\circ k$ is an automorphism of $\mathbb{D}$. \end{lemma} We shall need some minor measure-theoretic technicalities. \begin{lemma}\label{baspos} Let $Y$ be a set and let \[ A:\mathbb{T}\times Y\times Y \to \mathbb{C} \] be a map such that \begin{enumerate}[\rm (1)] \item $A(\cdot,z,w)$ is continuous on $\mathbb{T}$ for every $z,w \in Y$; \item $A(\eta,\cdot,\cdot)$ is a positive kernel on $Y$ for every $\eta\in\mathbb{T}$. 
\end{enumerate} Let $\mathcal{M}$ be a separable Hilbert space, let $T$ be a unitary operator on $\mathcal{M}$ with spectral resolution \[ T=\int_\mathbb{T} \eta \, \mathrm{d} E(\eta) \] and let $v:Y\to\mathcal{M}$ be a mapping. Let \begin{equation}\label{defC} C(z,w)= \int_\mathbb{T} A(\eta,z,w)\, \ip{\mathrm{d} E(\eta)v(z)}{v(w)} \end{equation} for all $z,w \in Y$. Then $C$ is a positive kernel on $Y$. \end{lemma} \begin{proof} Consider any finite subset $\{z_1,\dots,z_N\}$ of $Y$, and write $v_i$ for $v(z_i)$. We must show that the $N\times N$ matrix \[ \bbm C(z_i,z_j) \ebm_{i,j=1}^N \] is positive. Since $A(\cdot,z_i,z_j)$ is continuous on $\mathbb{T}$ for each $i$ and $j$, we may approximate the $N\times N$-matrix-valued function $[A(\cdot,z_i,z_j)]$ uniformly on $\mathbb{T}$ by integrable simple functions of the form \[ [f_{ij}]= \sum_\ell b^\ell \chi_{\tau_\ell} \] for some $N\times N$ matrices $b^\ell$ and Borel sets $\tau_\ell$, where $\chi$ denotes `characteristic function'. Moreover we may do this in such a way that each $b^\ell$ is a value $[A(\eta,z_i,z_j)]$ for some $\eta\in\mathbb{T}$, hence is positive. Then \begin{equation}\label{approxsum} \bbm \int_\mathbb{T} f_{ij}(\eta)\, \ip{\mathrm{d} E(\eta) v_i}{v_j} \ebm_{i,j=1}^N =\sum_\ell b^\ell * \bbm \ip{E(\tau_\ell)v_i}{v_j} \ebm_{i,j=1}^N \end{equation} where $*$ denotes the Schur (or Hadamard) product of matrices. Since the matrix $\bbm \ip{E(\tau_\ell)v_i}{v_j} \ebm$ is positive and the Schur product of positive matrices is positive, every approximating sum of the form \eqref{approxsum} is positive, and hence the integral in equation \eqref{defC} is a positive matrix. \end{proof} \begin{lemma}\label{meas2} For $i,j=1,2$ let $a_{ij}:\mathbb{T}\to\mathbb{C}$ be continuous and let each $a_{ij}$ have only finitely many zeros in $\mathbb{T}$. 
Let $\nu_{ij}$ be a complex-valued Borel measure on $\mathbb{T}$ such that, for every Borel set $\tau$ in $\mathbb{T}$, \[ \bbm \nu_{ij}(\tau) \ebm_{i,j=1}^2 \geq 0. \] Let $X$ be a Borel subset of $\mathbb{T}$ and suppose that \[ \bbm a_{ij}(\eta) \ebm_{i,j=1}^2\; \mbox{ is positive and of rank } 2 \mbox{ for all } \eta \in X. \] Let \[ C= \bbm c_{ij}\ebm_{i,j=1}^2 \] where \[ c_{ij}= \int_X a_{ij}(\eta)\, \mathrm{d} \nu_{ij}(\eta) \qquad \mbox{ for }i,j=1,2. \] If $\operatorname{rank} C \leq 1$ then either $c_{11}=0$ or $c_{22}=0$. \end{lemma} \begin{proof} By hypothesis the set \[ Z\stackrel{\mathrm{def}}{=} \bigcup_{i,j=1}^2 \{ \eta\in\mathbb{T}: a_{ij}(\eta)=0\} \] is finite. Exactly as in the proof of Lemma~\ref{baspos}, for any Borel set $\tau$ in $\mathbb{T}$, \begin{equation}\label{posint} \bbm \int_\tau a_{ij}\, \mathrm{d}\nu_{ij} \ebm_{i,j=1}^2 \geq 0. \end{equation} Suppose that $C$ has rank at most $1$ but $c_{11}$ and $c_{22}$ are both nonzero. Then there exists a nonzero $2\times 1$ matrix $c=[c_1 \, c_2]^T$ such that $C=cc^*$; since $c_{11}$ and $c_{22}$ are nonzero, so are $c_1$ and $c_2$. For any Borel set $\tau\subset X$, \[ \bbm \int_\tau a_{ij}\, \mathrm{d}\nu_{ij} \ebm \leq \bbm \int_\tau+\int_{X\setminus\tau} a_{ij}\, \mathrm{d}\nu_{ij} \ebm = \bbm \int_{X} a_{ij}\, \mathrm{d}\nu_{ij} \ebm= C= cc^*. \] Consequently there exists a unique $\mu(\tau) \in [0,1]$ such that \begin{equation}\label{gotmu} \bbm \int_\tau a_{ij}\, \mathrm{d}\nu_{ij} \ebm = \mu(\tau) C. \end{equation} It is easily seen that $\mu$ is a Borel probability measure on $X$. Note that if $\eta\in Z$, say $a_{ij}(\eta)=0$, then on taking $\tau=\{\eta\}$ in equation \eqref{gotmu}, we deduce that \[ \mu(\{\eta\})c_i\bar c_j =0. \] Since $c_1, c_2$ are nonzero, it follows that $\mu(\{\eta\})=0$. Hence $\mu(Z)=0$. 
Equation \eqref{gotmu} states that $\mu$ is absolutely continuous with respect to $\nu_{ij}$ on $X$ and the Radon-Nikodym derivative is given by \[ c_i \bar c_j \frac{\mathrm{d} \mu}{\mathrm{d}\nu_{ij} }= a_{ij} \] for $i,j=1,2$. Hence, on $X\setminus Z$, \begin{equation}\label{RNd} \mathrm{d}\nu_{ij} = \frac{c_i\bar c_j}{ a_{ij}} \mathrm{d}\mu, \qquad i,j=1,2. \end{equation} Pick a compact subset $K$ of $X\setminus Z$ such that $\mu(K)>0$. This is possible, since $\mu(X\setminus Z) =1$ and Borel measures on $\mathbb{T}$ are automatically regular. By compactness, there exists a point $\eta_0\in K$ such that, for every open neighbourhood $U$ of $\eta_0$, \[ \mu(U\cap K) >0. \] Notice that, for $\eta\in \mathbb{T}\setminus Z$, \[ \det\bbm \displaystyle \frac{c_i\bar c_j}{ a_{ij}(\eta)} \ebm_{i,j=1}^2 = -\frac {|c_1c_2|^2\det\bbm a_{ij}(\eta)\ebm}{a_{11}(\eta)a_{22}(\eta)|a_{12}(\eta)|^2} < 0. \] Thus $[c_i \bar c_j a_{ij}(\eta_0)^{-1}]$ has a negative eigenvalue. Therefore there exists a unit vector $x\in\mathbb{C}^2$, an $\varepsilon >0$ and an open neighbourhood $U$ of $\eta_0$ in $\mathbb{T}$ such that \[ \ip{ \bbm c_i\bar c_j a_{ij}(\eta)^{-1}\ebm x}{x} < -\varepsilon \] for all $\eta\in U$. We then have \begin{align*} \ip { \bbm \nu_{ij}(U\cap K) \ebm x}{x} &= \ip{\int_{U\cap K} \bbm c_i\bar c_j a_{ij}(\eta)^{-1} \ebm \mathrm{d}\mu(\eta)x}{x} \\ &= \int_{U\cap K} \ip{ \bbm c_i\bar c_j a_{ij}(\eta)^{-1} \ebm x}{x} \mathrm{d}\mu(\eta) \\ & < -\varepsilon \mu(U\cap K) \\ & < 0. \end{align*} This contradicts the positivity of the matricial measure $\bbm \nu_{ij}\ebm$. Hence either $c_1=0$ or $c_2=0$. 
\end{proof} \begin{proof}[Proof of Theorem {\rm \ref{ess1}}] Let $\delta$ be a \nd tangent to $G$ such that $\Phi_\omega$ is the unique function from the collection $\{\Phi_\eta\}_{\eta\in\mathbb{T}}$ that solves $\mathrm{Car}\,\delta$. Let $\psi$ be a solution of $\mathrm{Car}\,\delta$. We must find an automorphism $m$ of $\mathbb{D}$ such that $\psi=m\circ\Phi_\omega$. By Lemma~\ref{k=c}, there exists $k$ in $G(\mathbb{D})$ such that \begin{equation}\label{getk} \Phi_\omega\circ k= \idd, \end{equation} and moreover, the function \begin{equation}\label{gotm} m\stackrel{\mathrm{def}}{=} \psi\circ k \end{equation} is an automorphism of $\mathbb{D}$. Let \begin{equation}\label{defphi} \varphi=m^{-1}\circ \psi. \end{equation} Then \begin{equation}\label{propphi} \varphi\circ k= m^{-1}\circ \psi\circ k = m^{-1}\circ m=\idd. \end{equation} By Theorem~\ref{modelGthm}, there is a unitary $G$-model $(\mathcal{M}, T, u)$ for $\varphi$. By the Spectral Theorem for unitary operators, there is a spectral measure $E(\cdot)$ on $\mathbb{T}$ with values in $\mathcal{B}(\mathcal{M})$ such that \[ T= \int_\mathbb{T} \eta \; \mathrm{d} E(\eta). \] Thus, for $s\in G$, \[ s_T = (2s^2T-s^1)(2-s^1T)^{-1} =\int_\mathbb{T} \Phi_\eta(s) \; \mathrm{d} E(\eta). \] Therefore, for all $s,t \in G$, \begin{align}\label{63.1} 1-\overline{\varphi(t)}\varphi(s) &= \ip{ (1-t_T^* s_T) u(s)}{u(t)}_{\mathcal{M}} \notag \\ &= \int_\mathbb{T} \left( 1-\overline{\Phi_\eta(t)}\Phi_\eta(s)\right) \ip{\mathrm{d} E(\eta)u(s)}{u(t)}_{\mathcal{M}}. \end{align} Consider $z,w\in\mathbb{D}$, put $s=k(z), \, t=k(w)$ in equation \eqref{63.1}. 
Invoke equation \eqref{propphi} and divide equation \eqref{63.1} through by $1-\bar w z$ to obtain, for $z,w\in\mathbb{D}$, \begin{align} 1 &= \int_{\{\omega\}}+\int_{\mathbb{T}\setminus\{\omega\}} \frac{1-\overline{\Phi_\eta \circ k(w)}\Phi_\eta\circ k(z)}{1-\bar w z} \ip{\mathrm{d} E(\eta)u\circ k(z)}{u\circ k(w)} \notag \\ &= I_1+I_2 \label{63.2} \end{align} where \begin{align}\label{defI2} I_1(z,w) &= \ip{E(\{\omega\})u\circ k(z)}{u\circ k(w)},\notag\\ I_2(z,w) &= \int_{\mathbb{T}\setminus \{\omega\}} \frac{1-\overline{\Phi_\eta \circ k(w)}\Phi_\eta\circ k(z)}{1-\bar w z} \ip{\mathrm{d} E(\eta)u\circ k(z)}{u\circ k(w)}. \end{align} The left hand side $1$ of equation \eqref{63.2} is a positive kernel of rank one on $\mathbb{D}$, and $I_1$ is also a positive kernel. The integrand in $I_2$ is a positive kernel on $\mathbb{D}$ for each $\eta\in\mathbb{T}$, by Pick's theorem, since $\Phi_\eta\circ k$ is in the Schur class. Hence, by Lemma~\ref{baspos}, $I_2$ is also a positive kernel on $\mathbb{D}$. Since $I_1+I_2$ has rank $1$, it follows that $I_2$ has rank at most $1$ as a kernel on $\mathbb{D}$. By hypothesis, $\Phi_\eta$ does {\em not} solve $\mathrm{Car}\,\delta$ for any $\eta\in \mathbb{T}\setminus\{\omega\}$. Therefore $\Phi_\eta\circ k$ is a Blaschke product of degree $2$, and consequently, for any choice of distinct points $z_1,z_2$ in $\mathbb{D}$, the $2\times 2$ matrix \begin{equation}\label{defaij} \bbm a_{ij}(\eta) \ebm_{i,j=1}^2\stackrel{\mathrm{def}}{=} \bbm \displaystyle \frac{1-\overline{\Phi_\eta \circ k(z_i)}\Phi_\eta\circ k(z_j)}{1-\bar z_i z_j}\ebm_{i,j=1}^2 \end{equation} is a positive matrix of rank $2$ for every $\eta\in \mathbb{T}\setminus\{\omega\}$. In particular, $a_{11}(\eta)>0$ for all $\eta\in \mathbb{T}\setminus\{\omega\}$. 
Moreover, each $a_{ij}$ has only finitely many zeros in $\mathbb{T}$, as may be seen from the fact that $a_{ij}$ is a ratio of trigonometric polynomials in $\eta$. To be explicit, if we temporarily write $k=(k^1,k^2):\mathbb{D}\to G$, then equation \eqref{defaij} expands to $a_{ij}(\eta)=P(\eta)/Q(\eta)$ where \begin{align*} P(\eta) &=4\left(1-\overline{k^2(z_i)}k^2(z_j)\right)-2\eta\left(k^1(z_j)- \overline{k^1(z_i)}k^2(z_j)\right) \\ & \hspace*{2cm} -2\bar\eta\left(\overline{k^1(z_i)} - \overline{k^2(z_i)}k^1(z_j)\right), \\ Q(\eta)&= (1-\bar z_iz_j)\overline{(2-\eta k^1(z_i))}(2-\eta k^1(z_j)). \end{align*} Let \[ \nu_{ij} = \ip{E(\cdot) u\circ k(z_i)}{u\circ k(z_j)}. \] Clearly $[\nu_{ij}(\tau)] \geq 0$ for every Borel subset $\tau$ of $ \mathbb{T}\setminus\{\omega\}$. By definition \eqref{defI2}, \[ I_2(z_i,z_j) = \int_{\mathbb{T}\setminus\{\omega\}} a_{ij} \, \mathrm{d} \nu_{ij} \] for $i,j=1,2$. Moreover, by equation \eqref{63.2}, \[ [I_2(z_i,z_j)]\leq [I_1(z_i,z_j)]+[I_2(z_i,z_j)] =\bbm 1&1\\1&1\ebm. \] It follows that \begin{equation}\label{I2kap} \bbm \int_{\mathbb{T}\setminus\{\omega\}} a_{ij} \, \mathrm{d} \nu_{ij} \ebm = [I_2(z_i,z_j)] = \kappa \bbm 1&1\\1&1\ebm \end{equation} for some $\kappa \in [0,1]$. We may now apply Lemma~\ref{meas2} with $X=\mathbb{T}\setminus \{\omega\}$ to deduce that $\kappa=0$ and hence $I_2(z_i,z_j)=0$. In particular, \[ 0=I_2(z_1,z_1)= \int_{\mathbb{T}\setminus\{\omega\}} a_{11} \, \mathrm{d}\nu_{11}. \] Since $a_{11}>0$ on $\mathbb{T}\setminus\{\omega\}$, it follows that $\nu_{11}(\mathbb{T}\setminus\{\omega\})=0$, which is to say that \begin{equation}\label{2nd} E(\mathbb{T}\setminus\{\omega\}) u\circ k(z_1) = 0. 
\end{equation} Since $z_1,z_2$ were chosen arbitrarily in $\mathbb{D}$, we have $I_2 \equiv 0$ and therefore, by equation \eqref{63.2}, \begin{equation}\label{2I1} 1= I_1= \ip{E(\{\omega\})u\circ k(z)}{u\circ k(w)} \end{equation} for all $z,w\in\mathbb{D}$. It follows that \[ \|E(\{\omega\})u\circ k(z) -E(\{\omega\})u\circ k(w)\|^2=0 \] for all $z,w$, and hence that there exists a unit vector $x\in\mathcal{M}$ such that \[ E(\{\omega\})u\circ k(z) =x \] for all $z\in\mathbb{D}$. In equation \eqref{63.1}, choose $t=k(w)$ for some $w\in \mathbb{D}$. Since $\Phi_\omega\circ k=\idd$, we have for all $s\in G$, \begin{align*} 1-\bar w\varphi(s)&= 1-\overline{\varphi\circ k(w)} \varphi(s) \\ &=\int_{\{\omega\}}+\int_{\mathbb{T}\setminus\{\omega\}} \left(1-\overline{\Phi_\eta\circ k(w)}\Phi_\eta(s)\right) \ip{\mathrm{d} E(\eta)u(s)}{u\circ k(w)} \\ &=(1-\bar w \Phi_\omega(s))\ip{u(s)}{x} + \\ &\hspace*{1cm} \int_{\mathbb{T}\setminus\{\omega\}} \left(1-\overline{\Phi_\eta\circ k(w)}\Phi_\eta(s)\right) \ip{\mathrm{d} E(\eta)u(s)}{u\circ k(w)}. \end{align*} In view of equation \eqref{2nd}, the scalar spectral measure in the second term on the right hand side is zero on $\mathbb{T}\setminus \{\omega\}$. Hence the integral is zero, and so, for all $s\in G$ and $w\in\mathbb{D}$, \begin{align}\label{64.1} 1-\bar w \varphi(s)&= (1-\bar w \Phi_\omega(s))\ip{u(s)}{x}. \end{align} Put $w=0$ to deduce that \[ \ip{u(s)}{x}=1 \] for all $s\in G$, then equate coefficients of $\bar w$ to obtain $\varphi=\Phi_\omega$. Hence, by equation \eqref{gotm}, \[ \psi=m\circ \varphi=m\circ \Phi_\omega \] as required. \end{proof} On combining Theorem~\ref{ess1} and Proposition~\ref{generic} we obtain the statement in the abstract. \begin{corollary}\label{genuniq} Let $\la\in G$. 
For a generic direction $\mathbb{C} v$ in $\mathrm{CP}^2$, the solution of the Carath\'eodory problem $\mathrm{Car}\,(\la, v)$ is essentially unique. \end{corollary} It will sometimes be useful in the sequel to distinguish a particular Carath\'eodory extremal function from a class of functions that are equivalent up to composition with automorphisms of $\mathbb{D}$. Consider any tangent $\delta\in TG$ and any solution $\varphi$ of $\mathrm{Car}\,\delta$. The functions $m\circ\varphi$, with $m$ an automorphism of $\mathbb{D}$, also solve $\mathrm{Car}\,\delta$, and among them there is exactly one that has the property \[ m\circ\varphi(\la)=0 \quad \mbox{ and } \quad D_v(m\circ \varphi)(\la) > 0, \] or equivalently, \begin{equation}\label{special} (m\circ\varphi)_*(\delta) = (0,|\delta|_{\mathrm{car}}). \end{equation} We shall say that $\varphi$ is {\em well aligned at} $\delta$ if $\varphi_*(\delta)=(0,|\delta|_{\mathrm{car}})$. With this terminology the following is a re-statement of Theorem~\ref{ess1}. \begin{corollary} If $\delta$ is a \nd tangent in $G$ such that $\Phi_\omega$ solves $\mathrm{Car}\,\delta$ for a unique value of $\omega$ in $\mathbb{T}$ then there is a unique well-aligned solution of $\mathrm{Car}\,\delta$. It is expressible as $m\circ \Phi_\omega$ for some automorphism $m$ of $\mathbb{D}$. \end{corollary} \section{Royal tangents}\label{royal} At the opposite extreme from the tangents studied in the last section are the royal tangents to $G$. Recall that these have the form \begin{equation}\label{roytgt} \delta=\left((2z,z^2),2c(1,z)\right) \end{equation} for some $z\in\mathbb{D}$ and nonzero complex number $c$. 
As we observed in Section~\ref{5types}, \[ |\delta|_{\mathrm{car}}= \frac{|c|}{1-|z|^2} \] and {\em all} $\Phi_\omega, \omega\in\mathbb{T}$, solve $\mathrm{Car}\,\delta$. In this section we shall describe {\em all} extremal functions for $\mathrm{Car}\,\delta$ for royal tangents $\delta$, not just those of the form $\Phi_\omega$. \begin{theorem}\label{royalthm} Let $\delta\in TG$ be the royal tangent \begin{equation}\label{roytang} \delta=\left((2z,z^2),2c(1,z)\right) \end{equation} for some $z\in\mathbb{D}$ and $c\in\mathbb{C}\setminus\{0\}$. A function $\varphi\in\mathbb{D}(G)$ solves $\mathrm{Car}\,\delta$ if and only if there exists an automorphism $m$ of $\mathbb{D}$ and $\Psi\in\ess(G)$ such that, for all $s\in G$, \begin{equation}\label{theformula} \varphi(s)=m\left( \half s^1 +\tfrac 14 ((s^1)^2-4s^2)\frac{\Psi(s)}{1-\half s^1\Psi(s)}\right). \end{equation} \end{theorem} \begin{proof} We shall lift the problem $\mathrm{Car}\,\delta$ to a Carath\'eodory problem on the bidisc $\mathbb{D}^2$, where we can use the results of \cite{AgM2} on the Nevanlinna-Pick problem on the bidisc. Let $\pi:\mathbb{D}^2\to G$ be the `symmetrization map', \[ \pi(\la^1,\la^2)=(\la^1+\la^2,\la^1\la^2) \] and let $k:\mathbb{D}\to\mathbb{D}^2$ be given by $k(\zeta)=(\zeta,\zeta)$ for $\zeta\in\mathbb{D}$. Consider the royal tangent $\delta$ of equation \eqref{roytang} and let \[ \delta_{zc}=\left((z,z),(c,c)\right) \in T\mathbb{D}^2. 
\] Observe that \[ \pi'(\la)= \bbm 1 & 1\\ \la^2 & \la^1 \ebm, \] and so \begin{equation}\label{dedezc} \pi_*(\delta_{zc})= \left(\pi(z,z),\pi'(z,z)(c,c)\right)= \left((2z,z^2),2c(1,z)\right)=\delta, \end{equation} while \[ k_*((z,c))=(k(z),k'(z)c)=\left((z,z),(c,c)\right)=\delta_{zc}. \] Consider any $\varphi\in\mathbb{D}(G)$. Figure 1 illustrates the situation. \begin{figure} \centering \includegraphics{allroyal} \caption{} \end{figure} It is known that every Carath\'eodory problem on the bidisc is solved by one of the two co-ordinate functions $F_j(\la)=\la^j$ for $j=1$ or $2$ (for a proof see, for example, \cite[Theorem 2.3]{aly2016}). Thus \begin{align*} |\delta_{zc}|_{\mathrm{car}}^{\mathbb{D}^2}&= \max_{j=1,2} \frac{|D_{(c,c)}F_j(z,z)|}{1-|F_j(z,z)|^2} \\ &=\frac{|c|}{1-|z|^2} \\ &=|\delta|_{\mathrm{car}}. \end{align*} Here of course the superscript $\mathbb{D}^2$ indicates the Carath\'eodory extremal problem on the bidisc. Hence, for $\varphi\in\mathbb{D}(G)$, \begin{align}\label{equiv1} \varphi\circ\pi\mbox{ solves } \mathrm{Car}\,\delta_{zc} &\iff |(\varphi\circ\pi)_*(\delta_{zc})|=\frac{|c|}{1-|z|^2} \notag\\ &\iff |\varphi_*\circ \pi_*(\delta_{zc})|=\frac{|c|}{1-|z|^2} \quad \mbox{ by the chain rule}\notag\\ &\iff |\varphi_*(\delta)|=\frac{|c|}{1-|z|^2} \quad \hspace*{1.2cm}\mbox{ by equation \eqref{dedezc}} \notag \\ &\iff \varphi\mbox{ solves }\mathrm{Car}\,\delta. \end{align} Next observe that a function $\psi\in\mathbb{D}(\mathbb{D}^2)$ solves $\mathrm{Car}\,\delta_{zc}$ if and only if $\psi\circ k$ is an automorphism of $\mathbb{D}$. 
For if $\psi\circ k$ is an automorphism of $\mathbb{D}$ then it satisfies \[ |(z,c)| = |(\psi\circ k)_*(z,c)|= |\psi_*\circ k_*(z,c)|= |\psi_*(\delta_{zc})|, \] which is to say that $\psi$ solves $\mathrm{Car}\,\delta_{zc}$. Conversely, if $\psi$ solves $\mathrm{Car}\,\delta_{zc}$ then $\psi\circ k$ is an analytic self-map of $\mathbb{D}$ that preserves the Poincar\'e metric of a \nd tangent to $\mathbb{D}$, and is therefore (by the Schwarz-Pick lemma) an automorphism of $\mathbb{D}$. On combining this observation with equivalence \eqref{equiv1} we deduce that \begin{align}\label{equiv2} \varphi \mbox{ solves } \mathrm{Car}\,\delta\iff & \mbox{ there exists an automorphism }m \mbox{ of } \mathbb{D} \notag \\ & \mbox{ such that } m^{-1}\circ \varphi\circ\pi \circ k= \idd. \end{align} For a function $f\in\mathbb{D}(\mathbb{D}^2)$, it is easy to see that $f\circ k=\idd$ if and only if $f$ solves the Nevanlinna-Pick problem \begin{equation}\label{NPD2} (0,0) \mapsto 0, \qquad (\half,\half) \mapsto \half. \end{equation} See \cite[Subsection 11.5]{AgM2} for the Nevanlinna-Pick problem in the bidisc. Hence \begin{align}\label{equiv3} \varphi \mbox{ solves } \mathrm{Car}\,\delta & \iff \mbox{ there exists an automorphism }m \mbox{ of } \mathbb{D} \mbox{ such that } \notag\\ & m^{-1}\circ \varphi\circ\pi\mbox{ solves the Nevanlinna-Pick problem \eqref{NPD2}.} \end{align} In \cite[Subsection 11.6]{AgM2} Agler and McCarthy use realization theory to show the following. 
{\em A function $f\in\ess(\mathbb{D}^2)$ satisfies the interpolation conditions \begin{equation}\label{interp} f(0,0)=0, \qquad f(\half,\half)=\half \end{equation} if and only if there exist $t\in[0,1]$ and $\Theta$ in the Schur class of the bidisc such that, for all $\la\in\mathbb{D}^2$, \begin{equation}\label{formpsi} f(\la)= t\la^1+(1-t)\la^2+ t(1-t)(\la^1-\la^2)^2 \frac{\Theta(\la)}{1-[(1-t)\la^1+t\la^2]\Theta(\la)}. \end{equation} } Inspection of the formula \eqref{formpsi} reveals that $f$ is symmetric if and only if $t=\half$ and $\Theta$ is symmetric. Hence the symmetric functions in $\ess(\mathbb{D}^2)$ that satisfy the conditions \eqref{interp} are those given by \begin{align}\label{formf} f(\la) &=\half \la^1+\half\la^2 + \tfrac 14 (\la^1-\la^2)^2 \frac{\Theta(\la)}{1-\half(\la^1+\la^2)\Theta(\la)} \end{align} for some symmetric $\Theta \in\ess(\mathbb{D}^2)$. Such a $\Theta$ induces a unique function $\Psi\in\ess(G)$ such that $\Theta=\Psi\circ\pi$, and we may write the symmetric solutions $f$ of the problem \eqref{interp} in the form $f=\tilde f\circ\pi$ where, for all $s=(s^1,s^2)$ in $G$, \begin{equation}\label{chipsi} \tilde f(s)= \half s^1 +\tfrac 14 ((s^1)^2-4s^2)\frac{\Psi(s)}{1-\half s^1\Psi(s)}. \end{equation} Let $\varphi$ solve $\mathrm{Car}\,\delta$. By the equivalence \eqref{equiv3}, there exists an automorphism $m$ of $\mathbb{D}$ such that $ m^{-1}\circ \varphi\circ\pi$ solves the Nevanlinna-Pick problem \eqref{NPD2}. Clearly $m^{-1}\circ\varphi\circ\pi$ is symmetric. 
Hence there exists $\Psi\in\ess(G)$ such that, for all $s\in G$, \begin{equation}\label{formpsibis} m^{-1}\circ\varphi(s)= \half s^1 +\tfrac 14 ((s^1)^2-4s^2)\frac{\Psi(s)}{1-\half s^1\Psi(s)}. \end{equation} Thus $\varphi$ is indeed given by the formula \eqref{theformula}. Conversely, suppose that for some automorphism $m$ of $\mathbb{D}$ and $\Psi\in\ess(G)$, a function $\varphi$ is defined by equation \eqref{theformula}. Let $f=m^{-1}\circ\varphi\circ\pi$. Then $f$ is given by the formula \eqref{formf}, where $\Theta=\Psi\circ\pi$. Hence $f$ is a symmetric function that satisfies the interpolation conditions \eqref{interp}. By the equivalence \eqref{equiv3}, $\varphi$ solves $\mathrm{Car}\,\delta$. \end{proof} \section{Flat tangents}\label{flat} In this section we shall give a description of a large class of Carath\'eodory extremals for a flat tangent. Recall that a flat tangent has the form \begin{equation}\label{aflatgeo} \delta=\left( (\beta+\bar\beta z,z), c(\bar\beta,1)\right) \end{equation} for some $z\in\mathbb{D}$ and $c\neq 0$, where $\beta\in\mathbb{D}$. Such a tangent touches the `flat geodesic' \[ F_\beta\stackrel{\mathrm{def}}{=} \{ (\beta+\bar\beta w, w):w\in\mathbb{D}\}. \] The description depends on a remarkable property of sets of the form $\mathcal{R}\cup F_\beta, \ \beta\in\mathbb{D}$: they have the norm-preserving extension property in $G$ \cite[Theorem 10.1]{aly2016}. That is, if $g$ is any bounded analytic function on the variety $\mathcal{R}\cup F_\beta$, then there exists an analytic function $\tilde g$ on $G$ such that $g=\tilde g|\mathcal{R}\cup F_\beta$ and the supremum norms of $g$ and $\tilde g$ coincide. 
Indeed, the proof of \cite[Theorem 10.1]{aly2016} gives an explicit formula for one such $\tilde g$ in terms of a Herglotz-type integral. Let us call the norm-preserving extension $\tilde g$ of $g$ constructed in \cite[Chapter 10]{aly2016} the {\em special extension} of $g$ to $G$. It is a simple calculation to show that $\mathcal{R}$ and $F_\beta$ have a single point in common. By equation \eqref{entertain}, for $\Delta$ in equation \eqref{aflatgeo} \[ \mathrm{car}(\Delta) =\frac{|c|}{1-|z|^2}. \] \begin{theorem} Let $\Delta$ be the flat tangent \begin{equation}\label{aflatgeo2} \Delta=\left( (\beta+\bar\beta z,z), c(\bar\beta,1)\right) \end{equation} to $G$, where $\beta\in\mathbb{D}$ and $c\in\mathbb{C}\setminus\{0\}$. Let $\zeta,\eta$ be the points in $\mathbb{D}$ such that \[ (2\zeta,\zeta^2)=(\beta+\bar\beta\eta, \eta) \in \mathcal{R}\cap F_\beta \] and let $m$ be the unique automorphism of $\mathbb{D}$ such that \[ m_*((z,c))=(0,\mathrm{car}(\Delta)). \] For every function $h\in\ess(\mathbb{D})$ such that $h(\zeta)=m(\eta)$ the special extension $\tilde g$ to $G$ of the function \begin{equation}\label{defg} g: \mathcal{R}\cup F_\beta\to \mathbb{D}, \quad (2w,w^2)\mapsto h(w), \quad (\beta+\bar\beta w,w) \mapsto m(w) \end{equation} for $w\in\mathbb{D}$ is a well-aligned Carath\'eodory extremal function for $\Delta$. \end{theorem} \begin{proof} First observe that there is indeed a unique automorphism $m$ of $\mathbb{D}$ such that $m_*((z,c))=(0,\mathrm{car}(\Delta))$, by the Schwarz-Pick Lemma. Let \[ k(w)=(\beta+\bar\beta w,w) \quad \mbox{ for } w\in\mathbb{D}, \] so that $F_\beta=k(\mathbb{D})$ and $k_*((z,c))=\Delta$.
By the definition \eqref{defg} of $g$, $g\mathbb{C}irc k=m$. Consider any function $h\in\ess(\mathbb{D})$ such that $h(\zeta)=m(\eta)$. By \mathbb{C}ite[Lemma 10.5]{aly2016}, the function $g$ defined by equations \eqref{defg} is analytic on $\mathbb{C}alr\mathbb{C}up F_\begin{equation}ta$. We claim that the special extension $\mathbb{T}ilde g$ of $ g$ to $G$ is a well-aligned Carath\'eodory extremal function for $\mathbb{D}e$. By \mathbb{C}ite[Theorem 10.1]{aly2016}, $\mathbb{T}ilde g \in\mathbb{D}(G)$. Moreover \begin{equation}gin{align*} (\mathbb{T}ilde g)_*(\mathbb{D}e) &=(\mathbb{T}ilde g)_*\mathbb{C}irc k_*((z,c)) \\ &=(\mathbb{T}ilde g\mathbb{C}irc k)_*((z,c)) \\ &=(g\mathbb{C}irc k)_*((z,c)) \\ &=m_*((z,c))\\ &= (0,\mathbb{C}ar{\mathbb{D}e}) \end{align*} as required. Thus the Poincar\'e metric of $(\mathbb{T}ilde g)_*(\mathbb{D}e)$ on $T\mathbb{D}$ is \[ |(\mathbb{T}ilde g)_*(\mathbb{D}e)| = |(0,\mathbb{C}ar{\mathbb{D}e})| = \mathbb{C}ar{\mathbb{D}e}. \] Therefore $(\mathbb{T}ilde g)_*$ is a well aligned Carath\'eodory extremal function for $\mathbb{D}e$. \end{proof} Clearly the map $g\mapsto \mathbb{T}ilde g$ is injective, and so this procedure yields a large class of Carath\'eodory extremals for $\mathbb{D}e$, parametrized by the Schur class. \begin{equation}gin{remark} \mathbb{R}m In the converse direction, if $\varphi$ is any well-aligned Carath\'eodory extremal for $\mathbb{D}e$, then $\varphi$ is a norm-preserving extension of its restriction to $\mathbb{C}alr\mathbb{C}up F_\begin{equation}ta$, which is a function of the type \eqref{defg}. Thus the class of all well-aligned Carath\'eodory extremal functions for $\mathbb{D}e$ is given by the set of norm-preserving analytic extensions to $G$ of $g$ in equation \eqref{defg}, as $h$ ranges over functions in the Schur class taking the value $m(\eta)$ at $\zeta$. Typically there will be many such extensions of $g$, as can be seen from the proof of \mathbb{C}ite[Theorem 10.1]{aly2016}. 
An extension is obtained as the Cayley transform of a function defined by a Herglotz-type integral with respect to a probability measure $\mu$ on $\mathbb{T}^2$. In the proof of \mathbb{C}ite[Lemma 10.8]{aly2016}, $\mu$ is chosen to be the product of two measures $\mu_\mathbb{C}alr$ and $\mu_\mathbb{C}alf$ on $\mathbb{T}$; examination of the proof shows that one can equally well choose any measure $\mu$ on $\mathbb{T}^2$ such that \[ \mu(A\mathbb{T}imes\mathbb{T}) = \mu_\mathbb{C}alr(A), \quad \mu(\mathbb{T}\mathbb{T}imes A)= \mu_\mathbb{C}alf(A)\quad \mbox{ for all Borel sets } A \mbox{ in }\mathbb{T}. \] Thus each choice of $h\in\ess(\mathbb{D})$ satisfying $h(\zeta)=m(\eta)$ can be expected to give rise to many well-aligned Carath\'eodory extremals for $\mathbb{D}e$. \end{remark} \section{Purely balanced tangents}\label{purelybalanced} In this section we find a large class of Carath\'eodory extremals for purely balanced tangents in $G$ by exploiting an embedding of $G$ into the bidisc. \begin{equation}gin{lemma}\label{injective} Let \[ \Phi=(\Phi_{\omega_1}, \Phi_{\omega_2}): G \mathbb{T}o \mathbb{D}^2 \] where $\omega_1,\omega_2$ are distinct points in $\mathbb{T}$. Then $\Phi$ is an injective map from $G$ to $\mathbb{D}^2$. \end{lemma} \begin{equation}gin{proof} Suppose $\Phi$ is not injective. Then there exist distinct points $(s^1,s^2)$, $ (t^1,t^2)\in G$ such that $\Phi_{\omega_j}(s^1,s^2)=\Phi_{\omega_j}(t^1,t^2)$ for $j = 1, 2$. On expanding and simplifying this relation we deduce that \[ s^1-t^1-2\omega_j(s^2-t^2) -\omega_j^2(s^1 t^2-t^1 s^2)=0. \] A little manipulation demonstrates that both $(s^1,s^2)$ and $(t^1,t^2)$ lie on the complex line \[ \ell\mathbb{D}f\{(s^1,s^2)\in\mathbb{C}^2: (\omega_1 +\omega_2)s^1 -2\omega_1\omega_2 s^2 =2\}. \] However, $\ell$ does not meet $G$. For suppose that $(s^1,s^2)\in \ell\mathbb{C}ap G$. 
Then there exists $\begin{equation}ta\in\mathbb{D}$ such that \begin{equation}gin{align*} s^1&=\begin{equation}ta+\bar\begin{equation}ta s^2,\\ 2\omega_1\omega_2 s^2&= (\omega_1 +\omega_2)s^1-2 = (\omega_1 +\omega_2)(\begin{equation}ta+\bar\begin{equation}ta s^2)-2. \end{align*} On solving the last equation for $s^2$ we find that \[ s^2=-\bar\omega_1\bar\omega_2\frac{2-(\omega_1+\omega_2)\begin{equation}ta}{2-(\bar\omega_1+\bar\omega_2)\bar\begin{equation}ta}, \] whence $|s^2|=1$, contrary to the hypothesis that $(s^1,s^2)\in G$. Hence $\Phi$ is injective on $G$. \end{proof} \begin{equation}gin{remark} \mathbb{R}m $\Phi$ has an analytic extension to the set $\Ga\setminus\{(2\bar\omega_1,\bar\omega_1^2),(2\bar\omega_2,\bar\omega_2^2)\}$, where $\Ga$ is the closure of $G$ in $\mathbb{C}^2$. However this extension is {\em not} injective: it takes the constant value $(-\bar\omega_2,-\bar\omega_1)$ on a curve lying in $\partial G$. \end{remark} \begin{equation}gin{theorem}\label{purebalextremals} Let $\mathbb{D}e=(\la,v)$ be a purely balanced tangent to $G$ and let $\Phi_\omega$ solve $\mathbb{C}^{2\mathbb{T}imes 2}hbb{C}ar\mathbb{D}e$ for the two distinct points $\omega_1, \omega_2 \in\mathbb{T}$. Let $m_j$ be the automorphism of $\mathbb{D}$ such that $m_j\mathbb{C}irc \Phi_{\omega_j}$ is well aligned at $\mathbb{D}e$ for $j=1,2$ and let \begin{equation}q\label{defPhi2} \Phi=(\Phi^1,\Phi^2)=(m_1\mathbb{C}irc\Phi_{\omega_1},m_2\mathbb{C}irc \Phi_{\omega_2}):G\mathbb{T}o\mathbb{D}^2. \end{equation}q For every $t\in [0,1]$ and every function $\mathbb{C}^{2\mathbb{T}imes 2}hbb{T}heta$ in the Schur class of the bidisc the function \begin{equation}gin{align} \label{formextrem} F&=t\Phi^1+(1-t)\Phi^2+\notag \\ &\hspace*{1cm}t(1-t)(\Phi^1-\Phi^2)^2\frac{\mathbb{C}^{2\mathbb{T}imes 2}hbb{T}heta\mathbb{C}irc\Phi}{1-[(1-t)\Phi^1+t\Phi^2]\mathbb{C}^{2\mathbb{T}imes 2}hbb{T}heta\mathbb{C}irc\Phi} \end{align} is a well-aligned Carath\'eodory extremal function for $\mathbb{D}e$. 
\end{theorem} \begin{proof} By Lemma \ref{injective}, $\Phi$ maps $G$ injectively into $\mathbb{D}^2$. By choice of $m_j$, \[ (m_j\circ\Phi_{\omega_j})_*(\Delta)=(0, \mathrm{car}(\Delta)). \] Hence \[ \Phi_*(\Delta)= \left((0,0),\mathrm{car}(\Delta)(1,1)\right), \] which is tangent to the diagonal $\{(w,w):w\in\mathbb{D}\}$ of the bidisc. Since the diagonal is a complex geodesic in $\mathbb{D}^2$, we have \[ \mathrm{car}(\Phi_*(\Delta)) = \mathrm{car}(\Delta). \] As in Section \ref{royal}, we appeal to \cite[Subsection 11.6]{AgM2} to assert that, for every $t\in[0,1]$ and every function $\Theta$ in the Schur class of the bidisc, the function $f$, analytic on $\mathbb{D}^2$, given by \begin{equation}\label{formulaf} f(\la)= t\la^1+(1-t)\la^2+t(1-t)(\la^1-\la^2)^2\frac{ \Theta(\la)}{1-[(1-t)\la^1+t\la^2]\Theta(\la)} \end{equation} solves $\mathrm{Car}\,(\Phi_*(\Delta))$. For every such $f$ the function $ F := f\circ\Phi: G\to \mathbb{D}$ satisfies \[ F_*(\Delta) = (f\circ\Phi)_*(\Delta)=f_*(\Phi_*(\Delta))=(0,\mathrm{car}(\Delta)). \] Thus $F$ is a well-aligned Carath\'eodory extremal for $\Delta$. On writing out $F$ using equation \eqref{formulaf} we obtain equation \eqref{formextrem}. \end{proof} \begin{remark} \rm The range of $\Phi$ is a subset of $\mathbb{D}^2$ containing $(0,0)$ and is necessarily nonconvex, by virtue of a result of Costara \cite{cos04} to the effect that $G$ is not isomorphic to any convex domain. $\Phi(G)$ is open in $\mathbb{D}^2$, since the Jacobian determinant of $(\Phi_{\omega_1},\Phi_{\omega_2})$ at $(s^1,s^2)$ is \[ \frac{4(\omega_1-\omega_2)(1-\omega_1\omega_2 s^2)}{(2-\omega_1s^1)^2(2-\omega_2s^1)^2} \] which has no zero in $G$.
Carath\'eodory extremals $F$ given by equation \eqref{formulaf} have the property that the map $F\mathbb{C}irc \Phi^{-1}$ on $\Phi(G)$ extends analytically to a map in $\mathbb{D}(\mathbb{D}^2)$. There may be other Carath\'eodory extremals $\varphi$ for $\mathbb{D}e$ for which $\varphi\mathbb{C}irc\Phi^{-1}$ does not so extend. Accordingly we do not claim that the Carath\'eodory extremals described in Theorem \mathbb{R}ef{purebalextremals} constitute all extremals for a purely balanced tangent. \end{remark} \section{Relation to a result of L. Kosi\'nski and W. Zwonek}\label{relation} Our main result in Section \mathbb{R}ef{Uniq}, on the essential uniqueness of solutions of $\mathbb{C}^{2\mathbb{T}imes 2}hbb{C}ar\mathbb{D}e$ for purely unbalanced and exceptional tangents, can be deduced from \mathbb{C}ite[Theorem 5.3]{kos} and some known facts about the geometry of $G$. However, the terminology and methods of Kosi\'nski and Zwonek are quite different from ours, and we feel it is worth explaining their statement in our terminology. Kosi\'nski and Zwonek speak of left inverses of complex geodesics where we speak of Carath\'eodory extremal functions for \nd tangents. These are essentially equivalent notions. By a {\em complex geodesic} in $G$ they mean a holomorphic map from $\mathbb{D}$ to $G$ which has a holomorphic left inverse. Two complex geodesics $h$ and $k$ are {\em equivalent} if there is an automorphism $m$ of $\mathbb{D}$ such that $h=k\mathbb{C}irc m$, or, what is the same, if $h(\mathbb{D})=k(\mathbb{D})$. It is known (for example \mathbb{C}ite[Theorem A.10]{ay2004}) that, for every \nd tangent $\mathbb{D}e$ to $G$, there is a {\em unique} complex geodesic $k$ of $G$ up to equivalence such that $\mathbb{D}e$ is tangent to $k(\mathbb{D})$. A function $\varphi\in\mathbb{D}(G)$ solves $\mathbb{C}^{2\mathbb{T}imes 2}hbb{C}ar\mathbb{D}e$ if and only if $\varphi\mathbb{C}irc k$ is an automorphism of $\mathbb{D}$. 
Hence, for any complex geodesic $k$ and any \nd tangent $\Delta$ to $k(\mathbb{D})$, to say that $k$ has a unique left inverse up to equivalence is the same as to say that $\mathrm{Car}\,\Delta$ has an essentially unique solution. Kosi\'nski and Zwonek also use a different classification of types of complex geodesics (or equivalently tangent vectors) in $G$, taken from \cite{pz05}. There it is shown that every complex geodesic $k$ in $G$, up to composition with automorphisms of $\mathbb{D}$ on the right and of $G$ on the left, is of one of the following types. \begin{enumerate}[\rm (1)] \item \[ k(z)= (B(\sqrt{z})+B(-\sqrt{z}), B(\sqrt{z}) B(-\sqrt{z})) \] where $B$ is a non-constant Blaschke product of degree $1$ or $2$ satisfying $B(0)=0$; \item \[ k(z)= (z+m(z),zm(z)) \] where $m$ is an automorphism of $\mathbb{D}$ having no fixed point in $\mathbb{D}$. \end{enumerate} These types correspond to our terminology from \cite{aly2016} (or from Section \ref{5types}) in the following way. Recall that an automorphism $m$ of $\mathbb{D}$ is either the identity, elliptic, parabolic or hyperbolic, meaning that the set $\{z\in\mathbb{D}^-:m(z)=z\}$ consists of either all of $\mathbb{D}^-$, a single point of $\mathbb{D}$, a single point of $\mathbb{T}$ or two points in $\mathbb{T}$, respectively. \begin{enumerate} \item[\rm (1a)] If $B$ has degree $1$, so that $B(z)=cz$ for some $c\in\mathbb{T}$ then, up to equivalence, $k(z)=(0,-c^2z)$. These we call the {\em flat geodesics}. The general tangents to flat geodesics are the flat tangents described in Section \ref{5types}, that is $\Delta=\left((\beta+\bar\beta z,z),c(\bar\beta,1)\right)$ for some $\beta\in\mathbb{D}, \, z\in\mathbb{D}$ and nonzero $c\in\mathbb{C}$. \item[\rm (1b)] If $B(z)=cz^2$ for some $c\in\mathbb{T}$ then $k(z)= (2cz, c^2z^2)$.
Thus $k(\mathbb{D})$ is the royal variety $\mathbb{C}alr$, and the tangents to $k(\mathbb{D})$ are the royal tangents. \item[\mathbb{R}m (1c)] If $B$ has degree $2$ but is not of the form (1b), say $B(z)=cz(z-\al)/(1-\bar\al z)$ where $c\in\mathbb{T}$ and $\al\in\mathbb{D}\setminus \{0\}$, then \[ k(z)= \frac{\left(2c(1-|\al|^2)z, c^2z(z-\al^2)\mathbb{R}ight)}{1-\bar\al^2z}. \] Here $k(\mathbb{D})$ is not $\mathbb{C}alr$ but it meets $\mathbb{C}alr$ (at the point $(0,0)$). It follows that $k(\mathbb{D})$ is a purely unbalanced geodesic and the tangents to $k(\mathbb{D})$ are the purely unbalanced tangents. \item[\mathbb{R}m (2a)] If $m$ is a hyperbolic automorphism of $\mathbb{D}$ then $k(\mathbb{D})$ is a purely balanced geodesic and its tangents are purely balanced tangents. \item[\mathbb{R}m (2b)] If $m$ is a parabolic automorphism of $\mathbb{D}$ then $k(\mathbb{D})$ is an exceptional geodesic, and its tangents are exceptional tangents. \end{enumerate} With this description, Theorem 5.3 of \mathbb{C}ite{kos} can be paraphrased as stating that a complex geodesic $k$ of $G$ has a unique left inverse (up to equivalence) if and only if $k$ is of one of the forms (1c) or (2b). These are precisely the purely unbalanced and exceptional cases in our terminology, that is, the cases of tangents $\mathbb{D}e$ for which there is a unique $\omega\in\mathbb{T}$ such that $\Phi_\omega$ solves $\mathbb{C}^{2\mathbb{T}imes 2}hbb{C}ar\mathbb{D}e$, in agreement with our Theorem \mathbb{R}ef{ess1}. The authors prove their theorem with the aid of a result of Agler and McCarthy on the uniqueness of solutions of $3$-point Nevanlinna-Pick problems on the bidisc \mathbb{C}ite[Theorem 12.13]{AgM2}. They also use the same example from Subsection 11.6 of \mathbb{C}ite{AgM2} which we use for different purposes in Sections \mathbb{R}ef{royal} and \mathbb{R}ef{purelybalanced}. \begin{equation}gin{thebibliography}{1} \bibitem {aly2017} J. Agler, Z. A. Lykova and N. J. 
Young, A geometric characterization of the symmetrized bidisc, preprint. \bibitem{aly2016} J. Agler, Z. A. Lykova and N. J. Young, Geodesics, retracts, and the extension property in the symmetrized bidisc, 106 pages, to appear in {\em Memoirs of the American Mathematical Society}, arXiv:1603.04030 . \bibitem{AgM2} J. Agler and J. E. McCarthy, {\em Pick interpolation and Hilbert function spaces}, Graduate Studies in Mathematics {\bf 44}, American Mathematical Society, Providence, RI, 2002. \bibitem{ay2004} J. Agler and N. J. Young, The hyperbolic geometry of the symmetrised bidisc, {\em J. Geometric Analysis} {\bf 14} (2004) 375--403. \bibitem{AY2017} J. Agler and N. J. Young, Realization of functions on the symmetrized bidisc, {\em J. Math. Anal. Applic.} {\bf 453}(1) (2017) 227-- 240. \bibitem{bgk} H. Bart, I. C. Gohberg and M. A. Kaashoek, {\em Minimal factorization of matrix and operator functions}, Birkh\"auser Verlag, Basel, 1979, 277 pp. \bibitem{bhatta} T. Bhattacharyya, S. Pal and S. Shyam Roy, Dilations of $\Ga$-contractions by solving operator equations, {\em Advances in Mathematics} {\bf 230} (2012) 577--606. \bibitem{cos04} C.~Costara, The symmetrized bidisc and Lempert's theorem, {\em Bull. Lond. Math. Soc.} {\bf 36} ( 2004) 656--662. \bibitem{ez05} A. Edigarian and W. Zwonek, Geometry of the symmetrized polydisc, {\em Arch. Math. (Basel)} {\bf 84} (2005) 364--374. \bibitem {FHK2014} A.E. Frazho, S. ter Horst and M.A. Kaashoek, State space formulas for stable rational matrix solutions of a Leech problem, {\em Indagationes Math.} {\bf 25} (2014) 250--274. \bibitem{jp04} M. Jarnicki and P. Pflug, On automorphisms of the symmetrised bidisc, {\em Arch. Math. (Basel)} {\bf 83} (2004) 264--266. \bibitem{jp} M. Jarnicki and P. Pflug, {\em Invariant Distances and Metrics in Complex Analysis}, 2nd Extended Edition, De Gruyter, Berlin, 2013. \bibitem{KaavS2014} M.A. Kaashoek and F. 
van Schagen, The inverse problem for Ellis-Gohberg orthogonal matrix functions, {\em Integral Equ. Oper. Theory} {\bf 80} (2014) 527--555. \bibitem{kob98} S. Kobayashi, {\em Hyperbolic Complex Spaces}, Grundlehren der mathematischen Wissenschaften {\bf 318}, Springer Verlag 1998. \bibitem{kos} L. Kosi\'nski and W. Zwonek, Nevanlinna-Pick problem and uniqueness of left inverses in convex domains, symmetrized bidisc and tetrablock {\em J. Geom. Analysis} {\bf 26} (2016) 1863--1890. \bibitem{pz05} P.~Pflug and W.~Zwonek, Description of all complex geodesics in the symmetrized bidisc, {\em Bull. London Math. Soc.} {\bf 37} (2005) 575--584. \bibitem{sarkar} J. Sarkar, Operator theory on symmetrized bidisc, {\em Indiana Univ. Math. J. } {\bf 64} (2015) 847--873. \bibitem{tryb} M. Trybula, Invariant metrics on the symmetrized bidisc, {\em Complex Variables and Elliptic Equations} {\bf 60} (4) (2015) 559--565. \end{thebibliography} \end{document}
\mathfrak{m}athfrak{b}egin{document} \mathfrak{m}athfrak{t}itle[On Complete conformally flat submanifolds with nullity]{On Complete Conformally flat submanifolds with nullity in Euclidean space} \alphauthor{Christos-Raent Onti} \partialate{} \mathfrak{m}aketitle \mathfrak{m}athfrak{b}egin{abstract} In this note, we investigate conformally flat submanifolds of Euclidean space with positive index of relative nullity. Let $M^n$ be a complete conformally flat manifold and let $f\mathfrak{m}athfrak{c}olon M^n\mathfrak{m}athfrak{t}o \mathord{\mathbb R}^m$ be an isometric immersion. We prove the following results: (1) If the index of relative nullity is at least two, then $M^n$ is flat and $f$ is a cylinder over a flat submanifold. (2) If the scalar curvature of $M^n$ is non-negative and the index of relative nullity is positive, then $f$ is a cylinder over a submanifold with constant non-negative sectional curvature. (3) If the scalar curvature of $M^n$ is non-zero and the index of relative nullity is constant and equal to one, then $f$ is a cylinder over a $(n-1)$-dimensional submanifold with non-zero constant sectional curvature. \end{abstract} \renewcommand{\alpharabic{footnote}}{\fnsymbol{footnote}} \footnotetext{\emph{2010 Mathematics Subject Classification.} Primary 53B25, 53C40, 53C42.} \renewcommand{\alpharabic{footnote}}{\alpharabic{footnote}} \renewcommand{\alpharabic{footnote}}{\fnsymbol{footnote}} \footnotetext{\emph{Keywords.} Conformally flat submanifolds, index of relative nullity, scalar curvature} \renewcommand{\alpharabic{footnote}}{\alpharabic{footnote}} \mathfrak{s}ection{Introduction} A Riemannian manifold $M^n$ is said to be \emph{conformally flat} if each point lies in an open neighborhood conformal to an open subset of the Euclidean space $\mathord{\mathbb R}^n$. The geometry and topology of such Riemannian manifolds have been investigated by several authors from the intrinsic point of view. 
Some of the many papers are \mathfrak{m}athfrak{c}ite{cat16,ku49,ku50,cadjnd11,cahe06,no93,sy88}. Around 1919, Cartan \mathfrak{m}athfrak{c}ite{car17} initiated the investigation of such Riemannian manifolds from the submanifold point of view by studying the case of conformally flat Euclidean hypersurfaces (see also \mathfrak{m}athfrak{c}ite{mmf85,pin85}). In 1977, Moore \mathfrak{m}athfrak{c}ite{mo77} extended Cartan's result in higher (but still low) codimension (see also \mathfrak{m}athfrak{c}ite{df96,df99,mm78}). Recently, the author, in collaboration with Dajczer and Vlachos, investigated in \mathfrak{m}athfrak{c}ite{dov18} the case of conformally flat submanifolds with flat normal bundle in arbitrary codimension (see also \mathfrak{m}athfrak{c}ite{dote11}). In this short note, we address and deal with the following: {\nablaoindent{\mathfrak{m}athfrak{b}f Problem.}} {{\sf i}t Classify complete conformally flat submanifolds of Euclidean space with positive index of relative nullity and arbitrary codimension. } Recall that the {{\sf i}t index of relative nullity} at a point $x{\sf i}n M^n$ of a submanifold $f\mathfrak{m}athfrak{c}olon M^n\mathfrak{m}athfrak{t}o \mathord{\mathbb R}^m$ is defined as the dimension of the kernel of its second fundamental form $\alphalpha\mathfrak{m}athfrak{c}olon TM\mathfrak{m}athfrak{t}imes TM\mathfrak{m}athfrak{t}o N_fM$, with values in the normal bundle. The first result provides a complete answer in the case where the index of relative nullity is at least two, and is stated as follows: \mathfrak{m}athfrak{b}egin{theorem}\label{main2} Let $M^n$ be a complete, conformally flat manifold and let $f\mathfrak{m}athfrak{c}olon M^n\mathfrak{m}athfrak{t}o\mathord{\mathbb R}^m$ be an isometric immersion with index of relative nullity at least two at any point of $M^n$. Then $M^n$ is flat and $f$ is a cylinder over a flat submanifold. 
\end{theorem} The next result provides a complete answer in the case where the scalar curvature is non-negative. \mathfrak{m}athfrak{b}egin{theorem}\label{main1} Let $M^n$ be a complete, conformally flat manifold with non-negative scalar curvature and let $f\mathfrak{m}athfrak{c}olon M^n\mathfrak{m}athfrak{t}o\mathord{\mathbb R}^m$ be an isometric immersion with positive index of relative nullity. Then $f$ is a cylinder over a submanifold with constant non-negative sectional curvature. \end{theorem} Observe that there are complete conformally flat manifolds such that the scalar curvature is non-negative while the sectional curvature is not. Easy examples are the Riemannian products $M^n=\mathord{\mathbb S}^{n-m}\mathfrak{m}athfrak{t}imes\mathord{\mathbb H}^m,\ n\mathfrak{m}athfrak{g}eq 2m,$ where $\mathord{\mathbb S}^{n-m}$ and $\mathord{\mathbb H}^m$ are the sphere and the hyperbolic space of sectional curvature $1$ and $-1$, respectively. Finally, the next result provides a complete answer (both local and global) in the case where the scalar is non-zero and the index of relative nullity is constant and equal to one. \mathfrak{m}athfrak{b}egin{theorem}\label{main3} Let $M^n$ be a conformally flat manifold with non-zero scalar curvature and let $f\mathfrak{m}athfrak{c}olon M^n\mathfrak{m}athfrak{t}o\mathord{\mathbb R}^m$ be an isometric immersion with constant index of relative nullity equal to one. Then $f$ is locally either a cylinder over a $(n-1)$-dimensional submanifold with non-zero constant sectional curvature or a cone over a $(n-1)$-dimensional spherical submanifold with constant sectional curvature. Moreover, if $M^n$ is complete, then $f$ is globally a cylinder over a $(n-1)$-dimensional submanifold with non-zero constant sectional curvature. 
\end{theorem} \mathfrak{m}athfrak{b}egin{remarks}{\rm (I) If the ambient space form in Theorem \ref{main1} is replaced by the sphere $\mathord{\mathbb S}_c^m$ of constant sectional curvature $c$, then an intrinsic classification can be obtained, provided that ${\mathfrak{s}cal}(M^n)\mathfrak{m}athfrak{g}eq c(n-1)$. This classification follows from a result of Carron and Herzlich \mathfrak{m}athfrak{c}ite{cahe06}, since in this case $M^n$ turns out to have non-negative Ricci curvature. However, we do not obtain any (direct) information on the immersion $f$. \\[1mm] (II) If $f\mathfrak{m}athfrak{c}olon M^n\mathfrak{m}athfrak{t}o\mathord{\mathbb Q}_c^m$ is an isometric immersion of a conformally flat manifold into a space form of constant sectional curvature $c$, then one can prove the following: (i) If the index of relative nullity is at least two, then $M^n$ has constant sectional curvature $c$. In particular, if $f$ is also minimal, then $f$ is totally geodesic. (ii) If the index of relative nullity is constant and equal to one, then $f$ is a $1$-generalized cone (for the definition, see \mathfrak{m}athfrak{c}ite{dov18}) over an isometric immersion $F\mathfrak{m}athfrak{c}olon \mathord{\mathbb S}igma^{n-1}\mathfrak{m}athfrak{t}o \mathord{\mathbb Q}_{\mathfrak{m}athfrak{t}ilde c}^{m-1}$ into an umbilical submanifold of $\mathord{\mathbb Q}_c^m$. } \end{remarks} \mathfrak{m}athfrak{b}egin{notes} {\rm The special case of minimal conformally flat hypersurfaces $f\mathfrak{m}athfrak{c}olon M^n\mathfrak{m}athfrak{t}o \mathord{\mathbb Q}_c^{n+1},$ $n\mathfrak{m}athfrak{g}eq 4,$ was treated by do Carmo and Dajczer in \mathfrak{m}athfrak{c}ite{DCD} (without any additional assumption on the index of relative nullity), where they showed that these are actually generalized catenoids, extending that way a previous result due to Blair \mathfrak{m}athfrak{c}ite{Blair} for the case $c=0$. 
For the \mathfrak{m}athfrak{q}uotes{neighbor} class of Einstein manifolds one can prove that: any minimal isometric immersion $f\mathfrak{m}athfrak{c}olon M^n\mathfrak{m}athfrak{t}o \mathord{\mathbb Q}_c^m$ of an Einstein manifold with positive index of relative nullity is totally geodesic. A related result of Di Scala \mathfrak{m}athfrak{c}ite{discala}, in the case where the ambient space is the Euclidean one, states that: any minimal isometric immersion $f\mathfrak{m}athfrak{c}olon M^n\mathfrak{m}athfrak{t}o \mathord{\mathbb R}^m$ of a K\"{a}hler-Einstein manifold is totally geodesic. However, it is not yet known if the assumption on K\"{a}hler can be dropped (this was conjectured by Di Scala in the same paper). Of course, in some special cases the conjecture is true, as have already been pointed out in \mathfrak{m}athfrak{c}ite{discala}. Finally, we note that Di Scala's theorem still holds true if the K\"{a}hler (intrinsic) assumption is replaced by the (extrinsic) assumption on $f$ having flat normal bundle. This follows directly from N\"{o}lker's theorem \mathfrak{m}athfrak{c}ite{no90}, since, in this case, $f$ has homothetical Gauss map. } \end{notes} \mathfrak{s}ection{Preliminaries} In this section we recall some basic facts and definitions. Let $M^n$ be a Riemannian manifold and let $f\mathfrak{m}athfrak{c}olon M^n\mathfrak{m}athfrak{t}o \mathord{\mathbb R}^m$ be an isometric immersion. The \emph{index of relative nullity} $\nablau(x)$ at $x{\sf i}n M^n$ is the dimension of the \emph{relative nullity subspace} $\mathord{\mathbb D}elta(x)\mathfrak{su}bset T_xM$ given by $$ \mathord{\mathbb D}elta(x)=\{X{\sf i}n T_xM: \alphalpha(X,Y)=0\;\;\mathfrak{m}box{for all}\;\;Y{\sf i}n T_xM\}. 
$$ It is a standard fact that on any open subset where the index of relative nullity $\nu(x)$ is constant, the relative nullity distribution $x\mapsto \Delta(x)$ is integrable and its leaves are totally geodesic in $M^n$ and $\mathord{\mathbb R}^m$. Moreover, if $M^n$ is complete then the leaves are also complete along the open subset where the index reaches its minimum (see \cite{dajczer}). If $M^n$ splits as a Riemannian product $M^n=\Sigma^{n-k}\times \mathord{\mathbb R}^k$ and there is an isometric immersion $F\colon \Sigma^{n-k}\to \mathord{\mathbb R}^{m-k}$ such that $f=F\times {\rm id}_{\mathord{\mathbb R}^k}$, then we say that $f$ is a $k$-cylinder (or simply a cylinder) over $F$. The following is due to Hartman \cite{har70}; cf. \cite{dt}. \begin{theorem}\label{hartman} Let $M^n$ be a complete Riemannian manifold with non-negative Ricci curvature and let $f\colon M^n\to \mathord{\mathbb R}^m$ be an isometric immersion with minimal index of relative nullity $\nu_0>0$. Then $f$ is a $\nu_0$-cylinder. \end{theorem} A smooth tangent distribution $D$ is called {\it totally umbilical} if there exists a smooth section $\delta\in \Gamma(D^\perp)$ such that $$ \langle\nabla_X Y, T\rangle=\langle X,Y\rangle\langle\delta,T\rangle $$ for all $X,Y\in D$ and $T\in D^\perp$. The following is contained in \cite{dt}. \begin{proposition}\label{prop} Let $f\colon M^n\to \mathord{\mathbb R}^m$ be an isometric immersion of a Riemannian manifold with constant index of relative nullity $\nu=1$.
Assume that the conullity distribution $\mathord{\mathbb D}elta^\mathfrak{m}athfrak{p}erp$ is totally umbilical (respectively, totally geodesic). Then $f$ is locally a cone over an isometric immersion $F\mathfrak{m}athfrak{c}olon \mathord{\mathbb S}igma^{n-1}\mathfrak{m}athfrak{t}o\mathord{\mathbb S}^{m-1}\mathfrak{su}bset\mathord{\mathbb R}^m$ (respectively, a cylinder over an isometric immersion $F\mathfrak{m}athfrak{c}olon \mathord{\mathbb S}igma^{n-1}\mathfrak{m}athfrak{t}o\mathord{\mathbb R}^{m-1}\mathfrak{su}bset\mathord{\mathbb R}^m$). \end{proposition} We also need the following two well-known results; cf. \mathfrak{m}athfrak{c}ite{dt}. \mathfrak{m}athfrak{b}egin{proposition}\label{conflat} A Riemannian product is conformally flat if and only if one of the following possibilities holds: \mathfrak{m}athfrak{b}egin{enumerate}[(i)] {\sf i}tem One of the factors is one-dimensional and the other one has constant sectional curvature. {\sf i}tem Both factors have dimension greater than one and are either both flat or have opposite constant sectional curvatures. \end{enumerate} \end{proposition} \mathfrak{m}athfrak{b}egin{proposition}\label{conflat2} Let $M=M_1\mathfrak{m}athfrak{t}imes_\rho M_2$ be a warped product manifold. If $M_1$ has dimension one then $M$ is conformally flat if and only if $M_2$ has constant sectional curvature. \end{proposition} \mathfrak{s}ection{The proofs} Let $M^n$ be a conformally flat manifold and let $f\mathfrak{m}athfrak{c}olon M^n\mathfrak{m}athfrak{t}o \mathord{\mathbb R}^m$ be an isometric immersion. It is well-known that in this case the curvature tensor has the form $$ R(X,Y,Z,W) = L(X,W)\langleY,Z\rangle-L(X,Z)\langleY,W\rangle+L(Y,Z)\langleX,W\rangle-L(Y,W)\langleX,Z\rangle $$ in terms of the Schouten tensor given by \mathfrak{m}athfrak{b}e\label{shouten} L(X,Y)=\frac{1}{n-2}\left({\rm{Ric}}(X,Y)-\frac{s}{2(n-1)}\langleX,Y\rangle\right) \end{equation} where $s$ denotes the scalar curvature. 
In particular, the sectional curvature is given by \mathfrak{m}athfrak{b}e\label{seccur} K(X,Y)=L(X,X)+L(Y,Y) \end{equation} where $X,Y{\sf i}n TM$ are orthonormal vectors. A straightforward computation of the Ricci tensor using the Gauss equation \mathfrak{m}athfrak{b}e\label{eqgauss} R(X,Y,Z,W)=\langle\alphalpha(X,W),\alphalpha(Y,Z)\rangle-\langle\alphalpha(X,Z),\alphalpha(Y,W)\rangle \end{equation} yields \mathfrak{m}athfrak{b}e\label{ricci} {\rm{Ric}}(X,Y) =\langlenH,\alphalpha(X,Y)\rangle-\mathfrak{su}m_{j=1}^n\langle\alphalpha(X,X_j),\alphalpha(Y,X_j)\rangle \end{equation} where $X_1,\partialots,X_n$ is an orthonormal tangent basis. We obtain from \eqref{seccur} and \eqref{eqgauss} that \mathfrak{m}athfrak{b}e\label{dec2} L(X,X)+L(Y,Y)=\langle\alpha(X,X),\alpha(Y,Y)\rangle-\|\alpha(X,Y)\|^2 \end{equation} for any pair $X,Y{\sf i}n TM$ of orthonormal vectors. Using \eqref{shouten} it follows from \eqref{dec2} that \mathfrak{m}athfrak{b}e {\rm{Ric}}(X,X)+{\rm{Ric}}(Y,Y) = \frac{s}{n-1}+(n-2)(\langle\alpha(X,X),\alpha(Y,Y)\rangle-\|\alpha(X,Y)\|^2) \label{ric} \end{equation} for any pair $X,Y{\sf i}n TM$ of orthonormal vectors. Now, assume that $\nablau>0$ and choose a unit length $X{\sf i}n \mathord{\mathbb D}elta$. Using \eqref{ricci}, it follows from \eqref{ric} that \mathfrak{m}athfrak{b}e\label{ric1} {\rm{Ric}}(Y,Y) = \frac{s}{n-1} \end{equation} for all unit length $Y\mathfrak{m}athfrak{p}erp X$. {\nablaoindent {{\sf i}t Proof of Theorem \ref{main2}:}} It follows from \eqref{ricci} and \eqref{ric1} that $s=0$. Thus, it follows from \eqref{ric1} that $M^n$ is Ricci flat. Since $M^n$ is conformally flat we obtain that $M^n$ is flat. The desired result follows from Theorem \ref{hartman} and Proposition \ref{conflat}. \mathfrak{m}athfrak{q}ed {\nablaoindent {{\sf i}t Proof of Theorem \ref{main1}:}} It follows from \eqref{ric1} that ${\rm{Ric}}\mathfrak{m}athfrak{g}eq 0$. The desired result follows from Theorem \ref{hartman} and Proposition \ref{conflat}. 
\qed

\noindent {\it Proof of Theorem \ref{main3}:} It follows from \eqref{dec2}, \eqref{shouten} and \eqref{ricci} that
\begin{equation}\label{eq1}
L(Y,Y)=-L(X,X)=\frac{s}{2(n-1)(n-2)}=:h\ \ \text{and} \ \ L(X,Y)=0
\end{equation}
for any unit length vectors $X\in \Delta$ and $Y\in \Delta^\perp$. Moreover, we have that
\begin{equation}\label{eq2}
L(Y,Z)=0
\end{equation}
for any pair $Y,Z\in \Delta^\perp$ of orthonormal vectors. Indeed, if $Y$ and $Z$ are two such vectors then using \eqref{eq1} we get
\[
L(Y+Z,Y+Z)=2h,
\]
and \eqref{eq2} follows.

Now, since $M^n$ is conformally flat we have that $L$ is a Codazzi tensor. Thus
\[
(\nabla_Y L)(X,X)=(\nabla_X L)(Y,X)
\]
for all unit length $X\in \Delta$ and $Y\in \Delta^\perp$. It follows, using \eqref{eq1} and \eqref{eq2}, that
\[
Y(h)=0
\]
for all unit length $Y\in \Delta^\perp$. Therefore
\[
h=h(t)=h(\gamma(t)),\ t\in I,
\]
where $\gamma\colon I\subset \mathbb{R}\to M^n$ is a leaf of the nullity distribution $\Delta$, parametrized by arc length. Using again the fact that $L$ is a Codazzi tensor, we get
\[
(\nabla_{\gamma'(t)} L)(Y,Z)=(\nabla_Y L)(\gamma'(t),Z)
\]
for all unit length $Y,Z\in \Delta^\perp$, or equivalently,
\[
\langle Y,Z\rangle\langle \mathrm{grad}\log\sqrt{|h(t)|},\gamma'(t)\rangle=\langle\nabla_Y Z,\gamma'(t)\rangle
\]
for all unit length $Y,Z\in \Delta^\perp$, where we have used again equations \eqref{eq1} and \eqref{eq2}.
Thus, if the scalar curvature is constant, then $\Delta^\perp$ is totally geodesic and the desired result follows from Propositions \ref{prop} and \ref{conflat}. On the other hand, if the scalar curvature is not constant then $\Delta^\perp$ is totally umbilical and the desired result follows from Propositions \ref{prop} and \ref{conflat2}. Finally, if $M^n$ is complete then the result is immediate and the proof is complete. \qed

\begin{thebibliography}{lll}

\bib{Blair}{article}{
 author={Blair, D. E.},
 title={On a generalization of the catenoid},
 journal={Canad. J. Math.},
 volume={27},
 date={1975},
 pages={231--236},
}

\bib{DCD}{article}{
 author={do Carmo, M.},
 author={Dajczer, M.},
 title={Rotation hypersurfaces in spaces of constant curvature},
 journal={Trans. Amer. Math. Soc.},
 volume={277},
 date={1983},
 number={2},
 pages={685--709},
}

\bib{cahe06}{article}{
 author={Carron, G.},
 author={Herzlich, M.},
 title={Conformally flat manifolds with nonnegative Ricci curvature},
 journal={Compos. Math.},
 volume={142},
 date={2006},
 number={3},
 pages={798--810},
}

\bib{car17}{article}{
 author={Cartan, E.},
 title={La d\'{e}formation des hypersurfaces dans l'espace conforme r\'{e}el \`a $n \ge 5$ dimensions},
 language={French},
 journal={Bull. Soc. Math. France},
 volume={45},
 date={1917},
 pages={57--121},
}

\bib{cadjnd11}{article}{
 author={Catino, G.},
 author={Djadli, Z.},
 author={Ndiaye, C. B.},
 title={A sphere theorem on locally conformally flat even-dimensional manifolds},
 journal={Manuscripta Math.},
 volume={136},
 date={2011},
 number={1-2},
 pages={237--247},
}

\bib{cat16}{article}{
 author={Catino, G.},
 title={On conformally flat manifolds with constant positive scalar curvature},
 journal={Proc. Amer. Math. Soc.},
 volume={144},
 date={2016},
 number={6},
 pages={2627--2634},
}

\bibitem{dajczer} M. Dajczer, {\it ``Submanifolds and Isometric Immersions''}, Math. Lecture Ser. 13, Publish or Perish Inc. Houston, 1990.

\bib{dt}{book}{
 author={Dajczer, M.},
 author={Tojeiro, R.},
 title={Submanifold theory beyond an introduction},
 series={Universitext},
 publisher={Springer US},
 date={2019},
}

\bib{df96}{article}{
 author={Dajczer, M.},
 author={Florit, L.A.},
 title={On conformally flat submanifolds},
 journal={Comm. Anal. Geom.},
 volume={4},
 date={1996},
 number={1-2},
 pages={261--284},
}

\bib{df99}{article}{
 author={Dajczer, M.},
 author={Florit, L.},
 title={Euclidean conformally flat submanifolds in codimension two obtained as intersections},
 journal={Proc. Amer. Math. Soc.},
 volume={127},
 date={1999},
 number={1},
 pages={265--269},
}

\bibitem{dov18} M. Dajczer, C.-R. Onti and Th. Vlachos, {\it ``Conformally flat submanifolds with flat normal bundle''}. ArXiv e-prints (2018), available at {\sf https://arxiv.org/abs/1810.06968}.

\bib{mmf85}{article}{
 author={do Carmo, M.},
 author={Dajczer, M.},
 author={Mercuri, F.},
 title={Compact conformally flat hypersurfaces},
 journal={Trans. Amer. Math. Soc.},
 volume={288},
 date={1985},
 number={1},
 pages={189--203},
}

\bib{discala}{article}{
 author={Di Scala, A. J.},
 title={Minimal immersions of K\"{a}hler manifolds into Euclidean spaces},
 journal={Bull. London Math. Soc.},
 volume={35},
 date={2003},
 number={6},
 pages={825--827},
}

\bib{dote11}{article}{
 author={Donaldson, N.},
 author={Terng, C.-L.},
 title={Conformally flat submanifolds in spheres and integrable systems},
 journal={Tohoku Math. J. (2)},
 volume={63},
 date={2011},
 number={2},
 pages={277--302},
}

\bib{har70}{article}{
 author={Hartman, P.},
 title={On the isometric immersions in Euclidean space of manifolds with nonnegative sectional curvatures. II},
 journal={Trans. Amer. Math. Soc.},
 volume={147},
 date={1970},
 pages={529--540},
}

\bib{ku49}{article}{
 author={Kuiper, N. H.},
 title={On conformally-flat spaces in the large},
 journal={Ann. of Math. (2)},
 volume={50},
 date={1949},
 pages={916--924},
}

\bib{ku50}{article}{
 author={Kuiper, N. H.},
 title={On compact conformally Euclidean spaces of dimension $>2$},
 journal={Ann. of Math. (2)},
 volume={52},
 date={1950},
 pages={478--490},
}

\bib{mo77}{article}{
 author={Moore, J. D.},
 title={Conformally flat submanifolds of Euclidean space},
 journal={Math. Ann.},
 volume={225},
 date={1977},
 number={1},
 pages={89--97},
}

\bib{mm78}{article}{
 author={Moore, J. D.},
 author={Morvan, J.-M.},
 title={Sous-vari\'{e}t\'{e}s conform\'{e}ment plates de codimension quatre},
 language={French, with English summary},
 journal={C. R. Acad. Sci. Paris S\'{e}r. A-B},
 volume={287},
 date={1978},
 number={8},
 pages={A655--A657},
}

\bib{no90}{article}{
 author={N\"olker, S.},
 title={Isometric immersions with homothetical Gauss map},
 journal={Geom. Dedicata},
 volume={34},
 date={1990},
 number={3},
 pages={271--280},
}

\bib{no93}{article}{
 author={Noronha, M. H.},
 title={Some compact conformally flat manifolds with nonnegative scalar curvature},
 journal={Geom. Dedicata},
 volume={47},
 date={1993},
 number={3},
 pages={255--268},
}

\bib{pin85}{article}{
 author={Pinkall, U.},
 title={Compact conformally flat hypersurfaces},
 conference={
  title={Conformal geometry},
  address={Bonn},
  date={1985/1986},
 },
 book={
  series={Aspects Math., E12},
  publisher={Vieweg, Braunschweig},
 },
 date={1988},
 pages={217--236},
}

\bib{sy88}{article}{
 author={Schoen, R.},
 author={Yau, S.-T.},
 title={Conformally flat manifolds, Kleinian groups and scalar curvature},
 journal={Invent. Math.},
 volume={92},
 date={1988},
 number={1},
 pages={47--71},
}

\end{thebibliography}
\end{document}
\begin{document}
\pagestyle{plain}
\title{The ``Grothendieck to Lascoux'' conjecture}
\author{Victor Reiner}
\address{Dept.~of Mathematics, Univ.~of Minnesota, Minneapolis, MN 55455, USA}
\email{[email protected]}
\author{Alexander Yong}
\address{Dept.~of Mathematics, Univ.~of Illinois at Urbana-Champaign, Urbana, IL 61801, USA}
\email{[email protected]}
\date{February 18, 2021}
\maketitle
\begin{abstract}
This report formulates a conjectural combinatorial rule that positively expands Grothendieck polynomials into Lascoux polynomials. It generalizes one such formula expanding Schubert polynomials into key polynomials, and refines another one expanding stable Grothendieck polynomials.
\end{abstract}
\section{The open problem}
We set up the notation needed to state the problem, Conjecture~\ref{conj:main} below.
\subsection{Grothendieck and Lascoux polynomials}
Define operators $\partial_i, \pi_i$ on polynomials $f\in {\mathbb Z}[\beta][x_1,\ldots,x_n]$
\[\partial_i(f) =\frac{f-s_i f}{x_i-x_{i+1}} \mbox{\ and \ } \pi_i(f)=\partial_i((1+\beta x_{i+1})f)\]
where $s_{i}=(i\leftrightarrow i+1)$ is a simple transposition in the symmetric group $S_n$. The transposition $s_i$ acts on $f\in {\mathbb Z}[\beta][x_1,\ldots,x_n]$ by permuting $x_i$ and $x_{i+1}$.
\begin{Definition}[{A.~Lascoux-M.-P.~Sch\"utzenberger~\cite{LasSch2}}]
The $\beta$-\emph{Grothendieck polynomial} ${\mathfrak G}_w^{(\beta)}$ is recursively defined by the initial condition
\[ {\mathfrak G}^{(\beta)}_{w_0}=x_1^{n-1}x_2^{n-2}\cdots x_{n-1}\]
where $w_0$ is the \emph{longest permutation} that swaps $i \leftrightarrow n+1-i$, and then setting
\[{\mathfrak G}^{(\beta)}_w=\pi_i({\mathfrak G}^{(\beta)}_{ws_i}) \text{\ if $w(i)<w(i+1)$.}\]
\end{Definition}
\noindent The $\beta$ parameter was introduced by S.~Fomin--A.~N.~Kirillov~\cite{Fomin.Kirillov}; \cite{LasSch2} uses $\beta=-1$.
Define a further family of operators $\widetilde{\pi}_i$ on polynomials \emph{via} \[{\widetilde \pi}_i (f)=\partial_i(x_i(1+\beta x_{i+1})f).\] Also, let ${\sf Comp}$ be the set of \emph{(weak) compositions}, that is, $\alpha=(\alpha_1,\alpha_2,\ldots)\in {\mathbb N}^{\infty}$ having finitely many nonzero entries, where $\mathbb{N}=\{0,1,2,\ldots\}$. \begin{Definition}[{A.~Lascoux~\cite{Las03}}] The $\beta$-\emph{Lascoux polynomials} $\Omega_{\alpha}^{(\beta)}$ are again defined recursively. For $\alpha\in {\sf Comp}$, define \[\Omega^{(\beta)}_{\alpha}= \begin{cases} x_1^{\alpha_1} x_2^{\alpha_2} x_{3}^{\alpha_3}\cdots &\text{\ if $\alpha_1\geq \alpha_2\geq\alpha_3\geq\ldots$},\\ {\widetilde \pi}_i (\Omega^{(\beta)}_{\alpha s_i} ) &\text{ \ if $\alpha_i<\alpha_{i+1}$.} \end{cases} \] \end{Definition} The nomenclature ``Lascoux polynomial'' first appears in C.~Monical's \cite{Monical:skyline}. We also refer to A.~Lascoux's \cite{Lascoux}, A.~N.~Kirillov's \cite{Kirillov}, C.~Monical-O.~Pechenik-D.~Searles' \cite{MPS}, O.~Pechenik-D.~Searles' \cite{PS19}, and the references therein for more about both families of polynomials. \subsection{Increasing tableaux and $K$-jeu de taquin}\label{sec:Kjdt} We need some notions from \cite{Thomas.Yong:K}. \begin{Definition} An \emph{increasing tableaux} of shape $\nu/\lambda$ is a filling of $\nu/\lambda$ using $\{1,2,\ldots,|\nu/\lambda|\}$ such that the labels of $T$ strictly increase along rows and columns. \end{Definition} Let ${\sf INC}(\nu/\lambda)$ denote the set of all increasing tableaux of shape $\nu/\lambda$. \begin{Definition} A \emph{short ribbon} $R$ is a skew shape without a $2\times 2$ subshape, where each row and column has at most two boxes, and each box is filled with one of two symbols, but adjacent boxes are filled differently. Two boxes lie in the same \emph{component} of $R$ if there is a path between them passing through boxes that are adjacent vertically or horizontally. 
\end{Definition} \begin{Definition} Define ${\tt switch}(R)$ to be the same short ribbon as $R$ but where, in each non-singleton component, each box is filled with the other symbol. \end{Definition} For example: \[R=\tableau{&&&{\circ}\\&{\circ}&{\bullet}\\{\circ}&{\bullet}} \mbox{ \ \ \ \ \ \ \ \ \ \ \ \ \ \ ${\tt switch}(R)= \tableau{&&&{\circ}\\&{\bullet}&{\circ}\\{\bullet}&{\circ}}$.} \] In what follows, we assume $\nu/\lambda$ is contained in an ambient rectangle $\Lambda$. \begin{Definition} An \emph{outer corner} of a skew shape $\nu/\lambda$ is a maximally northwest box of $\Lambda/\nu$. \end{Definition} Given $T\in {\sf INC}(\nu/\lambda)$, consider a \emph{set} of outer corners $\{x_i\}$ filled with $\bullet$. Let $m$ be the maximum value label appearing in $T$. Define ${\tt revKjdt}_{\{x_i\}}(T)$ as follows: let $R_m$ be the short ribbon consisting of $\bullet$ and $m$. Apply ${\tt switch}(R_m)$. Now let $R_{m-1}$ be the short ribbon consisting of $\bullet$ and $m-1$ and apply ${\tt switch}(R_{m-1})$. Repeat until one applies ${\tt switch}(R_1)$, and then erase the $\bullet$ entries. For example, if $\nu/\lambda=(3,2,1)/(2,1)$ is contained in $\Lambda=(3,3,3,3)$ and we might have \[T=\tableau{&&{2}\\&{2}&\bullet \\{1}&\bullet\\ \bullet} \mapsto \tableau{&&\bullet\\&{\bullet}&{2} \\{1}&2 \\ \bullet} \mapsto \tableau{&&\bullet\\&{\bullet}&{2} \\{\bullet }&2 \\ 1} =\tableau{&&\\&&2\\&2\\1}={\tt revKjdt}_{\{x_i\}}(T). \] \begin{Definition} \emph{A reverse $K$-rectification} of $T$ is any sequence of ${\tt revKjdt}$-slides giving a reverse straight tableau ${\tt revKrect}(T)$. 
\end{Definition} Continuing the previous example, one can perform the following ${\tt revKjdt}$-slides: \[\tableau{&&\\&&2\\&2&{\bullet}\\1} \ \ \ \tableau{&&\\&&\bullet\\&\bullet&{2}\\1} \ \ \ \tableau{&&\\&&\\&&{2}\\1&\bullet} \ \ \ \tableau{&&\\&&\\&&{2}\\\bullet&1} \ \ \ \tableau{&&\\&&\\&&{2}\\ &1&\bullet} \ \ \ \tableau{&&\\&&\\&&{\bullet}\\ &1&2} \ \ \ \tableau{&&\\&&\\&&\\ &1&2} \] to conclude ${\tt revKrect}(T)=\tableau{1&2}$. Unlike textbook jeu de taquin, ${\tt revKrect}$ might depend on the choice of ${\tt revKjdt}$-slides used (see \cite[Example~1.6]{Thomas.Yong:K}, \cite[Example~3.4]{Buch.Samuel}). Given $P\in {\sf INC}(\nu)$ we define the \emph{left key} $K_{-}(P)$ to be a tableau, using ${\tt revKjdt}$, as follows. By definition, the first columns of $P$ and $K_{-}(P)$ agree. Assume that the first $\ell$ columns of $K_{-}(P)$ have been determined. Apply reverse rectification of the increasing tableau $P^{(\ell+1)}$ comprised of the first $\ell+1$ columns of $P$, inside the smallest rectangle $\Lambda^{(\ell+1)}$ that $P^{(\ell+1)}$ fits inside. \emph{For specificity, we define ${\tt revKrect}$ by using the leftmost outer corner for each intermediate ${\tt revKjdt}$-slide.} Let $C^{(\ell+1)}$ be the leftmost column in the reverse rectification of $P^{(\ell+1)}$. Then $C^{(\ell+1)}$ (after upward-justification) is the $(\ell+1)$-st column of $K_{-}(P)$. Repeating this, the end result is $K_{-}(P)$. \begin{Example} \label{left-key-example} The reader can check that if \[P=\tableau{1 & 2 & 3 & 5 & 7\\ 2 & 4 & 5 & 6\\ 4 & 6 } \text{ \ then \ } K_{-}(P)=\tableau{1 & 1 & 1 & 1 & 2 \\ 2 & 2 & 2 & 2\\ 4 & 4} .\] The first two columns are easily seen from the definition, as reverse rectification does nothing. 
To compute the third column one works out this ${\tt revKrect}$: \[\tableau{1 & 2 &3\\ 2 & 4 & 5 \\ 4 &6 &\bullet}\to \tableau{1 & 2 &3\\ 2 & 4 & 5 \\ 4 &\bullet & 6 }\to \tableau{1 & 2 &3\\ 2 & \bullet & 5 \\ \bullet &4 & 6}\to \tableau{1 & \bullet &3\\ \bullet & 2 & 5 \\ 2 &4 & 6}\to \tableau{\bullet & 1 &3\\ 1 & 2 & 5 \\ 2 &4 & 6}. \] The first column $C^{(3)}=\tableau{1 \\ 2}$ of the rightmost tableau is the third column of $K_{-}(P)$. \qed \end{Example} Notice in our example, $K_{-}(P)$ has the same (straight) shape as $P$. Also, $K_{-}(P)$ is a \emph{key}: the set of labels in column $i$ are contained in the set of labels in column $i-1$ for $i\geq 2$. These properties always hold, and are proven in Section~\ref{sec:aproof}. Let ${\sf content}(K_{-}(P))$ be the usual content of a semistandard tableau; here ${\sf content}(K_{-}(P))=(4,5,0,2)$. \subsection{Reduced and Hecke words} Let $\ell(w)$ be the \emph{Coxeter length} of $w\in S_n$, that is, \[\ell(w)=\#\{1\leq i<j\leq n: w(i)>w(j)\}.\] \begin{Definition} A sequence $(i_1,i_2,\ldots,i_{\ell(w)})$ is a \emph{reduced word} for $w$ if $s_{i_1}s_{i_2}\cdots s_{i_{\ell(w)}}=w$. \end{Definition} Let ${\sf Red}(w)$ denote the set of reduced words for $w$. \begin{Definition} The \emph{Hecke monoid} ${\mathcal H}_{n}$ is generated by $u_1,u_2,\ldots,u_{n-1}$, subject to: \begin{align*} u_i^2 & \equiv u_i\\ u_i u_j & \equiv u_j u_i \text{\ \ \ \ if $|i-j|>1$}\\ u_i u_{i+1} u_i & \equiv u_{i+1} u_i u_{i+1} \end{align*} \end{Definition} \begin{Definition} A sequence $(i_1,i_2,\ldots,i_N) \in {\mathbb N}^N$ is a \emph{Hecke word} for $w\in S_n$ if \[ u_{i_1} u_{i_2}\cdots u_{i_N}\equiv u_{a_1} u_{a_2}\cdots u_{a_{\ell(w)}}, \text{\ for some $(a_1,\ldots, a_{\ell(w)})\in {\sf Red}(w)$.} \] \end{Definition} \begin{Definition} For any tableau $P$, we will read off a \emph{word} denoted ${\sf word}(P)$, concatenating its rightmost column read top-to-bottom, then its next-to-rightmost column, etc. 
For example, the tableau $P$ in Example~\ref{left-key-example} has ${\sf word}(P)=(7,5,6,3,5,2,4,6,1,2,4)$. \end{Definition} \subsection{The ``Grothendieck to Lascoux'' conjecture} This is the open problem of this report: \begin{Conjecture} \label{conj:main} \[{\mathfrak G}^{(\beta)}_w=\sum_{P} \beta^{\# {\sf boxes}(P)-\ell(w)}\Omega^{(\beta)}_{{\sf content}(K_{-}(P))}\] where $P$ is any straight-shape increasing tableau such that ${\sf word}(P)$ is a Hecke word for $w$. \end{Conjecture} \begin{Example} If $w=31524$ the increasing tableaux and the left keys are respectively \begin{equation} \label{eqn:Nov2abc} P = \tableau{1 & 2 &4 \\ 3}, \ \ \ \tableau{1 & 2\\ 3 &4}, \ \ \ \tableau{1 & 2 & 4 \\ 3 &4}; \ \ K_{-}(P)=\tableau{1 & 1 &1 \\ 3}, \ \ \ \tableau{1 & 1\\ 3 & 3}, \ \ \ \tableau{1 & 1 & 1 \\ 3 &3}. \end{equation} For instance, if $P$ is the rightmost increasing tableaux, then ${\sf word}(P)=(4,2,4,1,3)$. Now \[u_4 u_2 u_4 u_1 u_3\equiv u_2 u_4^2 u_1 u_3\equiv u_2 u_4 u_1 u_3,\] and $s_2 s_4 s_1 s_3 =31524$. Conjecture~\ref{conj:main} predicts ${\mathfrak G}^{(\beta)}_{31524}=\Omega_{301}^{(\beta)}+\Omega_{202}^{(\beta)} + \beta \, \Omega_{302}^{(\beta)}$.\qed \end{Example} Conjecture~\ref{conj:main} generalizes one formula \cite{Reiner.Shimo} and refines another \cite{BKSTY}. It has been exhaustively checked (with computer assistance) for $n\leq 7$ (and spot-checked for $n=8,9$). It says the $\mathfrak{G}_w^{(\beta)}$ to $\Omega^{(\beta)}_{\alpha}$ expansion is positive; this is also open. The rest of this report surveys known and related results that motivate Conjecture~\ref{conj:main}. The proof that the shapes of $P$ and $K_{-}(P)$ agree, and that $K_{-}(P)$ is a key, is in Section~\ref{sec:aproof}. 
\section{History of the problem}\label{sec:2} During the preparation of \cite{BKSTY}, M.~Shimozono privately conjectured to the second author that ${\mathfrak G}_w^{(\beta)}$ expands positively in the $\Omega_{\alpha}^{(\beta)}$'s; he also suggested ideas towards a rule. Conjecture~\ref{conj:main} was formulated in September 2011 during a visit of the first author to UIUC. There are two limiting cases of Conjecture~\ref{conj:main}, as explained now. \subsection{The $\beta=0$ specialization and stable-limit symmetry} \begin{Definition} The \emph{key polynomial} (or type $A$ \emph{Demazure character}) is $\kappa_{\alpha}:=\Omega_{\alpha}^{(0)}$. \end{Definition} References about key polynomials include \cite{LasSch90, Reiner.Shimo, Lascoux}. A tableau formula for $\kappa_{\alpha}$ is in \cite{LasSch90} (see also \cite{Reiner.Shimo}). From the definitions, \begin{equation} \label{eqn:deformationNov3} \Omega_{\alpha}^{(\beta)}=\kappa_{\alpha}+\sum_{k>0}\beta^k p_k, \end{equation} where $p_k$ is a homogeneous polynomial in $x_1,x_2,\ldots$ of degree $|\alpha|+k$, where $|\alpha|=\sum_{i\geq 1}\alpha_i$. \begin{Definition}\label{def:Schubert} The \emph{Schubert polynomial}\footnote{It represents the class of a Schubert variety $X_w$ in the flag variety $GL_n/B$ under Borel's isomorphism (see \cite{Fulton}). The \emph{Grothendieck polynomial} ${\mathfrak G}_w:={\mathfrak G}_w^{(-1)}$ similarly represents the Schubert structure sheaf ${\mathcal O}_{X_w}$ in the Grothendieck ring $K^0(GL_n/B)$ of algebraic vector bundles on $GL_n/B$. This explains the ``combinatorial $K$-theory'' nomenclature \cite{Buch:CKT}.} is ${\mathfrak S}_w:={\mathfrak G}^{(0)}_w$. \end{Definition} A combinatorial rule for ${\mathfrak G}_w^{(\beta)}$ as a sum of ${\mathfrak S}_v$'s is given by C.~Lenart's \cite{Lenart99}. All the aforementioned polynomial families (Lascoux, Key, Grothendieck, Schubert) are ${\mathbb Z}[\beta]$-linear bases of ${\mathbb Z}[\beta][x_1,x_2,\ldots]$. 
There are symmetric versions of these polynomials. \begin{Definition} The $\beta$-\emph{stable Grothendieck polynomial} is \[G^{(\beta)}_w(x_1,x_2,\ldots)=\lim_{n\to\infty}{\mathfrak G}^{(\beta)}_{1^n\times w}(X),\] where $(1^n\times w)(i)=i$ if $1\leq i\leq n$ and $(1^n\times w)(i)=w(i)+n$ if $i>n$. \end{Definition} \begin{Definition}\label{def:Stanley} The \emph{stable Schubert polynomial} is $F_w:=G_w^{(0)}$. This is also known as the \emph{Stanley symmetric polynomial}. \end{Definition} \begin{Definition} A permutation $w$ is \emph{Grassmannian at position $k$} if $w(i)< w(i+1)$ for $i\neq k$. To such $w$, define a partition $\lambda=\lambda(w)$ by $\lambda_i=w(k-i+1)-(k-i+1)$ for $1\leq i\leq k$. \end{Definition} \begin{Definition} A \emph{set-valued semistandard Young tableaux} $T$ of shape $\lambda$ is a filling of the boxes of $\lambda$ with nonempty sets such that if one chooses a singleton from each set, the result is a semistandard Young tableaux (row weakly increasing and column strict). \end{Definition} \begin{Theorem}[\cite{Buch}] Let $w$ be a Grassmannian permutation of shape $\lambda$. Then \[ G^{(\beta)}_{\lambda}:=G^{(\beta)}_w=\sum_{T}\beta^{\#{\sf labels}(T)-|\lambda|}x^T, \] where the sum is over set-valued semistandard Young tableaux $T$ of shape $\lambda$, and $x^T:=\prod_{i\geq 1} x_i^{\#i\in T}$. \end{Theorem} \begin{Definition} The \emph{Schur function} is $s_{\lambda}:=G^{(0)}_{\lambda}$. 
\end{Definition} Summarizing, one has a commutative diagram \begin{equation} \label{KDemazure-square} \begin{array}{ccc} \Omega_\alpha^{(\beta)} & \longrightarrow & \kappa_\alpha \\ \downarrow & & \downarrow \\ G_{\lambda(\alpha)}^{(\beta)} & \longrightarrow & s_{\lambda(\alpha)} \\ \end{array} \end{equation} with horizontal arrows indicating $\beta=0$ specialization, and vertical arrows called \emph{stabilization}: for the right vertical arrow, let $\lambda(\alpha)$ be the sorting of $\alpha$, then if $N>n$, \[s_{\lambda(\alpha)}(x_1,\ldots,x_n)=\kappa_{(0^N,\alpha)}(x_1,\ldots,x_n,0,0,\ldots \ ).\] \subsection{Monomial expansion formulas} \begin{Theorem}[\cite{Fomin.Kirillov}]\label{grothform} \[{\mathfrak G}^{(\beta)}_{w}(X)=\sum_{({\bf a},{\bf i})} \beta^{N-\ell(w)}x^{\bf i}\] where the sum is over all pairs of sequences $({\bf a},{\bf i})$ (called compatible sequences) such that \begin{itemize} \item[(a)] ${\bf a}=(a_1,a_2,\cdots,a_N)$ is a Hecke word for $w$; \item[(b)] ${\bf i}=(i_1,i_2,\cdots,i_N)$ has $1\leq i_1\leq i_2\leq\ldots\leq i_N$ \item[(c)] $i_j\leq a_j$; and \item[(d)] $a_j\leq a_{j+1}\implies i_j<i_{j+1}$. \end{itemize} \end{Theorem} \begin{Theorem}[\cite{Fomin.Kirillov}]\label{stablegrothform} \[G_w^{(\beta)}=\sum_{({\bf a},{\bf i})} \beta^{N-\ell(w)}x^{\bf i}\] where $({\bf a},{\bf i})$ satisfies (a), (b) and (d) above. \end{Theorem} Therefore, one has a commutative diagram, with the same arrows \begin{equation} \label{Grothendieck-square} \begin{array}{ccc} \mathfrak{G}_w^{(\beta)}\overset{{\rm Thm~\ref{grothform}}}{=}\sum_{({\bf a},{\bf i})} \beta^{N-\ell(w)} x^{\bf i}& \longrightarrow & \Schub_w=\sum_{({\bf a},{\bf i})} x^{\bf i}\\ \downarrow & & \downarrow \\ G_w^{(\beta)}\overset{{\rm Thm~\ref{stablegrothform}}}{=}\sum_{({\bf a},{\bf i})} \beta^{N-\ell(w)} x^{\bf i} & \longrightarrow & F_w=\sum_{({\bf a},{\bf i})} x^{\bf i}. 
\\ \end{array} \end{equation} In view of Definitions~\ref{def:Schubert} and~\ref{def:Stanley}, the expressions in the right column sum over $({\bf a},{\bf i})$ with ${\bf a} \in {\sf Red}(w)$, versus sums over Hecke words ${\bf a}$ for $w$ in their left column counterparts. The diagram \eqref{Grothendieck-square} specializes when $w=w(\lambda)$ is Grassmannian, giving \begin{equation} \label{Grassmannian-square} \begin{array}{ccc} G^{(\beta)}_\lambda(x_1,\ldots,x_n) & \longrightarrow & s_\lambda(x_1,\ldots,x_n)\\ \downarrow & & \downarrow \\ G^{(\beta)}_\lambda & \longrightarrow & s_\lambda. \\ \end{array} \end{equation} \subsection{Prior expansion formulas} Conjecture~\ref{conj:main} generalizes a relationship between the Schubert and key polynomials. \begin{Theorem}[\cite{LS89, Reiner.Shimo}]\label{thm:Schubtokey} \[{\mathfrak S}_w=\sum_{P} \kappa_{{\sf content}(K_{-}(P))}\] where the sum is over all increasing tableaux $P$ such that ${\sf word}(P)\in {\sf Red}(w)$. \end{Theorem} To be precise, in the formulation given in \cite[Theorem~4]{LS89}, the description of the ``left nil key'' $K_{-}(P)$ differs from our definition. We are asserting (proof omitted) that in the case of the $P$ in Theorem~\ref{thm:Schubtokey}, the two definitions agree. This is because ${\tt revKjdt}$ can be used to compute the insertion tableau of \emph{Hecke insertion} \cite{BKSTY} which specializes to \emph{Edelman-Greene insertion} \cite{EG}; see \cite{TY:Advapp}. Theorem~\ref{thm:Schubtokey} is the non-symmetric version of the following result: \begin{Theorem}[\cite{fomin.greene:noncommutative}]\label{thm:FGEG} Let $a_{w,\lambda}=\#\{P\in {\sf INC}(\lambda): {\sf word}(P)\in {\sf Red}(w)\}$. Then \[F_w=\sum_{\lambda} a_{w,\lambda}s_{\lambda}.\] \end{Theorem} The next result generalizes Theorem~\ref{thm:FGEG}. Conjecture~\ref{conj:main} is its non-symmetric version: \begin{Theorem}[\cite{BKSTY}]\label{thm:BKSTY} Let $b_{w,\lambda}=\#\{P\in {\sf INC}(\lambda)\!:\! 
{\sf word}(P)\text{ is a Hecke word for }w\}$. Then \[G^{(\beta)}_{w}=\sum_{\lambda} \beta^{|\lambda|-\ell(w)} \ b_{w,\lambda} G_{\lambda}^{(\beta)}.\] \end{Theorem} \begin{Example}\label{exa:31524} $G_{31524}^{(\beta)}=G_{31}^{(\beta)}+G_{22}^{(\beta)}+\beta G_{32}^{(\beta)}$. This is witnessed by the $P$ tableaux of (\ref{eqn:Nov2abc}). Notice that Conjecture~\ref{conj:main} subdivides the witnessing tableaux $P$ for $b_{w,\lambda}$ according to ${\sf content}(K_{-}(P))$. It is in this sense that Conjecture~\ref{conj:main} is a refinement of Theorem~\ref{thm:BKSTY}.\qed \end{Example} In conclusion, Conjecture~\ref{conj:main} captures some known facts, as expressed in this diagram \begin{equation} \label{tableau-expansion-square} \begin{array}{ccc} \mathfrak{G}_w^{(\beta)} \overset{\mathrm{Conj~\ref{conj:main}}}{=}\sum_P \beta^{|{\sf shape}(P)|-\ell(w)} \Omega_{{\sf content}(K_-(P))}^{(\beta)} & \longrightarrow & \Schub_w \overset{\mathrm{Thm~\ref{thm:Schubtokey}}}{=} \sum_P \kappa_{{\sf content}(K_-(P))} \\ \downarrow & & \downarrow \\ G_w^{(\beta)} \overset{\mathrm{Thm~\ref{thm:BKSTY}}}{=}\sum_P \beta^{|{\sf shape}(P)|-\ell(w)} G_{{\sf shape}(P)}^{(\beta)} & \longrightarrow &F_w \overset{\mathrm{Thm~\ref{thm:FGEG}}}{=} \sum_P s_{{\sf shape}(P)}.\\ \end{array} \end{equation} Here ${\sf shape}(P)$ is the partition of $P$. The horizontal, vertical maps are as before. \section{Further discussion} \subsection{Formulas for Lascoux polynomials} Combinatorial rules for the Lascoux polynomials are in V.~Buciumas--T.~Scrimshaw--K.~Weber \cite{Scrimshaw}. Another rule, generalizing the Kohnert moves of \cite{Kohnert}, was conjectured in \cite{Ross.Yong}.\footnote{The conjecture is accidentally misstated there. 
See the corrected version \url{https://faculty.math.illinois.edu/~ayong/polynomials.Seminaire.revision.2017.pdf} which is consistent with the 2011 report by C.~Ross \url{https://faculty.math.illinois.edu/~ayong/student_projects/Ross.pdf}.} Also, see the skyline conjectural rule of C.~Monical \cite{Monical:skyline}. Finally, \cite[Theorem~5]{Reiner.Shimo} gives an alternative formula for $\kappa_{\alpha}$ in terms of compatible sequences; we do not know a generalization of this formula to $\Omega_{\alpha}^{(\beta)}$. \subsection{Warning about stable-limits} The results of Section~\ref{sec:2} suggest combinatorial properties for stable-limit polynomials will hold for their non-symmetric versions. This is not always true. S.~Fomin-C.~Greene proved the following result (\emph{cf.} C.~Lenart's \cite{Lenart00}): \begin{Theorem}[\cite{fomin.greene:noncommutative}] \label{thm:FGthing} \begin{equation} \label{eqn:FGthingexp} G_w^{(\beta)}=\sum_{\lambda} \beta^{|\lambda|-\ell(w)}d_{w,\lambda}s_{\lambda} \end{equation} where $d_{w,\lambda}$ counts tableaux $P$ of shape $\lambda$ that are row strict and column weakly increasing, such that ${\sf word}(P)$ is a Hecke word for $w$. \end{Theorem} Thus, using the Grassmannian permutation $w=s_4 s_1 s_2 s_3 = 23514$, \[G_{2,1,1}^{(\beta)}=s_{2,1,1}+\beta(3s_{2,1,1,1}+s_{2,2,1})+\cdots.\] If one expands $\Omega^{(\beta)}_{\alpha}$ in the keys (the non-symmetric analogue (\ref{eqn:FGthingexp}); see (\ref{KDemazure-square})), positivity fails: \[\Omega^{(\beta)}_{1,0,2,1}=\kappa_{1,0,2,1}+\beta (2\kappa_{1,1,2,1}+\kappa_{2,0,2,1}+\kappa_{1,2,2} -\kappa_{2,1,2})+\cdots.\] \subsection{Proof that $K_{-}(P)$ is a key of the same shape as $P$}\label{sec:aproof} We first show ${\sf shape}(K_{-}(P))={\sf shape}(P)$. In the notation of $K_{-}(P)$'s description, it suffices to argue that the length $b$ of $C^{(\ell+1)}$ equals the length $t$ of the $\ell+1$ (\emph{i.e.}, rightmost) column of $P^{(\ell+1)}$. 
To see this, consider the general situation of an increasing tableau $T$ contained in an $r\times s$ dimension rectangle $\Lambda$, and a second increasing tableau in $\Lambda$ that is complementary to $T$. Recall the notion of ${\tt Kinfusion}$ defined in \cite[Section~3]{Thomas.Yong:K}. In fact, \begin{equation} \label{eqn:Feb9abc} {\tt Kinfusion}(T,U)=(A,B) \text{\ where $A={\tt Krect}(U)$ and $B={\tt revKrect}(A)$.} \end{equation} The rectification $A$ uses the inner corners defined by $T$. Similarly the reverse rectification $B$ uses the outer corners defined by $U$. Given $V\in {\sf INC}(\nu/\lambda)$ define ${\sf LDS}(V)$ to be the length of the longest decreasing subsequence of the left to right, bottom to top, row reading word of $V$. This is true: \begin{Theorem}[{\cite[Theorem~6.1]{Thomas.Yong:K}, \emph{cf.} \cite[Corollary~6.8]{Buch.Samuel}}]\label{thm:LDS} ${\sf LDS}$ is invariant under $K$-theoretic (reverse) jeu de taquin slides. \end{Theorem} Now, suppose $t$ and $u$ are the lengths of the $s$-th (possibly empty) column of $T$ and $U$, respectively. Similarly, let $a$ and $b$ be the length of the first (leftmost) columns of $A$ and $B$, respectively. Thus $t+u=a+b=r$. By Theorem~\ref{thm:LDS}, $u={\sf LDS}(U)={\sf LDS}(A)=a$. Thus $b=t$. The result follows from (\ref{eqn:Feb9abc}) and setting $T=P^{(\ell+1)}$ and $U$ being any complementary increasing tableau to $T$ inside the smallest rectangle $\Lambda^{(\ell+1)}$ that $P^{(\ell+1)}$ sits inside. To see that $K_{-}(P)$ is a key we use an argument of G.~Orelowitz: Since we choose the leftmost outer corner at each slide of ${\tt revKrect}$, when computing $C^{(\ell+1)}$ we begin by computing ${\tt revKrect}(P^{(\ell)})$ as a partial ${\tt revKrect}$ of $P^{(\ell+1)}$. At this point, $C^{(\ell)}$ is the leftmost column of this partial {\tt revKrect}. 
Thus, when completing the ${\tt revKrect}(P^{(\ell+1)})$, by the definition of ${\tt revKjdt}$, the entries of $C^{(\ell+1)}$ are contained in those of $C^{(\ell)}$, as desired. \qed The above shape argument does not depend on the specific choice of ${\tt revKrect}$ used at each stage of the definition of $K_{-}(P)$. We suspect this choice does not affect $K_{-}(P)$ being a key, however, the choice we use (suggested by G.~Orelowitz) makes the proof easy. \end{document}
\begin{document} \begin{abstract} For a finite graph $F$ and a value $p \in [0,1]$, let $I(F,p)$ denote the largest $y$ for which there is a sequence of graphs of edge density approaching $p$ so that the induced $F$-density of the sequence approaches $y$. In this short note, we show that for all $F$ on at least three vertices and $p \in (0,1)$, the binomial random graph $G(n,p)$ has induced $F$-density strictly less than $I(F,p).$ This provides a negative answer to a problem posed by Liu, Mubayi and Reiher. \end{abstract} \maketitle \section{Introduction} For a finite labeled graph $G$ with vertex set $V(G)$ and edge set $E(G)$, recall that the edge density of $G$ is given by $\rho(G) = |E(G)|/ \binom{|V(G)|}{2}\,.$ Given another finite labeled graph $F$, let \[N(F,G) := \left|\{\varphi : V(F) \hookrightarrow V(G) : (a,b) \in E(F) \iff (\varphi(a),\varphi(b)) \in E(G)\} \right| \] be the number of induced copies of $F$ in $G$ and define the induced $F$-density of $G$ to be \[\rho(F,G) := \frac{N(F,G)}{(|V(G)|)_{|V(F)|}}\] where we write $(x)_k := x(x-1)\cdots(x-(k-1))$ for the falling factorial. Finally, define the maximum induced $F$-density at edge density $p \in [0,1]$ via \begin{equation*} I(F,p) := \sup\left\{y : \exists~\{G_n\}_{n\geqslant 1}, \lim_{n\to\infty} |V(G_n)| = \infty, \lim_{n \to \infty} \rho(G_n) = p, \lim_{n \to \infty} \rho(F,G_n) =y\right\}\,. \end{equation*} Informally, $I(F,p)$ is the largest induced $F$-density among large graphs of edge density approaching $p$. The maximum value of $I(F,p)$ over $p \in [0,1]$ is exactly the inducibility of $F$, introduced by Pippenger and Golumbic \cite{pippenger1975inducibility}. 
Linearity of expectation shows that the expected induced $F$-density in the binomial random graph $G(n,p)$ is precisely \[\operatorname{rand}(F,p) := p^{|E(F)|}(1-p)^{\binom{|V(F)|}{2} - |E(F)|}\,.\] By basic concentration estimates, if we set $G_n$ to be an instance of $G(n,p)$ for each $n$, then we almost-surely have $\rho(G_n) \to p$ and $\rho(F,G_n) \to \operatorname{rand}(F,p)$. As such, we always have $\operatorname{rand}(F,p) \leqslant I(F,p).$ Even-Zohar and Linial \cite{even2015note} suggested exploring the performance of random constructions in maximizing the inducibility of $F$. In particular, they left open whether for $F$ given by the disjoint union of a path of length $3$ and an isolated vertex, the inducibility is achieved (in the limit) by $G(n,3/10)$. Perhaps suggesting that binomial random graphs can be optimal inducers in some examples, Liu, Mubayi and Reiher asked ``an easier question'' \cite[Problem~1.6]{liu2023feasible} whether there is a graph $F$ and $p \in (0,1)$ so that $I(F,p) = \operatorname{rand}(F,p)$\footnote{We note that Liu, Mubayi and Reiher work with unlabeled graphs rather than labeled graphs, but this only changes the quantities $N(F,G)$ by a factor depending only on $F$.}. In this short note we provide a negative answer to this question. \begin{theorem}\label{thm:main} For each finite labeled graph $F$ with $|V(F)| \geqslant 3$ and for all $p \in (0,1)$, \[I(F,p) > \operatorname{rand}(F,p).\] \end{theorem} We observe that if $|V(F)| \leqslant 2$ then for all $G$, $\rho(F,G)$ is a function solely of the edge density $\rho(G)$.

Our approach is to work in the limiting setting of \emph{graphons} rather than sequences of finite graphs. Once we define the appropriate notions such as edge density and induced subgraph density for graphons, we algorithmically construct a perturbation to the graphon corresponding to $G(n,p)$ such that the perturbed graphon achieves a higher induced $F$-density than $G(n,p)$. 
Perhaps surprisingly, our perturbation (\cref{prop:perturbative-choice}) completely ignores all information about $F$ except for $|V(F)|$ and the parity of $|E(F)|$; in particular, this shows that one can ``beat'' $G(n,p)$ for counts of induced subgraphs for all graphs on a fixed number of vertices and parity of number of edges simultaneously. Consider a finite labeled graph $F$. Identify its vertex set $V(F)$ with $[m]$ and write $E$ for its edge set. Let $\overline{E} := \binom{[m]}{2} \setminus E$ be the set of non-edges of $F$. Recall that a \emph{graphon} is a symmetric measurable function $W:[0,1]^2 \to [0,1]$. For a graphon $W$, the edge density is given by \begin{equation*} \rho(W) = \int_{[0,1]^2}W(x_1,x_2)\,dx_1\,dx_2 \end{equation*} and the induced density of $F$ in $W$ is given by \begin{equation*} \rho_F(W) = \int_{[0,1]^m} \prod_{e \in E} W(x_{e_1},x_{e_2}) \prod_{f \in \overline{E}}(1 - W(x_{f_1},x_{f_2})) \,dx_1 dx_2\cdots dx_m\,. \end{equation*} We note that the constant graphon $W_p \equiv p$ is the limit of the random graphs $G(n,p)$ and that $\rho_F(W_p) = \operatorname{rand}(F,p)$. It follows from standard considerations (e.g.~\cite[Lemma~2.4]{lovasz2006limits}) that $I(F,p)$ can be recast as an optimization problem over graphons. \begin{fact}\label{fact:graphons} For every finite labeled graph $F$ and $p \in [0,1]$ we have \[I(F,p) = \sup_W\{ \rho_F(W) : \rho(W) = p\}\,. \] \end{fact} Our approach is to construct a small perturbation of $W_p \equiv p$ of edge density zero that bumps up the induced density of $F$. More precisely, in the next section, we will establish the following proposition, which immediately implies \cref{thm:main} by \cref{fact:graphons}. 
\begin{proposition}\label{prop:construct-perturbation} For every finite labeled graph $F$ with $|V(F)|\geqslant 3$ and $p \in (0,1)$, there exists a symmetric measurable function $\Delta = \Delta(F,p):[0,1]^2 \to \mathbb R$ such that: \begin{itemize} \item $W_p + \Delta : [0,1]^2 \to [0,1]$, \item $\rho(W_p + \Delta) = \rho(W_p) = p$, and \item $\rho_F(W_p + \Delta) > \rho_F(W_p) = \operatorname{rand}(F,p)$. \end{itemize} \end{proposition} In contrast to \cref{thm:main}, a flag algebra computation by Even-Zohar and Linial \cite[Table 2]{even2015note} strongly suggests that there is a graph on five vertices for which the maximum inducibility is achieved by the random bipartite graph $G(n,n,5/6)$. Curiously, there seems to be only one graph on five vertices for which the maximum inducibility is likely achieved by a random bipartite graph. An interesting problem is to understand for which graphs this phenomenon can occur. \begin{problem}\label{problem} For which graphs $F$ and densities $p \in (0,1)$ is $I(F,p)$ achieved at random bipartite graphs? \end{problem} One potential direction to a partial solution to \cref{problem} would be to eliminate some family of graphs $F$ by a perturbative approach similar to the proof of \cref{thm:main}. \section{Proof of Proposition~\ref{prop:construct-perturbation}} Let $\Delta:[0,1]^2 \to \mathbb R$ be a symmetric, measurable function. For a finite labeled graph $H$, define \[\Phi_H(\Delta) = \int_{[0,1]^{|V_H|}} \prod_{e \in E(H)}\Delta(x_{e_1},x_{e_2}) \,d\mathbf{x}\,. \] Note that if $H_0$ and $H_1$ are isomorphic, then $\Phi_{H_0}(\Delta) = \Phi_{H_1}(\Delta)$ for all $\Delta$. In the case when $\Delta$ is a graphon, the function $\Phi_H$ counts the density of (not necessarily induced) copies of $H$ in $\Delta$. Throughout this section, we fix a finite labeled graph $F$. 
We identify its vertex set $V(F)$ with $[m]$ and write $E$ for the set of its edges and $\overline{E}$ for the set of its non-edges. \begin{lemma}\label{lem:expansion} For a symmetric function $\Delta:[0,1]^2 \to \mathbb R$, we have the expansion \[\rho_F(W_p + \Delta) = \operatorname{rand}(F,p) +\sum_{H} c_{H,F}(p) \Phi_H(\Delta),\] where the sum is over non-empty labeled subgraphs of the complete graph $K_m$ and $c_{H,F}(p)$ are polynomials in $p$ depending only on $H$ and $F$. Further, $c_{K_m,F} = (-1)^{|\overline{E}|}$. \end{lemma} \begin{proof} This follows from writing \[\rho_F(W_p+\Delta) = \int_{[0,1]^m} \prod_{e \in E} (p + \Delta(x_{e_1},x_{e_2})) \prod_{f \in \overline{E}}(1 - p - \Delta(x_{f_1},x_{f_2})) \,d \mathbf{x}\] and expanding the products. \end{proof} The key component in the proof of \cref{prop:construct-perturbation} is the following. \begin{proposition}\label{prop:perturbative-choice} For each $m \in \mathbb{N}$ and $t \in \mathbb R$, there is a bounded symmetric measurable function $\Delta:[0,1]^2 \to \mathbb R$ such that $\Phi_{K_m}(\Delta) = t$ and for all $\emptyset \subsetneq H \subsetneq K_m$ we have $\Phi_H(\Delta) = 0$. \end{proposition} \begin{proof}[Proof of \cref{prop:construct-perturbation} given \cref{prop:perturbative-choice}] Let $\Delta_0$ be the bounded symmetric function guaranteed by \cref{prop:perturbative-choice} with $t = (-1)^{|\overline{E}|}$. Since $p \in (0,1)$ and $\Delta_0$ is bounded, there exists $R > 0$ large enough so that $|\Delta_0|/R \leqslant \min\{p,1-p\}/2$. Let $\Delta = \Delta_0/R$. Then we note that $W:= W_p + \Delta$ is a graphon with \[\rho(W) = \rho_{K_2}(W) = \rho_{K_2}(W_p) + \rho_{K_2}(\Delta) = \rho_{K_2}(W_p) = p\] and by \cref{lem:expansion}, \[\rho_F(W) = \operatorname{rand}(F,p) + (-1)^{|\overline{E}|}\cdot t \cdot R^{-\binom{m}{2}} = \operatorname{rand}(F,p) + R^{-\binom{m}{2}}\,. 
\qedhere\] \end{proof} It remains to prove \cref{prop:perturbative-choice}. Note that if $H$ is the union of vertex disjoint graphs $H_1$ and $H_2$, then $\Phi_{H}(\Delta) = \Phi_{H_1}(\Delta)\Phi_{H_2}(\Delta)$. Therefore, it suffices to prove \cref{prop:perturbative-choice} with the second condition holding for all \emph{connected} $\emptyset \subsetneq H \subsetneq K_m$. Let $\mathcal{G}$ be the set of connected subgraphs of $K_m$ up to isomorphism; let $N = |\mathcal{G}|$ and $M = \sum_{H \in \mathcal{G}} |V_H|$. Linearly order the elements of $\mathcal{G}$ as $H_1,\ldots,H_N$ where if we have $H_i \subset H_j$ then $i \geqslant j$; as such, we have that $H_1 = K_m$. For any vector $\mathbf{a} \in \mathbb R^N$, we will construct a weighted labeled graph $G_{\mathbf{a}}$ on $M$ vertices, which will be the disjoint union of weighted copies of $(H_i)_{i \in [N]}$, as follows: for each graph $H_i$, we fix one edge arbitrarily and assign it weight $a_i$, and subsequently assign all other edges in $H_i$ to have weight $1$. The graph $G_{\mathbf{a}}$ is the disjoint union of these weighted graphs. Let $\Delta_{\mathbf{a}}: [0,1]^2 \to \mathbb R$ be the symmetric function associated to the weighted graph $G_{\mathbf{a}}$, where given a finite labeled weighted graph $H$, we associate a symmetric function $\Delta_H: [0,1]^2 \to \mathbb R$ to it as follows: break $[0,1]$ into $|V_H|$ intervals $I_1,\ldots,I_{|V_H|}$ of equal length and in the box $I_{a} \times I_b$ put the value equal to the weight of the edge $\{a,b\}$ in $H$. Set $\mathbf{e}_j$ to be the unit vector in $\mathbb R^{N}$ that has a $1$ in coordinate $j$ and $0$ in all other coordinates. We first note a fact that follows immediately by definition. 
\begin{fact}\label{fact:diagonal} For each $j \in [N]$ there is a constant $C_j > 0$ so that for all $a \in \mathbb R$ we have $\Phi_{H_j}(\Delta_{a\mathbf{e}_j}) = C_j a\,.$ \end{fact} The next fact follows from the assumption that if $i < j$ then $H_i \nsubseteq H_j$. \begin{fact}\label{fact:triangular} Let $\mathbf{a}, \widehat{\mathbf{a}} \in \mathbb R^{N}$ differ only in coordinate $j$. Then for all $i < j$ we have $\Phi_{H_i}(\Delta_{\mathbf{a}}) = \Phi_{H_i}(\Delta_{\widehat{\mathbf{a}}})\,.$ \end{fact} We are now ready to prove \cref{prop:perturbative-choice}. \begin{proof}[Proof of \cref{prop:perturbative-choice}] We will iteratively construct a sequence of vectors $\mathbf{a}^{(1)},\dots, \mathbf{a}^{(N)}$ with the following properties: \begin{itemize} \item $\mathbf{a}^{(1)} = s_1 \mathbf{e}_1$, $\mathbf{a}^{(j+1)} = \mathbf{a}^{(j)} + s_{j+1} \mathbf{e}_{j+1}$ for $s_1,\dots, s_N\in \mathbb R$, \item $\Phi_{H_1}(\Delta_{\mathbf{a}^{(j)}}) = t$ for all $j \in [N]$, and \item $\Phi_{H_i}(\Delta_{\mathbf{a}^{(j)}}) = 0$ for all $j \in [N]$ and $1 < i \leqslant j$. \end{itemize} Notice that for any such sequence, $\Delta_{\mathbf{a}^{(N)}}$ satisfies the conclusion of \cref{prop:perturbative-choice}. \paragraph{\emph{Initialization}} By \cref{fact:diagonal}, we may choose $s_1 \in \mathbb R$ so that for $\mathbf{a}^{(1)} = s_1 \mathbf{e}_1$ we have $\Phi_{H_1}(\Delta_{\mathbf{a}^{(1)}}) = t$. This is the only constraint required at this stage. \paragraph{\emph{Iteration}} Suppose that for some $j\geqslant 1$, we have constructed a sequence $\mathbf{a}^{(1)},\dots,\mathbf{a}^{(j)}$ satisfying the above properties. We set $\mathbf{a}^{(j+1)} = \mathbf{a}^{(j)} + s_{j+1}\mathbf{e}_{j+1}$, for $s_{j+1} \in \mathbb R$ to be chosen momentarily. By construction, the first property above is satisfied. 
Moreover, by \cref{fact:triangular}, for all $i\leqslant j$, we have that for any choice of $s_{j+1} \in \mathbb R$, \[\Phi_{H_i}(\Delta_{\mathbf{a}^{(j+1)}}) = \Phi_{H_i}(\Delta_{\mathbf{a}^{(j)}}).\] Therefore, it suffices to choose $s_{j+1} \in \mathbb R$ such that \[\Phi_{H_{j+1}}(\Delta_{\mathbf{a}^{(j+1)}}) = 0.\] Since $H_{j+1}$ is connected and $G_{\mathbf{a}^{(j)}}$ is vertex disjoint from $G_{\mathbf{e}_{j+1}}$, it follows that for any set $E_{j+1}$ satisfying $\emptyset \subsetneq E_{j+1} \subsetneq E(H_{j+1})$ we have \[\int_{[0,1]^{|V_{H_{j+1}}|}}\prod_{e\in E_{j+1}}\Delta_{\mathbf{a}^{(j)}}(x_{e_1},x_{e_2})\prod_{f \in E(H_{j+1})\setminus E_{j+1}}\Delta_{\mathbf{e}_{j+1}}(x_{f_1}, x_{f_2})d\mathbf{x} = 0,\] which implies that \begin{align*} \Phi_{H_{j+1}}(\Delta_{\mathbf{a}^{(j+1)}}) &= \Phi_{H_{j+1}}(\Delta_{\mathbf{a}^{(j)}}) + \Phi_{H_{j+1}}(\Delta_{s_{j+1}\mathbf{e}_{j+1}}) \\ &= \Phi_{H_{j+1}}(\Delta_{\mathbf{a}^{(j)}})+ s_{j+1}C_{j+1}, \end{align*} where the last equality uses \cref{fact:diagonal}. Setting $s_{j+1} = -\Phi_{H_{j+1}}(\Delta_{\mathbf{a}^{(j)}})/C_{j+1} \in \mathbb R$ completes the iterative step. \end{proof} \section*{Acknowledgments} The authors thank Emily Cairncross and Dhruv Mubayi for introducing this problem to us. M.M.\ is supported in part by NSF grant DMS-2137623. \end{document}
\begin{document} \title{RISQ - reduced instruction set quantum computers} \begin{abstract} \noindent Candidates for quantum computing which offer only restricted control, {\it e.g.}, due to lack of access to individual qubits, are not useful for {\it general purpose} quantum computing. We present concrete proposals for the use of systems with such limitations as RISQ - {\it reduced instruction set} quantum computers and devices - for simulation of quantum dynamics, for multi-particle entanglement and squeezing of collective spin variables. These tasks are useful in their own right, and they also provide experimental probes for the functioning of quantum gates in pre-mature proto-types of quantum computers. \end{abstract} \section{Introduction} Notwithstanding dedicated theoretical and experimental efforts, progress in practical implementation of quantum computing is not advancing rapidly. Quantum computing is based on the superposition principle which, when applied to an information carrying system, suggests that parallel processing of a large number of states, {\it e.g.}, to be identified with inputs to a function, is possible. Only recently it was realized that unitary evolution of superposition states followed by measurements, allowed by quantum theory, provides computational powers, exceeding the one of classical computers \cite{shor,grover}. Since then many strategies for practical quantum computing have been investigated. Due to the coincidence of few-qubit quantum computing and ingredients of the advanced field of optimal control \cite{control}, already exercised extensively in molecular magnetic resonance spectroscopy, researchers in that field have been able to rapidly implement a number of theoretical quantum computing proposals \cite{nmr}, but the molecular systems are not promising for larger scale computation. 
Ingredients from the past years' successful experimental control of atoms and single quantized field modes \cite{qfm,qfm2}, trapped ions \cite{ions}, donor spin states in solids \cite{kane}, quantum dots \cite{dots} and Josephson junctions \cite{josephson} have been tailored to yield proposals for single qubit and two-qubit operations, scalable, in principle, to quantum computation with an arbitrary number of qubits. Experimental research groups now study these proposals, and it is clear that we will see much progress in the coming years, but also that quantum computing is not going to be easy. The development of quantum computers and the progress of our research in quantum computing, is further hampered by the fact that a small quantum computer is of little practical use, and so is a large one which only ``gets the answer almost right" (in contrast to one that ``gets the correct answer, but only sometimes"). We suggest in this paper to identify applications of quantum computers with reduced capabilities. We shall use the name RISQ computers (Reduced Instruction Set Quantum computers) for such devices, and we present examples of RISQ computers which may be used to solve quantum problems, much in the spirit of Feynman's proposal for quantum computing \cite{feynman}. It was suggested by Lloyd \cite{lloyd} that the restriction of physics problems due to locality and symmetries makes such a computer potentially much less demanding to realize in practice than the general purpose quantum computer. Indeed, we shall show that the reduced capabilities of our RISQ systems may just coincide with these physical restrictions, so that they do not present obstacles to ``Feynman quantum computing". We point out that for atoms and ions, entanglement produced by RISQ mechanisms may improve spectroscopic resolution, atomic clocks, and length and frequency standards. We believe that RISQ ideas will soon lead to operative and useful devices. We shall focus on practical proposals. 
In Section 2, we present a scheme for quantum gates and multi-particle entanglement in ion traps, which may be applied for full scale quantum computing, but which may also be applied in a RISQ version without experimental access to individual ions in the trap. In Section 3, we discuss quantum computing with neutral atoms in optical lattices in a RISQ version without access to the individual atoms, useful for simulations of anti-ferromagnetism and for improvement of atomic clocks beyond the fundamental projection noise limit. Section 4 concludes the paper with an optimistic view on quantum information processing as a tool in physics. \section{Ion trap quantum computers} \subsection{General purpose quantum computing in ion traps} At low temperatures trapped ions freeze into a crystal where the Coulomb repulsion among the ions equilibrates the confining force from the trapping potential. The vibrations of the ions are strongly coupled due to the Coulomb interaction, and in the harmonic oscillator approximation they form a set of collective vibrational modes. One may excite one of these modes by tuning a laser to one of the upper or lower sidebands of the ions, {\it i.e.}, by choosing the frequency of the laser equal to the resonance frequency of an internal transition in an ion plus or minus the vibrational frequency. The laser is then on resonance with an excitation of the internal transition and a simultaneous change in the vibrational motion. This coupling of internal and external degrees of freedom has been extensively used for precise control of the quantum state of trapped ions \cite{ions}, and in 1995 Cirac and Zoller proposed that the ion trap can be used for quantum computing \cite{cirac}. In the ion trap quantum computer a qubit is represented by the internal states of an ion. Long lived states, like for instance hyperfine structure states, are preferred. 
Single qubit rotation and two qubit gates are achieved by focusing a laser on each ion and by exploiting the collective vibrations for interaction between the ions. In the original proposal \cite{cirac} the system is restricted to the joint motional ground state of the ions. By tuning a laser to a sideband, a vibration is excited if the ion irradiated is in a certain internal state. Upon subsequent laser irradiation of another ion, the internal state of that ion is changed only if the vibrational motion is excited. At the end of the resulting two-qubit gate the vibrational excitation is removed and additional gates may subsequently be implemented. Under the assumptions of perfect access to the ions and complete absence of decoherence, the trapped ions can be used to compute any mathematical function, and since the ions can be initially set to a superposition of all register states, one simultaneously obtains the evaluation of all function values---the magic parallelism of quantum computing. By electron shelving \cite{shelving}, the state of each qubit can be read out very effectively at the end of the calculation, if one can distinguish fluorescence from the different ions in the trap. \subsection{Bichromatic excitation scheme} We now describe our proposal \cite{bic1} for the efficient production of a two-qubit Hamiltonian like $\sigma_{y,i}\sigma_{y,j}$, where the Pauli matrices acting on individual ions $i$ and $j$ represent the two-level qubit systems, {\it e.g.}, with $|0\rangle_i$ and $|1\rangle_i$ being the eigenstates of $\sigma_{z,i}$. A Hamiltonian proportional to $\sigma_{y,i}$ provides a rotation between the two states $|0\rangle_i$ and $|1\rangle_i$, and products of such operators provide conditional operations which suffice to build a general purpose quantum computer. 
We illuminate the two ions of interest with light of two different frequencies, $\omega_{1,2}=\omega_{eg} \pm \delta$, where $\omega_{eg}$ is the internal state transition frequency, and $\delta$ is a detuning, not far from the trap frequency $\nu$. In Fig.~\ref{detunings}, we illustrate the action of such a bichromatic laser field on the state of the two ions of interest. As shown in the figure, the initial and final states $|ggn\rangle$ and $|een\rangle$, separated by $2\omega_{eg}=\omega_1+\omega_2$, are resonantly coupled, and so are the degenerate states $|egn\rangle$ and $|gen\rangle$, where the first (second) letter denotes the internal state $e$ or $g$ of the $i^{th}$ ($j^{th}$) ion and $n$ is the quantum number for the relevant vibrational mode of the trap. These resonant couplings lead to an effective Hamiltonian of the form $\sigma^+_i\sigma^+_j+\sigma^-_i\sigma^-_j- \sigma^+_i\sigma^-_j-\sigma^-_i\sigma^+_j \propto \sigma_{y,i}\sigma_{y,j}$. \begin{figure} \caption{Energy level diagram for two ions with quantized vibrational motion illuminated with bichromatic light. The one photon transitions indicated in the figure are not resonant, $\delta \neq \nu$, so only the two-photon transitions shown from $|ggn\rangle$ to $|een\rangle$ and from $|egn\rangle$ to $|gen\rangle$ are resonant.} \label{detunings} \end{figure} \noindent The restriction of the dynamics to the resonantly coupled states applies in two interesting limits: \subsubsection{Weak fields} We can choose the fields so weak and the detuning from the sideband so large that the intermediate states with $n\pm 1$ photons are not populated in the process. It turns out \cite{bic1,bic3} that as long as the ions remain in the Lamb-Dicke regime, {\it i.e.}, their spatial excursions are restricted to a small fraction of the wavelength of the exciting radiation, the internal state transition is insensitive to the vibrational quantum number $n$. 
This is due to interference between the interaction paths: The transition via an upper sideband excitation $|n+1\rangle$ has a strength of $n+1$ ($\sqrt{n+1}$ from raising and $\sqrt{n+1}$ from lowering the vibrational quantum number), and the transition via $|n-1\rangle$ yields a factor of $n$. Due to opposite signs of the intermediate state energy mismatch, the terms interfere destructively, and the $n$ dependence disappears from the coupling. The coherent evolution of the internal atomic state is thus insensitive to the vibrational quantum numbers, and it may be observed with ions in any superposition or mixture of vibrational states, even if the ions exchange vibrational energy with a surrounding reservoir. The control of the thermal motion is a great difficulty in ion trap experiments, and the tolerance to vibrations is a major asset of our bichromatic proposal. In the RISQ section below, we show that the bichromatic gate can also be applied with interesting results without individual access to the ions in the trap, which removes another technical complication for experiments. \subsubsection{Strong fields} In the Lamb-Dicke limit with lasers detuned by $\pm\delta$ our bichromatic interaction Hamiltonian can be written in the interaction picture with respect to the atomic and vibrational Hamiltonian \begin{equation} \label{interaction} H_{\mathrm{int}}= -\sqrt{2}\eta\Omega J_y [x\cos(\nu-\delta)t +p\sin(\nu-\delta)t], \end{equation} where we have introduced the dimensionless position and momentum operators for the centre-of-mass vibrational mode $x=\frac{1}{\sqrt{2}}(a+a^\dagger)$ and $p=\frac{i}{\sqrt{2}}(a^\dagger-a)$, and where we have introduced the collective internal state observable $J_y = \frac{\hbar}{2} (\sigma_{y,i}+\sigma_{y,j})$ of the two ions illuminated. $\Omega$ is the Rabi frequency of the field-atom coupling, and $\eta$ is the Lamb-Dicke parameter. 
The exact propagator for the Hamiltonian~(\ref{interaction}) can be represented by the ansatz \begin{equation} \label{u} U(t)={\rm e}^{-iA(t)J_y^2}{\rm e}^{-iF(t)J_y x}{\rm e}^{-iG(t)J_y p}, \end{equation} where the Schr{\"o}dinger equation $i\frac{d}{dt}U(t)=HU(t)$ leads to the expressions $F(t)=-\sqrt{2}\eta\Omega\int_0^t \cos((\nu-\delta)t')dt', \ G(t)=-\sqrt{2}\eta\Omega\int_0^t \sin((\nu-\delta)t')dt'$, and $A(t)=\sqrt{2}\eta\Omega\int_0^t F(t')\sin((\nu-\delta)t')dt'$. If $F(t)$ and $G(t)$ both vanish after a period $\tau$, the propagator reduces to $U(\tau)={\rm e}^{-iA(\tau)J_y^2}$ at this instant, {\it i.e.}, the vibrational motion is returned to its original state, be it the ground state or any vibrationally excited state, and we are left with an internal state evolution which is {\it independent} of the external vibrational state \cite{footnote}. Note that $(\sigma_y)^2=1$ implies that $J_y^2 = \frac{\hbar^2}{4}(2+2\sigma_{y,i}\sigma_{y,j})$, yielding precisely the interaction that we need. The timing so that $G(\tau)$ and $F(\tau)$ vanish allows faster gate operation than in Section 2.2.1, because we tolerate that the internal state is strongly entangled with the vibrational motion in the course of the gate. For comparison we show in Fig.~\ref{twogates} the accomplishments of (a) the slow gate and (b) the fast gate evolution. The slow gate is correctly described by Eq.~(\ref{u}), which simplifies because $F(t)$ and $G(t)$ are always small. The slow gate may be stopped when $A(t)\approx -(\Omega \eta)^2t/(\nu-\delta)$ has acquired its desired value, irrespective of the current, small values of $F(t)$ and $G(t)$. (For illustrative purposes, small but non-zero values of $F(t)$ and $G(t)$ were chosen, leading to the small fast oscillations in the figure). To implement the fast gate with a specific value of $A(\tau)$, one must choose parameters to fulfill $F(\tau)=G(\tau)=0$. 
In Fig.~\ref{twogates} (b) $F(\tau)=G(\tau)=0$ is achieved at the times $\tau\nu \approx k\cdot 125$, where $k$ is any integer. \begin{figure} \caption{Time evolution of density matrix elements according to Eq.~(\ref{u}).} \label{twogates} \end{figure} \subsection{RISQ computing in ion traps} \subsubsection{Feynman computing} The trapped ions or suitable subspaces of states of the ions can be used to represent other physical systems with the same Hilbert space dimension. In the spirit of Feynman's proposal for quantum computing \cite{feynman}, the trapped ions may thus be used for simulation of such other systems. Let us consider a specific example, where we apply Hamiltonians which are acting identically on all ions, {\it e.g.}, because laser fields extend over the whole ion cloud rather than being focused down on one or two ions. It follows that a state of the system, which is initially symmetrical under exchange of different ions, will remain symmetrical. A convenient representation of such states is given by the eigenstates of a fictitious total angular momentum, $|JM\rangle$, the so-called Dicke states \cite{Dicke}. (Every single two-level ion is generically described by $2\times 2$ Pauli spin matrices, and the associated fictitious spin 1/2 add up to a total $J=N/2$ angular momentum.) In the Dicke representation, $N=2J$ is the total number of ions, and $M$ counts the number of excited ions, $N_e=J+M$. A single resonant laser field, which excites all ions with the same amplitude, acts as the angular momentum raising operator $J_+$ on the symmetrical states (and the adjoint lowering operator $J_-$), and effectively it acts as a geometrical rotation of the state vector. Other operators like $J_y^2$ are of more interest, in particular if the interaction can be applied to the system in a pulsed fashion to yield the kicked, non-linear rotor, which is a key example of a classically chaotic system. 
It is of course not sufficient to make the identification between the states of the relevant system and the states of the trapped ions. We also have to find a way to implement the collective $J_y^2$ Hamiltonian. In terms of individual raising and lowering operators, it is apparently necessary to introduce interactions among all the particles of the form of the single pair interaction described in the previous subsection. This, however, turns out to be easier than to carry out even a single two-qubit computation: If the trap contains a larger number of ions, which are {\it all illuminated by the bichromatic light}, any two ions can together resonantly perform the transitions illustrated in Fig.~\ref{detunings}, and the Hamiltonian automatically involves the sum over all pairs in the trap. This sum is nothing but the collective operator $J_y^2$. The interest in studying the $J_y^2$ Hamiltonian was recently stressed by Milburn \cite{footnote}, and it is emphasized by Haake \cite{Haake} in this issue of J. Mod. Opt. By a simple translation of Haake's arguments for the atom-cavity coupling to the mathematically equivalent trapped ion dynamics, we observe that if ions with more than two levels are used, interaction Hamiltonians of a more complicated structure can be tailored, to simulate, {\it e.g.}, the SU(2) Lipkin model \cite{Haake}. \subsubsection{Multiparticle entanglement} \label{multisec} It turns out \cite{bic2} that the multi-atom collective operator $J_y^2 = (\frac{\hbar}{2}\sum \sigma_{y,i})^2$ generates a maximally entangled state if it is applied to a whole ensemble of ground state ions, \begin{equation} |\Psi\rangle =|gg ... g\rangle \rightarrow \frac{1}{\sqrt{2}}( {\rm e}^{i\phi_g}|gg ... g\rangle + {\rm e}^{i\phi_e}|ee ... e\rangle). \label{ghz} \end{equation} These states have several very interesting applications both in fundamental physics and technology. 
They are Schr{\"o}dinger cat superpositions of states of mesoscopic separation, and they are ideal for spectroscopic investigations. In current frequency standards the atoms or ions are independent, and when they are interrogated by the same field, the outcome of a measurement fluctuates as the square root of the number of atoms $N$. The relative frequency uncertainty in samples with many atoms thus behaves like $\frac{1}{\sqrt{N}}$. If the duration of the measurement is shorter than the coherence time of the atomic coherence, which is typically the case in atomic frequency standards, by binding the ions together as in Eq.~(\ref{ghz}) we are sensitive to the Bohr frequency between $|gg ...g\ranglengle$ and $|ee...e\rangle$ which is proportional to $N$, and consequently the frequency uncertainty is proportional to $\frac{1}{N}$ \cite{bollinger}. If the duration of the frequency measurement exceeds the time scale of internal atomic decoherence $\tau_{dec}$, the shorter coherence time $\tau_{dec}/N$ of the entangled state actually leads to the same resolution for that state and for an uncorrelated ensemble of atoms \cite{Huelga}. The successful implementation of our proposal to produce the state in Eq.~(\ref{ghz}) with four ions was recently reported by the NIST group in Boulder \cite{nature}. \section{Optical lattice quantum computers} \subsection{General purpose quantum computing in optical lattices} In Refs. \cite{gatecirac,gatebrennen} two different methods to perform a coherent evolution of the joint state of pairs of atoms in an optical lattice were proposed. Both methods involve displacement of two optical lattices with respect to each other. Each lattice traps one of the two internal states $|0\ranglengle$ and $|1\ranglengle$ of the atoms. Initially, the two lattices are on top of each other and the atoms are assumed to be cooled to the vibrational ground state in the lattices. 
The lattice containing the $|1\rangle$ component of the wavefunction is now displaced so that if an atom (at the lattice site $k$) is in $|1\rangle$, it is transferred to the vicinity of the neighbouring atom (at the lattice site $k+1$) if this is in $|0\rangle$, causing an interaction between the two atoms. See Fig.~\ref{displace}. The atoms interact through controlled collisions or through optically induced dipole-dipole interactions. After the interaction, the lattices are returned to their initial position and the internal states of each atom may be subject to single particle unitary evolution. The displacement and the interaction with the neighbour yields a certain phase shift $\phi$ on the $|1\rangle_k |0\rangle_{k+1}$ component of the wavefunction, i.e., \begin{eqnarray} |0\rangle_k|0\rangle_{k+1}\rightarrow& |0\rangle_k|0\rangle_{k+1} &|0\rangle_k|1\rangle_{k+1} \rightarrow |0\rangle_k |1\rangle_{k+1} \nonumber \\ |1\rangle_k|0\rangle_{k+1}\rightarrow& e^{i\phi}|1\rangle_k |0\rangle_{k+1}\hspace{0.25cm}&|1\rangle_k|1\rangle_{k+1} \rightarrow |1\rangle_k|1\rangle_{k+1}, \label{phaseshift} \end{eqnarray} where $|a\rangle_k$ ($a=0$ or $1$) refers to the state of the atom at the $k$'th lattice site. In \cite{gatecirac, gatebrennen} it is suggested to build a general purpose quantum computer in an optical lattice based on the two-atom gates in Eq.~(\ref{phaseshift}) and single atom control, which is possible by directing a laser beam on each atom. \begin{figure} \caption{(a) Two overlapping lattices trapping the two internal states $|0\rangle$ (black circle) and $|1\rangle$ (white circle). (b) The lattices are displaced so that if an atom is in the $|1\rangle$ state, it is moved close to the neighbouring atom if this is in $|0\rangle$ causing an interaction between the two atoms. 
(c) The lattices are returned to their initial position, where the non-interacting atoms may be driven by external fields.} \label{displace} \end{figure} \subsection{RISQ computing in optical lattices} Individual access to atoms in an optical lattice is not a realistic demand. The lattice sites are spaced by only a fraction of the optical wave length, and hence focusing of a light beam will not yield single site resolution. One may construct field configurations or magnetic micro-traps with periods larger than optical wavelengths \cite{spacing} and still use the internal state selective translation and interaction to implement the two-qubit gate. In this section, however, we shall show that there may be interesting possibilities in the optical lattices, despite the lack of access to the individual atoms. We describe how atoms in an optical lattice may be manipulated to simulate spin-spin interactions which are used to describe ferro-magnetism and antiferro-magnetism in condensed matter physics \cite{lattice}. We also show that with a specific choice of interaction we may generate spin squeezed states \cite{ueda} which may be used to enhance spectroscopic resolution~\cite{winsq}, {\it e.g.}, in atomic clocks. \subsubsection{Feynman computing in an optical lattice} Our two level quantum systems conveniently describe spin $1/2$ particles with the two states $|0\rangle_k$ and $|1\rangle_k$ representing eigenstates of the $j_{z,k}$-operator $j_{z,k}|m\rangle_k=m|m\rangle_k,\ m=\pm 1/2$ ($\hbar=1$). The phase-shifted component of the wavefunction in Eq.~(\ref{phaseshift}) can thus be identified with the operator $(j_{z,k}+1/2)(j_{z,k+1}-1/2)$, and the total evolution composed of the lattice translations and the interaction induced phase shift may be described by the unitary operator $e^{-iHt}$ with the Hamiltonian $H=\chi(j_{z,k}+1/2)(j_{z,k+1}-1/2)$ and time $t=\phi / \chi$. 
In a filled lattice all atoms are brought into contact with their nearest neighbour according to (\ref{phaseshift}), and the evolution is described by the Hamiltonian $H=\chi \sum_k (j_{z,k}+1/2)(j_{z,k+1}-1/2)$. If we neglect boundary terms this Hamiltonian reduces to \begin{equation} H_{zz}= \chi \sum_{<k,l>} j_{z,k}j_{z,l}, \label{kunz} \end{equation} where the sum is over nearest neighbours. By appropriately displacing the lattice we may extend the sum to nearest neighbours in two and three dimensions. $H_{zz}$ coincides with the Ising-model Hamiltonian \cite{ising,reif} introduced to describe ferro-magnetism. Hence, by elementary lattice displacements we perform a quantum simulation of a ferro-magnet (or of an antiferro-magnet depending on the sign of $\chi$). This is an extraordinary example of Feynman quantum computing which is grossly simplified by the locality and the translational invariance of the physical model. A resonant $\pi/2$-pulse acting simultaneously on all atoms rotates the $j_z$-operators into $j_x$-operators, $e^{ij_{y,k}\pi/2} j_{z,k}e^{-ij_{y,k}\pi/2}=j_{x,k}$. Hence, by applying $\pi/2$-pulses, in conjunction with the displacement sequence, we turn $H_{zz}$ into $H_{xx}$ and $H_{yy}$, the second and third term in the more general Heisenberg-model Hamiltonian \cite{Heisenberg} \begin{equation} H_f=\sum_{<k,l>} \chi j_{z,k}j_{z,l}+ \eta j_{x,k}j_{x,l} + \lambda j_{y,k}j_{y,l}. \label{ferro} \end{equation} By adjusting the duration of the interaction with the neighbours we may adjust the coefficients $\chi$, $\eta$ and $\lambda$ to any values. We cannot, however, produce $H_f$ by simply applying $H_{zz}$ for the desired time $t$, followed by $H_{xx}$ and $H_{yy}$, because the different Hamiltonians in Eq.~(\ref{ferro}) do not commute. 
Instead we choose short time steps, i.e., small phase shifts $\phi$ in Eq.~(\ref{phaseshift}), and by repeated application of $H_{zz}$, $H_{xx}$ and $H_{yy}$, we approximate the action of $H_f$ with an error of order $\phi^2$. A host of magnetic phenomena may now be simulated on our optical lattice: Spin waves, solitons, topological excitations, two magnon bound states, etc. Models for magnetic phenomena have interesting thermodynamic behaviour and we propose to carry out calculations for non-vanishing temperature by optically pumping a fraction of the atoms to the $|1/2\rangle$ state. The randomness of the pumping introduces entropy into the system and produces a micro-canonical \cite{reif} realization of a finite temperature. The results of the simulation may be read out by optical diffraction of light, sensitive to the internal atomic states. Although individual atoms may not be resolved, optical detection may also be used to read out magnetic structures on a spatial scale of a few lattice periods. For a few atoms the system may be simulated numerically on a classical computer. In Fig.~\ref{wave} we show the propagation of a spin wave in a one-dimensional string of 15 atoms which are initially in the $|-1/2\rangle$ state. For illustrational purposes we assume that the central spin is flipped at $t=0$. The Hamiltonian (\ref{ferro}) which can be implemented without access to the individual atoms then causes a spin wave to propagate to the left and right. \begin{figure} \caption{Propagation of a spin wave in a one dimensional string. The central atom is flipped at $t=0$, and repeated application of $H_{zz}$, $H_{xx}$ and $H_{yy}$ approximates the evolution under the Hamiltonian (\ref{ferro}).} \label{wave} \end{figure} So far we have assumed that the lattice contains one atom at each lattice site and that all atoms are cooled to the vibrational ground state. The present experimental status in optical lattices is that atoms can be cooled to the vibrational ground-state in 2D \cite{groundstate}. 
A mean filling factor of unity in 3D is reported in \cite{depue}, but when at most a single atom is permitted at each lattice site a mean occupation of 0.44 is achieved. The interaction in a partially filled lattice may be described by the Hamiltonian $H=\sum_{k,l} \chi_{k,l} h_k(j_{z,k}+1/2)h_l(j_{z,l}-1/2)$, where the stochastic variable $h_k$ is $1$ $(0)$ if a lattice site is filled (empty), and where the coupling constants $\chi_{k,l}$ between atoms $k$ and $l$ vanish unless the atoms are brought into contact by the lattice displacements. If we displace the atoms so that $\chi_{k,l}$ is symmetric in $k$ and $l$, we produce the Hamiltonian $ H=\sum_{k,l} \chi_{k,l} h_k j_{x,k} h_l j_{x,l}.$ This Hamiltonian models magnetism in random structures, and it might shed light on morphology properties, and, {\it e.g.}, percolation \cite{percolation}. \subsubsection{Multi-particle entanglement and spin squeezing} Polarization rotation spectroscopy and high precision atomic fountain clocks are now limited by the $1/\sqrt{N}$ sensitivity discussed in Sec.~\ref{multisec} \cite{jens,precision}. In \cite{ueda} it is suggested to produce spin squeezed states which redistribute the uncertainty unevenly between collective spin components like $J_x$ and $J_y$, so that measurements, sensitive to the component with reduced uncertainty, become more precise. Spin squeezing resulting from absorption of non-classical light has been suggested \cite{kuzmich} and demonstrated experimentally \cite{jan}. Ref. \cite{ueda} presents an analysis of squeezing obtained from the non-linear couplings $H=\chi J_x^2$ and $H=\chi(J_x^2-J_y^2)$. The product $J_x^2$ involves terms $j_{x,k}j_{x,l}$ for all atoms $k$ and $l$, and this coupling may be produced by displacing the lattices several times so that the $|1/2\rangle$ component of each atom visits every lattice site and interacts with all other atoms. 
In a large lattice such multiple displacements are not desirable, they may be too difficult to control precisely, and they take too much time. We shall show, however, that substantial spin-squeezing occurs through interaction with {\it only a few} nearby atoms. If each atom visits only its nearest neighbour, $\chi_{k,l}=\chi \delta_{k+1,l}$, we find that the mean spin vector is in the negative $z$ direction and it has the expectation value $\langle J_z\rangle=-\frac{N}{2}\cos^2(\chi t)$. The time dependent variance of the spin component $J_{\theta}=\cos(\theta)J_x+\sin(\theta)J_y$ with $\theta=-\pi/4$ is obtained by a lengthy, but straightforward, calculation \begin{equation} (\Delta J_{-\pi/4})^2 =\frac{N}{4}\left[1+\frac{1}{4} \sin^2(\chi t)-\sin(\chi t)\right]. \label{dj} \end{equation} \begin{figure} \caption{Squeezing in a one-dimensional lattice with 15 atoms. (a) Evolution of $(\Delta J_\theta)^2$ during interaction with 1, 2, and 3 neighbours (full, dashed, and short dashed line, respectively). (b) Minimum attainable squeezing parameter $\xi^2$ for filling factors $p$=100\% ($\diamond$), 50\% (+), 25\% ($\Box$), and 10\% ($\times$) as functions of the number of sites visited. } \label{partsqueez} \end{figure} Fig.~\ref{partsqueez} (a) shows the evolution of $(\Delta J_\theta)^2$ when we visit 1, 2, and 3 neighbours. We assume the same phase shift for all collisions, i.e., all non-vanishing $\chi_{k,l}$ are identical. The squeezing angle $\theta=-\pi/4$ is optimal for short times $\chi t \ll 1$. For longer times the optimal angle deviates from $-\pi/4$, and we plot the variance $(\Delta J_{\theta})^2$ minimized with respect to the angle $\theta$. If $\frac{1}{\sqrt{2}} ({\rm e}^{-i\theta/2}|1/2\rangle+ {\rm e}^{i\theta/2}|-1/2\rangle)$ is rotated into $|1/2\rangle$, subbinomial counting statistics of the $|1/2\rangle$ population provides an easily accessible experimental signature of squeezing of $J_\theta$. 
In \cite{winsq} it is shown that if spectroscopy is performed with $N$ particles, the reduction in the frequency variance due to squeezing is given by the quantity \begin{equation} \xi^2=\frac{N\langle \Delta J_\theta\rangle^2}{\langle J_z \rangle^2 }, \label{xi} \end{equation} and in Fig.~\ref{partsqueez} (b) we show the minimum value of $\xi^2$ as a function of the number of neighbours visited. We have performed simulations of squeezing in a partially filled one dimensional lattice. In our model each lattice site contains an atom with a probability $p$, and the size of the lattice is adjusted to accommodate 15 atoms. The calculations shown in Fig.~\ref{partsqueez} (b) demonstrate that considerable squeezing may be achieved by visiting just a few neighbours even in dilute lattices. \section{Outlook} Our examples with ion trap and optical lattice quantum computers explicitly confirm the assumption \cite{feynman,lloyd} that a quantum computer aimed at the solution of a quantum problem may be easier to realize in practice than a general purpose quantum computer, because the desired solution is governed by physical interactions which are constrained, {\it e.g.}, by locality and symmetries. The problems addressed here, the kicked non-linear rotor, the Lipkin model, and the Heisenberg model of ferromagnetism and anti-ferromagnetism, almost implement themselves in the quantum computer proposals with trapped ions and atoms. The arguments are general, and one may readily conclude that other proposals for quantum computing offer similar approaches to these models, and that a large variety of quantum physics problems may be implemented much more easily than the more mathematical algorithms of Shor \cite{shor} and Grover \cite{grover}. 
If a computation can be carried out with only few operations and in a very short time, the problem of errors is substantially reduced, and it seems realistic that we may soon perform interesting calculations which are really impossible to carry out on a classical computer. Extra optimism derives from the fact that small imprecisions in our manipulation of the system translates into small errors in the value of the physical parameters in the simulated problem, so that, {\it e.g.} the spin wave in Figure 4, might move at a different speed, but the essential physics is still preserved. The errors are 'normal' and may well be below the precision required, unlike the outcome of a factoring or search algorithm, where a wrong result is useless, and where we have to rely on the exact result to appear with finite probability. Both trapped ions and atoms in optical lattices are systems which can be used in high precision spectroscopic measurements. We have shown that collective operators for an ensemble of ions or atoms can be squeezed, yielding an improvement of the precision in such measurements. Unlike manufactured systems, like quantum dots or Josephson junctions, given isotopes of ions or atoms are identical, and they can serve as primary time standards. There will hence be a continuing demand to improve experiments on these systems, irrespective of their prospects for full scale quantum computation. Already present atomic clocks are operating at the projection noise limit \cite{precision}, and multi-particle entanglement and spin-squeezing, in one way or another, will come in handy. Noise reduction derived from multi-particle entanglement provides a macroscopic experimental signature of the microscopic interaction between the atoms, and hence it may help to diagnose gates in an atomic proto-type quantum computer. Quantum effects are not only subject of experimental investigation. 
In the hands of experimental physicists wave mechanics is used in SQUIDs and in atom interferometers for sensitive measurements of fields and inertial effects; electron tunneling is used in the scanning tunneling microscopes; the existence of discrete spectral lines is used for metrology; ... . Quantum information {\it is} in use in physics, and further developments in quantum information can find applications, ranging from the use of spin-squeezed and Schr\"odinger cat like states to, {\it e.g.}, Grover's and Shor's algorithms as methods to distinguish between external influences on a physical system \cite{farhi} and to effectively estimate values of complex phase factors \cite{griffith}. We have addressed the use of quantum information as a theorist's computational tool, and in a recent paper \cite{preskill}, Preskill envisions the use of quantum computation for a wide range of many-body problems. A 'symbiosis' between quantum information and these physical problems can even be imagined since, {\it e.g.}, topological field theories may in turn suggest stronger models and new algorithms for quantum computing \cite{kitaev}. Quantum computing only works, if it can be implemented on a quantum system. There are good chances that RISQ implementations exist for many of the physics problems amenable to quantum computing. To identify such problems, and maybe even some mathematical problems tractable by RISQ, is both an interesting and useful challenge for quantum information theory. \end{document}
\begin{document} \title{Calculus on symplectic manifolds} \author[Michael Eastwood]{Michael Eastwood} \address{\hskip-\parindent School of Mathematical Sciences\\ University of Adelaide\\ SA 5005\\ Australia} \email{[email protected]} \author[Jan Slov\'ak]{Jan Slov\'ak} \address{\hskip-\parindent Department of Mathematics and Statistics\\ Masaryk University,\newline 611 37 Brno, Czech Republic} \email{[email protected]} \subjclass{53D05, 53B35} \thanks{This research was supported by the Czech Grant Agency. The authors would like to thank the Agency for their generous support under Grant P201/12/G028.} \thanks{This work was also supported by the Simons Foundation grant 346300 and the Polish Government MNiSW 2015--2019 matching fund. It was completed whilst the authors were visiting the Banach Centre at IMPAN in Warsaw for the Simons Semester `Symmetry and Geometric Structures.'} \begin{abstract} On a symplectic manifold, there is a natural elliptic complex replacing the de~Rham complex. It can be coupled to a vector bundle with connection and, when the curvature of this connection is constrained to be a multiple of the symplectic form, we find a new complex. In particular, on complex projective space with its Fubini--Study form and connection, we can build a series of differential complexes akin to the Bernstein--Gelfand--Gelfand complexes from parabolic differential geometry. \end{abstract} \renewcommand{\subjclassname}{\textup{2010} Mathematics Subject Classification} \maketitle \section{Introduction} Throughout this article $M$ will be a smooth manifold of dimension $2n$ equipped with a symplectic form $J_{ab}$. Here, we are using Penrose's abstract index notation~\cite{OT} and non-degeneracy of this $2$-form says that there is a skew contravariant $2$-form $J^{ab}$ such that $J_{ab}J^{ac}=\delta_b{}^c$ where $\delta_b{}^c$ is the canonical pairing between vectors and co-vectors. 
Let $\Wedge^k$ denote the bundle of $k$-forms on~$M$. The homomorphism $$\Wedge^k\to\Wedge^{k-2}\enskip\mbox{given by}\enskip \phi_{abc\cdots d}\mapsto J^{ab}\phi_{abc\cdots d}$$ is surjective for $2\leq k\leq n$ with non-trivial kernel, corresponding to the irreducible representation $$\rule[-10pt]{20pt}{0pt}\begin{picture}(145,10) \put(5,3){\line(1,0){20}} \put(5,2.6){\makebox(0,0){$\bullet$}} \put(20,2.6){\makebox(0,0){$\bullet$}} \put(36,2.6){\makebox(0,0){$\cdots$}} \put(50,2.6){\makebox(0,0){$\bullet$}} \put(45,3){\line(1,0){40}} \put(65,2.6){\makebox(0,0){$\bullet$}} \put(80,2.6){\makebox(0,0){$\bullet$}} \put(96,2.6){\makebox(0,0){$\cdots$}} \put(105,3){\line(1,0){20}} \put(110,2.6){\makebox(0,0){$\bullet$}} \put(125,2.6){\makebox(0,0){$\bullet$}} \put(125,5){\line(1,0){15}} \put(125,1){\line(1,0){15}} \put(140,2.6){\makebox(0,0){$\bullet$}} \put(132.5,3){\makebox(0,0){$\langle$}} \put(5,5){\makebox(0,0)[b]{\scriptsize$\vphantom{(}0$}} \put(20,5){\makebox(0,0)[b]{\scriptsize$\vphantom{(}0$}} \put(50,5){\makebox(0,0)[b]{\scriptsize$\vphantom{(}0$}} \put(65,5){\makebox(0,0)[b]{\scriptsize$\vphantom{(}1$}} \put(80,5){\makebox(0,0)[b]{\scriptsize$\vphantom{(}0$}} \put(110,5){\makebox(0,0)[b]{\scriptsize$\vphantom{(}0$}} \put(125,5){\makebox(0,0)[b]{\scriptsize$\vphantom{(}0$}} \put(140,5){\makebox(0,0)[b]{\scriptsize$\vphantom{(}0$}} \put(65,-10){\vector(0,1){8}} \put(70,-6){\makebox(0,0)[l]{\scriptsize{$k^{\mathrm{th}}$ node}}} \end{picture}\quad\mbox{of}\quad {\mathrm{Sp}}(2n,{\mathbb{R}})\subset{\mathrm{GL}}(2n,{\mathbb{R}}).$$ Denoting this bundle by $\Wedge_\perp^k$, there is a canonical splitting of the short exact sequence $$0\to\Wedge_\perp^k\raisebox{-6.3pt}{$\begin{array}{c} \rightleftarrows\\[-8pt] \mbox{\scriptsize$\pi$}\end{array}$} \Wedge^k\to\Wedge^{k-2}\to 0$$ and an elliptic complex~\cite{BEGN,E,ES,S,TY} \begin{equation} \label{RScomplex}\addtolength{\arraycolsep}{-1pt}\begin{array}{rcccccccccccc} 
0&\to&\Wedge^0&\stackrel{d}{\longrightarrow}&\Wedge^1 &\stackrel{d_\perp}{\longrightarrow}&\Wedge_\perp^2 &\stackrel{d_\perp}{\longrightarrow}&\Wedge_\perp^3 &\stackrel{d_\perp}{\longrightarrow}&\cdots &\stackrel{d_\perp}{\longrightarrow}&\Wedge_\perp^{n}\\[2pt] &&&&&&&&&&&&\big\downarrow\makebox[0pt][l]{\scriptsize$d_\perp^2$}\\ 0&\leftarrow&\Wedge^0&\stackrel{d_\perp}{\longleftarrow}&\Wedge^1 &\stackrel{d_\perp}{\longleftarrow}&\Wedge_\perp^2 &\stackrel{d_\perp}{\longleftarrow}&\Wedge_\perp^3 &\stackrel{d_\perp}{\longleftarrow}&\cdots &\stackrel{d_\perp}{\longleftarrow}&\Wedge_\perp^{n} \end{array}\end{equation} where \begin{itemize} \item $d:\Wedge^0\to\Wedge^1$ is the exterior derivative, \item for $1\leq k< n$, the operator $d_\perp:\Wedge_\perp^k\to\Wedge_\perp^{k+1}$ is the composition $$\Wedge_\perp^k\hookrightarrow\Wedge^k\xrightarrow{\,d\,}\Wedge^{k+1} \xrightarrow{\,\pi\,}\Wedge_\perp^{k+1},$$ a first order operator, \item $d_\perp:\Wedge_\perp^{k+1}\to\Wedge_\perp^k$ are canonically defined first order operators, which may be seen as adjoint to $d_\perp:\Wedge_\perp^k\to\Wedge_\perp^{k+1}$, \item $d_\perp^2:\Wedge_\perp^n\to\Wedge_\perp^n$ is the composition $$\Wedge_\perp^n\xrightarrow{\,d_\perp\,}\Wedge_\perp^{n-1} \xrightarrow{\,d_\perp\,}\Wedge_\perp^n,$$ a second order operator. \end{itemize} More explicitly, formul{\ae} for these operators may be given as follows. Firstly, it is convenient to choose a {\em symplectic connection\/}~$\nabla_a$, namely a torsion-free connection such that $\nabla_aJ_{bc}=0$, equivalently $\nabla_aJ^{bc}=0$. 
As shown in~\cite{GRS}, for example, such connections always exist and if $\nabla_a$ is one such, then the general symplectic connection is $$\hat\nabla_a\phi_b=\nabla_a\phi_b+J^{cd}\Xi_{abc}\phi_d\quad\mbox{where} \enskip\Xi_{abc}=\Xi_{(abc)}.$$ Then, for $1\leq k<n$, the operator $d_\perp:\Wedge_\perp^k\to\Wedge_\perp^{k+1}$ is given by \begin{equation}\label{early}\textstyle\phi_{def\cdots g}\longmapsto \nabla_{[c}\phi_{def\cdots g]} -\frac{k}{2(n+1-k)}J^{ab}(\nabla_a\phi_{b[ef\cdots g})J_{cd]}\end{equation} and $d_\perp:\Wedge_\perp^{k+1}\to\Wedge_\perp^k$ is given by \begin{equation}\label{late} \psi_{cdef\cdots g}\longmapsto J^{bc}\nabla_b\psi_{cdef\cdots g}.\end{equation} Now suppose $E$ is a smooth vector bundle on $M$ and $\nabla:E\to\Wedge^1\otimes E$ is a connection. Choosing any torsion-free connection on $\Wedge^1$ induces a connection on $\Wedge^1\otimes E$ and, as is well-known, the composition $$\Wedge^1\otimes E\to \Wedge^1\otimes\Wedge^1\otimes E\to\Wedge^2\otimes E$$ does not depend on this choice. (It is the second in a well-defined sequence of differential operators \begin{equation}\label{coupled_de_Rham} E\xrightarrow{\,\nabla\,}\Wedge^1\otimes E \xrightarrow{\,\nabla\,}\Wedge^2\otimes E \xrightarrow{\,\nabla\,}\cdots \xrightarrow{\,\nabla\,}\Wedge^{2n-1}\otimes E \xrightarrow{\,\nabla\,}\Wedge^{2n}\otimes E\end{equation} known as the {\em coupled de~Rham sequence\/}.) In particular, we may define a homomorphism $\Theta:E\to E$ by $$\textstyle J^{ab}\nabla_a\nabla_b\Sigma=\frac1{2n}\Theta\Sigma \quad\mbox{for}\enskip\Sigma\in\Gamma(E).$$ It is part of the curvature of~$\nabla$ and if this is the only curvature, then \begin{equation}\label{Theta_in_the_symplectically_flat_case} (\nabla_a\nabla_b-\nabla_b\nabla_a)\Sigma=2J_{ab}\Theta\Sigma,\end{equation} and we shall say that $\nabla$ is {\em symplectically flat\/}. 
Looking back at (\ref{RScomplex}), it is easy to see that there are coupled operators $$E \begin{array}c\scriptstyle\nabla\\[-8pt] \longrightarrow\\[-10pt] \longleftarrow\\[-9pt] \scriptstyle{}\enskip\nabla_\perp\end{array} \Wedge^1\otimes E \begin{array}c\scriptstyle\;\nabla\!{}_\perp\\[-8pt] \longrightarrow\\[-10pt] \longleftarrow\\[-9pt] \scriptstyle{}\;\nabla\!{}_\perp\end{array} \Wedge_\perp^2\otimes E \begin{array}c\scriptstyle\;\nabla\!{}_\perp\\[-8pt] \longrightarrow\\[-10pt] \longleftarrow\\[-9pt] \scriptstyle{}\;\nabla\!{}_\perp\end{array} \cdots \begin{array}c\scriptstyle\;\nabla\!{}_\perp\\[-8pt] \longrightarrow\\[-10pt] \longleftarrow\\[-9pt] \scriptstyle{}\;\nabla\!{}_\perp\end{array} \Wedge_\perp^{n-1}\otimes E \begin{array}c\scriptstyle\;\nabla\!{}_\perp\\[-8pt] \longrightarrow\\[-10pt] \longleftarrow\\[-9pt] \scriptstyle{}\;\nabla\!{}_\perp\end{array} \Wedge_\perp^n\otimes E,$$ explicit formul\ae\ for which are just as in the uncoupled cases (\ref{early}) and~(\ref{late}). To complete the coupled version of (\ref{RScomplex}) let us use \begin{equation}\label{middle_operator} \textstyle\nabla^2_\perp-\frac2n\Theta: \Wedge_\perp^n\otimes E\longrightarrow\Wedge_\perp^n\otimes E\end{equation} for the middle operator. It is evident that $$E\stackrel{\nabla}{\longrightarrow}\Wedge^1\otimes E \xrightarrow{\,\nabla_\perp\,}\Wedge_\perp^2\otimes E$$ is a complex if and only if $\nabla$ is symplectically flat. The reason for the curvature term in (\ref{middle_operator}) is that this feature propagates as follows. \begin{thm}\label{one} Suppose $E\xrightarrow{\,\nabla\,}\Wedge^1\otimes E$ is a symplectically flat connection and define $\Theta:E\to E$ by~\eqref{Theta_in_the_symplectically_flat_case}. 
Then the coupled version of \eqref{RScomplex} $$\addtolength{\arraycolsep}{-1pt}\begin{array}{rcccccccccc} 0&\to&E&\stackrel{\nabla}{\longrightarrow}&\Wedge^1\otimes E &\stackrel{\nabla_\perp}{\longrightarrow}&\Wedge_\perp^2\otimes E &\stackrel{\nabla_\perp}{\longrightarrow}&\cdots &\stackrel{\nabla_\perp}{\longrightarrow}&\Wedge_\perp^n\otimes E\\[2pt] &&&&&&&&&& \big\downarrow \makebox[0pt][l]{\scriptsize$\nabla_\perp^2-\frac2{n}\Theta$}\\ 0&\leftarrow&E&\stackrel{\nabla_\perp}{\longleftarrow}&\Wedge^1\otimes E &\stackrel{\nabla_\perp}{\longleftarrow}&\Wedge_\perp^2\otimes E &\stackrel{\nabla_\perp}{\longleftarrow}&\cdots &\stackrel{\nabla_\perp}{\longleftarrow}&\Wedge_\perp^n\otimes E \end{array}\quad$$ is a complex. It is locally exact except near the beginning where $$\ker\nabla:E\to\Wedge^1\otimes E\quad\mbox{and}\quad \frac{\ker\nabla_\perp:\Wedge^1\otimes E\to\Wedge_\perp^2\otimes E} {\operatorname{im}\nabla:E\to\Wedge^1\otimes E}$$ may be identified with the kernel and cokernel, respectively, of\/ $\Theta$ as locally constant sheaves. \end{thm} \noindent More precision and a proof of Theorem~\ref{one} will be provided in \S\ref{rumin_seshadri}. Our next theorem yields some natural symplectically flat connections. \begin{thm}\label{two} Suppose $M$ is a\/ $2n$-dimensional symplectic manifold with symplectic connection~$\nabla_a$. Then there is a natural vector bundle\/ ${\mathcal{T}}$ on $M$ of rank $2n+2$ equipped with a connection, which is symplectically flat if and only if the curvature $R_{ab}{}^c{}_d$ of $\nabla_a$ has the form \begin{equation}\label{Vis0} R_{ab}{}^c{}_d=\delta_a{}^c\Phi_{bd}-\delta_b{}^c\Phi_{ad} +J_{ad}\Phi_{be}J^{ce}-J_{bd}\Phi_{ae}J^{ce}+2J_{ab}\Phi_{de}J^{ce}, \end{equation} for some symmetric tensor~$\Phi_{ab}$. 
\end{thm} \noindent In particular, the Fubini--Study connection on complex projective space is symplectic for the standard K\"ahler form and its curvature is of the form (\ref{Vis0}) for $\Phi_{ab}=g_{ab}$, the standard metric. More generally, if the symplectic connection $\nabla_a$ arises from a K\"ahler metric, then we shall see that (\ref{Vis0}) holds precisely in the case of constant holomorphic sectional curvature. After proving Theorems~\ref{one} and~\ref{two}, the remainder of this article is concerned with the consequences of Theorem~\ref{one} for the vector bundle ${\mathcal{T}}$ and those bundles, such as~$\bigodot^k\!{\mathcal{T}}$, induced from it. In particular, these consequences pertain on complex projective space where we shall find a series of elliptic complexes closely following the Bernstein--Gelfand--Gelfand complexes on the sphere $S^{2n+1}$ as a homogeneous space for the Lie group ${\mathrm{Sp}}(2n+2,{\mathbb{R}})$. This article is based on our earlier work~\cite{ES} but here we focus on the simpler case where we are given a symplectic structure as background. This results in fewer technicalities and in this article we include more detail, especially in constructing the BGG-like complexes in~\S\ref{BGG-like}. \section{The Rumin--Seshadri complex}\label{rumin_seshadri} By the {\em Rumin--Seshadri complex\/}, we mean the differential complex (\ref{RScomplex}) after~\cite{S}. However, the $4$-dimensional case is due to R.T.~Smith~\cite{Sm} and the general case is also independently due to Tseng and Yau~\cite{TY}. In this section we shall derive the coupled version of this complex as in Theorem~\ref{one}, our proof of which includes (\ref{RScomplex}) as a special case. The following lemma is also the key step in~\cite{ES}. \begin{lemma}\label{key_lemma} Suppose $E$ is a vector bundle on $M$ with symplectically flat connection $\nabla:E\to\Wedge^1\otimes E$. Define $\Theta:E\to E$ by~\eqref{Theta_in_the_symplectically_flat_case}. 
Then $\Theta$ has constant rank and the bundles $\ker\Theta$ and $\operatorname{coker}\Theta$ acquire from~$\nabla$, flat connections defining locally constant sheaves \underbar{$\ker\Theta$} and \underbar{$\operatorname{coker}\Theta$}, respectively. There is an elliptic complex $$\begin{array}{cccccccccc}E &\stackrel{\nabla}{\longrightarrow}&\Wedge^1\otimes E &\stackrel{\nabla}{\longrightarrow}&\Wedge^2\otimes E &\stackrel{\nabla}{\longrightarrow}&\Wedge^3\otimes E &\stackrel{\nabla}{\longrightarrow}&\Wedge^4\otimes E\\ &\begin{picture}(0,0)(0,-3) \put(-9,6){\vector(3,-2){18}}\end{picture} &\oplus &\begin{picture}(0,0)(0,-3) \put(-9,-6){\vector(3,2){18}} \put(-9,6){\vector(3,-2){18}}\end{picture} &\oplus &\begin{picture}(0,0)(0,-3) \put(-9,-6){\vector(3,2){18}} \put(-9,6){\vector(3,-2){18}}\end{picture} &\oplus &\begin{picture}(0,0)(0,-3) \put(-9,-6){\vector(3,2){18}} \put(-9,6){\vector(3,-2){18}}\end{picture} &\oplus&\cdots, \\ &&E&\longrightarrow&\Wedge^1\otimes E &\longrightarrow&\Wedge^2\otimes E &\longrightarrow&\Wedge^3\otimes E \end{array}$$ where the differentials are given by $$\Sigma\!\mapsto\!\left[\!\begin{array}{c}\nabla\Sigma\\ \Theta\Sigma\end{array}\!\right] \quad \left[\!\begin{array}{c}\phi\\ \eta\end{array}\!\right] \!\mapsto\!\left[\!\begin{array}{c}\nabla\phi-J\otimes\eta\\ \nabla\eta-\Theta\phi\end{array}\!\right] \quad\left[\!\begin{array}{c}\omega\\ \psi\end{array}\!\right] \!\mapsto\!\left[\!\begin{array}{c}\nabla\omega+J\wedge\psi\\ \nabla\psi+\Theta\omega\end{array}\!\right]\enskip\cdots.$$ It is locally exact save for the zeroth and first cohomologies, which may be identified with \underbar{$\ker\Theta$} and \underbar{$\operatorname{coker}\Theta$}, respectively. 
\end{lemma} \begin{proof} {From} (\ref{Theta_in_the_symplectically_flat_case}) the Bianchi identity for $\nabla$ reads $$0=\nabla_{[a}\big(J_{bc]}\Theta\big)=J_{[ab}\nabla_{c]}\Theta$$ and non-degeneracy of $J_{ab}$ implies that $\nabla_a\Theta=0$. Consequently, the homomorphism $\Theta$ has constant rank and the following diagram with exact rows commutes $$\addtolength{\arraycolsep}{-2pt}\begin{array}{ccccccccccc} 0&\to&\ker\Theta&\to&E&\xrightarrow{\,\Theta\,}&E&\to&\operatorname{coker}\Theta&\to&0\\ &&&&\downarrow\!\makebox[0pt][l]{\scriptsize$\nabla$} &&\downarrow\!\makebox[0pt][l]{\scriptsize$\nabla$}\\ 0&\to&\Wedge^1\otimes\ker\Theta&\to&\Wedge^1\otimes E&\xrightarrow{\,\Theta\,} &\Wedge^1\otimes E&\to&\Wedge^1\otimes\operatorname{coker}\Theta&\to&0 \end{array}$$ and yields the desired connections on $\ker\Theta$ and~$\operatorname{coker}\Theta$, which are easily seen to be flat. Ellipticity of the given complex is readily verified and, by definition, the kernel of its first differential is~\underbar{$\ker\Theta$}. To identify the higher local cohomology of this complex the key observation is that locally we may choose a $1$-form $\tau$ such that $d\tau=J$ and, having done this, the connection $$\Gamma(E)\ni\Sigma\stackrel{\tilde\nabla}{\longmapsto} \nabla\Sigma-\tau\otimes\Theta\Sigma\in \Gamma(\Wedge^1\otimes E)$$ is flat. The rest of the proof is diagram chasing, using exactness of $$E\xrightarrow{\,\tilde\nabla\,}\Wedge^1\otimes E \xrightarrow{\,\tilde\nabla\,}\Wedge^2\otimes E \xrightarrow{\,\tilde\nabla\,}\Wedge^3\otimes E \xrightarrow{\,\tilde\nabla\,}\Wedge^4\otimes E \xrightarrow{\,\tilde\nabla\,}\cdots.$$ If needed, the details are in~\cite{ES}. \end{proof} \noindent{\em Proof of Theorem~\ref{one}}. In \cite{ES}, the corresponding result \cite[Theorem~4]{ES} is proved by invoking a spectral sequence. 
Here, we shall, instead, prove two typical cases `by hand,' leaving the rest of the proof to the reader. For our first case, let us suppose $n\geq 3$ and prove local exactness of $$\Wedge^1\otimes E\xrightarrow{\,\nabla_\perp\,}\Wedge_\perp^2\otimes E \xrightarrow{\,\nabla_\perp\,}\Wedge_\perp^3\otimes E.$$ Thus, we are required to show that if $\omega_{ab}$ has values in $E$ and $$\textstyle\omega_{ab}=\omega_{[ab]}\qquad J^{ab}\omega_{ab}=0\qquad \nabla_{[c}\omega_{de]}=\frac1{n-1}J^{ab}(\nabla_a\omega_{b[c})J_{de]},$$ then locally there is $\phi_{a}\in\Gamma(\Wedge^1\otimes E)$ such that $$\textstyle\omega_{cd} =\nabla_{[c}\phi_{d]}-\frac1{2n}J^{ab}(\nabla_a\phi_b)J_{cd}.$$ If we set $\psi_c\equiv-\frac1{n-1}J^{ab}\nabla_a\omega_{bc}$, then $\nabla_{[c}\omega_{de]}+J_{[cd}\psi_{e]}=0$ so $$0=\nabla_{[b}\nabla_c\omega_{de]}+J_{[bc}\nabla_d\psi_{e]} =J_{[bc}\mathcal{T}heta\omega_{de]}+J_{[bc}\nabla_d\psi_{e]}$$ and since $J\wedge\underbar{\enskip}:\Wedge^2\to\Wedge^4$ is injective it follows that $$\nabla_{[c}\psi_{d]}+\mathcal{T}heta\omega_{cd}=0.$$ In other words, we have shown that $$\begin{array}{rcl} \nabla\omega+J\wedge\psi&=&0\\ \nabla\psi+\mathcal{T}heta\omega&=&0\end{array}$$ and Lemma~\ref{key_lemma} locally yields $\phi_a\in\Gamma(\Wedge^1\otimes E)$ and $\eta\in\Gamma(E)$ such that $$\begin{array}{rcl}\nabla_{[a}\phi_{b]}-J_{ab}\eta&=&\omega_{ab}\\ \nabla_a\eta-\mathcal{T}heta\phi_a&=&\psi_a\end{array}$$ In particular, $$J^{ab}\nabla_a\phi_b-2n\eta=J^{ab}\big(\nabla_a\phi_b-J_{ab}\eta\big) =J^{ab}\omega_{ab}=0$$ and, therefore, $$\textstyle\nabla_{[c}\phi_{d]}-\frac1{2n}J^{ab}(\nabla_a\phi_b)J_{cd} =\nabla_{[c}\phi_{d]}-\eta J_{cd}=\omega_{cd},$$ as required. Our second case is more involved. It is to show that \begin{equation}\label{complex} \Wedge_\perp^n\otimes E\xrightarrow{\,\nabla_\perp^2-\frac2n\mathcal{T}heta\,} \Wedge_\perp^n\otimes E\xrightarrow{\,\nabla_\perp\,} \Wedge_\perp^{n-1}\otimes E\end{equation} is locally exact. 
As regards $\nabla_\perp:\Wedge_\perp^n\otimes E\to\Wedge_\perp^{n-1}\otimes E$, notice that $$\textstyle J^{bc}\nabla_b\psi_{cdef\cdots g} =\frac{n+1}2J^{bc}\nabla_{[b}\psi_{cdef\cdots g]}$$ and that if $\phi_{def\cdots g}\in\Gamma(\Wedge^{k}\otimes E)$, then \begin{equation}\label{combinatorics} \textstyle J^{bc}J_{[bc}\phi_{def\cdots g]}= \frac{4(n-k)}{(k+1)(k+2)}\phi_{def\cdots g}+ \frac{k(k-1)}{(k+1)(k+2)}J_{[de}\phi_{f\cdots g]bc}J^{bc}\end{equation} so if $\phi_{def\cdots g}\in\Gamma(\Wedge_\perp^{n-1}\otimes E)$, then $$\textstyle J^{bc}J_{[bc}\phi_{def\cdots g]}= \frac4{n(n+1)}\phi_{def\cdots g}.$$ Therefore, $\nabla_\perp\psi\in\Gamma(\Wedge_\perp^{n-1}\otimes E)$ is characterised by \begin{equation}\label{trick} \textstyle J\wedge\nabla_\perp\psi=\frac2n\nabla\psi\end{equation} as an equation in $\Wedge^{n+1}\otimes E$. In particular, in $\Wedge^{n+2}\otimes E$ we find $$\textstyle J\wedge\nabla\nabla_\perp\psi =\nabla(J\wedge\nabla_\perp\psi)=\frac2n\nabla^2\psi =J\wedge\mathcal{T}heta\psi=0$$ whence $\nabla\nabla_\perp\psi$ already lies in $\Wedge^n\otimes E$ and there is no need to remove the trace as in (\ref{early}) to form $\nabla_\perp^2\psi$. Therefore, invoking (\ref{trick}) once again, the composition $$\Wedge_\perp^n\otimes E\xrightarrow{\,\nabla_\perp\,} \Wedge_\perp^{n-1}\otimes E \xrightarrow{\,\nabla_\perp\,}\Wedge_\perp^n\otimes E \xrightarrow{\,\nabla_\perp\,}\Wedge_\perp^{n-1}\otimes E$$ is characterised by $$\textstyle J\wedge\nabla_\perp^3\psi=\frac2n\nabla\nabla_\perp^2\psi =\frac2n\nabla^2\nabla_\perp\psi=\frac2nJ\wedge\mathcal{T}heta\nabla_\perp\psi =\frac2nJ\wedge\nabla_\perp\mathcal{T}heta\psi$$ and, since $J\wedge\underbar{\enskip}:\Wedge^{n-1}\to\Wedge^{n+1}$ is an isomorphism, we conclude that $\nabla_\perp^3\psi=\frac2n\nabla_\perp\mathcal{T}heta\psi$, equivalently that (\ref{complex}) is a complex. 
Before proceeding, let us remark on another consequence of (\ref{combinatorics}), namely that for $\nu_{cdef\cdots g}\in\Gamma(\Wedge^n\otimes E)$, \begin{equation}\label{algebra} J_{[ab}\nu_{cdef\cdots g]}=0\iff J^{cd}\nu_{cdef\cdots g}=0. \end{equation} Now to establish local exactness, suppose $\nu\in\Gamma(\Wedge_\perp^n\otimes E)$ satisfies $\nabla_\perp\nu=0$. Equivalently, according to (\ref{trick}) and~(\ref{algebra}) $$\nu\in\Gamma(\Wedge^n\otimes E)\quad\mbox{satisfies}\enskip \nabla\nu=0\enskip\mbox{and}\enskip J\wedge\nu=0.$$ Lemma~\ref{key_lemma} implies that locally there are $$\begin{array}{l}\phi\in\Gamma(\Wedge^n\otimes E)\\ \eta\in\Gamma(\Wedge^{n-1}\otimes E)\end{array}\enskip\mbox{such that}\enskip \begin{array}{rcl}\nabla\phi-J\wedge\eta&=&0\\ \nabla\eta-\mathcal{T}heta\phi&=&\nu.\end{array}$$ Since $$0\to\Wedge^{n-2}\xrightarrow{\,J\wedge\underbar{\enskip}\,}\Wedge^n \to\Wedge_\perp^n\to0$$ is exact, we can write $\phi$ uniquely as $$\phi=\psi+J\wedge\tau,$$ where $\psi\in\Gamma(\Wedge_\perp^n\otimes E)$ and $\tau\in\Gamma(\Wedge^{n-2}\otimes E)$. We conclude that $$\begin{array}{rcl}\nabla\psi-J\wedge\hat\eta&=&0\\ \nabla\hat\eta-\mathcal{T}heta\psi&=&\nu,\end{array}\enskip\mbox{(where}\enskip \hat\eta=\eta-\nabla\tau).$$ However, as discussed above, these equations say exactly that $$\textstyle\nabla_\perp^2\psi-\frac2n\mathcal{T}heta\psi=\nu,$$ and exactness is shown. $\square$ \section{Tractor bundles}\label{tractors}\label{tractor_bundles} For the rest of the article we suppose that we are given, not only a manifold $M$ with symplectic form~$J_{ab}$, but also a torsion-free connection $\nabla_a$ on the tangent bundle (and hence on all other tensor bundles) such that $\nabla_aJ_{bc}=0$. This is sometimes called a {\em Fedosov structure}~\cite{GRS} on~$M$. 
The curvature $R_{ab}{}^c{}_d$ of~$\nabla_a$, characterised by $$(\nabla_a\nabla_b-\nabla_b\nabla_a)X^c=R_{ab}{}^c{}_dX^d,$$ satisfies $$R_{ab}{}^c{}_d=R_{[ab]}{}^c{}_d\qquad R_{[ab}{}^c{}_{d]}=0\qquad R_{ab}{}^c{}_dJ_{ce}=R_{ab}{}^c{}_eJ_{cd}$$ and enjoys the following decomposition into irreducible parts $$R_{ab}{}^c{}_d=V_{ab}{}^c{}_d+\delta_a{}^c\Phi_{bd}-\delta_b{}^c\Phi_{ad} +J_{ad}\Phi_{be}J^{ce}-J_{bd}\Phi_{ae}J^{ce}+2J_{ab}\Phi_{de}J^{ce},$$ for some symmetric~$\Phi_{ab}$, where $V_{ab}{}^a{}_d=0$ (reflecting the branching $$\begin{picture}(15,10) \put(0,0){\line(1,0){5}} \put(0,5){\line(1,0){15}} \put(0,10){\line(1,0){15}} \put(0,0){\line(0,1){10}} \put(5,0){\line(0,1){10}} \put(10,5){\line(0,1){5}} \put(15,5){\line(0,1){5}} \end{picture}\enskip=\enskip\begin{picture}(17,10) \put(0,0){\line(1,0){5}} \put(0,5){\line(1,0){15}} \put(0,10){\line(1,0){15}} \put(0,0){\line(0,1){10}} \put(5,0){\line(0,1){10}} \put(10,5){\line(0,1){5}} \put(15,5){\line(0,1){5}} \put(17,3){\makebox(0,0){$\scriptstyle\perp$}} \end{picture}\enskip\oplus\enskip\begin{picture}(10,5) \put(0,0){\line(1,0){10}} \put(0,5){\line(1,0){10}} \put(0,0){\line(0,1){5}} \put(5,0){\line(0,1){5}} \put(10,0){\line(0,1){5}} \end{picture}$$ of representations under ${\mathrm{GL}}(2n,{\mathbb{R}})\supset{\mathrm{Sp}}(2n,{\mathbb{R}})$). Notice that \begin{equation}\label{Phi}\textstyle\Phi_{bd}=\frac1{2(n+1)}R_{ab}{}^a{}_d =\frac1{4(n+1)}J^{ae}R_{ae}{}^c{}_bJ_{cd}.\end{equation} We define the {\em standard tractor bundle\/} to be the rank $2n+2$ vector bundle ${\mathcal{T}}\equiv\Wedge^0\oplus\Wedge^1\oplus\Wedge^0$ with its {\em tractor connection\/} $$\textstyle\nabla_a\! 
\left[\!\begin{array}c\sigma\\ \mu_b\\ \rho\end{array}\!\right]= \left[\!\begin{array}c\nabla_a\sigma-\mu_a\\ \nabla_a\mu_b+J_{ab}\rho+\Phi_{ab}\sigma\\ \nabla_a\rho-\Phi_{ab}J^{bc}\mu_c+S_a\sigma \end{array}\!\right]\!,\enskip\mbox{where}\enskip S_a\equiv\frac1{2n+1}J^{bc}\nabla_c\Phi_{ab}.$$ Readers familiar with conformal differential geometry may recognise the form of this connection as following the tractor connection in that setting~\cite{BEG}. If needs be, we shall write {\em symplectic tractor connection\/} to distinguish the connection just defined from any alternatives. We shall need the following curvature identities. \begin{lemma}\label{curvature_identities} Let $Y_{abc}\equiv\frac1{2n+1}\nabla_cV_{ab}{}^c{}_d$. Then \begin{equation}\label{contracted_Bianchi} Y_{abc}=2\nabla_{[a}\Phi_{b]c}-2J_{c[a}S_{b]}+2J_{ab}S_c\end{equation} and \begin{equation}\label{nablaY} \begin{array}{rcr}J^{ad}\nabla_aY_{bcd} &=&J^{ad}V_{bc}{}^e{}_a\Phi_{ed} +4n(J^{ad}\Phi_{ba}\Phi_{cd}-\nabla_{[b}S_{c]})\qquad\\[3pt] &&{}+2J_{bc}J^{ad}(\nabla_aS_d-J^{ef}\Phi_{ae}\Phi_{df}). \end{array}\end{equation} \end{lemma} \begin{proof} Writing the Bianchi identity $\nabla_{[a}R_{bc]}{}^d{}_e=0$ in terms of $V_{ab}{}^c{}_d$ and $\Phi_{ab}$ yields $$\nabla_{[a}V_{bc]}{}^d{}_e=-2\delta_{[b}{}^d\nabla_a\Phi_{c]e} +2J^{df}J_{e[b}\nabla_a\Phi_{c]f}-2J^{df}J_{[bc}\nabla_{a]}\Phi_{ef}.$$ and contracting over ${}_a{}^d$ gives $$\begin{array}{rcr} \frac13\nabla_aV_{bc}{}^a{}_e &=&\frac{4(n-1)}3\nabla_{[b}\Phi_{c]e} +\frac23\big[\nabla_{[b}\Phi_{c]e}-(2n+1)J_{e[b}S_{c]}\big]\qquad\\[3pt] &&{}+\frac23\big[(2n+1)J_{bc}S_e+2\nabla_{[b}\Phi_{c]e}\big], \end{array}$$ which is easily rearranged as~(\ref{contracted_Bianchi}). For (\ref{nablaY}), firstly notice that $$J^{ad}R_{ab}{}^e{}_d=J^{ed}R_{ab}{}^a{}_d=2(n+1)J^{ed}\Phi_{bd}$$ and the Bianchi symmetry may be written as $R_{a[b}{}^e{}_{c]}=-\frac12R_{bc}{}^e{}_a$. 
Thus, $$\begin{array}{rcl}J^{ad}\nabla_a\nabla_b\Phi_{cd} &\!\!=\!\!&\nabla_bJ^{ad}\nabla_a\Phi_{cd} -J^{ad}R_{ab}{}^e{}_c\Phi_{ed}-J^{ad}R_{ab}{}^e{}_d\Phi_{ce}\\[3pt] &\!\!=\!\!&-(2n+1)\nabla_bS_c -J^{ad}R_{ab}{}^e{}_c\Phi_{ed}+2(n+1)J^{de}\Phi_{bd}\Phi_{ce} \end{array}$$ and so $$\textstyle J^{ad}\nabla_a\nabla_{[b}\Phi_{c]d}=-(2n+1)\nabla_{[b}S_{c]} +\frac12J^{ad}R_{bc}{}^e{}_a\Phi_{ed}+2(n+1)J^{de}\Phi_{bd}\Phi_{ce}.$$ {From} (\ref{contracted_Bianchi}) we see that $$J^{ad}\nabla_aY_{bcd}=2J^{ad}\nabla_a\nabla_{[b}\Phi_{c]d} +2\nabla_{[b}S_{c]}+2J_{bc}J^{ad}\nabla_aS_d.$$ Therefore, $$J^{ad}\nabla_aY_{bcd} =J^{ad}R_{bc}{}^e{}_a\Phi_{ed}-4n\nabla_{[b}S_{c]} +4(n+1)J^{de}\Phi_{bd}\Phi_{ce}+2J_{bc}J^{ad}\nabla_aS_d.$$ Finally, $$J^{ad}R_{bc}{}^e{}_a\Phi_{ed} =J^{ad}V_{bc}{}^e{}_a\Phi_{ed} -4J^{ad}\Phi_{ba}\Phi_{cd} -2J_{bc}J^{ad}J^{ef}\Phi_{ae}\Phi_{df},$$ so $$\begin{array}{rcl}J^{ad}\nabla_aY_{bcd} &=&J^{ad}V_{bc}{}^e{}_a\Phi_{ed} +4nJ^{ad}\Phi_{ba}\Phi_{cd} -2J_{bc}J^{ad}J^{ef}\Phi_{ae}\Phi_{df}\\[3pt] &&\quad{}-4n\nabla_{[b}S_{c]} +2J_{bc}J^{ad}\nabla_aS_d, \end{array}$$ which may be rearranged as~(\ref{nablaY}). \end{proof} \begin{prop}\label{tractor_curvature} The tractor connection ${\mathcal{T}}\to\Wedge^1\otimes{\mathcal{T}}$ preserves the non-degenerate skew form $$\left\langle\left[\!\begin{array}c\sigma\\ \mu_b\\ \rho\end{array}\!\right], \left[\!\begin{array}c\tilde\sigma\\ \tilde\mu_c\\ \tilde\rho\end{array}\!\right]\right\rangle\equiv \sigma\tilde\rho+J^{bc}\mu_b\tilde\mu_c-\rho\tilde\sigma$$ and its curvature is given by $$\setlength{\arraycolsep}{1pt}\begin{array}{rcl} (\nabla_a\nabla_b-\nabla_b\nabla_a)\!\! 
\left[\!\begin{array}c\sigma\\ \mu_d\\ \rho\end{array}\!\right] &=&\left[\!\begin{array}{c}0\\ -V_{ab}{}^c{}_d\mu_c +Y_{abd}\sigma\\ -Y_{abc}J^{cd}\mu_d +\frac1{2n}(J^{cd}V_{ab}{}^e{}_c\Phi_{de}-J^{cd}\nabla_cY_{abd})\sigma \end{array}\!\right]\\[20pt] &&+2J_{ab}\!\left[\!\begin{array}{c} \rho\\ J^{ce}\Phi_{cd}\mu_e-S_d\sigma\\ S_cJ^{cd}\mu_d +\frac1{2n}J^{cd}(\nabla_cS_d-J^{ef}\Phi_{ce}\Phi_{df})\sigma \end{array}\!\right]\!\!. \end{array}$$ \end{prop} \begin{proof}We expand $$\left\langle \nabla_a\!\left[\!\begin{array}c\sigma\\ \mu_b\\ \rho\end{array}\!\right], \left[\!\begin{array}c\tilde\sigma\\ \tilde\mu_c\\ \tilde\rho\end{array}\!\right]\right\rangle+\left\langle \left[\!\begin{array}c\sigma\\ \mu_b\\ \rho\end{array}\!\right], \nabla_a\!\left[\!\begin{array}c\tilde\sigma\\ \tilde\mu_c\\ \tilde\rho\end{array}\!\right]\right\rangle$$ to obtain $$\begin{array}{l}(\nabla_a\sigma-\mu_a)\tilde\rho +\sigma(\nabla\tilde\rho-\Phi_{ab}J^{bc}\tilde\mu_c+S_a\tilde\sigma)\\ \enskip{}+J^{bc}(\nabla_a\mu_b+J_{ab}\rho+\Phi_{ab}\sigma)\tilde\mu_c +J^{bc}\mu_b(\nabla_a\tilde\mu_c+J_{ac}\tilde\rho+\Phi_{ac}\tilde\sigma)\\ \quad{}-(\nabla_a\rho-\Phi_{ab}J^{bc}\mu_c+S_a\sigma)\tilde\sigma -\rho(\nabla_a\tilde\sigma-\tilde\mu_a)\end{array}$$ in which all terms cancel save for $$(\nabla_a\sigma)\tilde\rho +\sigma\nabla\tilde\rho +J^{bc}(\nabla_a\mu_b)\tilde\mu_c +J^{bc}\mu_b\nabla_a\tilde\mu_c -(\nabla_a\rho)\tilde\sigma -\rho\nabla_a\tilde\sigma,$$ which reduces to $$\nabla_a\big(\sigma\tilde\rho+J^{bc}\mu_b\tilde\mu_c-\rho\tilde\sigma\big),$$ as required. For the curvature, we readily compute $$\nabla_{[a}\nabla_{b]}\! 
\left[\!\begin{array}c\sigma\\ \mu_d\\ \rho\end{array}\!\right] =\left[\!\begin{array}l\nabla_{[a}\nabla_{b]}\sigma-J_{ba}\rho\\ \nabla_{[a}\nabla_{b]}\mu_d +J_{d[a}\Phi_{b]c}J^{ce}\mu_e -\Phi_{d[a}\mu_{b]} +T_{abd}\sigma\\ \nabla_{[a}\nabla_{b]}\rho- T_{abc}J^{cd}\mu_d +(\nabla_{[a}S_{b]}-J^{cd}\Phi_{ac}\Phi_{bd})\sigma \end{array}\!\right],$$ where $T_{abc}\equiv\nabla_{[a}\Phi_{b]c}-J_{c[a}S_{b]}$. Lemma~\ref{curvature_identities}, however, states that $$\textstyle T_{abc}=\frac12Y_{abc}-J_{ab}S_c$$ and $$\begin{array}{rcl}4n(\nabla_{[a}S_{b]}-J^{cd}\Phi_{ac}\Phi_{bd}) &=&J^{cd}V_{ab}{}^e{}_c\Phi_{de}-J^{cd}\nabla_cY_{abd}\\[3pt] &&{}+2J_{ab}J^{cd}(\nabla_cS_d-J^{ef}\Phi_{ce}\Phi_{df}). \end{array}$$ Therefore, $$\begin{array}{rcl}\nabla_{[a}\nabla_{b]}\! \left[\!\begin{array}c\sigma\\ \mu_d\\ \rho\end{array}\!\right] &=&\left[\!\begin{array}{c}0\\ \nabla_{[a}\nabla_{b]}\mu_d +J_{d[a}\Phi_{b]c}J^{ce}\mu_e -\Phi_{d[a}\mu_{b]} +\frac12Y_{abd}\sigma\\ -\frac12Y_{abc}J^{cd}\mu_d +\frac1{4n}(J^{cd}V_{ab}{}^e{}_c\Phi_{de}-J^{cd}\nabla_cY_{abd})\sigma \end{array}\!\right]\\[20pt] &&\enskip{}+J_{ab}\!\left[\!\begin{array}{c} \rho\\ -S_d\sigma\\ S_cJ^{cd}\mu_d +\frac1{2n}J^{cd}(\nabla_cS_d-J^{ef}\Phi_{ce}\Phi_{df})\sigma \end{array}\!\right]. \end{array}$$ Finally, $$R_{ab}{}^c{}_d\mu_c=V_{ab}{}^c{}_d\mu_c-2\Phi_{d[a}\mu_{b]} +2J_{d[a}\Phi_{b]c}J^{ce}\mu_e +2J_{ab}\Phi_{de}J^{ce}\mu_c,$$ so $$\textstyle\nabla_{[a}\nabla_{b]}\mu_d +J_{d[a}\Phi_{b]c}J^{ce}\mu_e -\Phi_{d[a}\mu_{b]} =-\frac12V_{ab}{}^c{}_d\mu_c-J_{ab}\Phi_{de}J^{ce}\mu_c$$ whence $$\begin{array}{rcl}\nabla_{[a}\nabla_{b]}\! 
\left[\!\begin{array}c\sigma\\ \mu_d\\ \rho\end{array}\!\right] &=&\left[\!\begin{array}{c}0\\ -\frac12V_{ab}{}^c{}_d\mu_c +\frac12Y_{abd}\sigma\\ -\frac12Y_{abc}J^{cd}\mu_d +\frac1{4n}(J^{cd}V_{ab}{}^e{}_c\Phi_{de}-J^{cd}\nabla_cY_{abd})\sigma \end{array}\!\right]\\[20pt] &&\enskip{}+J_{ab}\!\left[\!\begin{array}{c} \rho\\ J^{ce}\Phi_{cd}\mu_e-S_d\sigma\\ S_cJ^{cd}\mu_d +\frac1{2n}J^{cd}(\nabla_cS_d-J^{ef}\Phi_{ce}\Phi_{df})\sigma \end{array}\!\right], \end{array}$$ as required. \end{proof} \begin{cor}\label{symplectically_flat_tractors} The tractor connection is symplectically flat if and only if the curvature tensor $V_{ab}{}^c{}_d$ vanishes. \end{cor} \section{K\"ahler geometry} K\"ahler manifolds provide a familiar source of symplectic manifolds equipped with a compatible torsion-free connection as in~\S\ref{tractors}. In this case, the connection $\nabla_a$ is the Levi-Civita connection of a metric $g_{ab}$ and $J_a{}^b\equiv J_{ac}g^{bc}$ is an almost complex structure on~$M$ whose integrability is equivalent to the vanishing of~$\nabla_aJ_{bc}$. In K\"ahler geometry, the Riemann curvature tensor decomposes into three irreducible parts: \begin{equation}\label{Bochner_in_real_money} \begin{array}{l} R_{ab}{}^c{}_d=U_{ab}{}^c{}_d\\ \enskip{}+\delta_a{}^c\Xi_{bd}-\delta_b{}^c\Xi_{ad} -g_{ad}\Xi_b{}^c+g_{bd}\Xi_a{}^c\\ \quad{}+J_a{}^c\Sigma_{bd} -J_b{}^c\Sigma_{ad} -J_{ad}\Sigma_b{}^c +J_{bd}\Sigma_a{}^c +2J_{ab}\Sigma^c{}_d +2J^c{}_d\Sigma_{ab}\\ \enskip\quad{}+\Lambda(\delta_a{}^cg_{bd}-\delta_b{}^cg_{ad} +J_a{}^cJ_{bd} -J_b{}^cJ_{ad} +2J_{ab}J^c{}_d), \end{array}\end{equation} where indices have been raised using $g^{ab}$ and \begin{itemize} \item $U_{ab}{}^c{}_d$ is totally trace-free with respect to $g^{ab}$, $J_a{}^b$, and $J^{ab}$, \item $\Xi_{ab}$ is trace-free symmetric whilst $\Sigma_{ab}\equiv J_a{}^c\Xi_{bc}$ is skew. 
\end{itemize} Computing the Ricci curvature from this decomposition, we find $$R_{bd}\equiv R_{ab}{}^a{}_d=2(n+2)\Xi_{bd}+2(n+1)\Lambda g_{bd}$$ and therefore from (\ref{Phi}) conclude that $$\textstyle\Phi_{ab}=\frac{n+2}{n+1}\Xi_{ab}+\Lambda g_{ab}.$$ Hence $$\begin{array}{rcl}J_c{}^aR_{ab}{}^c{}_d &=&J_c{}^aV_{ab}{}^c{}_d -J_{bd}\Phi_a{}^a -2J_b{}^a\Phi_{da}\\[3pt] &=&J_c{}^aV_{ab}{}^c{}_d -2\frac{n+2}{n+1}\Sigma_{bd} -2(n+1)\Lambda J_{bd}.\end{array}$$ On the other hand, from (\ref{Bochner_in_real_money}) we find $$J_c{}^aR_{ab}{}^c{}_d= -2(n+2)\Sigma_{bd}-2(n+1)\Lambda J_{bd}$$ and comparing these two expressions gives $$\textstyle J_c{}^aV_{ab}{}^c{}_d-2\frac{n+2}{n+1}\Sigma_{bd}=-2(n+2)\Sigma_{bd}$$ and we have established the following. \begin{prop}\label{Kaehler_consequence} Concerning the symplectic curvature decomposition on a K\"ahler manifold, $$\textstyle J_c{}^aV_{ab}{}^c{}_d =-2\frac{n(n+2)}{n+1}\Sigma_{bd}.$$ \end{prop} \begin{cor}\label{whenKaehlerVvanishes} The symplectic tractor connection on a K\"ahler manifold is symplectically flat if and only if the metric has constant holomorphic sectional curvature. \end{cor} \begin{proof} According to Corollary~\ref{symplectically_flat_tractors}, we have to interpret the constraint $V_{ab}{}^c{}_d=0$ in the K\"ahler case. {From} (\ref{Bochner_in_real_money}) it is already clear that $U_{ab}{}^c{}_d=0$ and Proposition~\ref{Kaehler_consequence} implies that also $\Sigma_{ab}=0$ so (\ref{Bochner_in_real_money}) reduces to $$R_{ab}{}^c{}_d=\Lambda(\delta_a{}^cg_{bd}-\delta_b{}^cg_{ad} +J_a{}^cJ_{bd} -J_b{}^cJ_{ad} +2J_{ab}J^c{}_d),$$ which is exactly the constancy of holomorphic sectional curvature. \end{proof} \section{BGG-like complexes on ${\mathbb{CP}}_n$}\label{BGG-like} Fix a real vector space ${\mathfrak{g}}_{-1}$ of dimension $2n$, let ${\mathfrak{g}}_1$ denote its dual, and fix a non-degenerate $2$-form $J_{ab}\in\Wedge^2{\mathfrak{g}}_1$. 
The $(2n+1)$-dimensional Heisenberg Lie algebra may be realised as $${\mathfrak{h}}={\mathbb{R}}\oplus{\mathfrak{g}}_{-1},$$ where the first summand is the $1$-dimensional centre of ${\mathfrak{h}}$ and the Lie bracket on ${\mathfrak{g}}_{-1}$ is given by $$[X,Y]=2J_{ab}X^aY^b\in{\mathbb{R}}\hookrightarrow{\mathfrak{h}}.$$ We should admit right away that the reason for this seemingly arcane notation is that we shall soon have occasion to write \begin{equation}\label{sp(2n+2,R)} \addtolength{\arraycolsep}{-2pt}{\mathfrak{sp}}(2n+2,{\mathbb{R}}) =\begin{array}[t]{ccccccccc}{\mathfrak{g}}_{-2}&\oplus&{\mathfrak{g}}_{-1} &\oplus&{\mathfrak{g}}_0&\oplus &{\mathfrak{g}}_1&\oplus&{\mathfrak{g}}_2\\ \|&&&&\|&&&&\|\\ {\mathbb{R}} &&&&\makebox[0pt]{${\mathfrak{sp}}(2n,{\mathbb{R}})\oplus{\mathbb{R}}$\quad} &&&&{\mathbb{R}}\end{array}\end{equation} (a $|2|$-graded Lie algebra as in \cite[\S4.2.6]{CS}) and, in particular, regard ${\mathfrak{h}}={\mathbb{R}}\oplus{\mathfrak{g}}_{-1} ={\mathfrak{g}}_{-2}\oplus{\mathfrak{g}}_{-1}$ as a Lie subalgebra of ${\mathfrak{sp}}(2n+2,{\mathbb{R}})$. Be that as it may, let us suppose that ${\mathbb{V}}$ is a finite-dimensional representation of~${\mathfrak{h}}$. The Lie algebra cohomology $H^r({\mathfrak{h}},{\mathbb{V}})$ may be realised as the cohomology of the Chevalley-Eilenberg complex \begin{equation}\label{C-E} 0\to{\mathbb{V}}\to{\mathfrak{h}}^*\otimes{\mathbb{V}}\to \cdots\to\Wedge^r{\mathfrak{h}}^*\otimes{\mathbb{V}}\to \Wedge^{r+1}{\mathfrak{h}}^*\otimes{\mathbb{V}}\to\cdots\end{equation} as, for example, in~\cite[Chapter~IV]{Knapp}. We shall require, however, the following alternative realisation. 
\begin{lemma}\label{LieAlgBGG} There is a complex \begin{equation}\addtolength{\arraycolsep}{-2pt}\label{HeisenbergBGG} \begin{array}{rcccccccccc} 0&\to&{\mathbb{V}}&\stackrel{\partial}{\longrightarrow}& {\mathfrak{g}}_1\otimes{\mathbb{V}} &\stackrel{\partial_\perp}{\longrightarrow}& \Wedge_\perp^2{\mathfrak{g}}_1\otimes{\mathbb{V}} &\stackrel{\partial_\perp}{\longrightarrow}&\cdots &\stackrel{\partial_\perp}{\longrightarrow}& \Wedge_\perp^n{\mathfrak{g}}_1\otimes{\mathbb{V}}\\[2pt] &&&&&&&&&& \big\downarrow\\ 0&\leftarrow&{\mathbb{V}}&\stackrel{\partial_\perp}{\longleftarrow}& {\mathfrak{g}}_1\otimes{\mathbb{V}} &\stackrel{\partial_\perp}{\longleftarrow}& \Wedge_\perp^2{\mathfrak{g}}_1\otimes{\mathbb{V}} &\stackrel{\partial_\perp}{\longleftarrow}&\cdots &\stackrel{\partial_\perp}{\longleftarrow}& \Wedge_\perp^n{\mathfrak{g}}_1\otimes{\mathbb{V}} \end{array}\end{equation} whose cohomology realises $H^r({\mathfrak{h}},{\mathbb{V}})$. Here, we are writing $$\Wedge_\perp^r{\mathfrak{g}}_1 \equiv\{\omega_{abc\cdots d}\in\Wedge^r{\mathfrak{g}}_1\mid J^{ab}\omega_{abc\cdots d}=0\},$$ where $J^{ab}\in\Wedge^2{\mathfrak{g}}_{-1}$ is the inverse of $J_{ab}\in\Wedge^2{\mathfrak{g}}_1$ (let's say normalised so that $J_{ab}J^{ac}=\delta_b{}^c$). \end{lemma} \begin{proof} Notice that any representation $\rho:{\mathfrak{h}}\to\operatorname{End}({\mathbb{V}})$ is determined by its restriction to~${\mathfrak{g}}_{-1}\subset{\mathfrak{h}}$. Indeed, writing $\partial_a:{\mathfrak{g}}_{-1}\to\operatorname{End}({\mathbb{V}})$ for this restriction, to say that $\rho$ is a representation of ${\mathfrak{h}}$ is to say that \begin{equation}\label{rep_of_h}\begin{array}{rcl} (\partial_a\partial_b-\partial_b\partial_a)v &=&2J_{ab}\theta v\\ (\partial_a\theta-\theta\partial_a)v&=&0\end{array} \raisebox{2pt}{$\Big\}\quad\forall\,v\in{\mathbb{V}}$}, \end{equation} where $\theta\in\operatorname{End}({\mathbb{V}})$ is $\rho(1)$ for $1\in{\mathbb{R}}\subset{\mathfrak{h}}$. 
The splitting ${\mathfrak{h}}^*={\mathfrak{g}}_1\oplus{\mathbb{R}}$ allows us to write (\ref{C-E}) as \begin{equation}\label{C-E_rewritten} \addtolength{\arraycolsep}{-2pt}\begin{array}{cccccccccc} {\mathbb{V}}&\longrightarrow&{\mathfrak{h}}^*\otimes{\mathbb{V}} &\longrightarrow&\Wedge^2{\mathfrak{h}}^*\otimes{\mathbb{V}} &\longrightarrow&\Wedge^3{\mathfrak{h}}^*\otimes{\mathbb{V}} &\longrightarrow&\cdots\\ \|&&\|&&\|&&\|\\ {\mathbb{V}}&\longrightarrow&{\mathfrak{g}}_1\otimes{\mathbb{V}} &\longrightarrow&\Wedge^2{\mathfrak{g}}_1\otimes {\mathbb{V}} &\longrightarrow&\Wedge^3{\mathfrak{g}}_1\otimes {\mathbb{V}} &\longrightarrow&\cdots\,,\\ &\begin{picture}(0,0)(0,-3) \put(-9,6){\vector(3,-2){18}}\end{picture} &\oplus &\begin{picture}(0,0)(0,-3) \put(-9,-6){\vector(3,2){18}} \put(-9,6){\vector(3,-2){18}}\end{picture} &\oplus &\begin{picture}(0,0)(0,-3) \put(-9,-6){\vector(3,2){18}} \put(-9,6){\vector(3,-2){18}}\end{picture} &\oplus &\begin{picture}(0,0)(0,-3) \put(-9,-6){\vector(3,2){18}} \put(-9,6){\vector(3,-2){18}}\end{picture}\\ &&{\mathbb{V}}&\longrightarrow &{\mathfrak{g}}_1\otimes{\mathbb{V}} &\longrightarrow &\Wedge^2{\mathfrak{g}}_1\otimes{\mathbb{V}} &\longrightarrow &\cdots \end{array}\end{equation} where the differentials are given by $$v\!\mapsto\!\left[\!\begin{array}{c}\partial_av\\ \theta v\end{array}\!\right] \quad \left[\!\begin{array}{c}\phi_a\\ \eta\end{array}\!\right] \!\mapsto\!\left[\!\begin{array}{c}\partial_{[a}\phi_{b]}-J_{ab}\eta\\ \partial_a\eta-\theta\phi_a\end{array}\!\right] \quad\left[\!\begin{array}{c}\omega_{ab}\\ \psi_a\end{array}\!\right] \!\mapsto\! \left[\!\begin{array}{c}\partial_{[a}\omega_{bc]}+J_{[ab}\psi_{c]}\\ \partial_{[a}\psi_{b]}+\theta\omega_{ab}\end{array}\!\right]$$ et cetera. 
In particular, notice that the homomorphisms \begin{equation}\label{Jwedge} \Wedge^{r-1}{\mathfrak{g}}_1\ni\psi\longmapsto \pm J\wedge\psi\in\Wedge^{r+1}{\mathfrak{g}}_1\end{equation} are \begin{itemize} \item independent of the representation on~${\mathbb{V}}$, \item injective for $1\leq r<n$, \item an isomorphism for $r=n$, \item surjective for $n<r\leq 2n-1$. \end{itemize} Note that $\Wedge_\perp^{r+1}{\mathfrak{g}}_1$ is complementary to the image of (\ref{Jwedge}) for $1\leq r<n$. Also note the isomorphisms $$\Wedge^{2n+1-r}{\mathfrak{g}}_1 \xrightarrow{\,J\wedge J\wedge\cdots\wedge J\,} \Wedge^{r-1}{\mathfrak{g}}_1,\quad\mbox{for}\enskip n<r\leq 2n+1,$$ under which the kernel of (\ref{Jwedge}) may be identified with $$\Wedge^{2n+1-r}_\perp{\mathfrak{g}}_1, \quad\mbox{for}\enskip n<r\leq 2n-1.$$ Diagram chasing in (\ref{C-E_rewritten}) (or the spectral sequence of a filtered complex) finishes the proof. \end{proof} \noindent{\bf Remark.} Evidently, the equations (\ref{rep_of_h}) are algebraic versions of $$\begin{array}{rcl} (\nabla_a\nabla_b-\nabla_b\nabla_a)\Sigma &=&2J_{ab}\Theta \Sigma\\ (\nabla_a\Theta-\Theta\nabla_a)\Sigma&=&0\end{array} \raisebox{2pt}{$\Big\}\quad\forall\,\Sigma\in\Gamma(E)$},$$ which hold for a symplectically flat connection $\nabla_a$ on a smooth vector bundle $E$ on~$M$. Also (\ref{C-E_rewritten}) is the evident algebraic counterpart to the differential complex of Lemma~\ref{key_lemma}. It follows that explicit formul{\ae} for the operators $\partial_\perp$ in the complex (\ref{HeisenbergBGG}) follow the differential versions (\ref{early}) and (\ref{late}) with $\Wedge_\perp^n{\mathfrak{g}}_1\otimes{\mathbb{V}}\to \Wedge_\perp^n{\mathfrak{g}}_1\otimes{\mathbb{V}}$ being given by $\partial_\perp^2-\frac2n\theta$. Let us now consider the tractor connection on ${\mathbb{CP}}_n$. 
According to Theorem~\ref{two}, the remarks following its statement, and the discussions in~\S\ref{tractor_bundles}, this is the connection on ${\mathcal{T}}=\Wedge^0\oplus\Wedge^1\oplus\Wedge^0$ given by $$\nabla_a\!\left[\!\begin{array}c\sigma\\ \mu_b\\ \rho\end{array}\!\right]= \left[\!\begin{array}c\nabla_a\sigma-\mu_a\\ \nabla_a\mu_b+J_{ab}\rho+g_{ab}\sigma\\ \nabla_a\rho-J_a{}^b\mu_b \end{array}\!\right]= \left[\!\begin{array}c\nabla_a\sigma\\ \nabla_a\mu_b+g_{ab}\sigma\\ \nabla_a\rho-J_a{}^b\mu_b \end{array}\!\right] +\left[\!\begin{array}c-\mu_a\\ J_{ab}\rho\\ 0\end{array}\!\right]\!.$$ The induced operator $\nabla:\Wedge^1\otimes{\mathcal{T}}\to\Wedge^2\otimes{\mathcal{T}}$ is $$\left[\!\begin{array}c\sigma_b\\ \mu_{bc}\\ \rho_b\end{array}\!\right]\longmapsto \left[\!\begin{array}c\nabla_{[a}\sigma_{b]}\\ \nabla_{[a}\mu_{b]c}+g_{c[a}\sigma_{b]}\\ \nabla_{[a}\rho_{b]}-J_{[a}{}^c\mu_{b]c} \end{array}\!\right] +\left[\!\begin{array}c\mu_{[ab]}\\ -J_{c[a}\rho_{b]}\\ 0\end{array}\!\right]\!$$ but Corollary~\ref{whenKaehlerVvanishes} says the tractor connection on ${\mathbb{CP}}_n$ is symplectically flat so we should contemplate $\nabla_\perp:\Wedge^1\otimes{\mathcal{T}}\to\Wedge_\perp^2\otimes{\mathcal{T}}$ from Theorem~\ref{one}, viz. 
$$\left[\!\begin{array}c\sigma_b\\ \mu_{bc}\\ \rho_b\end{array}\!\right]\longmapsto \left[\!\begin{array}c \nabla_{[a}\sigma_{b]}-\frac1{2n}J^{cd}\nabla_c\sigma_dJ_{ab}\\ \ldots\\ \ldots\end{array}\!\right] +\left[\!\begin{array}c\mu_{[ab]}-\frac1{2n}J^{cd}\mu_{cd}J_{ab}\\ -J_{c[a}\rho_{b]}-\frac1{2n}\rho_cJ_{ab}\\ 0\end{array}\!\right]\!.$$ From these formul{\ae}, let us focus attention on the homomorphisms \begin{equation}\label{attention_is_focussed} \makebox[0pt]{$\begin{array}{ccccccccc} 0&\to&{\mathcal{T}}&\to&\Wedge^1\otimes{\mathcal{T}}&\to &\Wedge_\perp^2\otimes{\mathcal{T}}&\to&\cdots\\[4pt] &&\left[\!\begin{array}c\sigma\\ \mu_b\\ \rho\end{array}\!\right]&\mapsto &\left[\!\begin{array}c-\mu_a\\ J_{ab}\rho\\ 0\end{array}\!\right]\\[22pt] &&&&\left[\!\begin{array}c\sigma_b\\ \mu_{bc}\\ \rho_b\end{array}\!\right] &\mapsto &\left[\!\begin{array}c\mu_{[ab]}-\frac1{2n}J^{cd}\mu_{cd}J_{ab}\\ -J_{c[a}\rho_{b]}-\frac1{2n}\rho_cJ_{ab}\\ 0\end{array}\!\right] \end{array}$}\end{equation} It is evident that this is a complex and that its cohomology so far is $$\textstyle\Wedge^0\mbox{ in degree }0\quad\mbox{and}\quad \bigodot^2\!\Wedge^1\mbox{ in degree }1.$$ On the other hand, one may check that the defining representation of the Lie algebra ${\mathfrak{sp}}(2n+2,{\mathbb{R}})$ on ${\mathbb{R}}^{2n+2}={\mathbb{R}}\oplus{\mathbb{R}}^{2n}\oplus{\mathbb{R}}$ restricts via (\ref{sp(2n+2,R)}) to a representation of the Heisenberg Lie algebra ${\mathfrak{h}}={\mathbb{R}}\oplus{\mathfrak{g}}_{-1}$, given explicitly by $$\begin{array}[t]{ccc} {\mathbb{R}}^{2n+2}&\xrightarrow{\,\theta\,} &{\mathbb{R}}^{2n+2}\\ \left[\!\begin{array}c\sigma\\ \mu_b\\ \rho\end{array}\!\right]&\longmapsto&\left[\!\begin{array}c\rho\\ 0\\ 0\end{array}\!\right] \end{array}\mbox{\quad and\quad}\begin{array}[t]{ccc} {\mathbb{R}}^{2n+2}&\xrightarrow{\,\partial_a\,} &{\mathfrak{g}}_{1}\otimes{\mathbb{R}}^{2n+2}\\ \left[\!\begin{array}c\sigma\\ \mu_b\\ 
\rho\end{array}\!\right]&\longmapsto&\left[\!\begin{array}c-\mu_a\\ J_{ab}\rho\\ 0\end{array}\!\right] \end{array}$$ (noticing that equations (\ref{rep_of_h}) hold, as they must). We may also find $\theta$ as part of the curvature of the tractor connection on~${\mathbb{CP}}_n$. Specifically, the formula from Proposition~\ref{tractor_curvature} reduces to \begin{equation}\label{tractor_curvature_on_CPn} (\nabla_a\nabla_b-\nabla_b\nabla_a)\! \left[\!\begin{array}c\sigma\\ \mu_d\\ \rho\end{array}\!\right] =2J_{ab}\!\left[\!\begin{array}{c} \rho\\ J_d{}^e\mu_e\\ -\sigma \end{array}\!\right]\end{equation} and we find $\theta$ as the top component of $\Theta:{\mathcal{T}}\to{\mathcal{T}}$ where $\Theta$ is defined by~(\ref{Theta_in_the_symplectically_flat_case}). If we now consider the entire complex from Theorem~\ref{one}, with filtration induced by $$\begin{array}{ccccccc}\Wedge^0&\subset&\Wedge^1\oplus\Wedge^0 &\subset&\Wedge^0\oplus\Wedge^1\oplus\Wedge^0&=&{\mathcal{T}}\\ \left[\!\begin{array}c 0\\ 0\\ \rho\end{array}\!\right] &&\left[\!\begin{array}c 0\\ \mu_b\\ \rho\end{array}\!\right] &&\left[\!\begin{array}c\sigma\\ \mu_b\\ \rho\end{array}\!\right] \end{array}$$ of ${\mathcal{T}}$, then the associated spectral sequence (or corresponding diagram chasing) yields (\ref{attention_is_focussed}) continuing as in (\ref{HeisenbergBGG}) including the middle operator $\nabla_\perp^2-\frac2n\theta:\Wedge_\perp^n\to\Wedge_\perp^n$. The same reasoning pertains for any Fedosov structure with $V_{ab}{}^c{}_d=0$ as in Corollary~\ref{symplectically_flat_tractors}. Evidently, this sequence of vector bundle homomorphisms is induced by the complex (\ref{HeisenbergBGG}) and, together with Lemma~\ref{LieAlgBGG}, the spectral sequence of a filtered complex (or the appropriate diagram chasing) immediately yields the following. 
\begin{thm}\label{towardsBGG} Suppose $\nabla_a$ is a torsion-free connection on a symplectic manifold $(M,J_{ab})$, such that $\nabla_aJ_{bc}=0$ and so that the corresponding curvature tensor $V_{ab}{}^c{}_d$ vanishes. Fix a finite-dimensional representation ${\mathbb{E}}$ of\/ ${\mathrm{Sp}}(2n+2,{\mathbb{R}})$ and let $E$ denote the associated `tractor bundle' induced from the standard tractor bundle and the representation~${\mathbb{E}}$ (so that the standard representation of\/ ${\mathrm{Sp}}(2n+2,{\mathbb{R}})$ on\/ ${\mathbb{R}}^{2n+2}$ yields the standard tractor bundle). In accordance with Corollary~\ref{symplectically_flat_tractors}, the induced `tractor connection' $\nabla:E\to\Wedge^1\otimes E$ is symplectically flat and we may define $\Theta:E\to E$ by~\eqref{Theta_in_the_symplectically_flat_case}. Having done this, there are complexes of differential operators $$\addtolength{\arraycolsep}{-3pt}\begin{array}{rcccccccccc} 0&\to&H^0({\mathfrak{h}},E)&\to&H^1({\mathfrak{h}},E) &\to&H^2({\mathfrak{h}},E) &\to&\cdots &\to&H^n({\mathfrak{h}},E)\\[2pt] &&&&&&&&&& \big\downarrow\\ 0&\leftarrow&H^{2n+1}({\mathfrak{h}},E)&\leftarrow &H^{2n}({\mathfrak{h}},E)&\leftarrow &H^{2n-1}({\mathfrak{h}},E) &\leftarrow&\cdots &\leftarrow&H^{n+1}({\mathfrak{h}},E) \end{array}$$ where $H^r({\mathfrak{h}},E)$ denotes the tensor bundle on $M$ that is induced by the cohomology $H^r({\mathfrak{h}},{\mathbb{E}})$ as a representation of ${\mathrm{Sp}}(2n,{\mathbb{R}})$. This complex is locally exact except near the beginning where $$\ker:H^0({\mathfrak{h}},E)\to H^1({\mathfrak{h}},E) \quad\mbox{and}\quad \frac{\ker:H^1({\mathfrak{h}},E)\to H^2({\mathfrak{h}},E)} {\operatorname{im}:H^0({\mathfrak{h}},E)\to H^1({\mathfrak{h}},E)}$$ may be identified with the locally constant sheaves \underbar{$\ker\Theta$} and \underbar{$\operatorname{coker}\Theta$}, respectively. 
In particular, for\/ ${\mathbb{CP}}_n$ with its Fubini--Study connection, these sheaves vanish and the complex is locally exact everywhere. \end{thm} \begin{proof} It remains only to observe that for the Fubini--Study connection we see from (\ref{tractor_curvature_on_CPn}) that $\Theta:{\mathcal{T}}\to{\mathcal{T}}$ is an isomorphism. \end{proof} \noindent The main point about Theorem~\ref{towardsBGG}, however, is that if the representation ${\mathbb{E}}$ of ${\mathrm{Sp}}(2n+2,{\mathbb{R}})$ is irreducible, then the representations $H^r({\mathfrak{h}},{\mathbb{E}})$ of ${\mathrm{Sp}}(2n,{\mathbb{R}})$ are also irreducible and are computed by a theorem due to Kostant~\cite{K}. Specifically, if we denote the irreducible representations of ${\mathrm{Sp}}(2n+2,{\mathbb{R}})$ and ${\mathrm{Sp}}(2n,{\mathbb{R}})$ by writing the highest weight as a linear combination of fundamental weights and recording the coefficients over the corresponding nodes of the Dynkin diagrams for $C_{n+1}$ and $C_n$, as is often done, then Kostant's Theorem says that $$\begin{array}{ccl}H^0({\mathfrak{h}},\begin{picture}(84,5) \put(4,1.5){\line(1,0){42}} \put(4,1.2){\makebox(0,0){$\bullet$}} \put(16,1.2){\makebox(0,0){$\bullet$}} \put(28,1.2){\makebox(0,0){$\bullet$}} \put(40,1.2){\makebox(0,0){$\bullet$}} \put(55,1.2){\makebox(0,0){$\cdots$}} \put(62,1.5){\line(1,0){6}} \put(68,1.2){\makebox(0,0){$\bullet$}} \put(68,0.5){\line(1,0){12}} \put(68,2.5){\line(1,0){12}} \put(74,1.5){\makebox(0,0){$\langle$}} \put(80,1.2){\makebox(0,0){$\bullet$}} \put(4,6){\makebox(0,0)[b]{$\scriptstyle a$}} \put(16,6){\makebox(0,0)[b]{$\scriptstyle b$}} \put(28,6){\makebox(0,0)[b]{$\scriptstyle c$}} \put(40,6){\makebox(0,0)[b]{$\scriptstyle d$}} \put(68,6){\makebox(0,0)[b]{$\scriptstyle e$}} \put(80,6){\makebox(0,0)[b]{$\scriptstyle f$}} \end{picture})&=&\begin{picture}(72,5) \put(4,1.5){\line(1,0){30}} \put(4,1.2){\makebox(0,0){$\bullet$}} \put(16,1.2){\makebox(0,0){$\bullet$}} 
\put(28,1.2){\makebox(0,0){$\bullet$}} \put(43,1.2){\makebox(0,0){$\cdots$}} \put(50,1.5){\line(1,0){6}} \put(56,1.2){\makebox(0,0){$\bullet$}} \put(56,0.5){\line(1,0){12}} \put(56,2.5){\line(1,0){12}} \put(62,1.5){\makebox(0,0){$\langle$}} \put(68,1.2){\makebox(0,0){$\bullet$}} \put(4,6){\makebox(0,0)[b]{$\scriptstyle b$}} \put(16,6){\makebox(0,0)[b]{$\scriptstyle c$}} \put(28,6){\makebox(0,0)[b]{$\scriptstyle d$}} \put(56,6){\makebox(0,0)[b]{$\scriptstyle e$}} \put(68,6){\makebox(0,0)[b]{$\scriptstyle f$}} \end{picture}\\[4pt] H^1({\mathfrak{h}},\begin{picture}(84,5) \put(4,1.5){\line(1,0){42}} \put(4,1.2){\makebox(0,0){$\bullet$}} \put(16,1.2){\makebox(0,0){$\bullet$}} \put(28,1.2){\makebox(0,0){$\bullet$}} \put(40,1.2){\makebox(0,0){$\bullet$}} \put(55,1.2){\makebox(0,0){$\cdots$}} \put(62,1.5){\line(1,0){6}} \put(68,1.2){\makebox(0,0){$\bullet$}} \put(68,0.5){\line(1,0){12}} \put(68,2.5){\line(1,0){12}} \put(74,1.5){\makebox(0,0){$\langle$}} \put(80,1.2){\makebox(0,0){$\bullet$}} \put(4,6){\makebox(0,0)[b]{$\scriptstyle a$}} \put(16,6){\makebox(0,0)[b]{$\scriptstyle b$}} \put(28,6){\makebox(0,0)[b]{$\scriptstyle c$}} \put(40,6){\makebox(0,0)[b]{$\scriptstyle d$}} \put(68,6){\makebox(0,0)[b]{$\scriptstyle e$}} \put(80,6){\makebox(0,0)[b]{$\scriptstyle f$}} \end{picture})&=&\begin{picture}(88,5)(-8,0) \put(4,1.5){\line(1,0){38}} \put(4,1.2){\makebox(0,0){$\bullet$}} \put(24,1.2){\makebox(0,0){$\bullet$}} \put(36,1.2){\makebox(0,0){$\bullet$}} \put(51,1.2){\makebox(0,0){$\cdots$}} \put(58,1.5){\line(1,0){6}} \put(64,1.2){\makebox(0,0){$\bullet$}} \put(64,0.5){\line(1,0){12}} \put(64,2.5){\line(1,0){12}} \put(70,1.5){\makebox(0,0){$\langle$}} \put(76,1.2){\makebox(0,0){$\bullet$}} \put(4,6){\makebox(0,0)[b]{$\scriptstyle a+b+1$}} \put(24,6){\makebox(0,0)[b]{$\scriptstyle c$}} \put(36,6){\makebox(0,0)[b]{$\scriptstyle d$}} \put(64,6){\makebox(0,0)[b]{$\scriptstyle e$}} \put(76,6){\makebox(0,0)[b]{$\scriptstyle f$}} \end{picture}\\[4pt] 
H^2({\mathfrak{h}},\begin{picture}(84,5) \put(4,1.5){\line(1,0){42}} \put(4,1.2){\makebox(0,0){$\bullet$}} \put(16,1.2){\makebox(0,0){$\bullet$}} \put(28,1.2){\makebox(0,0){$\bullet$}} \put(40,1.2){\makebox(0,0){$\bullet$}} \put(55,1.2){\makebox(0,0){$\cdots$}} \put(62,1.5){\line(1,0){6}} \put(68,1.2){\makebox(0,0){$\bullet$}} \put(68,0.5){\line(1,0){12}} \put(68,2.5){\line(1,0){12}} \put(74,1.5){\makebox(0,0){$\langle$}} \put(80,1.2){\makebox(0,0){$\bullet$}} \put(4,6){\makebox(0,0)[b]{$\scriptstyle a$}} \put(16,6){\makebox(0,0)[b]{$\scriptstyle b$}} \put(28,6){\makebox(0,0)[b]{$\scriptstyle c$}} \put(40,6){\makebox(0,0)[b]{$\scriptstyle d$}} \put(68,6){\makebox(0,0)[b]{$\scriptstyle e$}} \put(80,6){\makebox(0,0)[b]{$\scriptstyle f$}} \end{picture})&=&\begin{picture}(88,5)(0,0) \put(4,1.5){\line(1,0){46}} \put(4,1.2){\makebox(0,0){$\bullet$}} \put(24,1.2){\makebox(0,0){$\bullet$}} \put(44,1.2){\makebox(0,0){$\bullet$}} \put(59,1.2){\makebox(0,0){$\cdots$}} \put(66,1.5){\line(1,0){6}} \put(72,1.2){\makebox(0,0){$\bullet$}} \put(72,0.5){\line(1,0){12}} \put(72,2.5){\line(1,0){12}} \put(78,1.5){\makebox(0,0){$\langle$}} \put(84,1.2){\makebox(0,0){$\bullet$}} \put(4,6){\makebox(0,0)[b]{$\scriptstyle a$}} \put(24,6){\makebox(0,0)[b]{$\scriptstyle b+c+1$}} \put(44,6){\makebox(0,0)[b]{$\scriptstyle d$}} \put(72,6){\makebox(0,0)[b]{$\scriptstyle e$}} \put(84,6){\makebox(0,0)[b]{$\scriptstyle f$}} \end{picture}\\[4pt] H^3({\mathfrak{h}},\begin{picture}(84,5) \put(4,1.5){\line(1,0){42}} \put(4,1.2){\makebox(0,0){$\bullet$}} \put(16,1.2){\makebox(0,0){$\bullet$}} \put(28,1.2){\makebox(0,0){$\bullet$}} \put(40,1.2){\makebox(0,0){$\bullet$}} \put(55,1.2){\makebox(0,0){$\cdots$}} \put(62,1.5){\line(1,0){6}} \put(68,1.2){\makebox(0,0){$\bullet$}} \put(68,0.5){\line(1,0){12}} \put(68,2.5){\line(1,0){12}} \put(74,1.5){\makebox(0,0){$\langle$}} \put(80,1.2){\makebox(0,0){$\bullet$}} \put(4,6){\makebox(0,0)[b]{$\scriptstyle a$}} \put(16,6){\makebox(0,0)[b]{$\scriptstyle b$}} 
\put(28,6){\makebox(0,0)[b]{$\scriptstyle c$}} \put(40,6){\makebox(0,0)[b]{$\scriptstyle d$}} \put(68,6){\makebox(0,0)[b]{$\scriptstyle e$}} \put(80,6){\makebox(0,0)[b]{$\scriptstyle f$}} \end{picture})&=&\begin{picture}(88,5)(0,0) \put(4,1.5){\line(1,0){46}} \put(4,1.2){\makebox(0,0){$\bullet$}} \put(16,1.2){\makebox(0,0){$\bullet$}} \put(36,1.2){\makebox(0,0){$\bullet$}} \put(59,1.2){\makebox(0,0){$\cdots$}} \put(66,1.5){\line(1,0){6}} \put(72,1.2){\makebox(0,0){$\bullet$}} \put(72,0.5){\line(1,0){12}} \put(72,2.5){\line(1,0){12}} \put(78,1.5){\makebox(0,0){$\langle$}} \put(84,1.2){\makebox(0,0){$\bullet$}} \put(4,6){\makebox(0,0)[b]{$\scriptstyle a$}} \put(16,6){\makebox(0,0)[b]{$\scriptstyle b$}} \put(36,6){\makebox(0,0)[b]{$\scriptstyle c+d+1$}} \put(72,6){\makebox(0,0)[b]{$\scriptstyle e$}} \put(84,6){\makebox(0,0)[b]{$\scriptstyle f$}} \end{picture}\\ \vdots\\ H^n({\mathfrak{h}},\begin{picture}(84,5) \put(4,1.5){\line(1,0){42}} \put(4,1.2){\makebox(0,0){$\bullet$}} \put(16,1.2){\makebox(0,0){$\bullet$}} \put(28,1.2){\makebox(0,0){$\bullet$}} \put(40,1.2){\makebox(0,0){$\bullet$}} \put(55,1.2){\makebox(0,0){$\cdots$}} \put(62,1.5){\line(1,0){6}} \put(68,1.2){\makebox(0,0){$\bullet$}} \put(68,0.5){\line(1,0){12}} \put(68,2.5){\line(1,0){12}} \put(74,1.5){\makebox(0,0){$\langle$}} \put(80,1.2){\makebox(0,0){$\bullet$}} \put(4,6){\makebox(0,0)[b]{$\scriptstyle a$}} \put(16,6){\makebox(0,0)[b]{$\scriptstyle b$}} \put(28,6){\makebox(0,0)[b]{$\scriptstyle c$}} \put(40,6){\makebox(0,0)[b]{$\scriptstyle d$}} \put(68,6){\makebox(0,0)[b]{$\scriptstyle e$}} \put(80,6){\makebox(0,0)[b]{$\scriptstyle f$}} \end{picture})&=& \begin{picture}(84,5) \put(4,1.5){\line(1,0){30}} \put(4,1.2){\makebox(0,0){$\bullet$}} \put(16,1.2){\makebox(0,0){$\bullet$}} \put(28,1.2){\makebox(0,0){$\bullet$}} \put(43,1.2){\makebox(0,0){$\cdots$}} \put(50,1.5){\line(1,0){6}} \put(56,1.2){\makebox(0,0){$\bullet$}} \put(56,0.5){\line(1,0){22}} \put(56,2.5){\line(1,0){22}} 
\put(67,1.5){\makebox(0,0){$\langle$}} \put(78,1.2){\makebox(0,0){$\bullet$}} \put(4,6){\makebox(0,0)[b]{$\scriptstyle a$}} \put(16,6){\makebox(0,0)[b]{$\scriptstyle b$}} \put(28,6){\makebox(0,0)[b]{$\scriptstyle c$}} \put(56,6){\makebox(0,0)[b]{$\scriptstyle $}} \put(78,6){\makebox(0,0)[b]{$\scriptstyle e+f+1$}} \end{picture} \end{array}$$ and for $r\geq n+1$, there are isomorphisms $H^r({\mathfrak{h}},{\mathbb{E}})=H^{2n+1-r}({\mathfrak{h}},{\mathbb{E}})$. Using the same notation for the bundles $H^r({\mathfrak{h}},E)$, the complexes of Theorem~\ref{towardsBGG} become $$\begin{array}{rl}\begin{picture}(72,5) \put(4,1.5){\line(1,0){30}} \put(4,1.2){\makebox(0,0){$\bullet$}} \put(16,1.2){\makebox(0,0){$\bullet$}} \put(28,1.2){\makebox(0,0){$\bullet$}} \put(43,1.2){\makebox(0,0){$\cdots$}} \put(50,1.5){\line(1,0){6}} \put(56,1.2){\makebox(0,0){$\bullet$}} \put(56,0.5){\line(1,0){12}} \put(56,2.5){\line(1,0){12}} \put(62,1.5){\makebox(0,0){$\langle$}} \put(68,1.2){\makebox(0,0){$\bullet$}} \put(4,6){\makebox(0,0)[b]{$\scriptstyle b$}} \put(16,6){\makebox(0,0)[b]{$\scriptstyle c$}} \put(28,6){\makebox(0,0)[b]{$\scriptstyle d$}} \put(56,6){\makebox(0,0)[b]{$\scriptstyle e$}} \put(68,6){\makebox(0,0)[b]{$\scriptstyle f$}} \end{picture} &\xrightarrow{\,\nabla^{a+1}\,} \begin{picture}(88,5)(-8,0) \put(4,1.5){\line(1,0){38}} \put(4,1.2){\makebox(0,0){$\bullet$}} \put(24,1.2){\makebox(0,0){$\bullet$}} \put(36,1.2){\makebox(0,0){$\bullet$}} \put(51,1.2){\makebox(0,0){$\cdots$}} \put(58,1.5){\line(1,0){6}} \put(64,1.2){\makebox(0,0){$\bullet$}} \put(64,0.5){\line(1,0){12}} \put(64,2.5){\line(1,0){12}} \put(70,1.5){\makebox(0,0){$\langle$}} \put(76,1.2){\makebox(0,0){$\bullet$}} \put(4,6){\makebox(0,0)[b]{$\scriptstyle a+b+1$}} \put(24,6){\makebox(0,0)[b]{$\scriptstyle c$}} \put(36,6){\makebox(0,0)[b]{$\scriptstyle d$}} \put(64,6){\makebox(0,0)[b]{$\scriptstyle e$}} \put(76,6){\makebox(0,0)[b]{$\scriptstyle f$}} \end{picture}\\ &\enskip{}\xrightarrow{\,\nabla^{b+1}\,} 
\begin{picture}(88,5)(0,0) \put(4,1.5){\line(1,0){46}} \put(4,1.2){\makebox(0,0){$\bullet$}} \put(24,1.2){\makebox(0,0){$\bullet$}} \put(44,1.2){\makebox(0,0){$\bullet$}} \put(59,1.2){\makebox(0,0){$\cdots$}} \put(66,1.5){\line(1,0){6}} \put(72,1.2){\makebox(0,0){$\bullet$}} \put(72,0.5){\line(1,0){12}} \put(72,2.5){\line(1,0){12}} \put(78,1.5){\makebox(0,0){$\langle$}} \put(84,1.2){\makebox(0,0){$\bullet$}} \put(4,6){\makebox(0,0)[b]{$\scriptstyle a$}} \put(24,6){\makebox(0,0)[b]{$\scriptstyle b+c+1$}} \put(44,6){\makebox(0,0)[b]{$\scriptstyle d$}} \put(72,6){\makebox(0,0)[b]{$\scriptstyle e$}} \put(84,6){\makebox(0,0)[b]{$\scriptstyle f$}} \end{picture}\\ &\quad{}\xrightarrow{\,\nabla^{c+1}\,} \begin{picture}(88,5)(0,0) \put(4,1.5){\line(1,0){46}} \put(4,1.2){\makebox(0,0){$\bullet$}} \put(16,1.2){\makebox(0,0){$\bullet$}} \put(36,1.2){\makebox(0,0){$\bullet$}} \put(59,1.2){\makebox(0,0){$\cdots$}} \put(66,1.5){\line(1,0){6}} \put(72,1.2){\makebox(0,0){$\bullet$}} \put(72,0.5){\line(1,0){12}} \put(72,2.5){\line(1,0){12}} \put(78,1.5){\makebox(0,0){$\langle$}} \put(84,1.2){\makebox(0,0){$\bullet$}} \put(4,6){\makebox(0,0)[b]{$\scriptstyle a$}} \put(16,6){\makebox(0,0)[b]{$\scriptstyle b$}} \put(36,6){\makebox(0,0)[b]{$\scriptstyle c+d+1$}} \put(72,6){\makebox(0,0)[b]{$\scriptstyle e$}} \put(84,6){\makebox(0,0)[b]{$\scriptstyle f$}} \end{picture}\\[-4pt] &\qquad\vdots\\ &\qquad{}\xrightarrow{\,\nabla^{e+1}\,} \begin{picture}(84,5) \put(4,1.5){\line(1,0){30}} \put(4,1.2){\makebox(0,0){$\bullet$}} \put(16,1.2){\makebox(0,0){$\bullet$}} \put(28,1.2){\makebox(0,0){$\bullet$}} \put(43,1.2){\makebox(0,0){$\cdots$}} \put(50,1.5){\line(1,0){6}} \put(56,1.2){\makebox(0,0){$\bullet$}} \put(56,0.5){\line(1,0){22}} \put(56,2.5){\line(1,0){22}} \put(67,1.5){\makebox(0,0){$\langle$}} \put(78,1.2){\makebox(0,0){$\bullet$}} \put(4,6){\makebox(0,0)[b]{$\scriptstyle a$}} \put(16,6){\makebox(0,0)[b]{$\scriptstyle b$}} \put(28,6){\makebox(0,0)[b]{$\scriptstyle c$}} 
\put(56,6){\makebox(0,0)[b]{$\scriptstyle $}} \put(78,6){\makebox(0,0)[b]{$\scriptstyle e+f+1$}} \end{picture}\\ &\enskip\qquad{}\xrightarrow{\,\nabla^{2f+2}\,} \begin{picture}(84,5) \put(4,1.5){\line(1,0){30}} \put(4,1.2){\makebox(0,0){$\bullet$}} \put(16,1.2){\makebox(0,0){$\bullet$}} \put(28,1.2){\makebox(0,0){$\bullet$}} \put(43,1.2){\makebox(0,0){$\cdots$}} \put(50,1.5){\line(1,0){6}} \put(56,1.2){\makebox(0,0){$\bullet$}} \put(56,0.5){\line(1,0){22}} \put(56,2.5){\line(1,0){22}} \put(67,1.5){\makebox(0,0){$\langle$}} \put(78,1.2){\makebox(0,0){$\bullet$}} \put(4,6){\makebox(0,0)[b]{$\scriptstyle a$}} \put(16,6){\makebox(0,0)[b]{$\scriptstyle b$}} \put(28,6){\makebox(0,0)[b]{$\scriptstyle c$}} \put(56,6){\makebox(0,0)[b]{$\scriptstyle $}} \put(78,6){\makebox(0,0)[b]{$\scriptstyle e+f+1$}} \end{picture}\\ &\qquad{}\xrightarrow{\,\nabla^{e+1}\,}\cdots\\ &\qquad\vdots\\ &\xrightarrow{\,\nabla^{a+1}\,} \begin{picture}(72,5) \put(4,1.5){\line(1,0){30}} \put(4,1.2){\makebox(0,0){$\bullet$}} \put(16,1.2){\makebox(0,0){$\bullet$}} \put(28,1.2){\makebox(0,0){$\bullet$}} \put(43,1.2){\makebox(0,0){$\cdots$}} \put(50,1.5){\line(1,0){6}} \put(56,1.2){\makebox(0,0){$\bullet$}} \put(56,0.5){\line(1,0){12}} \put(56,2.5){\line(1,0){12}} \put(62,1.5){\makebox(0,0){$\langle$}} \put(68,1.2){\makebox(0,0){$\bullet$}} \put(4,6){\makebox(0,0)[b]{$\scriptstyle b$}} \put(16,6){\makebox(0,0)[b]{$\scriptstyle c$}} \put(28,6){\makebox(0,0)[b]{$\scriptstyle d$}} \put(56,6){\makebox(0,0)[b]{$\scriptstyle e$}} \put(68,6){\makebox(0,0)[b]{$\scriptstyle f$}} \end{picture}, \end{array}$$ for arbitrary non-negative integers $a,b,c,d,\cdots,e,f$. When all these integers are zero, this is the Rumin--Seshadri complex. Just the first three terms in this complex, in the special case when only $a$ is non-zero, are already essential in~\cite{EG}. 
For example, if $a=1$, then the first two differential operators are $$\sigma\mapsto\nabla_a\nabla_b\sigma+\Phi_{ab}\sigma \quad\mbox{and}\quad \phi_{bc}\mapsto \big(\nabla_a\phi_{bc}-\nabla_b\phi_{ac}\big)_\perp$$ where $\phi_{bc}$ is symmetric and $(\enskip)_\perp$ means to take the trace-free part with respect to $J_{ab}$. {From} the curvature decomposition and Bianchi identity we find that their composition is $$\sigma\longmapsto V_{ab}{}^d{}_c\nabla_d\sigma+Y_{abc}\sigma,$$ which vanishes in case $V_{ab}{}^c{}_d=0$. In case $\Theta$ is invertible, as for the Fubini--Study connection, we conclude that this sequence of differential operators is locally exact. \end{document}
\begin{document} \title{\LARGE \bf Optimal Local and Remote Controls of Multiple Systems with Multiplicative Noises and Unreliable Uplink Channels } \thispagestyle{empty} \pagestyle{empty} \begin{abstract} In this paper, the optimal local and remote linear quadratic (LQ) control problem is studied for {a networked control system (NCS) which consists of multiple subsystems, each of which is described by a general multiplicative noise stochastic system with one local controller and one remote controller}. Due to the unreliable uplink channels, the remote controller {can only access} unreliable state information of all subsystems, while the downlink channels from {the remote controller to the local controllers} are perfect. The difficulties of the LQ control problem {for such a system} arise from the different information structures of {the local controllers and the remote controller}. By developing the Pontryagin maximum principle, the necessary and sufficient solvability conditions are derived, which are based on the solution to a group of forward and backward stochastic difference equations (G-FBSDEs). Furthermore, by proposing a new method to decouple the G-FBSDEs and introducing new coupled Riccati equations (CREs), the optimal control strategies are derived {where we verify} that the separation principle holds for {the} multiplicative noise {NCSs with packet dropouts}. This paper can be seen as an important {contribution to the} optimal control problem with asymmetric information structures. \end{abstract} \begin{IEEEkeywords} Multiplicative noise system, multiple subsystems, optimal local and remote controls, Pontryagin maximum principle. \end{IEEEkeywords} \section{Introduction} As is well known, NCSs are systems in which actuators, sensors, and controllers exchange information through {a shared bandwidth limited} digital communication network. 
The research on NCSs has attracted {significant interest} in recent years, due to the {advantages of NCSs such as low cost and simple installation}, see \cite{hnx2007,iyb2006,wyb2002,ssfps2007,zbp2001,qz2017,qz2018,xxq2012} and the cited references therein. {On the other hand, unreliable wireless communication channels and limited bandwidth make NCSs less reliable \cite{wyb2002,ssfps2007,zbp2001}}, which may cause performance loss and destabilize the NCSs. Thus, it is necessary to investigate control problems for NCSs with unreliable communication channels. In this paper, {we investigate multiplicative noise NCSs (MN-NCSs) with local and remote controls and unreliable uplink channels.} As shown in Figure 1, the NCS is composed of $L$ subsystems, and each subsystem is regulated by one local controller and one remote controller. {Due to the limited communication capability of each subsystem, the uplink channel from a local controller to the remote controller is affected by packet dropouts, while the downlink channel from the remote controller to each local controller is perfect. The LQ optimal control problem is considered in this paper. Our aim is to design $L$ local controllers and the remote controller such that a given cost function is minimized. Due to the uncertainty of the uplink channels, the information sets available to the local controllers and the remote controller are different. 
Hence, we are concerned with the optimal control problem with an asymmetric information structure, which poses major challenges.} \begin{figure} \caption{System model: Multiple subsystem NCSs with unreliable uplink channels from the local controllers to the remote controller, and perfect downlink channels from the remote controller to the local controllers.} \label{fig1:1} \end{figure} The pioneering study of asymmetric information control can be traced back to the 1960s, and it is well-known that finding optimal controls for an asymmetric-information control problem is difficult, see \cite{w1968b,b2008,rrj2016,lm2011}. For example, \cite{w1968b} gives the celebrated Witsenhausen's Counterexample {for which} the associated explicit optimal control with asymmetric information structures still remains to be solved. The optimal local and remote control problems were first studied in \cite{nmt2013, aon2018}. By using {the common information approach}, the optimal local and remote controls are derived. {Following \cite{aon2018}, an elementary proof of the common information approach} was given in \cite{am2019}. Furthermore, \cite{lx2018} studies the optimal local and remote control problem with only one subsystem by using the maximum principle approach. It should be pointed out that previous works \cite{aon2018,oan2016,lx2018, oan2018b, am2019, nmt2013} {focused on NCSs with additive noise}. Different from {the} previous works, this paper investigates the local control and remote control problem for networked systems with multiplicative noise and multiple subsystems. The motivations of this paper are: On one hand, multiplicative noise systems exist in many applications. The existence of multiplicative noise results in the non-Gaussian property of the NCSs, see \cite{j1976,b1976, zlxf2015,rz2000,w1968, qzw2019}. 
On the other hand, the unreliable uplink {channels result in that the state estimation errors are} involved with the control inputs, which may {lead to the failure of the separation principle}. In other words, it remains difficult to design the optimal output feedback control for MN-NCSs with unreliable uplink channels, see \cite{j1976,aon2018,lx2018, oan2018b}. Furthermore, the optimal local and remote control problem for multiple subsystems can be regarded as a special case of {optimal control for multi-agent systems}, {for which the} optimal decentralized/distributed control design remains challenging, see \cite{yx2010,ml2014}. Based on the above discussions, we can conclude {that the study of the} local and remote control problem for MN-NCSs with multiple subsystems {has} the following difficulties and challenges: 1) Due to the possible failure of {the} separation principle for MN-NCSs with unreliable uplink channels, the {derivation of the} optimal ``output feedback" controllers remains challenging. 2) {When the Pontryagin maximum principle is adopted}, how to decouple a group of forward and backward difference equations (G-FBSDEs) is difficult and unsolved. In this paper, the optimal local and remote control problem for MN-NCSs with multiple subsystems is solved. Firstly, by developing the Pontryagin maximum principle, we show that the optimal control problem under consideration is uniquely solved if and only if a group of FBSDEs (G-FBSDEs) is uniquely solved; Consequently, a method is proposed to decouple the G-FBSDEs, it is shown that the solution to the original G-FBSDEs can be given by decoupling new G-FBSDEs {and} introducing new information filtrations. Furthermore, the optimal local and remote controllers are derived based on the solution to coupled CREs, which are asymmetric. {As special cases, the additive noise NCSs case, the single subsystem case, and the indefinite weighting matrices case are also investigated}. 
As far as we know, the obtained results are new and innovative in the following aspects: 1) Compared with the common information approach adopted in previous works, the structure of the optimal local controllers and remote controller is not assumed in advance, see Lemma 3 in \cite{am2019}; 2) In this paper, the multiple subsystems case is solved by using {the} Pontryagin maximum principle, while previous works focused on {the} single subsystem case \cite{lx2018}; 3) The existence of multiplicative noise results in that the state estimate error and the error covariance are involved with the controls, {which may cause the separation principle to fail}. To overcome this, new {asymmetric} CREs are introduced; it is verified that {the} separation principle holds for the considered optimization problem, i.e., the optimal local controllers and optimal remote controller can be designed, and the control gain matrices and the estimation gain matrix can be calculated separately; 4) It is noted that the obtained results {include} the results in \cite{lx2018,am2019,aon2018} as special cases. The rest of the paper is organized as {follows}. Section II investigates the existence of the optimal control strategies. The main results are presented in Section III, {where} the optimal local controllers and optimal remote controller are derived by decoupling the G-FBSDEs. Then, some discussions are given in Section IV. {In Section V, numerical examples are presented to illustrate the main results. We conclude this paper in Section VI. 
Finally, the proof of the main results is given in the Appendix.} The {following} notations will be used throughout this paper: $\mathbb{R}^n$ denotes the $n$-dimensional Euclidean space, and $A^T$ means the transpose of matrix $A$; {The superscript of $A^i, i=1,\cdots, L$ means the $i$-th subsystem, and the superscript of $A^{\textbf{k}}$ denotes $A$ to the power of $k$.} Symmetric matrix $M>0$ ($\geq 0$) {means that $M$ is positive definite (positive semi-definite)}; $\mathbb{R}^{n}$ {is the} $n$-dimensional Euclidean space; $I_n$ denotes the identity matrix {of dimension $n$}; $E[\cdot]$ means the mathematical expectation and $E[\cdot|Y]$ signifies the conditional expectation with respect to $Y$. $\mathcal{N}(\mu, \Sigma)$ {is} the normal distribution with mean $\mu$ and covariance $\Sigma$, and $Pr(A)$ denotes the probability {of the occurrence of event $A$}. $\mathcal{F}(X)$ means the filtration generated by random variable/vector $X$, $Tr(\cdot)$ means the trace of a matrix, $vec(x^1, x^2,x^3,\cdots)$ means the vector $[(x^1)^T, (x^2)^T, (x^3)^T, \cdots]^T$, and $\sigma(X)$ denotes the $\sigma$-algebra generated by random vector $X$. \section{Existence of Optimal Control Strategy} \subsection{Problem Formulation} The system dynamics of the $i$-th subsystem ($i=1,\cdots,L$) is given by \begin{align}\label{sm-1} x_{k+1}^i=& [A^i+w^i_k\bar{A}^i]x^i_k+[B^{i}+w^i_k\bar{B}^{i}]u^i_k\notag\\ &+[B^{i0}+w^i_k\bar{B}^{i0}]u^0_k+v^i_k, \end{align} where $x^i_k\in \mathbb{R}^{n_i}$ is the system state of the $i$-th subsystem at time $k$, $A^i, \bar{A}^i\in\mathbb{R}^{n_i\times n_i}, B^{i},\bar{B}^i\in \mathbb{R}^{n_i\times m_i}, B^{i0},\bar{B}^{i0}\in \mathbb{R}^{n_i\times m_{0}}$ are the given system matrices, both $w^i_k\in \mathbb{R}$ and $v^i_k\in \mathbb{R}^{n_i}$ are Gaussian {white noises satisfying $w_k^i\sim\mathcal{N}(0,\Sigma_{w^i}), v^i_k\sim\mathcal{N}(0,\Sigma_{v^i})$}. The initial state $x^i_0\sim\mathcal{N}(\mu^i,\Sigma_{x_0^i})$. 
$u^i_k\in\mathbb{R}^{m_i}$ is {the} local controller of {the} $i$-th subsystem, and $u^0_k\in\mathbb{R}^{m_{0}}$ is the remote controller. {Since the uplink channels are unreliable, let binary random variables $\gamma_k^i, i=1,\cdots,L$ with probability distribution $Pr(\gamma_k^i=1)=p^i$ indicate whether a packet is successfully transmitted, i.e., $\gamma_k^i=1$ means {that} the packet is successfully transmitted from the $i$-th local controller to the remote controller, and {that the transmission fails} otherwise. $p^i, i=1,\cdots,L$ is called the packet arrival rate (accordingly, $1-p^i$ is the packet dropout rate). Moreover, we assume $x_0, \{\gamma_k^i\}_{k=0}^{N}, \{w_k^i\}_{k=0}^{N}, \{v_k^i\}_{k=0}^{N} $ are independent of each other for all $i=1,\cdots,L$.} For the sake of discussion, the following notations are introduced: {\small\begin{align}\label{nota1} X_k&= vec(x_k^1,\cdots, x_k^L), U_k=vec(u^0_k, u_k^1, \cdots, u_k^L),\notag\\ V_k&= vec(v_k^1, \cdots, v_k^L), \mathbb{N}_L=\sum_{i=1}^{L}n_i, \mathbb{M}_L=\sum_{i=0}^{L}m_i,\notag\\ A&=\left[\hspace{-2mm} \begin{array}{cccc} A^1&\cdots&0\\ \vdots&\ddots&\vdots\\ 0&\cdots&A^L \end{array} \hspace{-2mm}\right],B=\left[\hspace{-2mm} \begin{array}{cccc} B^{10}&B^1&\cdots&0\\ \vdots&\vdots&\ddots&\vdots\\ B^{L0}&0&\cdots&B^L \end{array} \hspace{-2mm}\right],\notag\\ \bar{\mathbf{A}}^i&=\left[\hspace{-2mm} \begin{array}{ccccc} \text{\Huge{0}}&&\cdots &&\text{\Huge{0}}\\ &\ddots&&&\\ \vdots&&\bar{A}^i &&\vdots\\ &&&\ddots&\\ \text{\Huge{0}}&&\cdots &&\text{\Huge{0}} \end{array} \hspace{-2mm}\right]_{\mathbb{N}_L\times \mathbb{N}_L},\notag\\ \bar{\mathbf{B}}^i&=\left[\hspace{-2mm} \begin{array}{cccccc} \bar{B}^{10}&\text{\Huge{0}}&&\cdots &&\text{\Huge{0}}\\ &&\ddots&&&\\ \vdots& \vdots&&\bar{B}^i &&\vdots\\ &&&&\ddots&\\ \bar{B}^{L0}&\text{\Huge{0}}&&\cdots &&\text{\Huge{0}} \end{array} \hspace{-2mm}\right]_{\mathbb{N}_L\times \mathbb{M}_L},\notag\\ \Gamma_k&=\left[\hspace{-2mm} \begin{array}{cccc} \gamma^1_kI_{n_1}&\cdots&0\\ \vdots&\ddots&\vdots\\ 0&\cdots&\gamma^L_kI_{n_L} \end{array} 
\hspace{-2mm}\right], p=\left[\hspace{-2mm} \begin{array}{cccc} p^1I_{n_1}&\cdots&0\\ \vdots&\ddots&\vdots\\ 0&\cdots&p^LI_{n_L} \end{array} \hspace{-2mm}\right]. \end{align}} Using the notations in \eqref{nota1}, we can rewrite system \eqref{sm-1} as \begin{align}\label{asys} X_{k+1}&=AX_k+B U_k+\sum_{i=1}^{L} w_k^i(\bar{\mathbf{A}}^iX_k+\bar{\mathbf{B}}^iU_k)+V_k \end{align} with initial $X_0=vec(x_0^1, \cdots, x_0^L)$. Associated with system \eqref{sm-1}, the following cost function is introduced \begin{align}\label{pi} J_N & \hspace{-1mm}=\hspace{-1mm}E\left[\sum_{k=0}^{N}(X^T_kQX_k \hspace{-1mm}+\hspace{-1mm}U^T_kRU_k)\hspace{-1mm}+\hspace{-1mm}X_{N+1}^TP_{N+1}X_{N+1}\right], \end{align} where $Q,R, P_{N+1}$ are symmetric weighting matrices of appropriate dimensions with \begin{align}\label{nota2} Q&=\left[\hspace{-2mm} \begin{array}{cccc} Q^{11}&\cdots&Q^{1L}\\ \vdots&\ddots&\vdots\\ Q^{L1}&\cdots&Q^{LL} \end{array} \hspace{-2mm}\right], R=\left[\hspace{-2mm} \begin{array}{cccc} R^{00}&\cdots&R^{0L}\\ \vdots&\ddots&\vdots\\ R^{L0}&\cdots&R^{LL} \end{array} \hspace{-2mm}\right],\notag\\ P_{N+1}&=\left[\hspace{-2mm} \begin{array}{cccc} P_{N+1}^{11}&\cdots&P_{N+1}^{1L}\\ \vdots&\ddots&\vdots\\ P_{N+1}^{L1}&\cdots&P_{N+1}^{LL} \end{array} \hspace{-2mm}\right], \end{align} and block matrices $Q^{ij}, P_{N+1}^{ij}\in\mathbb{R}^{n_i\times n_j}, i,j=1,\cdots,L$, $R^{ij}\in\mathbb{R}^{m_i\times m_j}, i,j=0,\cdots,L$. Corresponding with the system model described in Figure 1, the feasibility of controllers $u_k^i, i=0,\cdots,L$ is given in the following assumption. 
\begin{assumption}\label{ass1} The remote controller $u^0_k$ is $\mathcal{F}^0_k$-measurable, and the local controller $u^i_k$ is measurable with respect to $\mathcal{F}^i_k$, $i=1,\cdots,L$, where \begin{align} \mathcal{F}^0_k&=\sigma\Big\{\{\Gamma_m\}_{m=0}^{k}, \{\Gamma_mX_m\}_{m=0}^{k}, \{u^0_m\}_{m=0}^{k-1}\Big\},\label{mwrt}\\ \mathcal{F}^i_k&=\sigma\Big\{\{\Gamma_m\}_{m=0}^{k}, \{\Gamma_mX_m\}_{m=0}^{k}, \{u^0_m\}_{m=0}^{k-1},\notag\\ &~~~~~~~~~~~~~~~~~~~~~~~~~~~\text{and}~\{u^i_m\}_{m=0}^{k-1},\{x^i_m\}_{m=0}^{k}\Big\}.\label{mwrt2} \end{align} \end{assumption} It can be easily judged from \eqref{mwrt}-\eqref{mwrt2} that $\mathcal{F}^0_k\subset \mathcal{F}^i_k, i=1,\cdots,L$. Next, we will introduce the LQ control problem to be solved in this paper. \begin{problem}\label{prob1} For system \eqref{sm-1}-\eqref{asys}, find $\mathcal{F}^i_k$-measurable control $u_k^i$ to minimize cost function \eqref{pi}, where $k=0,\cdots,N, i=0,\cdots,L$. \end{problem} Throughout this paper, the assumption on the weighting matrices of \eqref{pi} is given as follows. \begin{assumption}\label{ass2} $Q\geq 0, R>0$, and $P_{N+1}\geq 0$. \end{assumption} \begin{remark} It is stressed that Problem \ref{prob1} has not been solved {in the existing literatures}. {The} previous works \cite{aon2018,am2019,lx2018,oan2016,nmt2013} mainly focused on additive noise {systems, and their} multiplicative noise counterpart remains less investigated. {The existence of unreliable uplink channels for multiplicative noise systems may result in the failure of ``separation principle", making the design of optimal control {in Problem \ref{prob1}} difficult. Furthermore, the} maximum principle was adopted to solve only the single subsystem case in \cite{lx2018}, while the multiple subsystems case hasn't been solved in the framework of maximum principle. 
{The main challenge for the multiple subsystems case is that the solution for the G-FBSDEs is difficult.} \end{remark} \subsection{Necessary and Sufficient Solvability Conditions} In this section, we will derive the necessary and sufficient solvability conditions for Problem \ref{prob1}. \begin{lemma}\label{lem1} Given system \eqref{sm-1}-\eqref{asys} and cost function \eqref{pi}, for $i=1,\cdots,L, k=0,\cdots,N$, let $u^{i,\varepsilon}_k=u^i_k+\varepsilon \delta u^i_k$, {where $\varepsilon\in \mathbb{R}^1$ and $\delta u_k^i$ is $\mathcal{F}_k^i$-measurable satisfying $\sum_{k=0}^{N}E[ (\delta u^i_k)^T\delta u^i_k]<+\infty$}. Denote by $x^{i,\varepsilon}_k$ and $J_N^\varepsilon$ the corresponding state and cost function {associated with} $u^{i,\varepsilon}_k$, $U^{\varepsilon}_k=vec(u^{0,\varepsilon}_k~\cdots~u^{L,\varepsilon}_k)$, $X^{\varepsilon}_k=vec(x^{1,\varepsilon}_k~\cdots~x^{L,\varepsilon}_k)$, and $\delta U_k=vec(\delta u_k^0, \delta u_k^1, \cdots, \delta u_k^L)$. Then we have \begin{align}\label{diff} J_N^\varepsilon-J_N&=\varepsilon^\textbf{2}\Big\{\sum_{k=0}^{N}E\{Y_k^TQY_k+\delta U_k^TR\delta U_k\}\notag\\ &~~~~~~~~+E[Y^T_{N+1}P_{N+1}Y_{N+1}]\Big\}\\ &+2\varepsilon \sum_{k=0}^{N}E\{[(B+\sum_{i=1}^{L}w_k^i\bar{\mathbf{B}}^i)^T\Theta_k+RU_k]^T\delta U_k\},\notag \end{align} where {$Y_k$ satisfies the iteration} \begin{align}\label{yk} Y_{k+1}&=[A+\sum_{i=1}^{L}w_k^i\bar{\mathbf{A}}^i]Y_k+[B+\sum_{i=1}^{L}w_k^i\bar{\mathbf{B}}^i]\delta U_k \end{align} with initial condition $Y_0=0$, {and} $\Theta_k=vec(\Theta_k^1,\cdots,\Theta_k^L)$ ($k=0,\cdots,N$) satisfies the iteration \begin{align}\label{coss} \Theta_{k-1}&=E\left[(A+\sum_{i=1}^{L}w_k^i\bar{\mathbf{A}}^i)^T\Theta_k+QX_k\Bigg|\mathcal{G}_k\right], \end{align} {with terminal condition $\Theta_N=P_{N+1}X_{N+1}$ and the information filtration $\mathcal{G}_k$ given by} \begin{align}\label{maxf} \mathcal{G}_k&\hspace{-1mm}=\hspace{-1mm}\sigma\Big\{\{\Gamma_m\}_{m=0}^{k}, \{U_m\}_{m=0}^{k-1}, \{X_m\}_{m=0}^{k}\Big\}. 
\end{align} \end{lemma} \begin{proof} By setting $Y_k=\frac{X^\varepsilon_k-X_k}{\varepsilon}$, we know that $Y_0=0$ and \eqref{yk} holds. Subsequently, it can be calculated {that} \begin{align*} & J_N^\varepsilon-J_N\notag\\ &=\sum_{k=0}^{N}E\Big[[X_k+\varepsilon Y_k]^TQ[X_k+\varepsilon Y_k]\notag\\ &+[U_k+\varepsilon \delta U_k]^TR[U_k+\varepsilon \delta U_k]\Big]\notag\\ &+E[X_{N+1}+\varepsilon Y_{N+1}]^TP_{N+1}[X_{N+1}+\varepsilon Y_{N+1}]\notag\\ &-EX^T_{N+1}P_{N+1}X_{N+1} -\sum_{k=0}^{N}E\Big[X^T_kQX_k+U^T_kRU_k\Big]\notag\\ &=2\varepsilon E[\sum_{k=0}^{N}[X^T_kQY_k+\delta U^T_kRU_k] +Y^T_{N+1}P_{N+1}X_{N+1} ]\notag\\ &+\varepsilon^2E[\sum_{k=0}^{N}[Y^T_kQY_k+\delta U^T_kR \delta U_k]+Y^T_{N+1}P_{N+1}Y_{N+1}]. \end{align*} {Using \eqref{yk}-\eqref{coss}, and noting $Y_k$ is $\mathcal{G}_k$-measurable}, we have \begin{align*} &E[\sum_{k=0}^{N}[X^T_kQY_k+\delta U^T_kRU_k] +Y^T_{N+1}P_{N+1}X_{N+1} ]\notag\\ &=E\Big[\sum_{k=0}^{N}\{\Theta_{k-1}-E[(A+\sum_{i=1}^{L}w_k^i\bar{\mathbf{A}}^i)^T\Theta_k|\mathcal{G}_k]\}^TY_k\notag\\ &+\delta U^T_kRU_k+\Theta^T_NY_{N+1} \Big]\notag\\ &=E\Big[\sum_{k=0}^{N}[(B+\sum_{i=1}^{L}w_k^i\bar{\mathbf{B}}^i)^T\Theta_k+RU_k]^T\delta U_k\Big], \end{align*} which ends the proof. \end{proof} Accordingly, we have the following results. \begin{theorem}\label{th1} Under Assumptions \ref{ass1} and \ref{ass2}, Problem \ref{prob1} can be uniquely solved if and only if the equilibrium condition \begin{align}\label{coseq} 0&=RU_k+E\left[(B+\sum_{i=1}^{L}w_k^i\bar{\mathbf{B}}^i)^T\Theta_k\Bigg|\mathcal{G}_k \right] \end{align} {can be uniquely solved,} where the costate $\Theta_k$ satisfies \eqref{coss}. \end{theorem} \begin{proof} `Necessity': Suppose Problem \ref{prob1} is uniquely {solvable and} $U_k=vec(u^{1}_k, \cdots, u_k^L)$ are optimal controls for $k=0,\cdots,N$. 
Using the symbols in Lemma \ref{lem1} and from \eqref{diff} we know that, for arbitrary $\delta U_k$ and $\varepsilon\in \mathbb{R}$, there holds \begin{align}\label{diff2} J_N^\varepsilon-J_N&=\varepsilon^2\delta J_N\notag\\ &+2\varepsilon \sum_{k=0}^{N}E\Big[[(B+\sum_{i=1}^{L}w_k^i\bar{\mathbf{B}}^i)^T\Theta_k+RU_k]^T\delta U_k\Big]\notag\\ &\geq 0, \end{align} where \begin{align}\label{deljn} \delta J_N&=\sum_{k=0}^{N}E\Big[Y_k^TQY_k+\delta U_k^TR\delta U_k\Big]\notag\\ &~~~~~~~~+E[Y^T_{N+1}P_{N+1}Y_{N+1}]. \end{align} {Observe from Assumption \ref{ass2} that} $\delta J_N\geq 0$. Next we will show \eqref{coseq} holds. Suppose, {by contradiction, that \eqref{coseq} is not satisfied. Let} \begin{align} \label{u11} &RU_k+E\left[[B+\sum_{i=1}^{L}w_k^i\bar{\mathbf{B}}^i]^T\Theta_k\Bigg|\mathcal{G}_k\right] =\tau_k \neq 0. \end{align} In this case, if we choose $\delta U_k=\tau_k$, then from \eqref{diff2} we have \begin{align*} J_N^\varepsilon-J_N= 2\varepsilon\sum_{k=0}^{N} E[\tau_k^T\tau_k] +\varepsilon^2\delta J_N. \end{align*} {Note that} we can always find some $\varepsilon<0$ such that {$J_N^\varepsilon-J_N<0$}, which contradicts \eqref{diff2}. Thus, $\tau_k=0$. This ends the necessity proof. `Sufficiency': If \eqref{coseq} is uniquely solvable, we shall show that Problem \ref{prob1} is uniquely {solvable} under Assumptions \ref{ass1}-\ref{ass2}. Actually, from \eqref{diff} we know that for any $\varepsilon\in\mathbb{R}$, $J_N^\varepsilon-J_N=\varepsilon^2\delta J_N\geq 0$, which means that Problem \ref{prob1} is uniquely solvable. \end{proof} It is noted that the system dynamics \eqref{sm-1} and \eqref{asys} are forward while the costate equation \eqref{coss} is backward; hence \eqref{sm-1}, \eqref{asys}, \eqref{coss} and \eqref{coseq} constitute the G-FBSDEs. 
For the convenience of discussion, we denote the following G-FBSDEs composed of \eqref{sm-1}, \eqref{asys}, \eqref{coss} and \eqref{coseq}: {\small\begin{equation}\label{gfbs} \left\{ \begin{array}{ll} x_{k+1}^i&= [A^i+w^i_k\bar{A}^i]x^i_k+[B^{i}+w^i_k\bar{B}^{i}]u^i_k\\ &+[B^{i0}+w^i_k\bar{B}^{i0}]u^0_k+v^i_k,~x_0^i, i=1,\cdots,L,\\ X_{k+1}&=(A+\sum_{i=1}^{L}w_k^i\bar{\mathbf{A}}^i)X_k\\ &+(B+\sum_{i=1}^{L}w_k^i\bar{\mathbf{B}}^i)U_k+V_k,\\ \Theta_{k-1}&=E\left[(A+\sum_{i=1}^{L}w_k^i\bar{\mathbf{A}}^i)^T\Theta_k+QX_k|\mathcal{G}_k\right],\\ \Theta_N&=P_{N+1}X_{N+1},\\ 0&=RU_k+E\left[(B+\sum_{i=1}^{L}w_k^i\bar{\mathbf{B}}^i)^T\Theta_k\Big|\mathcal{G}_k \right]. \end{array} \right. \end{equation}} \begin{remark} The necessary and sufficient solvability conditions of Problem \ref{prob1} {given in Theorem 1} are presented for the first time, which are based on the solution to G-FBSDEs \eqref{gfbs}. Consequently, to derive the optimal control strategies $u_k^i, i=0\cdots,L, k=0,\cdots,N$, we will {find a} method of decoupling G-FBSDEs \eqref{gfbs}. \end{remark} Consequently, we will introduce some preliminary results. \begin{lemma}\label{lem3} {Denote} $\hat{u}_k^i=E[u_k^i|\mathcal{F}_k^0]$, then the following relationship holds: \begin{align}\label{rela1} E[u_k^j|\mathcal{G}_k^i]=\left\{ \begin{array}{ll} \hat{u}_k^i,&j=i,\\ u_k^j, &j\neq i, \end{array} \right. \end{align} where the information filtration $\mathcal{G}_k^i$ is given by \begin{align}\label{hki} \mathcal{G}_k^i&=\sigma\Big\{\{\Gamma_m\}_{m=0}^{k}, \{\Gamma_mX_m\}_{m=0}^{k},\{U_m\}_{m=0}^{k-1},\\ &~~~~~~~\text{and}~\{x_m^j\}_{m=0}^{k},j=1,\cdots,i-1,i+1,\cdots,L\Big\}.\notag \end{align} \end{lemma} \begin{proof} Due to the independence of $u_k^i$ and $\{x_m^j\}_{m=0}^{k}$, $\{u_m^j\}_{m=0}^{k}, j\neq i$, \eqref{rela1} can be obtained by using the properties of conditional expectation. \end{proof} By using Lemma \ref{lem3}, the following result can be derived. 
\begin{lemma}\label{lem4} Under Assumptions \ref{ass1} and \ref{ass2}, the equilibrium condition \eqref{coseq} can be rewritten as: \begin{align} 0&=R\hat{U}_k+E[(B+\sum_{i=1}^{L}w_k^i\bar{\mathbf{B}}^i)^T\Theta_k|\mathcal{F}^0_k],\label{adeqs}\\ 0 &=R\tilde{U}_{k}+E[(B+\sum_{i=1}^{L}w_k^i\bar{\mathbf{B}}^i)^T\Theta_{k}|\mathcal{G}_{k}]\notag\\ &~~~~~~~~~~~~~-E[(B+\sum_{i=1}^{L}w_k^i\bar{\mathbf{B}}^i)^T\Theta_{k}|\mathcal{F}^0_{k}], \label{2adeqs}\\ 0&=R^{ii}\tilde{u}_k^i+E[(B^i+w_k^i\bar{B}^i)^T\Theta_k^i|\mathcal{G}_k]\notag\\ &~~~~~~~~~~~~~-E[(B^i+w_k^i\bar{B}^i)^T\Theta_k^i|\mathcal{G}_k^i],\label{adeqs2} \end{align} where $ \hat{U}_k=vec(u^0_k,\hat{u}^1_k,\cdots, \hat{u}^L_k)$, $\tilde{U}_k=U_k-\hat{U}_k$ and $ \Theta_k=vec(\Theta^0_k,\Theta^1_k,\cdots, \Theta^L_k)$ satisfies \eqref{coss}. \end{lemma} \begin{proof} The results can be easily derived from Lemma \ref{lem3}. \end{proof} In view of Lemma \ref{lem4}, the G-FBSDEs \eqref{gfbs} can be equivalently presented as {\small\begin{equation}\label{gfbs2} \left\{ \begin{array}{ll} x_{k+1}^i&= [A^i+w^i_k\bar{A}^i]x^i_k+[B^{i}+w^i_k\bar{B}^{i}]u^i_k\\ &+[B^{i0}+w^i_k\bar{B}^{i0}]u^0_k+v^i_k,\\ X_{k+1}&=(A+\sum_{i=1}^{L}w_k^i\bar{\mathbf{A}}^i)X_k\\ &+(B+\sum_{i=1}^{L}w_k^i\bar{\mathbf{B}}^i)U_k+V_k,\\ \Theta_{k-1}&=E\left[(A+\sum_{i=1}^{L}w_k^i\bar{\mathbf{A}}^i)^T\Theta_k+QX_k|\mathcal{G}_k\right],\\ \Theta_N&=P_{N+1}X_{N+1},\\ 0&=R\hat{U}_k+E[(B+\sum_{i=1}^{L}w_k^i\bar{\mathbf{B}}^i)^T\Theta_k|\mathcal{F}^0_k],\\ 0 &=R\tilde{U}_{k}+E[(B+\sum_{i=1}^{L}w_k^i\bar{\mathbf{B}}^i)^T\Theta_{k}|\mathcal{G}_{k}]\\ &-E[(B+\sum_{i=1}^{L}w_k^i\bar{\mathbf{B}}^i)^T\Theta_{k}|\mathcal{F}^0_{k}],\\ 0&=R^{ii}\tilde{u}_k^i+E[(B^i+w_k^i\bar{B}^i)^T\Theta_k^i|\mathcal{G}_k]\\ &-E[(B^i+w_k^i\bar{B}^i)^T\Theta_k^i|\mathcal{G}_k^i]. \end{array} \right. \end{equation}} In the following lemma, we will introduce the preliminary results on the optimal estimation and the associated state estimation error. 
\begin{lemma}\label{lemma2} The optimal estimation $\hat{x}^i_k\triangleq E[x^i_k|\mathcal{F}^0_k], i=1,\cdots, L$ and $\hat{X}_k\triangleq E[X_k|\mathcal{F}^0_k]$ can be calculated by \begin{align} \hat{x}^i_{k+1} &= (1-\gamma^i_{k+1})(A^i\hat{x}^i_k+B^i\hat{u}^i_k+B^{i0}u^0_k)\notag\\ &+\gamma^i_{k+1}x^i_{k+1},\label{oe1}\\ \hat{X}_{k+1}&=(I_{\mathbb{N}_L}-\Gamma_{k+1})(A\hat{X}_k+B\hat{U}_k)+\Gamma_{k+1} X_{k+1},\label{oe2} \end{align} { with initial conditions $\hat{x}_{0}^i=(1-\gamma_{0}^i)\mu^i+\gamma^i_{0}x_{0}^i$, $\hat{X}_0=(I_{\mathbb{N}_L}-\Gamma_{0})\mu+\Gamma_{0} X_{0}$ and $\mu=vec(\mu^1,\cdots,\mu^L)$.} In this case, the estimation errors $\tilde{x}_k^i=x_k^i-\hat{x}_k^i, i=1,\cdots,L$ and $\tilde{X}_k=X_k-\hat{X}_k$ satisfy \begin{align} \tilde{x}_{k+1}^i&=(1-\gamma^i_{k+1})\Big[ A^i\tilde{x}^i_{k}+B^i\tilde{u}^i_{k}\notag\\ &+w_{k}^i(\bar{A}^ix_{k}^i+\bar{B}^i(\hat{u}^i_{k}+\tilde{u}^i_k))+v_k^i\Big],\label{tild1}\\ \tilde{X}_{k+1}&=(I_{\mathbb{N}_L}-\Gamma_{k+1})\Big[ A\tilde{X}_{k}+B\tilde{U}_{k}\notag\\ &+\sum_{i=1}^{L}w_k^i(\bar{\mathbf{A}}^iX_{k} +\bar{\mathbf{B}}^i(\hat{U}_{k}+\tilde{U}_k))+V_{k}\Big].\label{tild2} \end{align} \end{lemma} \begin{proof} The detailed proof can be found in \cite{ssfps2007, qz2017}, which is omitted here. \end{proof} \begin{remark} It can be {observed from \eqref{tild1}-\eqref{tild2} that the controls are involved with the state estimation error, which} is the key difference from the additive noise case (i.e., $w_k^i=0$), see \cite{aon2018,am2019,lx2018,oan2016,nmt2013}. Moreover, since G-FBSDEs \eqref{gfbs} and G-FBSDEs \eqref{gfbs2} {are equivalent}, we will derive the optimal controls by solving \eqref{gfbs2} instead. \end{remark} \section{Optimal Controls by Decoupling G-FBSDEs} In this section, the optimal controls $u_k^i, i=0,\cdots,L$ will be derived via decoupling the G-FBSDEs \eqref{gfbs} (equivalently G-FBSDEs \eqref{gfbs2}). 
Firstly, the following CREs are introduced: {\small\begin{equation}\label{re} \left\{ \begin{array}{ll} P_k&=Q+A^TP_{k+1}A +\sum_{i=1}^{L}\Sigma_{w^i}(\bar{\mathbf{A}}^i)^TL_{k+1} \bar{\mathbf{A}}^i\\ &-\Psi_k^T\Lambda_k^{-1}\Psi_k,\\ H_k&=Q+A^TL_{k+1}A+\sum_{i=1}^{L}\Sigma_{w^i}(\bar{\mathbf{A}}^i)^TL_{k+1} \bar{\mathbf{A}}^i\\ &-\tilde{\Psi}_k^T\tilde{\Lambda}_k^{-1}\tilde{\Psi}_k,\\ P_k^{i}&=Q^{ii}+(A^i)^TP_{k+1}^{i}A^i +\Sigma_{w^i}(\bar{A}^i)^TL_{k+1}^{i}\bar{A}^i\\ &-(\Omega_k^i)^T(\Pi_k^i)^{-1}\Omega_k^i,\\ H_k^{i}&=Q^{ii}+(A^i)^TL_{k+1}^{i}A^i +\Sigma_{w^i}(\bar{A}^i)^TL_{k+1}^{i}\bar{A}^i\\ &-(\tilde{\Omega}_k^i)^T(\tilde{\Pi}_k^i)^{-1}\tilde{\Omega}_k^i, \end{array} \right. \end{equation}} with terminal conditions $L_{N+1}=H_{N+1}=P_{N+1}$, $L_{N+1}^{i}=H_{N+1}^{i}=P_{N+1}^{i}$ given in \eqref{pi}, and the coefficients matrices $\Lambda_k, \Psi_k, \tilde{\Lambda}_k, \tilde{\Psi}_k, \Pi_k^i, \Omega_k^i, \tilde{\Pi}_k^i, \tilde{\Omega}_k^i, L_k, L_k^{i}$ in \eqref{re} {being} given by {\small\begin{equation}\label{coma} \left\{ \begin{array}{ll} \Lambda_k&=R+B^TP_{k+1}B+\sum_{i=1}^{L}\Sigma_{w^i}(\bar{\mathbf{B}}^i)^TL_{k+1} \bar{\mathbf{B}}^i,\\ \Psi_k&=B^TP_{k+1}A+\sum_{i=1}^{L}\Sigma_{w^i}(\bar{\mathbf{B}}^i)^TL_{k+1} \bar{\mathbf{A}}^i,\\ \tilde{\Lambda}_k&=R+B^TL_{k+1}B+\sum_{i=1}^{L}\Sigma_{w^i}(\bar{\mathbf{B}}^i)^TL_{k+1} \bar{\mathbf{B}}^i,\\ \tilde{\Psi}_k&=B^TL_{k+1}A+\sum_{i=1}^{L}\Sigma_{w^i}(\bar{\mathbf{B}}^i)^TL_{k+1} \bar{\mathbf{A}}^i,\\ \Pi^i_k&=R^{ii}+(B^i)^TP_{k+1}^{i}B^i+\Sigma_{w^i}(\bar{B}^i)^TL_{k+1}^{i}\bar{B}^i,\\ \Omega_k^i&=(B^i)^TP_{k+1}^{i}A^i+\Sigma_{w^i}(\bar{B}^i)^TL_{k+1}^{i}\bar{A}^i,\\ \tilde{\Pi}^i_k&=R^{ii}+(B^i)^TL_{k+1}^{i}B^i+\Sigma_{w^i}(\bar{B}^i)^TL_{k+1}^{i}\bar{B}^i,\\ \tilde{\Omega}_k^i&=(B^i)^TL_{k+1}^{i}A^i+\Sigma_{w^i}(\bar{B}^i)^TL_{k+1}^{i}\bar{A}^i,\\ L_k&=P_k p+H_k(I_{\mathbb{N}_L}-p), \\ L_k^{i}&=p^iP_k^{i}+(1-p^i)H_k^{i}. \end{array} \right. 
\end{equation}} \begin{remark} For CREs \eqref{re}, the following points should be noted: \begin{itemize} \item The CREs \eqref{re} are well defined (i.e., can be calculated recursively backward in time) if and only if $\Lambda_k, \tilde{\Lambda}_k$, $\Pi_k^i$ and $\tilde{\Pi}_k^i$ are invertible. \item Different from traditional symmetric Riccati equation \cite{lvs2015}, $P_k, H_k$ in \eqref{re} are {asymmetric}, while {$P_k^{i}$ and $H_k^{i}$} are symmetric. \end{itemize} \end{remark} \begin{lemma}\label{lem6} Under Assumptions \ref{ass1} and \ref{ass2}, $\Pi_k^i, \tilde{\Pi}_k^i$ are positive definite for $k=0,\cdots,N$. \end{lemma} \begin{proof} To facilitate the discussions, we denote: \begin{align}\label{gki} g_k^i&=-(\Pi_k^i)^{-1}\Omega_k^i,~~ \tilde{g}_k^i=-(\tilde{\Pi}_k^i)^{-1}\tilde{\Omega}_k^i. \end{align} With $k=N$, from \eqref{re}-\eqref{gki} we know that {\small\begin{equation}\label{euqil} \left\{ \begin{array}{ll} (\Omega_N^i)^T(\Pi_N^i)^{-1}\Omega_N^i&=(g_N^i)^T\Pi_N^ig_N^i\\ &=-(g_N^i)^T\Omega_N^i=-(\Omega_N^i)^Tg_N^i,\\ (\tilde{\Omega}_N^i)^T(\tilde{\Pi}_N^i)^{-1}\tilde{\Omega}_N^i &=(\tilde{g}_N^i)^T\tilde{\Pi}_N^i\tilde{g}_N^i\\ &=-(\tilde{g}_N^i)^T\tilde{\Omega}_N^i=-(\tilde{\Omega}_N^i)^T\tilde{g}_N^i. \end{array} \right. \end{equation}} Then we can rewrite $P_N^{i}$ in CREs \eqref{re} as \begin{align}\label{pd1} P_N^{i}&=Q^{ii}+(A^i)^TP_{N+1}^{i}A^i +\Sigma_{w^i}(\bar{A}^i)^TL_{N+1}^{i}\bar{A}^i\notag\\ &-(\Omega_N^i)^T(\Pi_N^i)^{-1}\Omega_N^i\notag\\ &=Q^{ii}+(A^i)^TP_{N+1}^{i}A^i +\Sigma_{w^i}(\bar{A}^i)^TL_{N+1}^{i}\bar{A}^i\notag\\ &+({g}_N^i)^T{\Pi}_N^i{g}_N^i +({g}_N^i)^T{\Omega}_N^i+({\Omega}_N^i)^T{g}_N^i\notag\\ &=Q^{ii}\hspace{-1mm}+\hspace{-1mm}({g}_N^i)^TR^{ii}{g}_N^i \hspace{-1mm}+\hspace{-1mm}(A^i+B^i{g}_N^i)^TP_{N+1}^{i}(A^i+B^i{g}_N^i)\notag\\ &+\Sigma_{w^i}(\bar{A}^i+\bar{B}^ig_N^i)^TL_{N+1}^{i}(\bar{A}^i+\bar{B}^ig_N^i). 
\end{align} Similarly, it is not hard to obtain {that} \begin{align}\label{pd2} H_N^{i} & =Q^{ii}\hspace{-1mm}+\hspace{-1mm}(\tilde{g}_N^i)^TR^{ii}\tilde{g}_N^i \hspace{-1mm}+\hspace{-1mm} (A^i+B^i\tilde{g}_N^i)^TL_{N+1}^{i}(A^i+B^i\tilde{g}_N^i)\notag\\ &+\Sigma_{w^i}(\bar{A}^i+\bar{B}^i\tilde{g}_N^i)^T L_{N+1}^{i}(\bar{A}^i+\bar{B}^i\tilde{g}_N^i). \end{align} From Assumption \ref{ass2}, {we know that} $P_N^{i}$ and $H_N^{i}$ are both positive semidefinite, then it can be {observed} from \eqref{coma} that $\Pi_N^i>0$ and $\tilde{\Pi}_N^i>0$. By repeating the above procedures backward in time, we can conclude that $\Pi_k^i$ and $\tilde{\Pi}_k^i$ are positive definite for $k=0,\cdots,N$. This ends the proof. \end{proof} {With} the preliminaries introduced in Lemmas \ref{lem1}-\ref{lem6}, {we are in a position to present the solution to Problem \ref{prob1}}. \begin{theorem}\label{th2} Under Assumptions \ref{ass1} and \ref{ass2}, Problem \ref{prob1} is uniquely solvable if and only if $\Lambda_k$ and $\tilde{\Lambda}_k$ given by \eqref{coma} are invertible. In this case, the optimal controls $u^i_k, k=0,\cdots,N, i=0,\cdots,L$ minimizing cost function \eqref{pi} are given by \begin{equation}\label{uk} \left\{ \begin{array}{ll} u_k^0&=\mathcal{I}^0\hat{U}_k,\\ u_k^i&=\mathcal{I}^i\hat{U}_k+\tilde{u}_k^i, i=1,\cdots,L, \end{array} \right. 
\end{equation} where {\begin{align} \hat{U}_k& =-\Lambda^{-1}_k\Psi_k\hat{X}_k, \label{uk0}\\ \tilde{u}_k^i&=-(\tilde{\Pi}^i_k)^{-1}\tilde{\Omega}^i_k\tilde{x}_k^i,~~i=1,\cdots,L,\label{uki}\\ \mathcal{I}^0&=[I_{m^0},0_{m^0\times m^1},\cdots,0_{m^0\times m^L}]_{m^0\times \mathbb{M}_{L}},\label{i0}\\ \mathcal{I}^i&=[0_{m^i\times m^0},0_{m^i\times m^1},\cdots,I_{m^i},\cdots,0_{m^i\times m^L}]_{m^i\times \mathbb{M}_{L}},\notag \end{align}} {with $\hat{X}_k, \tilde{x}_k^i$ being calculated from Lemma \ref{lemma2}, and the coefficient matrices $\Lambda_k, \Psi_k, \tilde{\Pi}_k^i, \tilde{\Omega}_k^i$ being} calculated via \eqref{re}-\eqref{coma} backward in time. {Moreover, the optimal cost function is given by \begin{align} J_N^*=&\sum_{i=0}^{L}E[(x_0^i)^TP_0^{i}x_0^i]+\sum_{i=0}^{L}(1-p^i)Tr[\Sigma_{x_0^i}(P_0^{i}+H_0^{i})]\notag\\ &+\sum_{i=0}^{L}\sum_{k=0}^{N}Tr(\Sigma_{v^i}L^{i}_{k+1}). \end{align}} \end{theorem} \begin{proof} See Appendix. \end{proof} \begin{remark} In Theorem \ref{th2}, we first derive the optimal control strategies by decoupling the G-FBSDEs \eqref{gfbs} (equivalently G-FBSDEs \eqref{gfbs2}). {The optimal control strategies are given in terms of new CREs \eqref{re}, which can be calculated backward in time under Assumptions \ref{ass1}-\ref{ass2} and the conditions {that} $\Lambda_k, \tilde{\Lambda}_k$ are invertible.} Moreover, it is noted that $P_k$ and $H_k$ in \eqref{re} are asymmetric, {which is} the essential difference from the additive noise case, see \cite{aon2018,am2019,lx2018,oan2016,nmt2013}. \end{remark} \section{Discussions} In this section, {we shall discuss some special cases of Problem \ref{prob1} and demonstrate the novelty and significance of our results.} \subsection{Additive Noise Case} In the case of $w_k^i=0$, the MN-NCS \eqref{sm-1} turns into the additive noise case. Using the results in Theorem \ref{th2}, the solution to Problem \ref{prob1} can be presented as follows. 
\begin{corollary}\label{lem7} {Under Assumptions \ref{ass1} and \ref{ass2}}, Problem \ref{prob1} is uniquely solvable. Moreover, the control strategies $u^i_k, k=0,\cdots,N, i=0,\cdots,L$ {that minimize \eqref{pi} can be given by} \begin{equation}\label{uk2} \left\{ \begin{array}{ll} u_k^0&=\mathcal{I}^0\hat{U}_k,\\ u_k^i&=\mathcal{I}^i\hat{U}_k+\tilde{u}_k^i, i=1,\cdots,L, \end{array} \right. \end{equation} where \begin{align} \hat{U}_k& =-\Lambda^{-1}_k\Psi_k\hat{X}_k, \label{uk02}\\ \tilde{u}_k^i&=-(\tilde{\Pi}^i_k)^{-1}\tilde{\Omega}^i_k\tilde{x}_k^i,~~i=1,\cdots,L.\label{uki2} \end{align} {In the above,} $\hat{X}_k, \tilde{x}_k^i$ are given in Lemma \ref{lemma2} with $w_k^i=0$, and the coefficient matrices $\Lambda_k, \Psi_k, \tilde{\Pi}_k, \tilde{\Omega}_k$ can be calculated via the following Riccati equations: {\small\begin{equation}\label{re2} \left\{ \begin{array}{ll} P_k&=Q+A^TP_{k+1}A-\Psi_k^T\Lambda_k^{-1}\Psi_k,\\ P_k^{i}&=Q^{ii}+(A^i)^TP_{k+1}^{i}A^i -(\Omega_k^i)^T(\Pi_k^i)^{-1}\Omega_k^i,\\ H_k^{i}&=Q^{ii}+(A^i)^TL_{k+1}^{i}A^i -(\tilde{\Omega}_k^i)^T(\tilde{\Pi}_k^i)^{-1}\tilde{\Omega}_k^i,\\ L_{N+1}^{i}&=P_{N+1}^{i}, P_{N+1} ~~\text{given in \eqref{pi}}, \end{array} \right. \end{equation}} where $\Lambda_k, \Psi_k, \Pi_k^i, \Omega_k^i, \tilde{\Pi}_k^i, \tilde{\Omega}_k^i, L_k, L_k^{i}$ in \eqref{re} satisfy {\small\begin{equation}\label{coma2} \left\{ \begin{array}{ll} \Lambda_k&=R+B^TP_{k+1}B,\\ \Psi_k&=B^TP_{k+1}A,\\ \Pi^i_k&=R^{ii}+(B^i)^TP_{k+1}^{i}B^i,\\ \Omega_k^i&=(B^i)^TP_{k+1}^{i}A^i,\\ \tilde{\Pi}^i_k&=R^{ii}+(B^i)^TL_{k+1}^{i}B^i,\\ \tilde{\Omega}_k^i&=(B^i)^TL_{k+1}^{i}A^i,\\ L_k^{i}&=p^iP_k^{i}+(1-p^i)H_k^{i}. \end{array} \right. 
\end{equation}} \end{corollary} \begin{remark} As shown in Corollary \ref{lem7}, the following points should be noted: \begin{itemize} \item The obtained results in {Theorem \ref{th2}} can be reduced to the additive noise case (i.e., $w_k^i=0$), which include the results of \cite{aon2018,am2019,lx2018} as a special case. \item For the additive noise case, it is found that $L_k=P_k=H_k$, hence CREs \eqref{re} can be reduced to \eqref{re2}, which are symmetric. \item For Riccati equations \eqref{re2}, by following the techniques of Lemma \ref{lem6}, it can be shown that $\Lambda_k, \Pi_k^i$ are positive definite under Assumptions \ref{ass1} and \ref{ass2}, thus the solvability of Problem \ref{prob1} can be ensured. Furthermore, the control strategies \eqref{uk2} are unique in this case. \end{itemize} \end{remark} \subsection{Single Subsystem Case} In this section, we will consider the single subsystem case, i.e., $L=1$. Following the results in Theorem \ref{th2}, {the solution to Problem \ref{prob1} for the single subsystem case can be given as follows.} \begin{corollary}\label{lem8} Under Assumptions \ref{ass1} and \ref{ass2}, Problem \ref{prob1} is uniquely solvable, {and the optimal controls $u^i_k, k=0,\cdots,N, i=0, 1$ can be given as} \begin{equation}\label{uk3} \left\{ \begin{array}{ll} u_k^0&=\mathcal{I}^0\hat{U}_k,\\ u_k^1&=\mathcal{I}^1\hat{U}_k+\tilde{u}_k^1, \end{array} \right. 
\end{equation} where \begin{align} \hat{U}_k& =-\Lambda^{-1}_k\Psi_k\hat{X}_k, \label{uk03}\\ \tilde{u}_k^1&=-(\tilde{\Lambda}_k)^{-1} \tilde{\Psi}_k\tilde{x}_k,\label{uki3} \end{align} {with $\Lambda_k, \Psi_k, \tilde{\Lambda}_k, \tilde{\Psi}_k$ given by} {\small\begin{equation}\label{coma3} \left\{ \begin{array}{ll} \Lambda_k&=R+B^TP_{k+1}B+\Sigma_{w^1}\bar{B}^TL_{k+1}\bar{B},\\ \Psi_k&=B^TP_{k+1}A+\Sigma_{w^1}\bar{B}^TL_{k+1}\bar{A},\\ \tilde{\Lambda}_k&=R+B^TL_{k+1}B+\Sigma_{w^1}\bar{B}^TL_{k+1}\bar{B},\\ \tilde{\Psi}_k&=B^TL_{k+1}A+\Sigma_{w^1}\bar{B}^TL_{k+1}\bar{A},\\ L_k&=p P_k +(1-p)H_k, \end{array} \right. \end{equation}} {and $P_k, H_k$ satisfying} {\small\begin{equation}\label{re3} \left\{ \begin{array}{ll} P_k&\hspace{-1mm}=\hspace{-1mm}Q+A^TP_{k+1}A+\Sigma_{w^1}\bar{A}^TL_{k+1}\bar{A}-\Psi_k^T\Lambda_k^{-1}\Psi_k,\\ H_k&\hspace{-1mm}=\hspace{-1mm}Q+A^TL_{k+1}A+\Sigma_{w^1}\bar{A}^TL_{k+1}\bar{A}-\tilde{\Psi}_k^T\tilde{\Lambda}_k^{-1}\tilde{\Psi}_k, \end{array} \right. \end{equation}} with terminal conditions $L_{N+1}=H_{N+1}=P_{N+1}$, and $\Sigma_{w^1}$ is the covariance of system noise $\{w_k^1\}_{k=0}^{N}$ with $L=1$ in \eqref{sm-1} and \eqref{pi}. \end{corollary} As for the results given in Corollary \ref{lem8}, we have the following comments. \begin{remark} Firstly, different from {the} multiple subsystems case in Theorem \ref{th2}, the optimal controls \eqref{uk3} are based on symmetric Riccati equations \eqref{re3}. Secondly, Assumptions \ref{ass1} and \ref{ass2} are sufficient to guarantee the solvability of Problem \ref{prob1}. \end{remark} \subsection{Solvability with Indefinite Weighting Matrices} In this section, we will investigate the case of {indefinite weighting matrices in \eqref{pi}}. In other words, we will just assume that weighting matrices $Q, R, P_{N+1}$ in \eqref{pi} are {symmetric}. 
Firstly, we will introduce the generalized Riccati equation: {\small\begin{equation}\label{up1k} \left\{ \begin{array}{ll} \Delta_k&=Q+A^T\Delta_{k+1}A+\sum_{i=1}^{L}\Sigma_{w^i}(\bar{\mathbf{A}}^i)^T\Delta_{k+1} \bar{\mathbf{A}}^i\\ &-M_k^T\Upsilon_k^{\dag}M_k,\\ \Upsilon_k&=R+B^T\Delta_{k+1}B+\sum_{i=1}^{L}\Sigma_{w^i}(\bar{\mathbf{B}}^i)^T\Delta_{k+1} \bar{\mathbf{B}}^i,\\ M_k&=B^T\Delta_{k+1}A+\sum_{i=1}^{L}\Sigma_{w^i}(\bar{\mathbf{B}}^i)^T\Delta_{k+1} \bar{\mathbf{A}}^i, \end{array} \right. \end{equation}} {with terminal condition $\Delta_{N+1}=P_{N+1}$, and $\dag$ denotes the Moore-Penrose inverse}. We will present the following corollary without proof. \begin{corollary}\label{lem9} Under Assumption \ref{ass1}, Problem \ref{prob1} is uniquely {solvable} if and only if ${\Upsilon}_k\geq 0$ in \eqref{up1k}, and $\Lambda_k, \tilde{\Lambda}_k, \Pi_k^i, \tilde{\Pi}_k^i$ in \eqref{re}-\eqref{coma} are all invertible for $k=0,\cdots, N, i=1,\cdots,L$. In this case, the optimal controls are given by \eqref{uk}, in which the coefficients are based on the solution to the CREs \eqref{re}. \end{corollary} \begin{remark} The necessary and sufficient solvability conditions of Problem \ref{prob1} are shown in Corollary \ref{lem9} under the assumption that {the weighting matrices of \eqref{pi} are} indefinite. The proposed results in Corollary \ref{lem9} can be deduced from Theorem \ref{th1} and its proof. {It can be observed that the positive semi-definiteness of $\Upsilon_k$ is equivalent to the condition $\delta J_N\geq 0$ in \eqref{deljn}, which is the key of deriving Corollary \ref{lem9}}. {To avoid repetition}, the detailed proof of Corollary \ref{lem9} is omitted here. \end{remark} \section{Numerical Examples} In this section, some numerical simulation examples will be provided to show the effectiveness and feasibility of the main results. 
\subsection{State Trajectory with Optimal Controls} Consider MN-NCSs \eqref{sm-1} and cost function \eqref{pi} with {\small\begin{align}\label{coe} L&=3, p^1=0.8, p^2=0.6, p^3=0.8,N=60,\notag\\ A^1&=\left[\hspace{-2mm} \begin{array}{cccc} 2&-2\\ 1&3 \end{array} \hspace{-2mm}\right], A^2=\left[\hspace{-2mm} \begin{array}{cccc} 0.8&2\\ -1&0.6 \end{array} \hspace{-2mm}\right], A^3=\left[\hspace{-2mm} \begin{array}{cccc} 1&-0.3\\ 3&-2 \end{array} \hspace{-2mm}\right],\notag\\ B^1&=\left[\hspace{-2mm} \begin{array}{cccc} 1.1&-1.9\\ 0.6&2 \end{array} \hspace{-2mm}\right],B^2=\left[\hspace{-2mm} \begin{array}{cccc} 1&2\\ 3&4 \end{array} \hspace{-2mm}\right], B^3=\left[\hspace{-2mm} \begin{array}{cccc} -1&1.5\\ 1&2 \end{array} \hspace{-2mm}\right],\notag\\ B^{10}&=\left[\hspace{-2mm} \begin{array}{cccc} -1&1.5\\ 2&-2 \end{array} \hspace{-2mm}\right],B^{20}=\left[\hspace{-2mm} \begin{array}{cccc} 0.8&-2\\ 1&-3 \end{array} \hspace{-2mm}\right], B^{30}=\left[\hspace{-2mm} \begin{array}{cccc} 1&-3\\ 0&2 \end{array} \hspace{-2mm}\right],\notag\\ \bar{A}^1&=\left[\hspace{-2mm} \begin{array}{cccc} 0.6&2\\ 3&0.9 \end{array} \hspace{-2mm}\right], \bar{A}^2=\left[\hspace{-2mm} \begin{array}{cccc} 1&-0.5\\ 0.5&2 \end{array} \hspace{-2mm}\right], \bar{A}^3=\left[\hspace{-2mm} \begin{array}{cccc} 0&-1\\ 2&2.3 \end{array} \hspace{-2mm}\right],\notag\\ \bar{B}^1&=\left[\hspace{-2mm} \begin{array}{cccc} -1&0.2\\ 3&2 \end{array} \hspace{-2mm}\right],\bar{B}^2=\left[\hspace{-2mm} \begin{array}{cccc} 1.2&1.8\\ -3&2 \end{array} \hspace{-2mm}\right], \bar{B}^3=\left[\hspace{-2mm} \begin{array}{cccc} 1.1&-2\\ 3.1&2 \end{array} \hspace{-2mm}\right],\notag\\ \bar{B}^{10}&=\left[\hspace{-2mm} \begin{array}{cccc} -1&0.9\\ 0.7&2 \end{array} \hspace{-2mm}\right],\bar{B}^{20}=\left[\hspace{-2mm} \begin{array}{cccc} 1.2&1\\ -2&0 \end{array} \hspace{-2mm}\right], \bar{B}^{30}=\left[\hspace{-2mm} \begin{array}{cccc} 3&-1\\ 0.6&2 \end{array} \hspace{-2mm}\right],\notag\\ 
\Sigma_{w^1}&=\Sigma_{w^2}=\Sigma_{w^3}=1, \Sigma_{v^1}=\Sigma_{v^2}=\Sigma_{v^3}=\left[\hspace{-2mm} \begin{array}{cccc} 1&0\\ 0&1 \end{array} \hspace{-2mm}\right],\notag\\ \mu^1&=\left[\hspace{-2mm} \begin{array}{cccc} 1.2\\ 2 \end{array} \hspace{-2mm}\right], \mu^2=\left[\hspace{-2mm} \begin{array}{cccc} 2\\ 5 \end{array} \hspace{-2mm}\right], \mu^3=\left[\hspace{-2mm} \begin{array}{cccc} -3\\ 10 \end{array} \hspace{-2mm}\right],\notag\\ \Sigma_{x_0^1}&=\Sigma_{x_0^2}=\Sigma_{x_0^3}=\left[\hspace{-2mm} \begin{array}{cccc} 1&0\\ 0&1 \end{array} \hspace{-2mm}\right],\notag\\ Q&=\left[\hspace{-2mm} \begin{array}{cccccc} 1&0&-1&1.2&0.6 &0.2\\ 0&1&2&0.8&1 &1\\ -1&2&1.2&0.6&1 &1\\ 1.2&0.8&0.6&0.6&3 &1\\ 0.6&1&1&3&2 &1\\ 0.2&1&1&1&1 &2 \end{array} \hspace{-2mm}\right],\notag\\ R&\hspace{-1mm}=\hspace{-1mm}\left[\hspace{-2mm} \begin{array}{cccccccc} 5.7 \hspace{-1mm}&\hspace{-1mm}-1.0 \hspace{-1mm}&\hspace{-1mm}-0.1 \hspace{-1mm}&\hspace{-1mm}-0.4 \hspace{-1mm}&\hspace{-1mm}-3.9 & -4.8 \hspace{-1mm}&\hspace{-1mm}-0.9 \hspace{-1mm}&\hspace{-1mm} 1.9 \\ -1.0\hspace{-1mm}&\hspace{-1mm} 6.5\hspace{-1mm}&\hspace{-1mm}0.5 \hspace{-1mm}&\hspace{-1mm}-4.7 \hspace{-1mm}&\hspace{-1mm}-1.6 & -0.4 \hspace{-1mm}&\hspace{-1mm}4.1 \hspace{-1mm}&\hspace{-1mm}0.9 \\ -0.1\hspace{-1mm}&\hspace{-1mm} 0.5\hspace{-1mm}&\hspace{-1mm} 10.4\hspace{-1mm}&\hspace{-1mm}0.7 & 2.3 \hspace{-1mm}&\hspace{-1mm}-3.7 \hspace{-1mm}&\hspace{-1mm}-3.9 \hspace{-1mm}&\hspace{-1mm}2.8 \\ -0.4\hspace{-1mm}&\hspace{-1mm} -4.7\hspace{-1mm}&\hspace{-1mm}0.7 \hspace{-1mm}&\hspace{-1mm}8.9 & 1.4 \hspace{-1mm}&\hspace{-1mm}4.9 \hspace{-1mm}&\hspace{-1mm}-0.4 \hspace{-1mm}&\hspace{-1mm}3.8 \\ -3.9\hspace{-1mm}&\hspace{-1mm} -1.6\hspace{-1mm}&\hspace{-1mm}2.3 \hspace{-1mm}&\hspace{-1mm}1.4 & 9.5 \hspace{-1mm}&\hspace{-1mm}5.9 \hspace{-1mm}&\hspace{-1mm}-2.6 \hspace{-1mm}&\hspace{-1mm}-2.8 \\ -4.8\hspace{-1mm}&\hspace{-1mm} -0.4\hspace{-1mm}&\hspace{-1mm}-3.7 \hspace{-1mm}&\hspace{-1mm}4.9 \hspace{-1mm}&\hspace{-1mm}5.9 
& 11.3 \hspace{-1mm}&\hspace{-1mm}4.2 \hspace{-1mm}&\hspace{-1mm}-0.7 \\ -0.9\hspace{-1mm}&\hspace{-1mm} 4.1\hspace{-1mm}&\hspace{-1mm}-3.9 \hspace{-1mm}&\hspace{-1mm}-0.4 \hspace{-1mm}&\hspace{-1mm}-2.6 \hspace{-1mm}&\hspace{-1mm} 4.2\hspace{-1mm}&\hspace{-1mm}13.7 \hspace{-1mm}&\hspace{-1mm}1.6 \\ 1.9\hspace{-1mm}&\hspace{-1mm} 0.9\hspace{-1mm}&\hspace{-1mm} 2.8\hspace{-1mm}&\hspace{-1mm} 3.8\hspace{-1mm}&\hspace{-1mm}-2.8 \hspace{-1mm}&\hspace{-1mm}-0.7 \hspace{-1mm}&\hspace{-1mm}1.6\hspace{-1mm}&\hspace{-1mm}8.3  \end{array} \hspace{-2mm}\right],\notag\\ P_{N+1}&=\left[\hspace{-2mm} \begin{array}{cccccc} 1.2&-1.4&-1.4&-3.1&-1.7& -5.5\\ -1.4&1.3&0.9&1.8&-1.1 &0.7\\ -1.4&0.9&5.9&1.4&-3.5 &-1.4\\ -3.1&1.8&1.4&4.4&-0.2 &1.5\\ -1.7&-1.1&-3.5&-0.2&5.1 &2.7\\ -5.5&0.7&-1.4 & 1.5&2.7 &10.4 \end{array} \hspace{-2mm}\right]. \end{align}} Since Assumptions \ref{ass1} and \ref{ass2} hold for the coefficients given in \eqref{coe}, and $\Lambda_k$ and $\tilde{\Lambda}_k$ in \eqref{coma} are invertible, Problem \ref{prob1} can be uniquely solved by Theorem \ref{th2}. In this case, by using the optimal controls $u_k^i, i=0,\cdots,3, k=0,\cdots,60$, the state trajectories of $x_k^1, x_k^2, x_k^3$ are presented in Figures \ref{fig2}-\ref{fig4}. \begin{figure} \caption{State trajectory of $x_k^1$, where $x_k^{1,(1)}$ and $x_k^{1,(2)}$ denote the two components of $x_k^1$.} \label{fig2} \end{figure} \begin{figure} \caption{State trajectory of $x_k^2$, where $x_k^{2,(1)}$ and $x_k^{2,(2)}$ denote the two components of $x_k^2$.} \label{fig3} \end{figure} \begin{figure} \caption{State trajectory of $x_k^3$, where $x_k^{3,(1)}$ and $x_k^{3,(2)}$ denote the two components of $x_k^3$.} \label{fig4} \end{figure} As shown, each subsystem state $x_k^i, i=1,2,3$ is convergent with optimal controls $u_k^i, i=0,\cdots,3$ calculated via Theorem \ref{th2}. \subsection{State Trajectory with Different Packet Dropout Rates} In this section, we will explore the effects on the state trajectories with different packet dropout rates $p^i, i=1,2,3$. 
Without loss of generality, we choose the same coefficients as in \eqref{coe}, and the state trajectory of subsystem 1 is given in Figures \ref{fig5}-\ref{fig6} with different packet dropout rates. \begin{figure} \caption{State trajectory of $x_k^1$, with $p^1=p^2=p^3=0.8$.} \label{fig5} \end{figure} \begin{figure} \caption{State trajectory of $x_k^1$, with $p^1=p^2=p^3=0.3$.} \label{fig6} \end{figure} It can be observed that the convergence rate of $x_k^1$ decreases as the packet dropout rate {$1-p^i, i=1,2,3$, increases}. \section{Conclusion} In this paper, we have investigated the optimal local and remote control problem for MN-NCSs with unreliable uplink channels and multiple subsystems. By adopting the Pontryagin maximum principle, the necessary and sufficient solvability conditions have been derived. Moreover, we have proposed a novel approach to decouple the G-FBSDEs associated with the Pontryagin maximum principle. Finally, by introducing the {asymmetric} CREs, the optimal local and remote control strategies {have been} derived in terms of the solution to the CREs, which can be calculated backward in time. The proposed methods and the obtained results in this paper provide some inspirations for {studying} general control problems with {asymmetric} information structures. \section*{Appendix: Proof of Theorem \ref{th2}} \begin{proof} We will show the main results by using an induction method. 
Note that the terminal conditions are $P_{N+1}=H_{N+1}=L_{N+1}$, and $\Theta_N=P_{N+1}X_{N+1}$, it can be calculated from \eqref{adeqs} that \begin{align}\label{unm} 0&=R\hat{U}_N+E[(B+\sum_{i=1}^{L}w_N^i\bar{\mathbf{B}}^i)^T\Theta_N|\mathcal{F}^0_N]\notag\\ &=R\hat{U}_N+E[(B+\sum_{i=1}^{L}w_N^i\bar{\mathbf{B}}^i)^TP_{N+1}X_{N+1}|\mathcal{F}^0_N]\notag\\ &=[R+B^TP_{N+1}B+\sum_{i=1}^{L}(\bar{\mathbf{B}}^i)^TP_{N+1}\bar{\mathbf{B}}^i]\hat{U}_N\notag\\ &+[B^TP_{N+1}A+\sum_{i=1}^{L}(\bar{\mathbf{B}}^i)^TP_{N+1}\bar{\mathbf{A}}^i]E[X_N|\mathcal{F}^0_N]\notag\\ &=\Lambda_N\hat{U}_N+\Psi_NE[X_N|\mathcal{F}^0_N]. \end{align} From Theorem \ref{th1} we know that Problem \ref{prob1} is uniquely {solvable} if and only if G-FBSDEs \eqref{gfbs2} is uniquely {solvable} under Assumptions \ref{ass1} and \ref{ass2}. Moreover, \eqref{unm} is uniquely solved if and only if $\Lambda_N$ is invertible. In this case, $\hat{U}_N$ can be derived as in \eqref{uk0}. Next, from \eqref{adeqs2} we know \begin{align}\label{theni} \Theta_N^i&=P_{N+1}^{i1}x_{N+1}^1+\cdots+P_{N+1}^{iL}x_{N+1}^L, \end{align} then there holds \begin{align}\label{unm2} 0&=R^{ii}\tilde{u}_N^i+E[(B^i+w_N^i\bar{B}^i)^T\sum_{j=1}^{L}P_{N+1}^{ij}x_{N+1}^j|\mathcal{G}_N]\notag\\ &~~~~~~~~~~~~~-E[(B^i+w_N^i\bar{B}^i)^T\sum_{j=1}^{L}P_{N+1}^{ij}x_{N+1}^j|\mathcal{G}_N^i]\notag\\ &=R^{ii}\tilde{u}_N^i+E[(B^i+w_N^i\bar{B}^i)^T\sum_{j=1}^{L}P_{N+1}^{ij} [(A^j+w_k^j\bar{A})x_{N}^j\notag\\ &~~~+(B^j+w_k^j\bar{B})u_{N}^j+(B^{j0}+w_k^j\bar{B}^{j0})u_N^0]|\mathcal{G}_N]\notag\\ &-E[(B^i+w_N^i\bar{B}^i)^T\sum_{j=1}^{L}P_{N+1}^{ij} [(A^j+w_k^j\bar{A})x_{N}^j\notag\\ &~~~+(B^j+w_k^j\bar{B})u_{N}^j+(B^{j0}+w_k^j\bar{B}^{j0})u_N^0]|\mathcal{G}_N^i]\notag\\ &=R^{ii}\tilde{u}_N^i+E[(B^i+w_N^i\bar{B}^i)^TP_{N+1}^{i} [(A^i+w_k^i\bar{A})x_{N}^i\notag\\ &~~~+(B^i+w_k^i\bar{B})u_{N}^i+(B^{j0}+w_k^i\bar{B}^{j0})u_N^0]|\mathcal{G}_N]\notag\\ &-E[(B^i+w_N^i\bar{B}^i)^TP_{N+1}^{i} [(A^i+w_k^i\bar{A})x_{N}^i\notag\\ 
&~~~+(B^i+w_k^i\bar{B})u_{N}^i+(B^{j0}+w_k^j\bar{B}^{j0})u_N^0]|\mathcal{G}_N^i]\notag\\ &=[R^{ii}+(B^i)^TP_{N+1}^{i}B^i+\Sigma_{w^i}(\bar{B}^i)^TP_{N+1}^{i}\bar{B}^i]\tilde{u}_N^i\notag\\ &+[(B^i)^TP_{N+1}^{i}A^i+\Sigma_{w^i}(\bar{B}^i)^TP_{N+1}^{i}\bar{A}^i]\tilde{x}_{N}^i\notag\\ &=\tilde{\Pi}_N^i\tilde{u}_N^i+\tilde{\Omega}_N^i\tilde{x}_N^i. \end{align} Since $\tilde{\Pi}_N^i$ is positive definite as shown in Lemma \ref{lem6}, thus $\tilde{u}_N^i$ can be uniquely solved as \eqref{uki}. {Hence, the optimal remote controller $u_N^0$ and the optimal local controllers $u_N^i, i=1,\cdots, L$ can be derived as \eqref{uk}. } Consequently, we will calculate $\Theta_{N-1}$, actually, from \eqref{coss} we have \begin{align}\label{then1} \Theta_{N-1}&=E\left[(A+\sum_{i=1}^{L}w_N^i\bar{\mathbf{A}}^i)^T\Theta_N+QX_N\Bigg|\mathcal{G}_N\right]\notag\\ &=E\left[(A+\sum_{i=1}^{L}w_N^i\bar{\mathbf{A}}^i)^TP_{N+1}X_{N+1}+QX_N\Bigg|\mathcal{G}_N\right]\notag\\ &=E\Big[(A+\sum_{i=1}^{L}w_N^i\bar{\mathbf{A}}^i)^TP_{N+1}[(A+\sum_{i=1}^{L}w_N^i\bar{\mathbf{A}}^i)X_N\notag\\ &+(B+\sum_{i=1}^{L}w_N^i\bar{\mathbf{B}}^i)(\hat{U}_N+\tilde{U}_N)+V_N]\Big|\mathcal{G}_N\Big]+QX_N\notag\\ &=[Q+A^TP_{N+1}A+\sum_{i=1}^{L}\Sigma_{w_N^i}(\bar{\mathbf{A}}^i)^TP_{N+1}\bar{\mathbf{A}}^i]X_N\notag\\ &+[A^TP_{N+1}B+\sum_{i=1}^{L}\Sigma_{w_N^i}(\bar{\mathbf{A}}^i)^TP_{N+1}\bar{\mathbf{B}}^i]\hat{U}_N\notag\\ &+[A^TP_{N+1}B+\sum_{i=1}^{L}\Sigma_{w_N^i}(\bar{\mathbf{A}}^i)^TP_{N+1}\bar{\mathbf{B}}^i]\tilde{U}_N\notag\\ &=[Q+A^TP_{N+1}A+\sum_{i=1}^{L}\Sigma_{w_N^i}(\bar{\mathbf{A}}^i)^TP_{N+1}\bar{\mathbf{A}}^i]X_N\notag\\ &-\Psi_N^T\Lambda_N^{-1}\Psi_N\hat{X}_N-\Psi_N^T\Lambda_N^{-1}\Psi_N\tilde{X}_N\notag\\ &=P_N\hat{X}_N+H_N\tilde{X}_N. \end{align} where $P_N, H_N$ satisfy \eqref{re} for $k=N$. 
To complete the induction approach, we assume for $k=l+1,\cdots, N$, there holds \begin{itemize} \item The optimal controls $u_k^i, i=0,\cdots, L$ are given by \eqref{uk}-\eqref{uki}; \item The relationship between the system state $X_k$ and costate $\Theta_k$ satisfies: \begin{align}\label{rela12} \Theta_{k-1}=P_k\hat{X}_{k}+H_k\tilde{X}_k, \end{align} where $P_k, H_k$ can be calculated from \eqref{re} backwardly. \end{itemize} Next, we will calculate $u^i_{l}$. Note that $\Theta_{l}=P_{l+1}\hat{X}_{l+1}+H_{l}\tilde{X}_{l+1}$, it can be calculated from \eqref{adeqs} that \begin{align}\label{1unm} 0&=R\hat{U}_{l}+E[(B+\sum_{i=1}^{L}w_l^i\bar{\mathbf{B}}^i)^T\Theta_{l}|\mathcal{F}^0_{l}]\notag\\ &=R\hat{U}_{l}+E[(B+\sum_{i=1}^{L}w_l^i\bar{\mathbf{B}}^i)^T(P_{l+1}\hat{X}_{l+1}\notag\\ &~~~~~~~~~~+H_{l+1}\tilde{X}_{l+1})|\mathcal{F}^0_{l}]\notag\\ &=R\hat{U}_{l}+E\Big[(B+\sum_{i=1}^{L}w_l^i\bar{\mathbf{B}}^i)^T\big\{P_{l+1}[\Gamma_{l+1}X_{l+1}\notag\\ &~~~~~+(I_{\mathbb{N}_L}-\Gamma_{l+1})(A\hat{X}_{l}+B\hat{U}_{l})]\big\}\notag\\ &+(B+\sum_{i=1}^{L}w_l^i\bar{\mathbf{B}}^i)^T\big\{H_{l+1}(I_{\mathbb{N}_L}-\Gamma_{l+1})[A\tilde{X}_{l}+B\tilde{U}_{l}\notag\\ &~~~~~+V_{l} +\sum_{i=1}^{L}w_l^i (\bar{\mathbf{A}}^iX_{l}+\bar{\mathbf{B}}^iU_{l})]\big\}|\mathcal{F}^0_{l}\Big]\notag\\ &=\{R+B^TP_{l+1}[p B+(I_{\mathbb{N}_L}-p) B]\notag\\ &~~~~~~+\sum_{i=1}^{L}\Sigma_{w_l^i}(\bar{\mathbf{B}}^i)^T[P_{l+1}p+H_{l+1}(I_{\mathbb{N}_L}-p)]\bar{\mathbf{B}}^i\}\hat{U}_{l}\notag\\ &+\{B^TP_{l+1}[p A+(I_{\mathbb{N}_L}-p) A]\notag\\ &~~~~~~+\sum_{i=1}^{L}\Sigma_{w_l^i}(\bar{\mathbf{B}}^i)^T[P_{l+1}p+H_{l+1}(I_{\mathbb{N}_L}-p)]\bar{\mathbf{B}}^i\}\hat{X}_{l}\notag\\ &=\Lambda_l\hat{U}_l+\Psi_l\hat{X}_l. \end{align} By following the discussions below \eqref{unm}, we know that \eqref{1unm} can be uniquely solved if and only if $\Lambda_l$ is invertible. {Then} $\hat{U}_l$ can be derived as in \eqref{uk0}. 
Since $\tilde{U}_l=U_l-\hat{U}_l$, we have \begin{align}\label{ads1} 0 & =R\tilde{U}_{l}+E[(B+\sum_{i=1}^{L}w_l^i\bar{\mathbf{B}}^i)^T\Theta_{l}|\mathcal{G}_{l}]\notag\\ &~~~~~~~~~-E[(B+\sum_{i=1}^{L}w_l^i\bar{\mathbf{B}}^i)^T\Theta_{l}|\mathcal{F}^0_{l}]\notag\\ &=R\tilde{U}_{l}+E\Big[(B+\sum_{i=1}^{L}w_l^i\bar{\mathbf{B}}^i)^T\big\{P_{{l+1}}[\Gamma_{{l+1}}X_{{l+1}}\notag\\ &+(I_{\mathbb{N}_L}-\Gamma_{{l+1}})(A\hat{X}_{l}+B\hat{U}_{l})]\big\}\notag\\ &+(B+\sum_{i=1}^{L}w_l^i\bar{\mathbf{B}}^i)^T\big\{H_{{l+1}}(I_{\mathbb{N}_L}-\Gamma_{{l+1}})[A\tilde{X}_{l}+B\tilde{U}_{l}\notag\\ &+V_{l}+\sum_{i=1}^{L}w_l^i(\bar{\mathbf{A}}^iX_{l}+\bar{\mathbf{B}}^iU_{l})]\big\}|\mathcal{G}_{l}\Big]\notag\\ &-E\Big[(B+\sum_{i=1}^{L}w_l^i\bar{\mathbf{B}}^i)^T\big\{P_{{l+1}}[\Gamma_{{l+1}}X_{{l+1}}\notag\\ &+(I_{\mathbb{N}_L}-\Gamma_{{l+1}})(A\hat{X}_{l}+B\hat{U}_{l})]\big\}\notag\\ &+(B+\sum_{i=1}^{L}w_l^i\bar{\mathbf{B}}^i)^T\big\{H_{{l+1}}(I_{\mathbb{N}_L}-\Gamma_{{l+1}})[A\tilde{X}_{l}+B\tilde{U}_{l}\notag\\ &+V_{l}+\sum_{i=1}^{L}w_l^i(\bar{\mathbf{A}}^iX_{l}+\bar{\mathbf{B}}^iU_{l})]\big\}|\mathcal{F}^0_{l}\Big]\notag\\ &=R\tilde{U}_{l}+[B^TP_{l+1}p A+\sum_{i=1}^{L}\Sigma_{w_l^i}(\bar{\mathbf{B}}^i)^TP_{l+1}p \bar{\mathbf{A}}^i]X_{l}\notag\\ &+[B^TP_{l+1}p B+\sum_{i=1}^{L}\Sigma_{w_l^i}(\bar{\mathbf{B}}^i)^TP_{l+1}p \bar{\mathbf{B}}^i]U_{l}\notag\\ &+B^TP_{l+1}(I_{\mathbb{N}_L}-p) A\hat{X}_{l}+B^TP_{l+1}(I_{\mathbb{N}_L}-p) B\hat{U}_{l}\notag\\ &+B^TH_{l+1}(I_{\mathbb{N}_L}-p) A\tilde{X}_{l} +B^TH_{l+1}(I_{\mathbb{N}_L}-p) B\tilde{U}_{l}\notag\\ &+\sum_{i=1}^{L}\Sigma_{w_l^i}(\bar{\mathbf{B}}^i)^TH_{l+1}(I_{\mathbb{N}_L}-p) \bar{\mathbf{A}}^iX_{l}\notag\\ &+\sum_{i=1}^{L}\Sigma_{w_l^i}(\bar{\mathbf{B}}^i)^TH_{l+1}(I_{\mathbb{N}_L}-p) \bar{\mathbf{B}}^iU_{l}\notag\\ &-\{B^TP_{l+1}(p B+(I_{\mathbb{N}_L}-p) B)\notag\\ & +\sum_{i=1}^{L}\Sigma_{w_l^i}(\bar{\mathbf{B}}^i)^T[P_{l+1}p +H_{l+1}(I_{\mathbb{N}_L}-p)]\bar{\mathbf{B}}^i\}\hat{U}_{l}\notag\\ &-\{B^TP_{l+1}(p 
A+(I_{\mathbb{N}_L}-p) A)\notag\\ &+\sum_{i=1}^{L}\Sigma_{w_l^i}(\bar{\mathbf{B}}^i)^T[P_{l+1}p+H_{l+1}(I_{\mathbb{N}_L}-p)]\bar{A}\}\hat{X}_{l}\notag\\ &=\{R+B^T[P_{l+1}p+H_{l+1}(I_{\mathbb{N}_L}-p) ]B\notag\\ &+\sum_{i=1}^{L}\Sigma_{w_l^i}(\bar{\mathbf{B}}^i)^T[P_{l+1}p +H_{l+1}(I_{\mathbb{N}_L}-p) ]\bar{\mathbf{B}}^i\}\tilde{U}_{l}\notag\\ &+\{B^T[P_{l+1}p+H_{l+1}(I_{\mathbb{N}_L}-p) ]A\notag\\ &+\sum_{i=1}^{L}\Sigma_{w_l^i}(\bar{\mathbf{B}}^i)^T[P_{l+1}p+H_{l+1}(I_{\mathbb{N}_L}-p)]\bar{\mathbf{A}}^i\}\tilde{X}_{l}\notag\\ &=\tilde{\Lambda}_{l}\tilde{U}_{l}+\tilde{\Psi}_{l}\tilde{X}_{l}. \end{align} Hence, the solvability of \eqref{ads1} is equivalent to the invertibility of $\tilde{\Lambda}_{l}$, and we have \begin{align}\label{tildeu} \tilde{U}_{l}=-\tilde{\Lambda}_{l}^{-1}\tilde{\Psi}_{l}\tilde{X}_{l}, \end{align} i.e, \eqref{uki} can be verified for $k=l$. Next, from \eqref{adeqs2} we know \begin{align}\label{1theni} \Theta_{l}^i&=P_{{l+1}}^{i1}\hat{x}_{{l+1}}^1+\cdots+P_{{l+1}}^{iL}\hat{x}_{{l+1}}^L\notag\\ &+H_{{l+1}}^{i1}\tilde{x}_{{l+1}}^1+\cdots+H_{{l+1}}^{iL}\tilde{x}_{{l+1}}^L. 
\end{align} {Thus, using Lemmas \ref{lem3}-\ref{lem4}, there holds} \begin{align}\label{1unm2} 0&=R^{ii}\tilde{u}_{l}^i+E[(B^i+w_{l}^i\bar{B}^i)^T\Theta_{l}^i|\mathcal{G}_{l}]\notag\\ &~~~~~~~~~~~~~-E[(B^i+w_{l}^i\bar{B}^i)^T\Theta_{l}^i|\mathcal{G}_{l}^i]\notag\\ &=R^{ii}\tilde{u}_{l}^i+E\Big[(B^i+w_{l}^i\bar{B}^i)^TP_{{l+1}}^{i} \{\gamma_{{l+1}}^i[(A^i+w_{l}^i\bar{A})x_{l}^i\notag\\ &~~~+(B^i+w_{l}^i\bar{B})u_{l}^i+(B^{i0}+w_{l}^i\bar{B}^{i0})u_{l}^0]\notag\\ &+(1-\gamma_{l+1}^i)(A^i\hat{x}_{l}^i+B^i\hat{u}_{l}^i+B^{i0}u_{l}^0)\}\Big|\mathcal{G}_{l}\Big]\notag\\ &+E\Big[(B^i+w_{l}^i\bar{B}^i)^TH_{{l+1}}^{i} (1-\gamma_{{l+1}}^i)[A^i\tilde{x}_{l}^i+B^i\tilde{u}_{l}^i\notag\\ &+w_{l}^i(\bar{A}x_{{l+1}}^i+\bar{B}u_{l}^i+\bar{B}^{j0})u_{l+1}^0]\Big|\mathcal{G}_{l}\Big]\notag\\ &-E\Big[(B^i+w_{l}^i\bar{B}^i)^TP_{{l+1}}^{i} \{\gamma_{{l+1}}^i[(A^i+w_k^i\bar{A})x_{{l+1}}^i\notag\\ &~~~+(B^i+w_k^i\bar{B})u_{{l+1}}^i+(B^{j0}+w_k^i\bar{B}^{j0})u_{l+1}^0]\notag\\ &+(1-\gamma_{l+1}^i)(A^i\hat{x}_{l}^i+B^i\hat{u}_{l}^i+B^{i0}u_{l}^0)\}\Big|\mathcal{G}_{l}^i\Big]\notag\\ &-E\Big[(B^i+w_{l}^i\bar{B}^i)^TH_{{l+1}}^{i} (1-\gamma_{{l+1}}^i)[A^i\tilde{x}_{l}^i+B^i\tilde{u}_{l}^i\notag\\ &+w_{l}^i(\bar{A}x_{{l+1}}^i+\bar{B}u_{l}^i+\bar{B}^{j0})u_{l+1}^0]\Big|\mathcal{G}_{l}^i\Big]\notag\\ &=[R^{ii}+(B^i)^T(p^iP_{{l+1}}^{i}+(1-p^i)H_{l+1}^{i})B^i\notag\\ &+\Sigma_{w^i}(\bar{B}^i)^T(p^iP_{{l+1}}^{i}+(1-p^i)H_{l+1}^{i})\bar{B}^i]\tilde{u}_{l}^i\notag\\ &+[(B^i)^T(p^iP_{{l+1}}^{i}+(1-p^i)H_{l+1}^{i})A^i\notag\\ &+\Sigma_{w^i}(\bar{B}^i)^T(p^iP_{{l+1}}^{i}+(1-p^i)H_{l+1}^{i})\bar{A}^i]\tilde{x}_{l}^i\notag\\ &=\Pi_{l}^i\tilde{u}_{l}^i+\Omega_{l}^i\tilde{x}_{l}^i. \end{align} In this case, $\tilde{u}_l^i$ can be derived as \eqref{uki} for $k=l$. {Therefore, the optimal controls $u_l^i, i=1,\cdots,L$ can be verified as \eqref{uk}. 
Consequently, we will calculate $\Theta_{l-1}$; from \eqref{coss} we have
&+A^TP_{l+1}(I_{\mathbb{N}_L}-p)A\notag\\&+\sum_{i=1}^{L}\Sigma_{w_l^i}(\bar{\mathbf{A}}^i)^TH_{l+1}(I_{\mathbb{N}_L}-p)\bar{\mathbf{A}}^i]\hat{X}_l\notag\\ &+\Psi_l\hat{U}_l +[Q+A^TP_{l+1}p A+\sum_{i=1}^{L}\Sigma_{w_l^i}(\bar{\mathbf{A}}^i)^TP_{l+1}p\bar{\mathbf{A}}^i\notag\\ &+A^TH_{l+1}(I_{\mathbb{N}_L}-p)A\notag\\&+\sum_{i=1}^{L}\Sigma_{w_l^i}(\bar{\mathbf{A}}^i)^TH_{l+1}(I_{\mathbb{N}_L}-p)\bar{\mathbf{A}}^i]\tilde{X}_l \hspace{-1mm}+\hspace{-1mm}\tilde{\Psi}_l\tilde{U}_l\notag\\ &=P_l\hat{X}_l+H_l\tilde{X}_l. \end{align} Thus, \eqref{rela12} has been derived for $k=l$. {This ends the induction}. Finally, we will calculate the optimal cost function. For simplicity, we denote \begin{align}\label{vn} V_{k}&\triangleq E[X_k^T\Theta_{k-1}]. \end{align} Hence, we have \begin{align}\label{mivk} &V_k-V_{k+1}=E[X_k^T\Theta_{k-1}]-E[X_{k+1}^T\Theta_{k}]\notag\\ =&E\Big[E[X_k^T(A+\sum_{i=1}^{L}w_k^i\bar{\mathbf{A}}^i)^T\Theta_k+QX_k|\mathcal{G}_k]\notag\\ &-E\big[[(A+\sum_{i=1}^{L}w_k^i\bar{\mathbf{A}}^i)X_k\notag\\ &~~~+(B+\sum_{i=1}^{L}w_k^i\bar{\mathbf{B}}^i)U_k+V_k]^T\Theta_{k}|\mathcal{G}_k\big]\Big]\notag\\ =&E\Big[X_k^TQX_k-V_k^T\Theta_k-U_k^T E[(B+\sum_{i=1}^{L}w_k^i\bar{\mathbf{B}}^i)^T\Theta_k|\mathcal{G}_k]\Big]\notag\\ =&E\Big[X_k^TQX_k+U_k^TRU_k-V_k^T\Theta_k\Big]. 
\end{align} Note $\Theta_N=P_{N+1}X_{N+1}$, then taking summation of \eqref{mivk} from $0$ to $N$, the optimal cost function can be given by \begin{align}\label{ocp} J_N^*=&\sum_{k=0}^{N}E[V_k^T\Theta_k]+E[X_0^T\Theta_{-1}]\notag\\ =&\sum_{k=0}^{N}E[V_k^T(P_{k+1}\hat{X}_{k+1}+H_{k+1}\tilde{X}_{k+1})]\notag\\ &+E[X_0^T(P_0\hat{X}_0+H_0\tilde{X}_k)]\notag\\ =&\sum_{k=0}^{N}E[V_k^T(P_{k+1}p+H_{k+1}(I_{\mathbb{N}_L}-p))V_k]\notag\\ &+E[\hat{X}_0^TP_0\hat{X}_0+\tilde{X}_0^TH_0\tilde{X}_0]\notag\\ =&\sum_{k=0}^{N}E[V_k^TL_{k+1}V_k]+E[\hat{X}_0^TP_0\hat{X}_0 +\sum_{i=0}^{L}(\tilde{x}_0^i)^TH_0^{i}\tilde{x}_0^i]\notag\\ =&\sum_{k=0}^{N}\sum_{i=0}^{L}E[(v_k^i)^TL^{i}_{k+1}v_k^i]\notag\\ &+E[\hat{X}_0^TP_0\hat{X}_0+\sum_{i=0}^{L}(\tilde{x}_0^i)^TH_0^{i}\tilde{x}_0^i]\notag\\ =&\sum_{i=0}^{L}E[(x_0^i)^TP_0^{i}x_0^i]+\sum_{i=0}^{L}(1-p^i)Tr[\Sigma_{x_0^i}(P_0^{i}+H_0^{i})]\notag\\ &+\sum_{i=0}^{L}\sum_{k=0}^{N}Tr(\Sigma_{v^i}L^{i}_{k+1}). \end{align} The proof is complete. \end{proof} \end{document}
\begin{document} \preprint{APS/123-QED} \title{Phonon trapping states as a witness for generation of phonon blockade in a hybrid micromaser system} \author{Hugo Molinares$^{1}$} \email{[email protected]} \author{Vitalie Eremeev$^{2,3}$} \email{[email protected]} \author{Miguel Orszag$^{1,4}$} \email{corresponding author: [email protected]} \affiliation{ $^{1}$Centro de Optica e Informaci\'on Cu\'antica, Universidad Mayor,\\ camino la Piramide 5750, Huechuraba, Santiago, Chile } \affiliation{ $^{2}$Instituto de Ciencias B\'asicas, Facultad de Ingenier\'ia y Ciencias, Universidad Diego Portales, Av. Ejercito 441, Santiago, Chile } \affiliation{ $^{3}$Institute of Applied Physics, Academiei 5, MD-2028, Chi\c{s}in\u{a}u, Moldova } \affiliation{ $^{4}$Instituto de F\'isica, Pontificia Universidad Cat\'olica de Chile, Casilla 306, Santiago, Chile } \date{\today} \begin{abstract} In a hybrid micromaser system consisting of an optical cavity with a moving mirror connected to a low temperature thermal bath, we demonstrate, both analytically and numerically, that for certain interaction times between a random atomic flux and the optomechanical cavity, vacuum phonon trapping states are generated. Furthermore, under the approach of the master equation with independent phonon and photon thermal baths, we show that the trapping of the phonons and photons is achieved for the same interaction times. The results also indicate that by increasing the cavity-oscillator coupling one may generate a coherent phonon state aside from the trapping states. Within the same hybrid system, but now connected to the squeezed phonon reservoir, a phonon blockade effect can be engineered. Moreover, we identify an interconnection between the trapping and blockade effects, particularly if one approaches the vacuum trapping state, strong phonon blockade can be achieved when the system is connected with a weakly squeezed phonon reservoir. 
\end{abstract} \maketitle \section{Introduction}\label{sec1} There are many interesting and important applications of quantum effects realized in cavity QED systems \cite{HR}, which commonly consists in a Fabry-Perot cavity (FPC) interacting with atoms, and the whole system interchanging its energy to a reservoir, usually a thermal one. In the mentioned system the standard interaction between the cavity mode and the atom is described by the well-known Jaynes-Cummings model \cite{JC}, and some external sources can be involved additionally to drive the cavity mode and pump the atoms in order to stimulate some expected effects. For example, maser/laser radiation \cite{Scully, Orszag}, superradiance \cite{Dicke, Gross}, Schrodinger cat states \cite{HR}, photon squeezing \cite{Walls, Scully}, sub-Poissonian photon statistics and “trapping” states \cite{Filipowicz,Weidinger,Walther2001, Meystre}, are some of such quantum phenomena. An interesting at fundamental level, as well rich in quantum features, is the micromaser (MM) model \cite{Scully,Orszag}, widely investigated theoretically \cite{Filipowicz,2phMM,Bergou} and experimentally \cite{Maser,2phMexp, Walther, Weidinger} in the decades of 80’s and 90’s. Actually, the most attractive quantum properties for applications observed in MM, are related to the photon blockade \cite{Walther2001}, squeezing \cite{Dag2016}, non-classical states \cite{Nation2013,Dambach2019,Kouzelis2020}, MM synchronization \cite{Davis2016}. Having in mind these MM’s features, we were motivated to investigate a kind of hybrid micromaser (HMM) architecture, particularly considering an oscillating mirror of the FPC, so including additionally the optomechanical interaction in the system’s Hamiltonian \cite{Aspelmeyer}. As result of this, one expects that the HMM will enrich the quantum features as compared to the standard MM because of the influence of a new degree of freedom, that is the mechanical motion mode. 
The hybrid systems considering interactions between cavities, atoms and mechanical resonators, nowadays are of great importance for many applications in quantum technologies \cite{Aspelmeyer, Mun2018,Mir2020} as well are the promising candidates to probe the foundations of the quantum mechanics, e.g. macroscopic quantum superpositions \cite{Nak1999, Liao2016, Mon2017, Teh2018}, quantum correlations at macroscopic scales \cite{Sca2013, Mar2018}, phonon and photon blockade effects \cite{Liu2010, Didier2011, Wang2016, Restrepo2017, Xu2019, expPB, Shi2019, Zheng2019, Yang2020, Lin2021, APL2021}. In the present work we develop a comprehensive investigation of the effects of trapping and blockade for phonons and photons in the HMM model. On the one hand, from the fundamental point of view, it is important to know how the mechanical degree of freedom affects the photon quantum features particular to the standard MM. On the other hand, for practical reasons, it is of high interest to engineer tools of control of different elements in a system, and the hybrid one is a marvel candidate for quantum control protocols. Therefore, we embrace these two strategies that converge to results of fundamental and application importance. Hence, in the HMM system, connected to a low temperature thermal reservoir, we show the possibility of generation of the vacuum phonon trapping states. Besides, the trapping of the phonons and photons can be achieved for the same interaction times. Another quantum effect which attracted our interest is the phonon blockade (antibunching). The effects of phonon and photon blockade were intensively studied the last decade, particularly in the hybrid setups \cite{Liu2010, Didier2011, Wang2016, Restrepo2017, Xu2019, expPB, Shi2019, Zheng2019, Yang2020, Lin2021, APL2021}. In order to realize such an effect commonly one needs a quantum strong nonlineariry in the system, i.e. 
for the photon blockade it can be a strong Jaynes-Cummings interaction \cite{Imamoglu97}, strong optomechanical coupling \cite{Rabl2011}, Kerr-type nonlinearity \cite{Didier2011}; for the phonon blockade could be a strong effective atom-cavity-mechanics interaction \cite{Restrepo2017}, strong opto/spin-mechanical coupling \cite{Liu2010, Xu2019} or the nonlinear phonon term similar to the Kerr one \cite{APL2021}. An alternative mechanism to stimulate the blockade effect, which is known as unconventional, is based on the destructive quantum interference between different paths so that the two-quanta and higher order excitations are blocked, e.g. photon \cite{Liew2010, Majumdar2012} and phonon \cite{Yang2020, Lin2021} unconventional blockade. Our proposal of HMM is developed in a weak optomechanical coupling regime and hence to enhance the quantum nonlinearity in the HMM we propose to connect the mechanical oscillator (MO) to a squeezed phonon reservoir. Therefore, by connecting the HMM to the squeezed phonon reservoir, one may stimulate and control the phonon blockade effect. Particularly, in the case of weak squeezed phonon reservoir and weak optomechanical coupling one observes the creation of the phonon blockade when the system approaches the vacuum phonon trapping state. As result, we can control one quantum effect by the other one as tuning some system’s parameters. Such protocol could be very useful for phonon blockade detection, which from the experimental point of view is challenging as pointed in the theoretical and experimental proposals \cite{Didier2011,Wang2016,expPB,APL2021}. Therefore, in this work we suggest to detect the phonon blockade effect by using the prototype of the photon trapping experimental setup \cite{Weidinger} adjusted (designed) for the HMM. This work is organized as follows. In Sec. \ref{sec:level2} we present the conceptual model of the HMM, by defining the Hamiltonian and the unitary dynamics. In Sec. 
\ref{sec:level3} the master equation for HMM is developed to get the phonon and photon density operators under the decoherence effects. Next, in Secs. \ref{sec:level4} and \ref{sec:level5} we demonstrate how the trapping states of the phonon and photons are realized, and the effect of synchronization between these trapping states is shown. Section \ref{sec:level6} is devoted to the study of the second order coherence function by which the maser effect is analyzed as function of the optomechanical coupling. In Sec. \ref{sec:level7} we present the effect of phonon blockade and explain how it is controlled by the vacuum squeezed reservoir and the optomechanical coupling. Finally, Sec. \ref{sec:level8} is devoted to Discussion, where we analyze the obtained results, suggest an experimental setup and make the conclusions. \begin{figure} \caption{Schematic diagram of a hybrid micromaser model, where a set of atoms (one at a time) crosses the cavity field connected to a mechanical oscillator.} \label{fig1} \end{figure} \section{MODEL AND MASTER EQUATION OF THE HYBRID MICROMASER} \subsection{\label{sec:level2}Hamiltonian of the atom-cavity-mechanics system} Let us consider a hybrid atom-cavity-mechanics system, as illustrated in Fig. \ref{fig1}. The Jaynes-Cummings interaction between the two-level atom of frequency $\omega_{a}$ and the mode of the cavity field of frequency $\omega_{c}$ is quantified by the coupling constant $g_{ac}$. The optomechanical interaction between the cavity and MO of frequency $\omega_{m}$ is quantified by the coupling constant $g_{cm}$. 
Therefore, we will consider our hybrid system described by the Hamiltonian under the rotating wave approximation as (with $\hbar=1$) \begin{eqnarray}\label{base} \mathcal{\hat{H}}&=&\omega_{a}\frac{\hat{\sigma}_{z}}{2}+\omega_{c}\hat{a}^{\dagger}\hat{a}+\omega_{m}\hat{b}^{\dagger}\hat{b}\nonumber\\ &+& g_{ac}\left(\hat{a}\hat{\sigma}_{+}+\hat{a}^{\dagger}\hat{\sigma}_{-}\right) -g_{cm}\hat{a}^{\dagger}\hat{a}\left(\hat{b}^{\dagger}+\hat{b}\right), \end{eqnarray} where $\hat{a}(\hat{a}^{\dagger})$ and $\hat{b}(\hat{b}^{\dagger})$ are the annihilation (creation) operators of the cavity and the MO modes, respectively. These operators obey the boson commutation relation $[\hat{a}(\hat{b}), \hat{a}^{\dagger}(\hat{b}^{\dagger})]=1$; and $\hat{\sigma}_{z}, \hat{\sigma}_{+}$, $\hat{\sigma}_{-}$ are respectively, the $z$-Pauli, raising and lowering spin operators. In the following, we calculate the Hamiltonian in the interaction picture (rotating at the mechanical frequency $\omega_{m}$) \begin{equation}\label{model} \hat{\tilde{\mathcal{H}}}=\delta\frac{\hat{\sigma}_{z}}{2}+g_{ac}\left(\hat{\sigma}_{+}\hat{a}e^{i \mathcal{\hat{F}}(t)}+\hat{\sigma}_{-}\hat{a}^{\dagger}e^{-i \mathcal{\hat{F}}(t)}\right), \end{equation} where we used the Hermitian operator $\mathcal{\hat{F}}(t)=-i\lambda\left(\hat{b}^{\dagger}\eta-\hat{b}\eta^{*}\right)$, with $\lambda=g_{cm}/\omega_{m}$, $\eta=e^{i\omega_{m}t}-1$ and $\delta=\omega_{a}-\omega_{c}$ is the detuning (see details in Appendix \ref{apendice1}). To solve the quantum dynamics, we proceed to derive the time evolution operator for the Hamiltonian in Eq. \ref{model} defined by $\mathcal{\hat{U}}(t)=\sum_{n=0}^{\infty}(-i t \mathcal{\hat{\tilde{H}}})^{n}/n!$. 
By a straightforward calculation, we get the time evolution operator: \begin{equation}\label{unitary} \mathcal{\hat{U}}(t)= \begin{pmatrix} \mathcal{\hat{C}} & -ig_{ac}\mathcal{\mathcal{\hat{S}}}\hat{a}e^{i\mathcal{\hat{F}}}\\\\ -ig_{ac}e^{-i\mathcal{\hat{F}}}\hat{a}^{\dagger}\mathcal{\mathcal{\hat{S}}} & \mathcal{\hat{D}} \end{pmatrix}, \end{equation} where \begin{align} \mathcal{\hat{C}}&=\cos{\left(t\sqrt{\hat{\varphi}+g_{ac}^{2}}\right)}-\frac{i\delta}{2}\frac{\sin{\left(t\sqrt{\hat{\varphi}+g_{ac}^{2}}\right)}}{\sqrt{\hat{\varphi}+g_{ac}^{2}}},\\ \mathcal{\hat{D}}&=\cos{\left(t\sqrt{\hat{\varphi}}\right)} +\frac{i\delta}{2}\frac{\sin{\left(t\sqrt{\hat{\varphi}}\right)}}{\sqrt{\hat{\varphi}}}\label{eq5},\\ \mathcal{\mathcal{\hat{S}}}&=\frac{\sin{\left(t\sqrt{\hat{\varphi}+g_{ac}^{2}}\right)}}{\sqrt{\hat{\varphi}+g_{ac}^{2}}}, \end{align} where $\hat{\varphi}=g_{ac}^{2}\hat{a}^{\dagger}\hat{a}+\left(\delta/2\right)^{2}$. According to the last experimental results, the optomechanical coupling cover a wide spectrum of values \cite{Aspelmeyer}. However, in the Eq. \ref{unitary} we have neglected the fast oscillations of the mechanical frequency, within which $\eta=-1$ is valid in the weak coupling regime, $g_{cm}\ll\omega_{m}$ \cite{Murch,Painter,Xuereb}. \subsection{\label{sec:level3}Master equation of HMM} In the following we study how the optomechanical interaction affects the well known micromaser model \cite{Scully,Orszag}. Let us consider the initial atom-cavity-MO operator $\hat{\rho}(0)=\hat{\rho}_{a}(0)\otimes\hat{\rho}_{c}(0)\otimes\hat{\rho}_{m}(0)$, then after the interaction time $\tau$, the density operator becomes \begin{equation} \hat{\rho}(\tau)=\mathcal{\hat{U}}(\tau) \left[\hat{\rho}_{a}(0)\otimes \hat{\rho}_{c}(0)\otimes\hat{\rho}_{m}(0)\right]\mathcal{\hat{U}}^{\dagger}(\tau), \end{equation} where the evolution operator $\mathcal{\hat{U}}(\tau)$ is defined in Eq. \ref{unitary}. 
Now, in order to study the dynamics of the mechanical subsystem, we trace over the atom and cavity field subsystems. For this, we assume that initially the atom is in ground state, i.e. $\hat{\rho}_{a}(0)=\vert0\rangle\langle0\vert$, and the cavity field is in a coherent state \begin{equation}\label{coh} \hat{\rho}_{c}(0)=e^{-\vert\alpha\vert^{2}}\sum_{n,m}\frac{\alpha^{n}\alpha^{*m}}{\sqrt{n!}\sqrt{m!}}\vert n\rangle\langle m\vert , \end{equation} where $\alpha$ is, in general, a complex number. By performing the matrix product and tracing over the atom, we get \begin{equation}\label{rocm} \hat{\rho}_{c,m}(\tau) = \hat{\rho}_{m}(0)\mathcal{\hat{D}}\hat{\rho}_{c}(0)\mathcal{\hat{D}}^{\dagger}+ g^{2}_{ac}e^{i\mathcal{\hat{F}}}\hat{\rho}_{m}(0)e^{-i\mathcal{\hat{F}}}\mathcal{\mathcal{\hat{S}}}\hat{a}\hat{\rho}_{c}(0)\hat{a}^{\dagger}\mathcal{\mathcal{\hat{S}}}. \end{equation} Finally, tracing over the cavity density operator, so the atom and cavity fields leave the MO in the state defined by the reduced density operator \begin{eqnarray}\label{rom} \hat{\rho}_{m}(\tau)&=&A(\tau)\hat{\rho}_{m}(0) + B(\tau)e^{i \mathcal{\hat{F}}}\hat{\rho}_{m}(0)e^{-i \mathcal{\hat{F}}}\nonumber\\ &\equiv& \hat{\mathcal{M}}_{m}(\tau)\hat{\rho}_{m}(0), \end{eqnarray} where $\hat{\mathcal{M}}_{m}$ is the gain superoperator acting on $\hat{\rho}_{m}$, and the coefficients $A(\tau)$ and $B(\tau)$ are given by \begin{eqnarray}\label{coefab} A(\tau) &=& e^{-\vert \alpha\vert ^{2}}\sum_{n}\frac{\vert \alpha\vert ^{2n}}{n!}\nonumber\\ &\times& \left[\cos^{2}{\left(\tau\sqrt{\varphi_{n}}\right)}+\frac{\delta^{2}}{4}\frac{\sin^{2}{\left(\tau\sqrt{\varphi_{n}}\right)}}{\varphi_{n}}\right],\\ B(\tau) &=& e^{-\vert \alpha\vert ^{2}}\sum_{n}\frac{\vert \alpha\vert ^{2(n+1)}}{n!}\frac{g_{ac}^{2} \sin^{2}{\left(\tau\sqrt{\varphi_{n+1}}\right)}}{\varphi_{n+1} }\label{coefab2}, \end{eqnarray} with $\varphi_{n+1}=g_{ac}^{2}(n+1)+\left(\delta/2\right)^{2}$. 
Since $\mathrm{Tr}\left\{\hat{\rho}_{m}(\tau)\right\}=1$, it follows that $A(\tau)+B(\tau)=1$.
Taking into account the above, and considering the gain part assisted by atoms injected at the rate $r$, also including the loss term for the MO subsystem, so the ME reads \begin{eqnarray}\label{me} \frac{d\hat{\rho}_{m}}{dt} &=& r\left(\hat{\mathcal{M}}_{m}(\tau)-1\right)\hat{\rho}_{m}(t)\nonumber\\ &+& \frac{\kappa_{b}}{2}(1+\bar{n}_{th})\mathcal{L}[\hat{b}]\hat{\rho}_{m}(t)+\frac{\kappa_{b}}{2}\bar{n}_{th}\mathcal{L}[\hat{b}^{\dagger}]\hat{\rho}_{m}(t), \end{eqnarray} where $r$ is the rate at which atoms enters into the cavity and $\mathcal{L}[\hat{b}]=2\hat{b}\hat{\rho} \hat{b}^{\dagger}-\hat{b}^{\dagger}\hat{b}\hat{\rho}-\hat{\rho} \hat{b}^{\dagger}\hat{b}$ is the standard Lindbladian describing the decoherence effect. Here $\kappa_{b}$ is the decay rate of the mechanical mode to the phonon thermal bath with $\bar{n}_{th}\equiv(\exp{[\hbar\omega_m/k_B T_m]}-1)^{-1}$ at temperature $T_m$. \section{\label{sec:level4}Generation of trapping states in the Hybrid micromaser} \subsection{\label{sec:level4}Phonon trapping states} In terms of Glauber’s $P$-distribution, the MO density matrix can be written as $\hat{\rho}_{m}(t)=\int d^{2}\beta P(\beta,\beta^{*},t)\vert \beta\rangle\langle\beta\vert $. The gain superoperator $\mathcal{\hat{M}}_{m}(\tau)$ in this notation generates \begin{eqnarray}\label{gen} \mathcal{\hat{M}}_{m}(\tau)\hat{\rho}_{m}(t)&=&\int d^{2}\beta P(\beta,\beta^{*},t)\{A(\tau)|\beta\rangle\langle\beta| \nonumber\\ &+&B(\tau)|\beta+\lambda\rangle\langle\beta+\lambda|\}. \end{eqnarray} As we see, the first term of the gain is proportional to the initial coherent state, and in the second term, the coherent state is modified due to the displacement operator. A witness for the vacuum phonon trapping states correspond to a sharp decrease in the steady state of the average phonon number, that can be calculated solving the ME (Eq. \ref{me}). 
By the standard technique to convert the ME into a Fokker-Planck second-order differential equation \cite{Carmichael} with the loss term $\mathcal{L}\hat{\rho}_{m}=\frac{\kappa_{b}}{2}\left(\frac{\partial}{\partial\beta}\beta+\frac{\partial}{\partial\beta^{*}}\beta^{*}\right)P+\kappa_{b}\bar{n}_{th}\frac{\partial^{2} P}{\partial \beta\partial \beta^{*}}$, we get the time-dependent Fokker-Planck equation \begin{eqnarray}\label{fp} \frac{\partial P}{\partial t} &=& \frac{\partial P}{2\partial \beta}\left(\kappa_{b}\beta-2B(\tau)\lambda r\right) +\frac{\partial P}{2\partial \beta^{*}}\left(\kappa_{b}\beta^{*}-2B(\tau)\lambda r\right)\nonumber\\ &+&\kappa_{b}\bar{n}_{th}\frac{\partial^{2} P}{\partial \beta\partial \beta^{*}}+\left(\kappa_{b}+r\left[A(\tau)+B(\tau)-1\right]\right)P. \end{eqnarray} For an initial thermal distribution we find a solution of the Fokker-Planck equation (see details in Appendix \ref{apendice3}): \begin{equation}\label{sfp} P(\beta,\beta^{*},t)=\frac{1}{\pi\bar{n}_{th}}\exp{\left[-\vert \beta-\beta_{1}\vert ^{2}/\bar{n}_{th}\right]}, \end{equation} with $\beta_{1}=\frac{2\lambda r B(\tau)}{\kappa_{b}}(1-\exp{\left[-\kappa_{b}t/2\right]})$. Now, using the definition for the average phonon number, $\langle \hat{b}^{\dagger}\hat{b}\rangle\equiv Tr\left\{\hat{b}^{\dagger}\hat{b}\hat{\rho}_{m}(t )\right\}$ we get \begin{equation} \langle \hat{b}^{\dagger}\hat{b}\rangle = \bar{n}_{th}+\frac{4B^{2}(\tau)\lambda^{2}r^{2}}{\kappa^{2}_{b}}\left(1-\exp{\left[-\kappa_{b}t/2\right]}\right)^{2}. \end{equation} Hence, the steady state $(t\rightarrow\infty)$ average phonon number is \begin{equation}\label{analitico} \langle \hat{b}^{\dagger}\hat{b}\rangle =\bar{n}_{th}+\frac{4B^{2}(\tau)\lambda^{2}r^{2}}{\kappa^{2}_{b}}. 
\end{equation} Although the above expression can be written in terms of an apparently simple formula, the state of the system changes dramatically as a function of the atomic flow rate $r$ and atom-cavity interaction time, parametrized by the pump parameter $\Theta=\tau \left(\omega_{m}r\right)^{1/2}$. In panels $(a)$ and $(b)$ of Fig. \ref{Fig2} we show the evolution of the average phonon occupation number as a function of $\Theta$ for different amplitudes of the coherent state of the cavity, $\alpha$, and for different optomechanical couplings, $g_{cm}$, respectively. Analytically, the trapping states can be obtained from the condition (see details in Appendix \ref{apendice4}) \begin{equation}\label{cond} \sum_{n}\frac{\vert \alpha\vert ^{2(n+1)}\sqrt{n+1}}{(n+1)!}\sin{\left(2g_{ac}\Theta\sqrt{\frac{n+1}{\omega_{m}r}}\right)}=0. \end{equation} The above equation can be solved numerically, and therefore, the values of $\Theta$ corresponding to the minimum values of $\langle\hat{b}^{\dagger}\hat{b}\rangle$ are determined, see vertical lines in Fig. \ref{Fig2}(b). In Fig. \ref{Fig2}(c) we plot the average phonon number in its minimum, for $\Theta = \left\{\Theta_{1}, \Theta_{2}, \Theta_{3}\right\}$ as function of the optomechanical coupling, $g_{cm}$. Therefore, the optomechanical coupling, although it does not control the critical point, $\Theta$ of the trapping states, it does contribute directly in the mean number of phonons, as results in Eq. \ref{analitico} through the parameter $\lambda$. In Fig. \ref{Fig2}(d) we show the average phonon occupation number as a function of the temperature of the thermal bath, $\bar{n}_{th}$. We observe that the vacuum trapping phonon states tend to occur for low value of the mean number of thermal phonons. Since $B(\tau)\propto\sqrt{\mid\langle \hat{b}^{\dagger}\hat{b}\rangle-\bar{n}_{th}\mid}$, so, in order to have a vacuum trapping state, i.e. 
$\langle\hat{b}^{\dagger}\hat{b}\rangle\rightarrow0$, the thermal phonon number must be close to zero \cite{Filipowicz}. \begin{figure*} \caption{$(a)$ Steady state average occupation number of photons [black-solid line, using Eq. \ref{me2} \label{Fig3} \end{figure*} \subsection{\label{sec:level5}Photon trapping states and synchronization with the phonon mode} In the previous subsection, we studied how the phonon trapping states can be realized by developing the MM model \cite{Orszag} for the HMM with optomechanical coupling, so that the indirect interaction between the MO and atoms is mediated by the cavity. On the other hand, the cavity-atom interaction is direct in this model, via Jaynes-Cummings coupling (see Eq. \ref{base}), then one expects the occurrence of the photon trapping states as in the standard MM. To evaluate how the cavity mode dynamics is affected in the HMM and its correspondence to the mechanics mode, we develop the ME for the cavity field. Following the results in previous section, after tracing over the atom, was obtained the Eq. \ref{rocm}. Next, tracing over the MO and assuming a random arrival of the atoms interacting with the cavity ($p\rightarrow0$), we get the ME for the cavity field: \begin{eqnarray}\label{me2} \frac{d\hat{\rho}_{c}}{dt} &=& r\left(\hat{\mathcal{M}}_{c}(\tau)-1\right)\hat{\rho}_{c}(t)\nonumber\\ &+& \frac{\kappa_{a}}{2}(1+\bar{n}_{th})\mathcal{L}[\hat{a}]\hat{\rho}_{c}(t)+\frac{\kappa_{a}}{2}\bar{n}_{th}\mathcal{L}[\hat{a}^{\dagger}]\hat{\rho}_{c}(t), \end{eqnarray} where $\kappa_{a}$ is the decay rate of the cavity mode to the bath with $\bar{n}_{th}$ photons on average. In Eq. 
\ref{me2} $\mathcal{\hat{M}}_{c}(\tau)$ is the gain superoperator acting on $\hat{\rho}_{c}$, defined by \begin{eqnarray} \hat{\mathcal{M}}_{c}(\tau) &=& g_{ac}^{2}\hat{a}^{\dagger}\frac{\sin{\left(\tilde{\Theta}\sqrt{\hat{\varphi}/\omega_{m}r}\right)}}{\sqrt{\hat{\varphi}}}\hat{\rho}_{c}(0) \frac{\sin{\left(\tilde{\Theta}\sqrt{\hat{\varphi}/\omega_{m}r}\right)}}{\sqrt{\hat{\varphi}}}\hat{a}\nonumber\\ &+& \cos{\left(\tilde{\Theta}\sqrt{\hat{\varphi}/\omega_{m}r}\right)}\hat{\rho}_{c}(0)\cos{\left(\tilde{\Theta}\sqrt{\hat{\varphi}/\omega_{m}r}\right)}, \end{eqnarray} where $\tilde{\Theta}\equiv \Theta$. The above equation shows that in the weak regime $g_{cm}\ll\omega_{m}$, the initial condition for the MO does not directly affect the dynamics of the cavity field, however, as will be discussed later, the initial condition for the cavity field (Eq. \ref{coh}) modifies the value of the pump parameter $\Theta$, where the phonon trapping states are generated. \begin{figure*} \caption{$(a)$ Second-order coherence degree for photons (black-solid line) and phonons (blue dashed line) as a function of the pump parameter, $\Theta$, for $g_{cm} \label{Fig4} \end{figure*} Next, we can evaluate a complete analytical solution for the steady state elements of the cavity density matrix. This is because the ME leads to a dynamics, in which the diagonal and off-diagonal elements in the Fock states basis are decoupled. Finally, the detailed balance approach leads to the following expression for the diagonal elements \cite{Filipowicz} \begin{equation} P_{n}=N\prod_{k=1}^{\infty}\frac{r}{\kappa_{a}}\frac{\sin^{2}(g_{ac}\tilde{\Theta}\sqrt{k/\omega_{m}r})}{k} \end{equation} where $N$ is a constant determined by normalization. The photon trapping condition occurs for certain specific values of $\tilde{\Theta}$ such that, $\sin{\left(g_{ac}\tilde{\Theta}\sqrt{\frac{k+1}{\omega_{m}r}}\right)}=0$ with $k=0,1,2,\ldots$, then $P_{n}=0$ for $n>k$. 
So, the matrix element that generates transitions between the $k$ and $k+1$ number states vanishes, e.g. for the vacuum photon trapping state, $k=0$, one finds that $\tilde{\Theta}=\left\{\tilde{\Theta}_{1}\approx9.36, \tilde{\Theta}_{2}\approx18.73, \tilde{\Theta}_{3}\approx28.09\right\}$, almost the same values as for $\{\Theta_1, \Theta_2, \Theta_3\}$, see Fig.~\ref{Fig2}. Fig. \ref{Fig3}(a) shows the behavior of the steady state average number of excitations for both modes, photons and phonons, as functions of the pump parameter, $\Theta$. We can see that the vacuum trapping states for both modes exist at almost the same interaction times. Certainly, the steady-state photon average number reaches lower values than the phonon one at the minima, since the interaction between the photons and the atoms is direct and strongly nonlinear, while the phonons interact with the atoms through the cavity field, so the phonon trapping effect is weaker. As observed in Fig. \ref{Fig3}(b), the optimal synchronization is achieved for a relatively low field amplitude, i.e. $\vert \alpha\vert \ll1$. For more clarity, in Fig. \ref{Fig3}(c) we show that e.g. for the interaction times, $\Theta_{1}$ and $\Theta_{2}$, the Wigner functions for both subsystems look similar, corresponding to the vacuum states. For other interaction times these functions show different states. In the case of the cavity density operator, one gets a superposition of the Fock states while for the MO one has a displaced coherent state. This last result is expected from Eq. \ref{gen} for values where the interaction time leads to $B(\tau)\neq0$. \subsection{\label{sec:level6}Second-order correlation function} In this section, we evaluate one important characteristic of the micromaser model, which corresponds to the degree of coherence of the maser emission, which can be quantified by the second-order correlation function, $g^{(2)}(0)$. In Fig. 
\ref{Fig4}(a) we present the numerical calculations for the cavity and MO second-order correlation functions vs. the pump parameter, $\Theta$, for the baths at zero temperature. As a result, the photons and phonons evidence super-Poissonian statistics for the interaction times where the trapping of vacuum states occurs, see vertical green dotted line. Additionally, we can find that the cavity evidences the photon blockade effect $(g_{a}^{(2)}(0)\rightarrow0)$, matching perfectly with the trapping of one photon, see vertical magenta dotted line and respectively the inset showing the generation of the one photon state. Therefore a correspondence between the two phenomena is observed for the one-photon case; however, there is no correspondence between the blockade and trapping for the vacuum states of photons and phonons. On the other hand, as will be explained in the next section, one may realize a matching between the blockade and trapping corresponding to the phonon vacuum state. In the following, we calculate analytically the mechanical second-order correlation function defined by $g_{b}^{(2)}(0)=\langle\hat{b}^{\dagger}\hat{b}^{\dagger}\hat{b}\hat{b}\rangle/\langle\hat{b}^{\dagger}\hat{b}\rangle^{2}$, and after a simple, but rather long calculation (see details in Appendix \ref{apendice5}) one gets the final expression \begin{equation}\label{g2} g_{b}^{(2)}(0)=\frac{2\bar{n}^{2}_{th}+4\beta^{2}_{1}\bar{n}_{th}+\beta^{4}_{1}}{\bar{n}^{2}_{th}+2\beta^{2}_{1}\bar{n}_{th}+\beta^{4}_{1}}. \end{equation} Now, let's study the effect of the optomechanical coupling on the MO connected to a thermal bath at finite temperature. Then, Fig. \ref{Fig4}(b) shows the mechanical second-order correlation function as a function of the pump parameter, $\Theta$, for different optomechanical couplings. The environment temperature has been set such that $\bar{n}_{th}=0.01$. The solid lines correspond to the analytical solutions, Eq. 
\ref{g2}, and the dashed lines correspond to the numerical solutions, Eq. \ref{me}. In particular, one observes when there is no optomechanical interaction, i.e. $g_{cm}=0$, the phonon correlation function evidences a thermal distribution: $g_{b}^{(2)}(0)=2, \forall\Theta$. This result can be deduced from Eq. \ref{rom} since $\hat{\rho}_{m}(\tau)=\hat{\rho}_{m}(0)$ for the null optomechanical coupling and the MO is initially in equilibrium with the thermal bath. On the other hand, the optomechanical coupling will stimulate the coherent phonon statistics, i.e. $g_{b}^{(2)}(0)\sim1$ outside of the specific values $\left\{\Theta_{1},\Theta_{2},\Theta_{3}\right\}$ corresponding to phonon trapping states. However, in the close vicinity of these values the phonon statistics is super-Poissonian. As conclusion of this section, there is no sign of a blockade effect in the case of a thermal phonon bath for any pump parameter, only a coherent phonon emission stimulated by the optomechanical coupling is observed. \begin{figure*} \caption{$(a)$ Second-order coherence degree as a function of the squeezing parameter, $\xi$ and the pump parameter, $\Theta$ (with $g_{cm} \label{Fig5} \end{figure*} \section{\label{sec:level7}Phonon blockade with squeezed vacuum reservoir} In the following we discuss how to realize the effect of phonon blockade and its relation to the trapping states. As was mentioned in the Introduction, to get the phonon blockade, according to the recent studies \cite{Yang2020, Lin2021, Restrepo2017, Liu2010, Xu2019, APL2021} two mechanisms can be used: i) conventional - by a strong driven phonon nonlinearity in the system, or ii) unconventional - by the destructive interference between different paths which restrict the higher than one-phonon excitations. In the present model we propose the enhancement of the phonon nonlinearity by considering the interaction of the system with a phonon vacuum squeezed reservoir. 
Therefore, the MO dynamics is described by the following maser ME \begin{eqnarray}\label{dinamica} \frac{d\hat{\rho}_{m}}{dt} &=& r\left(\hat{\mathcal{M}}_{m}(\tau)-1\right)\hat{\rho}_{m}\nonumber\\ &+& \frac{\kappa_{b}}{2}(N_{sq}+1)\left(2\hat{b}\hat{\rho}_{m} \hat{b}^{\dagger}- \hat{b}^{\dagger}\hat{b}\hat{\rho}_{m}-\hat{\rho}_{m} \hat{b}^{\dagger}\hat{b}\right)\nonumber\\ &+& \frac{\kappa_{b}}{2}N_{sq}\left(2\hat{b}^{\dagger}\hat{\rho}_{m} \hat{b}-\hat{b}\hat{b}^{\dagger}\hat{\rho}_{m}-\hat{\rho}_{m} \hat{b}\hat{b}^{\dagger}\right)\nonumber\\ &+& \frac{\kappa_{b}}{2}M_{sq}\left(2\hat{b}^{\dagger}\hat{\rho}_{m} \hat{b}^{\dagger}- \hat{b}^{\dagger}\hat{b}^{\dagger}\hat{\rho}_{m}-\hat{\rho}_{m} \hat{b}^{\dagger}\hat{b}^{\dagger}\right)\nonumber\\ &+& \frac{\kappa_{b}}{2}M_{sq}^{*}\left(2\hat{b}\hat{\rho}_{m} \hat{b}-\hat{b}\hat{b}\hat{\rho}_{m}-\hat{\rho}_{m} \hat{b}\hat{b}\right), \end{eqnarray} where $N_{sq}=\sinh^{2}{\xi}$ corresponds to the average number of phonons in the squeezed reservoir at zero temperature and the quantity $M_{sq}=-\exp{(i\phi)}\sinh{\xi}\cosh{\xi}$ obeys the relation $\sqrt{N_{sq}(N_{sq}+1)}=\vert M_{sq}\vert $. Here, the parameters $\xi$ and $\phi$ represent the squeezing amplitude and the phase, respectively, as they appear in the definition of the complex squeezing parameter, $\zeta=\xi\exp{[i\phi]}$. In Fig. \ref{Fig5}(a) we plot the phonon second-order coherence degree as a function of the squeezing parameter, $\xi$, and pump parameter, $\Theta$. On the one hand, for the no squeezing case, $\xi=0$, super-Poissonian peaks occur, i.e. $g_{b}^{(2)}(0)>1$. On the other hand, for the squeezed reservoir with $\xi\sim [0.01,1]$, we find that the phonon correlation function evidences sub-Poissonian phonon statistics, i.e. $g_{b}^{(2)}(0)<1$, for specific values of the pump parameter. 
In addition, one observes that if coming closer to the vacuum trapping state, the stronger blockade effect $(g_{b}^{(2)}(0)\rightarrow0)$ can be achieved with less squeezing, $\xi\sim10^{-2}$. In Fig. \ref{Fig5}(b-d) we plot the phonon second-order coherence degree as a function of the optomechanical coupling, $g_{cm}$ and the squeezing parameter, $\xi$, for each value of pump parameter, $\Theta$, where the phonon vacuum trapping states occur. As result, one finds that in each of these $\Theta$ values, there is an optomechanical coupling that optimizes the phonon blockade according to a certain squeezing parameter. In Fig. \ref{Fig6} we plot the phonon second-order correlation function (left vertical axis) and the steady state average number of phonons and photons (right vertical axis) vs. pump parameter, $\Theta$. One finds that the phonon correlation function evidences a phonon blockade ($g_{b}^{(2)}(0)<1$) at the positions where phonon and photon vacuum trapping states are generated. In the remaining intervals of $\Theta$, it is a coherent distribution, $g_{b}^{(2)}(0)=1$, i.e. a phonon lasing effect. Since we are in a good synchronization zone, $\alpha=0.384$ (see Fig. \ref{Fig3}b), both trapping states of phonons and photons, represent a good witness for the phonon blockade effect. Therefore, the generation of the phonon blockade effect in HMM and its detection by the photon and phonon trapping vacuum states conclude this section and these effects represent the main results of the work. \section{\label{sec:level8}Discussion} In summary, we have proposed a hybrid micromaser model consisting of atoms passing at a given rate through a cavity with a moving mirror, which is connected to a low temperature thermal bath in one case or to a vacuum squeezed phonon reservoir in another case. We demonstrate, both analytically and numerically, that for certain interaction times between the atoms and the optomechanical cavity, vacuum phonon trapping states are generated. 
In this framework, with independent phonon and photon thermal baths, we show that the trapping of the phonons and photons may occur simultaneously, i.e. at same value of $\Theta$ as observed in Figs. \ref{Fig3}(a,c). Therefore, we find that for a low coherence amplitude of the initial cavity state one has better synchronization of trapping of the phonon and photon states, see Fig. \ref{Fig3}(b). Moreover, by increasing the optomechanical coupling, the phonon second-order correlation function evidences the effect of a coherent distribution, $g_{b}^{(2)}(0)\sim1$, that occurs for the finite steady state phonon number, i.e. outside of the vacuum trapping states, see Fig. \ref{Fig4}(b). Besides that the creation of the phonon trapping states \textit{per se} is an interesting and new (to the best of our knowledge) quantum effect in a hybrid maser architecture, our work goes further and we propose this effect as a witness and control of another important quantum effect that is the phonon blockade (see Fig. \ref{Fig6}). To have a photon/phonon blockade effect, commonly one needs a strong photon/phonon nonlinearity in the system. However, as our model of HMM is developed in the weak optomechanical coupling regime with the mechanical mode connected to a thermal bath, all this will result in a very weak phonon nonlinearity and hence the phonon blockade is not observed, see Fig. \ref{Fig4}(a-b). At the same time the photon nonlinearity is strong via the Jaynes-Cummings coupling, and so the photon blockade may occur for some values of the atom-cavity interaction time, see in Fig. \ref{Fig4}(a) when $g_a^{(2)}(0)<1$. Therefore, we propose to connect the HMM to a vacuum squeezed phonon reservoir in order to search the phonon blockade. As result, one observes that in the close neighborhood of the critical points of $\Theta$, resulted from Eq. 
\ref{cond}, where the trapping states occur, the phonon distribution becomes sub-Poissonian so that $g_{b}^{(2)}(0)<1$, corresponding to so-called phonon blockade effect (see Figs. \ref{Fig5} - \ref{Fig6}). From the experimental point of view the model of HMM can in principle use a similar setup of the standard MM, where the photon trapping effect was observed for the first time \cite{Weidinger}. To produce the optomechanical interaction, the microwave cavity will have a movable mirror. The configuration of a FPC with oscillatory mirror could be inspired from the detectors of the gravitational waves (GW), where the optomechanical coupling is the main ingredient and very well studied \cite{Aspelmeyer}. Of course, the GW interferometers usually are of very large dimensions as compared to the common FPC used for maser and laser setups, but what one needs is particularly to consider the physical functionality of the cavity with a moving mirror. There exist some setups of small-sized tabletop GW interferometers using Fabry Perot resonator \cite{GWD}, which may serve as desirable examples to develop an experimental prototype of HMM optimal to observe the phonon trapping and blockade effects. Moreover our proposal is developed in the regime of weak optomechanical coupling, hence the HMM does not require a strong coupling between the cavity and vibrating mirror. Additionally to point out, in the standard MM experiments one finds a particular problem, if the cavity is crossed simultaneously by two atoms, the photon trapping leaks and eventually get destroyed \cite{Orszag94}. This effect causes an experimental difficulty, since the atoms arrive to the cavity at random times and there is a non zero probability of having such a situation. In the case of phonon HMM, we may encounter a similar difficulty, unless the atoms pass through a very fine velocity filter. 
As a central result of the present study, we identify an interconnection between the trapping and blockade effects for phonons and photons in the HMM. Particularly if one approaches to the phonon trapping states, which additionally is synchronized to the photon trapping, the phonon blockade effect can be achieved for a slightly squeezed reservoir and corresponding optomechanical coupling, see Fig. \ref{Fig5}. Our proposal for witnessing the phonon blockade by the trapping states in a hybrid micromaser setup could facilitate the experimental detection of the phonon blockade effects. In conclusion, we can realize in the HMM setup several common maser properties for the phonon and photon modes, like coherence, trapping and sub-Poissonian states. These effects highlight the findings of this work. \begin{figure} \caption{Second-order correlation function for phonons (green curve) and steady state average number of photons (black curve) and phonons (blue curve) vs. the pump parameter, $\Theta$. Other parameters (in units of $\omega_m$) are: $g_{ac} \label{Fig6} \end{figure} \begin{acknowledgments} H.M. acknowledge financial support from Universidad Mayor through the Doctoral fellowship. V.E. and M.O. acknowledge the financial support from Fondecyt Regular No. $1180175$. V.E. acknowledge grant No. $20.80009.5007.01$ of the State Program (2020-2023) from National Agency for Research and Development of Moldova. \end{acknowledgments} \appendix \section{\label{apendice1}Hamiltonian in the interaction picture} This appendix provides the derivation of the initial Hamiltonian Eq. \ref{base} in the interaction picture. Let us introduce the operator counting the number of atom-cavity polaritonic excitations: \begin{equation} \hat{\mathcal{N}}=\hat{a}^{\dagger}\hat{a}+\hat{\sigma}_{z}/2 \end{equation} We observe that this polariton number operator commutes with the Hamiltonian of the system, $[\hat{\mathcal{H}},\hat{\mathcal{N}}]=0$. 
Therefore, the Hamiltonian of the closed system is block-diagonal in the basis of eigenvectors of the polariton number operator. By considering the detuning $\delta=\omega_{a}-\omega_{c}$, the Hamiltonian in Eq. \ref{base} can be written as \begin{equation} \hat{\mathcal{H}}=\hat{\mathcal{H}}_{0}+\hat{\mathcal{H}}_{I}, \end{equation} where \begin{align} \hat{\mathcal{H}}_{0}&=\omega_{m}\hat{b}^{\dagger}\hat{b},\nonumber\\ \hat{\mathcal{H}}_{I}&=\delta\frac{\hat{\sigma}_{z}}{2}+ g_{ac}\left(\hat{a}\hat{\sigma}_{+}+\hat{a}^{\dagger}\hat{\sigma}_{-}\right) -g_{cm}\hat{a}^{\dagger}\hat{a}\left(\hat{b}^{\dagger}+\hat{b}\right). \end{align} Now, we calculate the Hamiltonian in the first interaction picture, that is \begin{equation}\label{cuadro1} \hat{\mathcal{H}}'=e^{i \hat{\mathcal{H}}_{0} t}\hat{\mathcal{H}}_{I}e^{-i \hat{\mathcal{H}}_{0} t}. \end{equation} Using the fact that \begin{equation} \mathcal{\hat{U}}f\left(\left\{\mathcal{\hat{X}}_{i}\right\}\right)\mathcal{\hat{U}}^{\dagger} =f\left(\left\{\mathcal{\hat{U}}\mathcal{\hat{X}}_{i}\hat{U}^{\dagger}\right\}\right), \end{equation} for any function $f$, unitary operator $\mathcal{\hat{U}}$ and arbitrary set of operators $\left\{\mathcal{\hat{X}}_{i}\right\}$, so the Eq. \ref{cuadro1} takes the form \begin{eqnarray} \hat{\mathcal{H}}'=\hat{\mathcal{H}}'_{0}+\hat{\mathcal{H}}'_{I}, \end{eqnarray} where \begin{align} \hat{\mathcal{H}}'_{0}&=\delta\frac{\hat{\sigma}_{z}}{2}+ g_{ac}\left(\hat{a}\hat{\sigma}_{+}+\hat{a}^{\dagger}\hat{\sigma}_{-}\right),\nonumber\\ \hat{\mathcal{H}}'_{I}&=-g_{cm}\hat{a}^{\dagger}\hat{a}\left(\hat{b}^{\dagger}e^{i\omega_{m}t}+\hat{b}e^{-i\omega_{m}t}\right). \end{align} Now, we move to a second interaction picture, defined as \begin{equation} \hat{\tilde{\mathcal{H}}}=\exp{\left[i\int\hat{\mathcal{H}}'_{0}dt\right]}\hat{\mathcal{H}}'_{I}\exp{\left[-i\int\hat{\mathcal{H}}'_{0}dt\right]}. \end{equation} The result of the transformation is easily calculated to be finally the Eq. 
\ref{model}.\\ \section{\label{apendice2}Phonon gain superoperator} This section presents the derivation of the gain part of the maser ME for the MO operator. We consider an initial atom-cavity-oscillator operator as \begin{equation} \hat{\rho}(0)=\begin{pmatrix} 0 & 0\\\\ 0 & 1 \end{pmatrix}\otimes\hat{\rho}_{c}(0)\otimes\hat{\rho}_{m}(0), \end{equation} where the atom was taken in the ground state. After the interaction time $\tau$, the total density operator evolves according to Eq. \ref{unitary}. After a straightforward calculation, one gets \begin{widetext} \begin{equation} \hat{\rho}(\tau)= \begin{pmatrix} g_{ac}^{2}\mathcal{\mathcal{\hat{S}}}\hat{a}e^{i\mathcal{\hat{F}}}\hat{\rho}_{c}(0)\otimes\hat{\rho}_{m}(0)e^{-i\mathcal{\hat{F}}}\hat{a}^{\dagger}\mathcal{\mathcal{\hat{S}}} & -i g_{ac}\mathcal{\mathcal{\hat{S}}}\hat{a}e^{i\mathcal{\hat{F}}}\hat{\rho}_{c}(0)\otimes\hat{\rho}_{m}(0)\mathcal{\hat{D}}^{\dagger}\\\\ ig_{ac}\mathcal{\hat{D}}\hat{\rho}_{c}(0)\otimes\hat{\rho}_{m}(0)e^{-i\mathcal{\hat{F}}}\hat{a}^{\dagger}\mathcal{\mathcal{\hat{S}}} & \mathcal{\hat{D}}\hat{\rho}_{c}(0)\otimes\hat{\rho}_{m}(0)\mathcal{\hat{D}}^{\dagger} \end{pmatrix}, \end{equation} \end{widetext} where $\mathcal{\hat{D}}^{\dagger}$ is hermitian conjugate of $\mathcal{\hat{D}}$, see Eq. \ref{eq5}. Next, tracing over the atom, the cavity-MO reduced operator reads \begin{equation} \hat{\rho}_{c,m}(\tau) = \hat{\rho}_{m}(0)\mathcal{\hat{D}}\hat{\rho}_{c}(0)\mathcal{\hat{D}}^{\dagger}+ g^{2}_{ac}e^{i\mathcal{\hat{F}}}\hat{\rho}_{m}(0)e^{-i\mathcal{\hat{F}}}\mathcal{\mathcal{\hat{S}}}\hat{a}\hat{\rho}_{c}(0)\hat{a}^{\dagger}\mathcal{\mathcal{\hat{S}}}. \end{equation} In the following, we consider the cavity field initialized in a coherent state (Eq. \ref{coh}). 
Hence, the above equation becomes \begin{eqnarray} \hat{\rho}_{c,m}(\tau) &=& \hat{\rho}_{m}(0)e^{-\vert \alpha\vert ^{2}}\sum_{n,m}\frac{\alpha^{n}\alpha^{*m}}{\sqrt{n!m!}}D_{n}D_{m}^{*}\vert n\rangle\langle m\vert \nonumber\\ &+& g^{2}_{ac}e^{i\mathcal{\hat{F}}}\hat{\rho}_{m}(0)e^{-i\mathcal{\hat{F}}}e^{-\vert \alpha\vert ^{2}}\sum_{n,m}\frac{\alpha^{n}\alpha^{*m}}{\sqrt{n!m!}}\sqrt{nm}\nonumber\\ &\times& S_{n-1}S_{m-1}\vert n-1\rangle\langle m-1\vert , \end{eqnarray} where \begin{align} D_{n}&=\cos{\left(\sqrt{\varphi_{n}}\tau\right)}+\frac{i\delta}{2}\frac{\sin{\left(\tau\sqrt{\varphi_{n}}\right)}}{\sqrt{\varphi_{n}}},\\ S_{n-1}&=\frac{\sin{\left(\tau\sqrt{\varphi_{n-1}+g_{ac}^{2}}\right)}}{\sqrt{\varphi_{n-1}+g_{ac}^{2}}}. \end{align} Finally tracing over the cavity, we find the Eqs. \ref{rom}, \ref{coefab} and \ref{coefab2}. \section{\label{apendice3}Solution of the Fokker-Planck equation} Here we present the steps to get the solution \ref{sfp} of the Fokker-Planck equation \ref{fp}. Let us consider the MO initially is in a coherent state, i.e. in the Gaussian representation takes the form \begin{equation} P(\beta,\beta^{*},0)=(\pi\epsilon)^{-1}\exp{\left[-\vert \beta-\beta_{0}\vert ^{2}/\epsilon\right]} \end{equation} By considering a solution type $P(\beta,\beta^{*},t)=\exp{\left[a(t)+b(t)\beta+c(t)\beta^{*}+d(t)\beta\beta^{*}\right]}$ one obtains a set of first order differential equations \begin{align} \dot{a}(t)&=\kappa_{b}\left(d(t)+\bar{n}_{th}d^{2}(t)\right),\\ \dot{b}(t)&=\kappa_{b}\left(\frac{1}{2}c(t)+\bar{n}_{th}c(t)d(t)\right)-\lambda B(\tau)rd(t),\\ \dot{c}(t)&=\kappa_{b}\left(\frac{1}{2}b(t)+\bar{n}_{th}b(t)d(t)\right)-\lambda B(\tau)rd(t),\\ \dot{d}(t)&=\kappa_{b}\left(1+\bar{n}_{th}[c(t)b(t)+d(t)]\right)-\lambda B(\tau)r[c(t)+b(t)]. 
\end{align} For an initial thermal distribution, $\epsilon=\bar{n}_{th}$, $\beta_{0}=0$ and after a straightforward calculation, we get \begin{eqnarray} P(\beta,\beta^{*},t) &=& \frac{1}{\pi\bar{n}_{th}}\textrm{exp}[\textrm{ln}[\pi\bar{n}_{th}]\nonumber\\ &-&\frac{\beta_{1}^{2}}{\bar{n}_{th}}-\frac{|\beta|^{2}}{\bar{n}_{th}}+\frac{\beta_{1}\beta}{\bar{n}_{th}}+\frac{\beta_{1}\beta^{*}}{\bar{n}_{th}}], \end{eqnarray} with $\beta_{1}=2\lambda r B(\tau)\kappa_{b}^{-1}(1-\exp{\left[-\kappa_{b}t/2\right]})$. Finally, the last equation can be written compactly as Eq. \ref{sfp}. \section{\label{apendice4}Phonon trapping condition} The critical values of the pump parameter, $\Theta$, where the phonon trapping vacuum states occur, are calculated from the condition of the minimum value for the steady state average phonon number: $\frac{\partial}{\partial\Theta}\langle \hat{b}^{\dagger}\hat{b}\rangle=0$ with $\frac{\partial^{2}}{\partial\Theta^{2}}\langle \hat{b}^{\dagger}\hat{b}\rangle>0$. So the derivative of Eq. \ref{analitico} gives: \begin{eqnarray} \frac{\partial}{\partial\Theta}\langle \hat{b}^{\dagger}\hat{b}\rangle &=& \frac{8e^{-2\vert \alpha\vert ^{2}}g_{ac}\lambda^{2}r^{3/2}}{\kappa_{b}^{2}\omega_{m}^{1/2}}\nonumber\\ &\times& \sum_{n}\frac{\vert \alpha\vert ^{2(n+1)}\sqrt{n+1}}{(n+1)!}\sin{\left(2g_{ac}\Theta\sqrt{\frac{n+1}{\omega_{m}r}}\right)}\nonumber\\ &\times& \sum_{n}\frac{\vert \alpha\vert ^{2(n+1)}}{(n+1)!}\sin{\left(g_{ac}\Theta\sqrt{\frac{n+1}{\omega_{m}r}}\right)}^{2}. \end{eqnarray} Taking into account that $\left\{\alpha,g_{ac},r,\lambda,\kappa_{b}\right\}>0$, the phonon trapping state is valid for \begin{equation} \sum_{n}\frac{\vert \alpha\vert ^{2(n+1)}\sqrt{n+1}}{(n+1)!}\sin{\left(2g_{ac}\Theta\sqrt{\frac{n+1}{\omega_{m}r}}\right)}=0. \end{equation} Finally, the above equation can be solved numerically, and the minimal values for $\langle\hat{b}^{\dagger}\hat{b}\rangle$ are found, see vertical lines in Fig. \ref{Fig2}$(b)$. 
\section{\label{apendice5}Calculation of $g_{b}^{(2)}(0)$ } To calculate the second-order correlation function for phonons, $g_{b}^{(2)}(0)=\langle\hat{b}^{\dagger}\hat{b}^{\dagger}\hat{b}\hat{b}\rangle/\langle\hat{b}^{\dagger}\hat{b}\rangle^{2}$, we find it convenient to use the moment-generating function \begin{equation} \mathcal{Q}(s)=\sum_{n=0}^{\infty}(1-s)^{n}P(n), \end{equation} where $P(n)=\int d^{2}\beta P(\beta,\beta^{*},t)\vert \langle n\vert \beta\rangle\vert ^{2}$ is the probability to have $n$ phonons in the MO and $P(\beta,\beta^{*},t)$ is defined in Eq. \ref{sfp}. After a straightforward calculation one obtains \begin{equation} P(n)= \frac{1}{\pi\bar{n}_{th}n!} \int d^{2}\beta \vert \beta\vert ^{2n} e^{-\vert \beta\vert ^{2}-\frac{\vert \beta-\beta_{1}\vert ^{2}}{\bar{n}_{th}}}. \end{equation} Finally, using the definition $g^{(2)}(0)=\frac{1}{\langle n\rangle^{2}}\frac{d^{2}\mathcal{Q}}{ds^{2}}\vert _{s=0}$, see \cite{Scully}, we get Eq. \ref{g2}. \end{document}
\begin{document} \begin{center} \textbf{\Large Dynamics of information in the presence of deformation} N. Metwally \\[0pt] Math. Dept., Faculty of Science, South Valley University, Aswan, Egypt. \\[0pt] E-mail: Nmetwally$@$gmail.com \end{center} \begin{abstract} The entanglement of an atomic system consisting of two atoms interacting with a deformed cavity mode is quantified by means of the Bloch vectors and the cross dyadic of the traveling state inside the cavity. For large values of the deformation the amplitudes of the Bloch vectors decrease very fast and consequently the traveling state turns into a mixed state quickly. The generated entangled state is used as a quantum channel to implement the quantum teleportation protocol. It is shown that both the deformation parameter and the number of photons inside the cavity play a central role in controlling the fidelity of the transmitted information. \end{abstract} \section{Introduction} Controlling quantum coherence is one of the most fundamental issues in quantum information processing \cite{nil}. From a practical point of view, investigating the behavior of entanglement in the presence of noise is very important, and many studies have been done in different directions. Among these noises is the deformation, which arises from the defects of devices \cite{Lav,Abou,Bon}. So, in the presence of deformation, one can generate a deformed entangled state and consequently there will be a deformation of the information processing. The quantization of the field in the presence of deformed operators has been investigated in different topics, where the relation between the deformed and non-deformed operators has been obtained in \cite{Hong,Man,Lav2}. As an application of the deformed operators, Mancini and Man'ko have compared the dynamics of information coded in a deformed cat state with that in a non-deformed cat state \cite{Man2}. 
Also, in \cite{Yuk} holonomic quantum gates using isospectral deformations of an Ising model have been constructed. The survival of quantum coherence against dissipation, provided one superposes distinguishable coherent states of a suitably deformed field, has been investigated in \cite{Man3}. Recently, Metwally et al. have shown that the non-classical properties of a two-qubit state can be enhanced via deformed operators \cite{Metwally}. These efforts motivate us to introduce a different application of the deformation. In this work, we consider a system consisting of two separable atoms interacting with a cavity mode with multiple photons. The operators which describe the field are assumed to be deformed. We investigate the behavior of the entanglement which is generated between the two atoms. Also, the dynamics of the Bloch vectors is investigated for this deformed atomic system. Finally, we use the generated entangled state between the two atoms as a quantum channel to implement the quantum teleportation protocol. The effect of the deformation on the fidelity of the transmitted information is investigated. This paper is organized as follows: In Sec.~2, we introduce the model and its solution by means of the Bloch vectors and the cross dyadic. The entanglement and the dynamics of the Bloch vectors are studied in Sec.~3. The quantum teleportation protocol is the subject of Sec.~4. Finally, we summarize our results in Sec.~5. \section{ Model} Let us assume that we have an atomic system consisting of two atoms interacting with a cavity mode with $m$ photons. The annihilation and creation operators which describe the cavity mode are assumed to be deformed. The deformed operators result as a distortion of the usual annihilation and creation operators \cite{Lav,Man}. 
In the rotating wave approximation, the interaction of the cavity mode and the atomic system is described by the Hamiltonian,
\begin{equation}
\hat{H}=\varpi _{0}{a_q}^{\dag }a_q+\frac{1}{2}\omega(\sigma_z+\tau_z)+\lambda_1 (\sigma_{+}a_q ^{m}+\sigma_{-} a_q ^{\dag m})+ \lambda_2 (\tau_{+}a_q ^{m}+\tau_{-} a_q ^{\dag m}) ,
\label{Ham}
\end{equation}
where $\varpi _{0}$ is the frequency of the field, $\omega$ is the atomic transition frequency, $\lambda_i$, $i=1,2$, are the coupling constants between the atoms and the field, and $a_q$ and $a_q^{\dag }$ are the deformed annihilation and creation operators, which can be described by means of the non-deformed operators $a$ and $a^{\dagger}$ as,
\begin{equation}
a_q=af(\hat{n}),\quad a_q^{\dag }=a^{\dag }f(\hat{n}),
\label{DefOper}
\end{equation}
where $f(\hat{n})$ is a function of the photon-number operator $\hat{n}=a^{\dag }a$. The deformed operators $a_q$ and ${a_q}^{\dag }$ satisfy the commutation relations,
\begin{eqnarray}
\left[ a_q,{a_q}^{\dag }\right] &=&(\hat{n}+1)f^{2}(\hat{n}+1)-\hat{n}f^{2}(\hat{n}), \label{DefRel} \\
\left[ a_q,\hat{n}\right] &=&a_q,~\left[ a_q^{\dag },\hat{n}\right] =-a_q ^{\dag }. \nonumber
\end{eqnarray}
The operators $\sigma_{\pm }=\sigma_x\pm i\sigma_y$, $\tau_{\pm }=\tau_x\pm i\tau_y$ and $\sigma_{z}$, $\tau_z$ are the raising (lowering) and inversion operators for the two atoms. The operators $\sigma_i$ and $\tau_i$, $i=x,y,z$, are the Pauli operators for the first and the second atom respectively. In this treatment, the function $f(\hat{n})$ represents what is called the $q$-deformation and it is defined as
\begin{equation}
f(\hat{n})=\sqrt{\frac{1-q^{\hat{n}}}{\hat{n}(1-q)}}.
\end{equation}
In the invariant sub-space of the global system, we can assume a complete set of basis states of the field-atomic system as $\left\vert e,e,n\right\rangle$, $\left\vert e,g,n+2\right\rangle$, $\left\vert g,e,n+2\right\rangle$ and $\left\vert g,g,n+2\right\rangle.$ Now, let the initial state of the atomic system be $\left\vert \psi _{a}(0)\right\rangle =a_{1}\left\vert ee\right\rangle +a_{2}\left\vert eg\right\rangle +a_{3}\left\vert ge\right\rangle +a_{4}\left\vert gg\right\rangle,$ while the field is initially prepared in the coherent state $\left\vert \psi _{f}(0)\right\rangle =\sum_{n=0}^{\infty }W_{n}\left\vert n\right\rangle$, $W_{n}=\frac{\overline{n}^{n}}{\sqrt{n!}}\exp\left( -\frac{1}{2}|\overline{n}|^{2}\right).$ For convenient notation, the density operator of the atomic system is described by the Bloch vectors and the cross dyadic as \cite{metwally1,Englert2},
\begin{equation}
\rho _{a}(0)=\frac{1}{4}\left( 1+\row{s}_{0}\cdot\col\sigma + \row{t}_{0}\cdot\col\tau+\row\sigma\cdot\dyadic{C}(0)\cdot\col\tau\right) ,
\label{Inatom}
\end{equation}
where,
\begin{equation}
\row{s}_{0}=(s_{x}(0),s_{y}(0),s_{z}(0)),\quad \row{t}_{0}=(t_{x}(0),t_{y}(0),t_{z}(0)),
\end{equation}
are the initial Bloch vectors of the two atoms respectively, with
\begin{eqnarray}
s_{x}(0) &=&(a_{1}a_{3}^{\ast }+a_{3}a_{1}^{\ast })+(a_{2}a_{4}^{\ast }+a_{4}a_{2}^{\ast }),s_{y}(0)=i(a_{3}a_{1}^{\ast }-a_{1}a_{3}^{\ast })+i(a_{4}a_{2}^{\ast }-a_{2}a_{4}^{\ast }), \nonumber \\
s_{z}(0) &=&|a_{1}|^{2}+|a_{2}|^{2}-|a_{3}|^{2}-|a_{4}|^{2},t_{x}(0)=(a_{1}a_{2}^{\ast }+a_{2}a_{1}^{\ast })+(a_{3}a_{4}^{\ast }+a_{4}a_{3}^{\ast }), \nonumber \\
t_{y}(0) &=&i(a_{2}a_{1}^{\ast }-a_{1}a_{2}^{\ast })+i(a_{4}a_{3}^{\ast }-a_{3}a_{4}^{\ast }),t_{z}(0)=|a_{1}|^{2}-|a_{2}|^{2}+|a_{3}|^{2}-|a_{4}|^{2}.
\label{BlochIni} \end{eqnarray} and the elements of the initial cross dyadic between the two atoms are given by, \begin{eqnarray} c_{xx}(0) &=&a_{1}a_{4}^{\ast }+a_{2}a_{3}^{\ast }+a_{3}a_{2}^{\ast }+a_{4}a_{1}^{\ast },c_{xy}(0)=i(a_{4}a_{1}^{\ast }-a_{1}a_{4}^{\ast })+i(a_{2}a_{3}^{\ast }-a_{3}a_{2}^{\ast }), \nonumber \\ c_{xz}(0) &=&(a_{1}a_{3}^{\ast }+a_{3}a_{1}^{\ast })-(a_{2}a_{4}^{\ast }+a_{4}a_{2}^{\ast }),c_{yx}(0)=i(a_{4}a_{1}^{\ast }-a_{1}a_{4}^{\ast })+i(a_{3}a_{2}^{\ast }-a_{2}a_{3}^{\ast }), \nonumber \\ c_{yy}(0) &=&(a_{3}a_{2}^{\ast }+a_{2}a_{3}^{\ast })-(a_{4}a_{1}^{\ast }+a_{1}a_{4}^{\ast }),c_{yz}(0)=i(a_{2}a_{4}^{\ast }-a_{4}a_{2}^{\ast })+i(a_{3}a_{1}^{\ast }-a_{1}a_{3}^{\ast }), \nonumber \\ c_{zx}(0) &=&(a_{1}a_{2}^{\ast }+a_{2}a_{1}^{\ast })-(a_{3}a_{4}^{\ast }+a_{4}a_{3}^{\ast }),c_{zy}(0)=i(a_{2}a_{1}^{\ast }-a_{1}a_{2}^{\ast })+i(a_{3}a_{4}^{\ast }-a_{4}a_{3}^{\ast }), \nonumber \\ c_{zz}(0) &=&|a_{1}|^{2}-|a_{2}|^{2}-|a_{3}|^{2}+|a_{4}|^{2}. \label{IniDyad} \end{eqnarray} Also, the density operator of the field is given by, \begin{equation} \rho _{f}(0)=\sum_{n=0}^{\infty }W_{n}^{2}\left\vert n\right\rangle \left\langle n\right\vert. \label{InField} \end{equation} Then using the Hamiltonian (\ref{Ham}), the initial state of the atomic system (\ref{Inatom}), and the initial state of the field (\ref{InField}) one gets the time evolution of the density operator of the field and the atomic system, where $\lambda_1=\lambda_2=\lambda$, $\omega_1=\omega_2=\omega$, and $\omega_0-\omega=\Delta=0$. 
After tracing out the state of the field, the density operator of the atomic system is given by \begin{equation} \rho _{a}(t)=\frac{1}{4}\left( 1+\mathord{\buildrel{\lower3pt\hbox{$ \scriptscriptstyle\rightarrow$}}\over s}\cdot{\sigma ^{\raisebox{2pt}[ \height]{$\scriptstyle\downarrow$}}}+ \mathord{\buildrel{\lower3pt\hbox{$ \scriptscriptstyle\rightarrow$}}\over t}\cdot{\tau^{\raisebox{2pt}[ \height]{$\scriptstyle\downarrow$}}}+\mathord{\buildrel{\lower3pt\hbox{$ \scriptscriptstyle\rightarrow$}}\over \sigma }\cdot\mathord{\dyadic@rrow{C}} \cdot{\tau^{\raisebox{2pt}[\height]{$\scriptstyle\downarrow$}}} \right) , \label{FinalAtomic} \end{equation} where, \begin{eqnarray} s_{x}(t) &=&\sum_{n=0}^{\infty }\left( c_{n}^{(1)}c_{n-2}^{\ast (3)}+c_{n}^{(2)}c_{n-2}^{\ast (4)}+c_{n}^{(3)}c_{n+2}^{\ast (1)}+c_{n}^{(4)}c_{n+2}^{\ast (2)}\right) , \nonumber \\ s_{y}(t) &=&\sum_{n=0}^{\infty }\left( -ic_{n}^{(1)}c_{n-2}^{\ast (3)}-ic_{n}^{(2)}c_{n-2}^{\ast (4)}+ic_{n}^{(3)}c_{n+2}^{\ast (1)}+ic_{n}^{(4)}c_{n+2}^{\ast (2)}\right) , \nonumber \\ s_{z}(t) &=&\sum_{n=0}^{\infty }\left( |c_{n}^{(1)}|^{2}+|c_{n}^{(2)}|^{2}-|c_{n}^{(3)}|^{2}-|c_{n}^{(4)}|^{2} \right) , \nonumber \\ t_{x}(t) &=&\sum_{n=0}^{\infty }\left( c_{n}^{(1)}c_{n-2}^{\ast (2)}+c_{n}^{(2)}c_{n+2}^{\ast (1)}+c_{n}^{(3)}c_{n-2}^{\ast (4)}+c_{n}^{(4)}c_{n+2}^{\ast (3)}\right) , \nonumber \\ t_{y}(t) &=&\sum_{n=0}^{\infty }\left( -ic_{n}^{(1)}c_{n-2}^{\ast (2)}+ic_{n}^{(2)}c_{n+2}^{\ast (1)}-ic_{n}^{(3)}c_{n-2}^{\ast (4)}+ic_{n}^{(4)}c_{n+2}^{\ast (3)}\right) , \nonumber \\ t_{z}(t) &=&\sum_{n=0}^{\infty }\left( |c_{n}^{(1)}|^{2}-|c_{n}^{(2)}|^{2}+|c_{n}^{(3)}|^{2}-|c_{n}^{(4)}|^{2} \right) , \label{FinalBloch} \end{eqnarray} and the elements of the cross dyadic are, \begin{eqnarray} c_{xx}(t) &=&\sum_{n=0}^{\infty }\left( c_{n}^{(1)}c_{n-4}^{\ast (4)}+c_{n}^{(4)}c_{n+4}^{\ast (1)}+c_{n}^{(2)}c_{n}^{\ast (3)}+c_{n}^{(3)}c_{n}^{\ast (2)}\right) , \nonumber \\ c_{xy}(t) &=&i\sum_{n=0}^{\infty }\left( 
-c_{n}^{(1)}c_{n-4}^{\ast (4)}+c_{n}^{(4)}c_{n+4}^{\ast (1)}+c_{n}^{(2)}c_{n}^{\ast (3)}-c_{n}^{(3)}c_{n}^{\ast (2)}\right) , \nonumber \\ c_{xz}(t) &=&\sum_{n=0}^{\infty }\left( c_{n}^{(1)}c_{n-2}^{\ast (3)}+c_{n}^{(3)}c_{n+2}^{\ast (1)}-c_{n}^{(2)}c_{n-2}^{\ast (4)}+c_{n}^{(4)}c_{n+2}^{\ast (2)}\right) , \nonumber \\ c_{yx}(t) &=&i\sum_{n=0}^{\infty }\left( -c_{n}^{(1)}c_{n-4}^{\ast (4)}+c_{n}^{(4)}c_{n+4}^{\ast (1)}-c_{n}^{(2)}c_{n}^{\ast (3)}+c_{n}^{(3)}c_{n}^{\ast (2)}\right) . \nonumber \\ c_{yy}(t) &=&-\sum_{n=0}^{\infty }\left( c_{n}^{(1)}c_{n-4}^{\ast (4)}+c_{n}^{(4)}c_{n+4}^{\ast (1)}-c_{n}^{(2)}c_{n}^{\ast (3)}-c_{n}^{(3)}c_{n}^{\ast (2)}\right) , \nonumber \\ c_{yz}(t) &=&i\sum_{n=0}^{\infty }\left( -c_{n}^{(1)}c_{n-2}^{\ast (3)}+c_{n}^{(3)}c_{n+2}^{\ast (1)}+c_{n}^{(2)}c_{n-2}^{\ast (4)}+c_{n}^{(4)}c_{n+2}^{\ast (2)}\right) , \nonumber \\ c_{zx}(t) &=&\sum_{n=0}^{\infty }\left( c_{n}^{(1)}c_{n-2}^{\ast (2)}+c_{n}^{(2)}c_{n+2}^{\ast (1)}-c_{n}^{(3)}c_{n-2}^{\ast (4)}-c_{n}^{(4)}c_{n+2}^{\ast (3)}\right) , \nonumber \\ c_{zy}(t) &=&i\sum_{n=0}^{\infty }\left( -c_{n}^{(1)}c_{n-2}^{\ast (2)}+c_{n}^{(2)}c_{n+2}^{\ast (1)}+c_{n}^{(3)}c_{n-2}^{\ast (4)}-c_{n}^{(4)}c_{n+2}^{\ast (3)}\right) , \nonumber \\ c_{zz}(t) &=&\sum_{n=0}^{\infty }\left( |c_{n}^{(1)}|^{2}-|c_{n}^{(2)}|^{2}-|c_{n}^{(3)}|^{2}+|c_{n}^{(4)}|^{2} \right), \label{Finaldyd} \end{eqnarray} with, \begin{eqnarray} c_{n}^{(1)}(t) &=&a_{1}W_{n}-\nu _{1}(a_{1}\nu _{1}W_{n}+a_{4}\nu _{2}W_{n+2})\frac{\sin ^{2}\mu _{n} t}{\mu _{n}^{2}}-i\nu _{1}(a_{2}+a_{3})W_{n+1}\frac{\sin 2\mu _{n} t}{2\mu _{n} t}, \nonumber \\ c_{n}^{(2)}(t) &=&W_{n+1}(a_{2}\cos ^{2}\mu _{n} t-a_{3}\sin ^{2}\mu _{n} t)-i(a_{1}\nu _{1}W_{n}+a_{4}\nu _{2}W_{n+2})\frac{\sin 2\mu _{n} t}{2\mu _{n}t}, \nonumber \\ c_{n}^{(3)}(t) &=&W_{n+1}(a_{3}\cos ^{2}\mu _{n} t-a_{2}\sin ^{2}\mu _{n} t)-i(a_{1}\nu _{1}W_{n}+a_{4}\nu _{2}W_{n+2})\frac{\sin 2\mu _{n}t}{2\mu _{n}t}, \nonumber \\ c_{n}^{(4)}(t) &=&a_{4}W_{n+2}-\nu _{2}(a_{1}\nu 
_{1}W_{n}+a_{4}\nu _{2}W_{n+2})\frac{\sin ^{2}\mu _{n} t}{\mu _{n}^{2}}-i\nu _{2}(a_{2}+a_{3})W_{n+1}\frac{\sin 2\mu _{n}t}{2\mu _{n} t},\nonumber\\ \end{eqnarray} where, $\mu_n=\frac{1}{\sqrt{2}}\sqrt{\nu_1^2(n)+\nu_2^2(n)}$, $\nu_1(n)=\lambda\sqrt{(n+m)!/n!}$ and $\nu_2(n)=\lambda\sqrt{(n+2m)!/(n+m)!}$. \section{Dynamics of Entanglement} Since we use a measure of entanglement depends on the Bloch vectors and the cross dyadic of the two atoms, it is important to shed light on the dynamics of these vectors. This study gives us a perception in the form of the state of the two atoms as it passes through the deformed cavity. We assume that the two atoms are identical, so the dynamics of $|\row{s}|$ for the first atom and $|\row{t}|$ for the second atom has the same behavior. Fig.(1) shows the dynamics of the Bloch vectors for different values of the deformation. For free deformation cavity, the amplitude of the Bloch vectors decreases as $\lambda t$ increases. Due to the flocculating behavior, the travelling state through the cavity turns into mixed and pure state several time. As one consider the deformation, the amplitude of the Bloch vectors decrease more and the minimum values are always smaller than that depicted for the free deformation case (dot-curves). However as one increases the deformed parameter ($ q=0.9)$, the amplitude of the Bloch vectors decrease faster as shown in Fig.$(1a)$. This means that the state of the two qubits turns into a mixed state faster for larger values of the deformity. In Fig.(1b), the number of photons inside the cavity is increased ($m=2$). It is clear that for free deformation the amplitude of the Bloch vectors oscillates so fast and increases as time increases. So, by increasing the number of photons inside the cavity, one can increase the purity of the travelling state. In the presence of deformation ($q=0.5)$, the behavior of the amplitude of the Bloch vectors is similar to that depicted for the free deformation case. 
However as one increases the deformity parameter more $(q=0.9)$, the amplitude of the Bloch vectors increases. From Fig.(1a) and Fig.(1b), it is clear that by increasing the number of photons inside the cavity, one can avoid the defects which result from the deformed cavity. On the other hand, when the Bloch vectors vanish and the entanglement still survives between the two atoms, this means that an entangled state of Werner type \cite{Werner,Englert2} has been generated and it can be written as:
\begin{equation}
\rho_w=\frac{1}{4}(1+x_1\sigma_x\tau_x+x_2\sigma_y\tau_y+x_3\sigma_z\tau_z).
\end{equation}
However, at certain times, say $t=2.5,5$, as shown in Fig.(1a), the amplitudes of the Bloch vectors $|\row{s}|=|\row{t}|=1$ and the entanglement vanishes (see Fig.(2a)); this means that the atomic system turns into its initial state.
\begin{figure}
\caption{The Bloch vectors for the travelling state (\ref{InitAstate}).}
\end{figure}
To quantify the degree of entanglement between the two atoms, we use a measure defined by means of the Bloch vectors and the cross dyadic. The entangled dyadic is defined as
\begin{equation}
\dyadic{E}=\dyadic{C}-\row{s}\col{t}
\end{equation}
where $\dyadic{C}$ is a $3\times 3$ matrix which is defined by (\ref{Finaldyd}) and $\row{s}\col{t}$ is also a $3\times 3$ matrix whose elements can be obtained from (\ref{FinalBloch}). The degree of entanglement is defined as in \cite{Englert, Metwally2}
\begin{equation}\label{DoE}
\mathcal{E}=\mathrm{tr}\{~{\mathord{\dyadic@rrow{E}}}^{\mathsf{T}}\cdot \mathord{\dyadic@rrow{E}}\},
\end{equation}
where ${\mathord{\dyadic@rrow{E}}}^{\mathsf{T}}$ is the transpose of the dyadic $\mathord{\dyadic@rrow{E}}$ and $\mathcal{E}=0$ for separable states. Fig.(2) describes the dynamics of the entanglement which is generated between two atoms prepared initially in a separable state defined by,
\begin{equation}\label{InitAstate}
\rho _{a}=\frac{1}{4}(1+s_{z}+t_{z}+c_{zz}).
\end{equation}
\begin{figure}
\caption{The degree of entanglement $\mathcal{E}$.}
\end{figure}
Fig.(2a) shows the behavior of the degree of entanglement for $m=1$. Since we start with a separable atomic system, the degree of entanglement $\mathcal{E}=0$ at $\lambda t=0$. However, for $\lambda t>0$, an entangled state is generated between the two atoms. For $q=0.5$, one can easily see that the entanglement of the two atoms decreases as time increases (dot-curves). However, $\mathcal{E}$ goes to zero for a very small finite time and suddenly increases. As one increases the deformity parameter ($q=0.9$), the degree of entanglement decreases gradually but does not vanish. It is clear that the entanglement tends to zero several times for small values of the deformity parameter. On the other hand, for larger values of the deformity, $\mathcal{E}$ goes to zero only for a limited time. The dynamics of the entanglement for $m=2$ is displayed in Fig.(2b), where it is smaller than that depicted in Fig.(2a). However, the entanglement oscillates very fast as one increases the value of the deformation parameter. It is clear that the possibility of obtaining a long-lived entanglement increases as one increases the value of the deformation parameter, where we can see that the entanglement is zero only when $\lambda t=0$. One can conclude that the number of photons plays the role of a control parameter, where one can improve the degree of entanglement between the two atoms by increasing the number of photons inside the cavity. Also, long-lived entanglement between the two atoms can be obtained by increasing the deformation. \section{Teleportation} In this section, the generated entangled state (\ref{FinalAtomic}) is employed to implement the original quantum teleportation protocol \cite{ben}. This protocol is used to send an unknown quantum state between two users, Alice and Bob, by using local operations and classical communication.
Now assume that Alice is given an unknown state defined by,
\begin{equation}
\rho _{u}=\frac{1}{2}(1+\row{s_u}\cdot\col\sigma),
\label{unknown}
\end{equation}
where the components of the Bloch vector $\row{s_u}$ are given by
\begin{equation}
s_{u_x}=\alpha \beta ^{\ast }+\beta \alpha ^{\ast },\quad s_{u_y}=i(\beta \alpha ^{\ast }-\alpha \beta ^{\ast }),\quad s_{u_z}=|\alpha |^{2}-|\beta |^{2},~ \mbox{where}~|\alpha|^2+|\beta|^2=1.
\end{equation}
To achieve the teleportation protocol, the partners follow the following steps:
\begin{enumerate}
\item Alice performs the CNOT gate on her qubit and the given unknown qubit, followed by a Hadamard gate.
\item Alice measures her qubit and the unknown qubit randomly in one of the basis states $\left\vert ee\right\rangle$, $\left\vert eg\right\rangle$, $\left\vert ge\right\rangle$ and $\left\vert gg\right\rangle$, and sends her results to Bob by using a classical channel.
\item As soon as Bob receives the classical data from Alice, he applies a single-qubit operation on his qubit depending on Alice's results. So, if Alice measures in the basis $\ket{ee}$, Bob will obtain the state.
\begin{equation} \rho _{Bob}=\frac{1}{2}(1+\row{s}_{b}\cdot\row\sigma), \label{Rbob} \end{equation} where, $s_{x_b},s_{y_b},s_{z_b}$ are the components of $\row{s}_{b}$, \begin{eqnarray} s_{x_b} &=&|\alpha |^{2}(c_{n}^{(3)}c_{n-1}^{(\ast 4)}+c_{n}^{(4)}c_{n+2}^{(\ast 3)})+\alpha \beta ^{\ast }(c_{n}^{(4)}c_{n-1}^{(\ast 1)}+c_{n}^{(3)}c_{n}^{(\ast 2)})- \nonumber \\ &&\alpha ^{\ast }\beta (c_{n}^{(1)}c_{n-2}^{(\ast 2)}+c_{n}^{(3)}c_{n}^{(\ast 2)})+|\beta |^{2}(c_{n}^{(1)}c_{n-2}^{(\ast 2)}+c_{n}^{(2)}c_{n+2}^{(\ast 1)}), \nonumber \\ s_{y_b} &=&|\alpha |^{2}(c_{n}^{(4)}c_{n+2}^{(\ast 3)}-c_{n}^{(3)}c_{n-2}^{(\ast 4)})+\alpha \beta ^{\ast }(c_{n}^{(4)}c_{n-1}^{(\ast 1)}-c_{n}^{(3)}c_{n}^{(\ast 2)}) \nonumber \\ &&-\alpha ^{\ast }\beta (c_{n}^{(3)}c_{n}^{(\ast 2)}-c_{n}^{(1)}c_{n-2}^{(\ast 2)})+|\beta |^{2}(c_{n}^{(2)}c_{n+2}^{(\ast 1)}-c_{n}^{(1)}c_{n-2}^{(\ast 2)}), \nonumber \\ s_{z_b} &=&|\alpha |^{2}(|c_{n}^{(3)}|^{2}-|c_{n}^{(4)}|^{2})+\alpha \beta ^{\ast }(c_{n}^{(3)}c_{n+2}^{(\ast 1)}-c_{n}^{(4)}c_{n+2}^{(\ast 2)})+ \nonumber \\ &&\alpha ^{\ast }\beta (c_{n}^{(2)}c_{n-2}^{(\ast 4)}-c_{n}^{(1)}c_{n-2}^{(\ast 3)})+|\beta |^{2}(|c_{n}^{(1)}|^{2}-|c_{n}^{(2)}|^{2}). \end{eqnarray} \end{enumerate} \begin{figure} \caption{ The Fidelity of the teleported state (\ref{unknown} \end{figure} The accuracy of the teleported state (\ref{unknown}) is quantified by evaluating the fidelity, $\mathcal{F}$ \begin{equation} \mathcal{F}=\frac{1}{4}(1+\row{s_u}\cdot\row{s}_{Bob}). \end{equation} In Fig.(3), we investigate the dynamics of the fidelity $\mathcal{F}$ for different values of the field and atomic parameters. We use the generated entangled state which is defined by its Bloch vectors (\ref{FinalBloch}) and the cross dyadic (\ref{Finaldyd}) with $a_1=1$ and $a_2=a_3=a_4=0$. Also, we assume that Alice is given information coded in the state (\ref{unknown}) with $s_{x_u}=1$ and $s_{y_u}=s_{z_u}=0$. 
Fig.(3a) shows the behavior of $\mathcal{F}$ for $m=1$ and different values of the deformity parameter. From this figure it is clear that, for $q=0.5$, the amplitude of the oscillations of the fidelity decreases as time increases. As one increases the deformity parameter $(q=0.9)$, the fidelity is smaller than that depicted for small values of $q$. Also, the phenomena of revivals and collapses are clearly displayed for larger values of the deformity parameter. In Fig.(3b), we increase the number of photons inside the cavity ($m=2$). In this case the fluctuations of the fidelity are very fast for a small range of time. But as time increases these revivals almost disappear and the fidelity is almost constant. This behavior is changed dramatically as one increases the value of the deformity parameter ($q=0.9$), where the collapses appear for a small range of time. But as time goes on, the fidelity oscillates very fast, but the amplitudes of the fidelity $\mathcal{F}$ are always smaller than those shown in Fig.(3a). These results show that Bennett's teleportation protocol \cite{ben} is robust against the deformation. \section{Conclusion} In this contribution, a system of two atoms interacting with a deformed cavity mode is introduced. The solution of the system is obtained by means of the Bloch vectors and the cross dyadic of the travelling atoms inside the cavity. Due to the interaction, some entangled states have been generated. The amount of entanglement between the entangled atoms is quantified by using a measure that depends on the Bloch vectors and the cross dyadic. Although the entanglement decreases more as one increases the deformity, it is more robust, where it vanishes only on a small interval of time. The deformation of the entanglement appears clearly as one increases the number of photons inside the cavity, where the entanglement oscillates very fast in an irregular fashion.
Also, we investigate the dynamics of the Bloch vectors, where we show that as one increases the deformation parameter, the amplitudes of the Bloch vectors decrease much faster. One can obtain entangled states of Werner type with a high degree of entanglement. The effect of the deformity on the accuracy of the transported information between two users is investigated. The amplitude of the fidelity decreases in the presence of the deformity. This means that the minimum value of the fidelity is improved as time goes on. For large values of the deformation parameter, the fidelity decreases and the oscillations of the amplitudes decrease more. If the number of photons inside the cavity increases, the fidelity fluctuates very fast in a small range of time and as time goes on the fidelity is almost constant. The deformation of the fidelity appears for larger values of the deformity parameter, where the revivals are very fast. Finally, one can conclude that the deformation and the number of photons inside the cavity play an important role in the dynamics of the entanglement, the Bloch vectors and the fidelity of the teleported state. If the devices suffer from manufacturing defects, then by increasing the number of photons within the cavity one can reduce the distortion resulting from such defects. The effect of deformation is different from other types of noise which lead to a sudden death of entanglement and consequently a sudden death of communication. So, we expect that these results are important in quantum communication and consequently in building quantum computers. \textbf{Acknowledgment} I would like to thank Prof.~B.-G. Englert for his fruitful discussions and the important remarks which have improved the manuscript. \end{document}
\begin{document} \title{Interferometric Graph Transform for Community Labeling} \begin{abstract} We present a new approach for learning unsupervised node representations in community graphs. We significantly extend the Interferometric Graph Transform (IGT) to community labeling: this non-linear operator iteratively extracts features that take advantage of the graph topology through demodulation operations. An unsupervised feature extraction step cascades modulus non-linearity with linear operators that aim at building relevant invariants for community labeling. Via a simplified model, we show that the IGT concentrates around the E-IGT: those two representations are related through some ergodicity properties. Experiments on community labeling tasks show that this unsupervised representation achieves performances at the level of the state of the art on the standard and challenging datasets Cora, Citeseer, Pubmed and WikiCS. \end{abstract} \section{Introduction}\label{intro} Graph Convolutional Networks (GCNs) \cite{kipf2016semi} are now the state of the art for solving many supervised (using labeled nodes) and semi-supervised (using unlabeled nodes during training) graph tasks, such as nodes or community labeling. They consist in a cascade of layers that progressively average node representations, while maintaining discriminative properties through supervision. In this work, we are mainly interested in the principles that allow such models to outperform other baselines: we propose a specific class of GCNs, which is unsupervised, interpretable, with several theoretical guarantees while obtaining good accuracies on standard datasets. One of the reasons why GCNs lack interpretability is because no training objective is assigned to a specific layer except the final one: end-to-end training makes their analysis difficult \cite{oyallon2017building}. 
They also tend to oversmooth graph representations \cite{yang2016revisiting}, because applying successively an averaging operator leads to smoother representations. Also, the reason of their success is in general unclear \cite{li2018deeper}. In this work, we propose to introduce a novel architecture which, by design, will address those issues. Our model can be interpreted through the lens of Stochastic Block Models (SBMs) \cite{holland1983stochastic} which are standard, yet are not originally designed to analyze graph attributes through representation learning. For example, several works \cite{keriven2020sparse, abbe2017community} prove that a Laplacian matrix concentrates around a low-rank expected Laplacian matrix, via simplified models like a SBM \cite{cohen2020power}. In the context of community detection, it is natural to assume that the intra-class, inter-class connectivity and feature distributions of a random graph are ruled by an SBM. To our knowledge, this work is the first to make a clear connection with those unsupervised models and the self-supervised deep GCNs which solve datasets like Cora, Citeseer, Pubmed, or WikiCS. Our model is driven by ideas from the Graph Signal Processing \cite{hammond2011wavelets} community and based on the Interferometric Graph Transform \cite{oyallon2020interferometric}, a class of models mainly inspired by the (Euclidean) Scattering Transform \cite{mallat2012group}. The IGT aims at learning unsupervised (not using node labels at the representation learning stage), self-supervised representations that correspond to a cascade of isometric layer and modulus non-linearity, whose goal is to obtain a form of demodulation \cite{oyallon2018compressing} that will lead to smoother but discriminative representation, in the particular case of community labeling. 
Smooth means here, by analogy with Signal Processing \cite{mallat1999wavelet}, that the signal is in the low-frequency domain, which corresponds to a quite lower dimensional space if the spectral decay is fast enough: this is for instance the case with a standard Laplacian~\cite{grinsztajn2021lowrank} or a low-rank SBM adjacency matrix~\cite{loukas2018spectrally}. Here, the degree of invariance of a given representation is thus characterized by the smoothness of the signal. Our main contribution is to introduce a simplified framework that allows to analyze node labeling tasks based on a non-linear model, via concentration bounds and which is numerically validated. Our other contributions are as follows. First, we introduce a novel graph representation for community labeling, which doesn't involve community labels. It consists in a cascade of linear isometry, band-pass filtering, pointwise absolute value non-linearity. We refer to it as an Interferometric Graph Transform (IGT) (for community labeling), and we show that under standard assumptions on the graph of our interest, a single realization of our representation concentrates around the Expected Interferometric Graph Transform (E-IGT), which can be defined at the node level without incorporating any graph knowledge. We also introduce a novel notion of localized low-pass filter, whose invariance can be adjusted to a specific task. Second, we study the behavior of this representation under an SBM model: with our model and thanks to the structure of the IGT, we are able to demonstrate theoretically that IGT features accumulate around the corresponding E-IGT. We further show that the architecture design of IGTs allows to outperform GCNs in a synthetic setting, which is consistent with our theoretical findings. 
Finally, we show that this semi-supervised and unsupervised representation is numerically competitive with supervised representations on standard community labeling datasets like Cora, Citeseer, Pubmed and WikiCS. Our paper is organized as follows. First, we define the IGT in Sec.\@ \ref{sec:IGT} and study its basic properties. Sec.\@ \ref{sec:expected-IGT} defines the E-IGT and bounds its distance from the IGT. Then, we discuss our model in the context of a SBM in Sec.\@ \ref{sec:model} and we explain our optimization procedure in Sec.\@ \ref{sec:optim}. Finally, Sec.\@ \ref{sec:xp} corresponds to our numerical results. Our source can be found at \href{https://github.com/nathangrinsztajn/igt-community-detection}{https://github.com/nathangrinsztajn/igt-community-detection} and all proofs of our results can be found in the Appendix. \section{Related Work} We now discuss a very related line of work, namely the IGT \cite{oyallon2020interferometric}, which takes source in several conceptual ideas from the Scattering Transform \cite{mallat2012group}. Both consist in a cascade of unitary transform, absolute value non-linearity and linear averaging, except that the Euclidean structure is neatly exploited via Wavelets Transforms for complex classification tasks in the case of the standard Scattering Transform \cite{bruna2013invariant,oyallon2015deep,7324385,oyallon2018compressing}, whereas this structure is implicitly used in the case of IGT. In particular, similarly to a Scattering Transform, an IGT aims at projecting the feature representation in a lower dimensional space (low-frequency space) while being discriminative: the main principle is to employ linear operators, which combined with a modulus non-linearity, leads to a demodulation effect. In our case however, this linear operator is learned. 
The IGT for community labeling is rather different from the standard IGT: first, \cite{oyallon2020interferometric} is not amenable to node labeling because it doesn't preserve node localization, contrary to ours. Second, we do not rely on the Laplacian spectrum explicitly, contrary to \cite{gama2020stability,oyallon2020interferometric}. Third, the community experiments of \cite{gama2020stability,oyallon2020interferometric} are rather the classification of a diffusion process than a node labeling task. This is also similar to the Expected Scattering Transform \cite{mallat2013deep}, yet it is applied in a rather different context for reducing data variance, in order to shed light on standard Deep Neural Networks. Our E-IGT and the Expected-Scattering have a very close architecture; however the linear operators are obtained with rather different criteria (e.g., ours are obtained from a concave procedure rather than a convex one) and goals (e.g., preserving energy, whereas we try to reduce it). Note however that there is no equivalent of the E-IGT for contexts other than community detection or labeling, which is another major difference with \cite{oyallon2020interferometric}. In addition, our Prop.~\ref{prop:boundigt} is new compared to similar results of \cite{mallat2013deep}. Thus, while having similar architectures, those works have quite different outcomes and objectives. Another line of work corresponds to the Graph Scattering Transform \cite{gama2020stability,gao2019geometric,ioannidis2020efficient}, which proposes to employ a cascade of Wavelet Transforms that respects the graph structure \cite{hammond2011wavelets}. Yet, the principles that allow good generalization of those representations are unclear and they have only been tested until now on small datasets. Furthermore, this paper extends all those works by proposing an architecture and theoretical principles which are specific to the task of community labeling.
A last related line of work corresponds to the hybrid Scattering-GCNs \cite{min2020scattering}, which combines a GCN with the inner representation of a Scattering Transform on Graphs, yet they employ massive supervision to refine the weights of their architecture, which we do not do. The architecture of an IGT model for community labeling takes also inspiration from Graph Convolutional Networks (GCNs) \cite{kipf2016semi, bronstein2017geometric}. They are a cascade of linear operators and ReLU non-linearities whose each layer is locally averaged along local nodes. Due to this averaging, GCNs exhibit two undesirable properties: first, the oversmoothing phenomenon \cite{li2018deeper}, which makes learning of high-frequencies features difficult; second, the training of deeper GCNs is harder \cite{huang2020tackling} because much information has been discarded by those averaging steps. Other types of Graph Neural Networks succeeded in approximating message-passing methods \cite{chen2018supervised}, or have worked on the spatial domain such as Spectral GCNs \cite{bruna2013spectral}, and Chebynet \cite{defferrard2016convolutional}. In our work, we solely use a well chosen averaging for separating high-frequencies and low-frequencies without using any other extra-structure, which makes our method more generic than those approaches, without using supervision at all. We further note that theoretical works often address the problem of estimating the expected Laplacian under SBM assumptions \cite{keriven2020sparse, abbe2017community, le2018concentration}. However up to our knowledge, none of those works is applied in a semi-supervised context and they aim at discovering communities rather than estimating communities from a small subset of labels. Moreover, the model remains mostly linear (e.g. based on the spectrum of the adjacency matrix). Here, our representation is non-linear and amenable for learning with a supervised classifier. 
We also note that several theoretical results have allowed to obtain approximation or stability guarantees for GCNs \cite{ruiz2020graph, bruna2013invariant, keriven2019universal}: our work follows those lines and analyzes a specific type of GCN through the lens of Graph Signal Processing theory \cite{hammond2011wavelets}. \section{Framework} \paragraph*{Notations.} For a matrix $X$, we write $\Vert X\Vert^2=\text{Tr}(X^TX)=\sum_{i,j}X_{i,j}^2$ its squared Frobenius norm and for an operator $L$ (acting on $X$), we might consider the related operator norm $\Vert L\Vert\triangleq \sup_{\Vert X\Vert \leq 1}\Vert LX\Vert$. The norm of the concatenation $\{B,C\}$ of two operators $B,C$ is $\Vert \{B,C\}\Vert^2=\Vert B\Vert^2+\Vert C\Vert^2$ and this definition can be extended naturally to more than two operators. Note also that we use a different calligraphy between quantities related to the graph (e.g., adjacency matrix $\mathbfcal{A}$) and operators (e.g., averaging matrix $A$). We write $A \preccurlyeq B$ if $B-A$ is a symmetric positive matrix. Here, $a_n\sim b_n$ means that $\exists \alpha>0,\beta>0:\alpha |a_n|\leq |b_n|\leq \beta |a_n|$ and $a_n=\mathcal{O}(b_n)$ means $\exists \alpha>0:|a_n|\leq \alpha|b_n|$. \subsection{Definition of IGT}\label{defIGT} \label{sec:IGT} Our initial graph data are node features $X\in\mathbb{R}^{n\times P}$ obtained from a graph with $n$ nodes and unnormalized adjacency matrix $\mathbfcal{A}$. We then write $\mathbfcal{A}_{\text{norm}}$ the normalized adjacency matrix with self-connections, as introduced by \cite{kipf2016semi}. We note that $\mathbfcal{A}_{\text{norm}}$ satisfies $0 \preccurlyeq \mathbfcal{A}_{\text{norm}} \preccurlyeq I$ and has positive entries. In Graph Signal Processing \cite{hammond2011wavelets}, those properties allow to interpret $\mathbfcal{A}_{\text{norm}}$ as an averaging operator.
It means that applying $\mathbfcal{A}_{\text{norm}}$ to $X$ leads to a linear representation $\mathbfcal{A}_{\text{norm}}X$ which is smoother than $X$ because $\mathbfcal{A}_{\text{norm}}$ projects the data in a subspace ruled by the topology (or connectivity) of a given community~\cite{gama2020stability}. The degree of smoothness can be adjusted to a given task simply by considering: \begin{equation} \label{smoothness} A_J \triangleq \mathbfcal{A}_{\text{norm}}^J\,. \end{equation} This step is analogous to the rescaling of a low-pass filter in Signal Processing \cite{mallat1999wavelet}, and $A_J$ satisfies: \begin{lemma}\label{lemma:positive} If $0 \preccurlyeq \mathbfcal{A}_{\text{norm}} \preccurlyeq I$ and $\mathbfcal{A}_{\text{norm}}$ has positive entries, then for any $J\in \mathbb{N}$, $A_J$ has positive entries and also satisfies $0 \preccurlyeq A_J \preccurlyeq I$. \end{lemma} Applying solely $A_J$ leads to a loss of information that we propose to recover via $I-A_J$. This allows to separate the low and high frequencies of the graph in two channels, as expressed by the next lemma: \begin{lemma}\label{proj} If $0 \preccurlyeq A \preccurlyeq I$, then $\Vert AX\Vert^2+\Vert (I-A)X\Vert^2\leq \Vert X\Vert^2$ with equality iff $A^2=A$. \end{lemma} Yet, contrary to $A_JX$, $(I-A_J)X$ is not smooth and thus, it might not be amenable to learning because community structures might not be preserved. Furthermore, a linear classifier will not be sensitive to the linear representation $\{A_JX,(I-A_J)X\}$. Similarly to \cite{oyallon2020interferometric}, we propose to apply an absolute value $|.|$ point-wise non-linearity to our representations. Section \ref{sec:optim} will explain how to estimate isometries $\{W_n\}$, which, combined with a modulus, will smooth the signal envelope while preserving signal energy.
We now formally describe our architecture and we consider $\{W_n\}$ a collection of isometries, that we progressively apply to an input signal representation $U_0\triangleq X$ via: \begin{equation} \label{eq:u} U_{n+1} \triangleq |(I-A_J)U_nW_n|\,, \end{equation} and we introduce the IGT representation of order $N\in\mathbb{N}$ with averaging scale $J\in\mathbb{N}$ defined by: \begin{equation} S^N_JX\triangleq\{A_JU_0,...,A_JU_N\}\,. \end{equation} Fig. \ref{icml-historical} depicts our architecture. The following explains that $S_J^N$ is non-expansive, thus stable to noise: \begin{proposition} \label{prop:lip} For $N\in\mathbb{N}$, $S_J^NX$ is 1-Lipschitz, leading to: \begin{equation} \Vert S_J^NX-S_J^NY\Vert\leq\Vert X-Y\Vert \quad\text{and}\quad \Vert S_J^NX\Vert\leq\Vert X\Vert\,. \end{equation} \end{proposition} \begin{wrapfigure}{r}{0.35\textwidth} \begin{center}\centerline{\includegraphics[trim={27.5cm 15cm 22cm 4.5cm}, clip, width=0.4\columnwidth]{icmligt2021.pdf}}\caption{We illustrate our model for $N=2$. Low and high frequencies are separated (blue) and then the high frequencies are demodulated (red) via an isometry and a non-linear point-wise absolute value, and then propagated to the next layer.} \label{icml-historical}\end{center}\end{wrapfigure} The next section will describe the E-IGT, which was introduced as the Expected Scattering \cite{mallat2013deep}, but in a rather different context: we will show under simplifying assumptions that an IGT for community labeling concentrates around the E-IGT.
\subsection{Definition of the Expected-IGT (E-IGT)} \label{sec:expected-IGT} Similarly to the previous section, for an input signal $\bar U_0\triangleq X$, we consider the following recursion, introduced in \cite{mallat2013deep}: \begin{equation} \bar U_{n+1} \triangleq |(\bar U_n-\mathbb{E}\bar U_n)W_n|\,, \end{equation} which leads to the E-IGT \footnote{We rename it here because we use rather different principles to obtain the $\{W_0,...,W_{N-1}\}$ compared to the original Scattering.} of order $N$ defined by: \begin{equation} \bar S^NX\triangleq\{\mathbb{E}\bar U_0,...,\mathbb{E}\bar U_N\}\,. \end{equation}Similarly to Prop. \ref{prop:lip}, we prove the following stability result: \begin{proposition} \label{prop:lip2} For $N\in\mathbb{N}$, $\bar S^NX$ is 1-Lipschitz, meaning that: \begin{equation} \Vert \bar S^NX- \bar S^NY\Vert^2\leq\mathbb{E}[\Vert X-Y\Vert^2]\,, \end{equation} and furthermore: \begin{equation} \Vert \bar S^NX\Vert^2 \leq\mathbb{E}[\Vert X\Vert^2]\,. \end{equation} \end{proposition} \begin{proof} Indeed, \cite{mallat2013deep} have proven this for the columns of $X$. \end{proof} Note that this representation is also more amenable to standard supervised classifiers such as SVMs because no operation mixing nodes is involved. Prop. \ref{prop:lip2} highlights the fact that the E-IGT is non-expansive, and \cite{waldspurger2017wavelet} shows that this allows to discriminate the attributes of the distribution of $X$. However, it is difficult in general to estimate the E-IGT because one does not know the distribution of a given node and it is difficult to estimate it from a single realization as there is a clear curse of dimensionality. However, we will show that $S_J^N$ will be very similar to $\bar S^N$ under standard assumptions on communities.
We now state the following proposition, which allows to quantify the distance between an IGT and its E-IGT: \begin{proposition}\label{prop:boundigt} For any $X, N, J$, we get: \begin{equation} \Vert S_J^NX-\bar S^N X\Vert\leq \sqrt{2} \sum_{m=0}^{N}\Vert (A_J-\mathbb{E})\bar U_m\Vert \,. \end{equation} \end{proposition} The proof of this proposition can be found in the Appendix: it fully uses the tree structure of Fig. \ref{icml-historical}, in order to obtain tighter bounds than \cite{mallat2013deep}, as it allows $N$ to be of arbitrary size without diverging. We now bound the distance between the IGT and the E-IGT: \begin{corollary}\label{prop:concentration} For $N\in \mathbb{N}$, we have: \begin{equation} \sup_{\mathbb{E}\Vert X\Vert\leq 1}\mathbb{E}[\Vert S_J^{N}X- \bar S^{N}X\Vert] \leq 2^{N+2} \sup_{\mathbb{E}\Vert X\Vert\leq 1}\mathbb{E}[\Vert A_JX-\mathbb{E}X\Vert]\,.\label{ergo} \end{equation} \end{corollary} \begin{proof} The next Lemma combined with the norm homogeneity allows to conclude with Prop.\@ \ref{prop:boundigt}. \end{proof} \begin{lemma}\label{bounded} If $\Vert X\Vert\leq 1$, then $\Vert\bar U_n\Vert\leq 2^n$, with $\bar U_0=X$. Also, if $\mathbb{E}[\Vert X\Vert]\leq 1$, then $\mathbb{E}[\Vert \bar U_n\Vert]\leq 2^n$ \end{lemma} \begin{proof} This is true for $n=0$, and then by induction, since isometry preserves the $\ell^2$-norm: $\Vert\bar U_{n+1}\Vert\leq \Vert\bar U_n\Vert+\Vert \mathbb{E}\bar U_n\Vert\leq \Vert\bar U_n\Vert+ \mathbb{E}\Vert\bar U_n\Vert\leq 2^{n+1}$. The proof is similar for the second part. \end{proof} The right term of Eq. \ref{ergo} measures the ergodicity properties of a given $A_J$. For instance, in the case of images, a stationary assumption on $X$ implies that $A_Jf(X)\approx\mathbb{E}f(X)$ for all measurable $f$, which is the case for instance for textures \cite{mallat1999wavelet}. 
The following proposition shows that in case of exact ergodicity, the two representations have bounded moments of order 2: \begin{proposition} If $\mathbb{E}[A_JX]=\mathbb{E}X$, and if $X$ has variance $\sigma^2=\mathbb{E}\Vert X \Vert^2- \Vert \mathbb{E}X \Vert^2$, then: \begin{align} \mathbb{E}[\Vert S_J^NX-\bar S^NX\Vert^2 ] & \leq 2 \sigma^2\,. \end{align} \end{proposition} \subsection{Graph model and concentration bounds} \label{sec:model} In this subsection, we propose to demonstrate novel bounds which improve the upper bound obtained at Prop.\@ \ref{prop:concentration} by introducing a Stochastic Block Model \cite{holland1983stochastic}. We will show that the IGT features of a given community concentrate around the E-IGT features of this community: IGT features are thus more amenable to be linearly separable. Recall from Sec.\@ \ref{defIGT} that $A_1=\mathbfcal{A}_{\text{norm}}$, thus we note that for some $m>0$, via the triangle inequality we get: \[ \Vert A_1\bar U_m-\mathbb{E}\bar U_m\Vert=\Vert \mathbfcal{A}_{\text{norm}}\bar U_m-\mathbb{E}\bar U_m\Vert \leq \Vert (\mathbfcal{A}_{\text{norm}}-\mathbb{E}[\mathbfcal{A}]_{\text{norm}})\bar U_m\Vert + \Vert \mathbb{E}[\mathbfcal{A}]_{\text{norm}}\bar U_m-\mathbb{E}[\bar U_m]\Vert\,. \] Now, the left term can be upper bounded as: \begin{equation} \Vert (\mathbfcal{A}_{\text{norm}}-\mathbb{E}[\mathbfcal{A}]_{\text{norm}})\bar U_m\Vert\leq \Vert \mathbfcal{A}_{\text{norm}}-\mathbb{E}[\mathbfcal{A}]_{\text{norm}}\Vert \Vert \bar U_m\Vert\,. \end{equation} For the sake of simplicity, we will consider a model with two communities, yet the extension to more communities is straightforward and would simply involve a linear term in the number of communities. We now describe our model.
Once the $n$ nodes have been split in two groups of size $n\sim n_1, n \sim n_2$, we assume that each edge between two different nodes is sampled independently with probability $p_n$ (or simply $p$ if not ambiguous) if they belong to the same community and $q$ otherwise. We assume that $q= \tau p$ for some constant $\tau\sim \frac{1}{\sqrt n}\ll 1$ and the features belonging to the same community are i.i.d.\@ and $\sigma$-sub-Gaussian, and $\Vert X\Vert\leq 1$. Those assumptions are not restrictive as they hold in many practical applications (and the second, always holds up to a constant). For a given community $i\in\{1,2\}$, we write $(\mu^i_m)_{m\leq N}$ its E-IGT. We impose that $p_n\sim \frac{\log(n)}{n}$ in this particular Bernoulli model. Sparse random graphs do not generally concentrate. Yet, according to \cite{keriven2020sparse}, in the relatively sparse case where $p_n \sim \frac{\log n}{n}$, we get the following spectral concentration bound of the normalized adjacency matrix: \begin{lemma} \label{lemma:keriven} Let $\mathbfcal{A}$ be a symmetric matrix with independent entries $\mathbfcal{A}_{ij}$ obtained as above. If $n_1\sim n, n_2\sim n$, and p is relatively sparse as above, then for all $\nu > 0$, there is a constant $C_\nu$ such that, with high probability $\geq 1-n^{-\nu}$: \begin{equation} \Vert \mathbfcal{A}_{\text{norm}} - \mathbb{E}[\mathbfcal{A}]_{\text{norm}} \Vert \leq \frac{C_{\nu}}{\sqrt{\log n}}\,. \end{equation} \end{lemma} \begin{proof} Can be found in \cite{keriven2020sparse}. 
\end{proof} Note that in general, $\mathbb{E}[\mathbfcal{A}]_{\text{norm}}\neq \mathbb{E}[\mathbfcal{A}_{\text{norm}}]$ and here, because of our model: \begin{equation} \mathbb{E}[\mathbfcal{A}]_{\text{norm}}=\begin{bmatrix}\frac{p}{n_1p+n_2q}\mathbf{1}_{n_1\times n_1} & \frac{q}{n_1p+n_2q}\mathbf{1}_{n_1\times n_2} \\ \frac{q}{n_1q+n_2p}\mathbf{1}_{n_2\times n_1} & \frac{p}{n_1q+n_2p}\mathbf{1}_{n_2\times n_2}\end{bmatrix}\,, \end{equation} where $\mathbf{1}_{m\times n}$ is a matrix of ones of size $m\times n$. Now, note also that: \begin{equation} \mathbb{E}[\bar U_m]=[\mu^1_m\mathbf{1}_{n_1}^T ,\mu^2_m \mathbf{1}_{n_2}^T]\,, \end{equation} Now, we prove that the IGT will concentrate around the E-IGT, under a Stochastic Block Model and sub-Gaussianity assumptions. We note that a bias term of the order of $\sqrt{n}\tau$ is present, which is consistent with our model assumptions. Note it is also possible to leverage the boundedness assumption yet it will lead to an additional constant term. \begin{proposition}\label{prop:concentrate}Under the assumptions above, there exists $C>1$ s.t.\@ for all $N>0, \delta>0$, we have with high probability, larger than $1-\mathcal{O}(N\delta+n^{-\nu})$: \begin{equation} \Vert S_1^NX-\bar S^N X\Vert =\mathcal{O}(\sigma \frac{1+C^N}{1-C}(\sqrt{\ln\frac{1}{\delta}}+\frac{1}{\sqrt{\log n}})) +\mathcal{O}(\tau \sqrt{n}\sum_{m\leq N}\Vert \mu^2_m-\mu^1_m\Vert)\,. \end{equation} \end{proposition}The following proposition allows to estimate the concentration of each IGT order: \begin{proposition} \label{prop:concentrate_each} Assume that each line of $X\in \mathbb{R}^{n\times P}$ is $\sigma$-sub-Gaussian. There exists $C>1,K>0,C'>1$ such that $\forall m,\delta>0$ with probability $1-8P\delta$, we have: \begin{equation} \Vert \mathbb{E}[\mathbfcal{A}]_{\text{norm}}\bar U_m-\mathbb{E}[\bar U_m]\Vert \leq K\sigma C^m\sqrt{\ln \frac{1}{\delta}}+C'\sqrt{n}\tau\Vert \mu^2_m-\mu^1_m\Vert\,. 
\end{equation} \end{proposition}This Lemma shows that a cascade of IGT linear isometries preserves sub-Gaussianity: \begin{lemma}\label{op-subg} If each line of $X$ is $\sigma$-sub-Gaussian, then each (independent) line of $\bar U_m$ is $C^m\sigma$-sub-Gaussian for some universal constant $C$. \end{lemma} In order to show the previous Lemma, we need to demonstrate that the modulus of a sub-Gaussian variable is itself sub-Gaussian, which is shown below: \begin{lemma}\label{subgausscontractivity} There is $C>0$, s.t. $X\in\mathbb{R}^P$ is $\sigma$-sub-Gaussian, then $|(X-\mathbb{E}X)W|$ is $C\sigma$-sub-Gaussian. \end{lemma} \if False \subsection{Louis: Graph model and concentration bounds} From the process to generate $\bar U_m$, note that each row $\bar U^i_m$ for $i\leq n$ is independent. We will use the following Lemma: \begin{lemma} If $X\in\mathbb{R}^d$ is $\sigma$-norm-subgaussian (i.e., $\Vert X\Vert$ is subgaussian with parameter $\sigma$), then $|(X-\mathbb{E}X)W|$ is $C\sigma$-norm-subgaussian for some universal constant C. \end{lemma} \begin{proof} \end{proof} \begin{remark} $C>1$, as ... \end{remark} Now, we will also use the fact that: \begin{lemma} If $X_1,...,X_k$ are $\sigma$-norm-subgaussian, then with high probability $1-e^{something}$, we get: \begin{equation} \Vert \frac 1n \sum_i X_i-\mathbb{E}X\Vert\leq ? \end{equation} \end{lemma} \begin{proposition} Combining Lemma \ref{},\ref{},\ref{}, we get: \end{proposition} \fbox{Model}: Nodes intra- and inter-communities are linked independently with some prescribed probabilities.\\ Sample first the community $C_1,...,C_n$ , and draw an edge between nodes $i,j$ with proba $\mathcal{B}er_{C_i,C_j}$. 
Hence the (stochastic) adjacency matrix is defined as \begin{equation} A_{ij} \sim Ber(B_{C_i C_j}) \end{equation}; where $B$ is the community probability matrix of size $K*K$ (with $K=$ number of different communities) defined as \begin{equation*} B = \begin{pmatrix} p & q & \cdots & q \\ q & p & \cdots & q \\ \vdots & \vdots & \ddots & \vdots \\ q & q & \cdots & p \end{pmatrix} \end{equation*}; where $p=\alpha_n$ and $q= \tau \alpha_n << p$.\\ Each node has some features $X_i,X_j$, independant. We then design the averaging $A$ by renormalizing each row ( we want them to exactly sum to $1$). Then we get $\mathbb{E}[AX|C_1,...,C_n]=\mathbb{E}[A|C_1,...,C_n]\mathbb{E}[X|C_1,...,C_n]$.\\ \begin{lemma} \label{subgaussian} Let $X \sim subGaussian(0, \sigma_X^2)$ and $Y \sim subGaussian(0, \sigma_Y^2)$, hence $X+Y \sim subGaussian(0, (\sigma_X+\sigma_Y)^2)$, even if $X$ and $Y$ are not independent. \begin{proof} Applying Holder's inequality to the moment generating functions, we have : \begin{equation} \mathbb{E}[|e^{t(X+Y)}|] \leq \mathbb{E}[(e^{tX})^p]^\frac{1}{p} \mathbb{E}[(e^{tY})^q]^\frac{1}{q} \end{equation} Taking $p=1+\frac{\sigma_Y}{\sigma_X}$ and $q=1+\frac{\sigma_X}{\sigma_Y}$, we obtain $\mathbb{E}[e^{t(X+Y)}] \leq e^{\frac{(\sigma_X + \sigma_Y)^2t^2}{2}}$.\\ By definition, $X+Y$ is sub-gaussian with parameter $(0, (\sigma_X+\sigma_Y)^2)$. \end{proof} \end{lemma} \begin{proposition} \label{prop:concentrate} Suppose each line in our model features follow a subGaussian law that only depends on the community it belongs to ($X_i \sim subGaussian(\mathbb{E}[X_i], (\sigma_{C_i1}, ..., \sigma_{C_ip}))$). 
Note $n_l$ the number of node in the community $l$ (among $K$ possible).\\ Thus for $m \geq 0$ with probability $\geq 1-2\sum\limits_{l=1}^K \sum\limits_{j=1}^p e^\frac{-t^2}{2 n_l \sigma_{m l j}^2}$,\\ We have \begin{equation} \Vert \mathbb{E}[A] \bar U_m - \mathbb{E}[\bar U_m]\Vert_{MAX} \leq t+\Vert \epsilon \Vert \end{equation}; where $\Vert \Vert_{MAX}$ is the matrix max norm. \begin{proof} First one must point out that line $i$ from the matrix $\mathbb{E}[A] \bar U_m - \mathbb{E}[\bar U_m]$ is the following: \begin{equation} \Vert \frac{1}{n_i} \sum\limits_{k \in C_i} (\bar U_m - \mathbb{E}[\bar U_m])_k + \epsilon_i \Vert \end{equation} Where $\epsilon$ is the matrix made of the sum of elements that does not belong to the community of node $i$. The normalization factor of line $i$ is $d_i = p n_i +(n-n_i)q$, and we assume $pn_i >> (n-n_i)q$. As a consequence, the sum of community members (with factor $p$) is leading the global sum with a normalization of $\frac{p}{d_i} \sim \frac{1}{n_i}$, and $\epsilon_i$ is a sum over non-members with a normalization of $\frac{q}{d_i}$ which is neglictable.\\ Lemma \ref{subgaussian} proves that each line (as sum of subGaussian variables) in $\bar U_m$ is also subGaussian. Note we can precisely compute the vector parameter, but the formula become increasingly complex (line $i$ of $\bar U_1$ is subGaussian with parameter $((\sum\limits_{j=1}^p W^{(1)}_{1j}\sigma_{C_ij})^2, ..., (\sum\limits_{j=1}^p W^{(1)}_{pj}\sigma_{C_ij})^2)$; where $W^{(1)}$ is the isometry $W_1$).\\ Hence for a given line $i$, with probability $\leq \sum\limits_{j=1}^p 2 e^\frac{-t^2}{2n_{C_i} \sigma_{mC_ij}^2}$, we have $max \{|\frac{1}{n_i} \sum\limits_{k \in C_i} (\bar U_m - \mathbb{E}[\bar U_m])_k| \} \geq t$.\\ Thanks to our model, the matrix $\mathbb{E}[A] \bar U_m - \mathbb{E}[\bar U_m]$ is made of K blocks in which the $n_l$ lines of each block $l$ are exactlty the same (up to $\epsilon$ matrix) beacause we have same law inside each community. 
As a consequence, for block $l \in [|1, K|]$ with probability $\leq \sum\limits_{j=1}^p 2 e^\frac{-t^2}{2n_{l} \sigma_{mlj}^2}$, we have $max \{|Block(l)| \} \geq t$.\\ Summing it on each block, we have \begin{equation*} \Vert \mathbb{E}[A] \bar U_m - \mathbb{E}[\bar U_m]\Vert_{MAX} \geq t \end{equation*} with probability $\leq \sum\limits_{l=1}^K \sum\limits_{j=1}^p 2 e^\frac{-t^2}{2n_{l} \sigma_{mlj}^2}$.\\ Taking the invert event concludes the proof. \end{proof} \end{proposition} \begin{proposition}Let's note $\alpha_n$ the mean probability of connection (in our model). Make the same assumptions of Proposition \ref{prop:concentrate}.\\ Then, with probability $\geq (1-n^{-t})( 1-2\sum\limits_{l=1}^K \sum\limits_{j=1}^p e^\frac{-t^2}{2 n_l \sigma_{X l j}^2})$, we have the following concentration : \begin{equation} \Vert S_k - \bar S_k \Vert \leq \sqrt{2} \sum\limits_{m=0}^{k-1} \Vert \epsilon_m \Vert + k\sqrt{2}t + \sqrt{\frac{2}{log(n)}} \sum\limits_{m=0}^{k-1} \Vert \bar U_m \Vert \end{equation} \begin{proof} Let's note $\alpha_n$ the mean probability of connection (in the particular Bernouilli model). Sparse random graphs do not concentrate (when the expected degree is very low in comparison to $log (n)$). But according to \cite{spectral_sbm}, in the relatively sparse case where $\alpha_n \sim \frac{log (n)}{n}$, the spectral concentration bound of the normalized adjacency matrix is (with probability $\geq 1-n^{-t}$) : \begin{equation} \Vert A - \mathbb{E}[A] \Vert \leq \frac{1}{\sqrt{log(n)}} \end{equation} As $|W|$ are contractive operator, we have $\Vert \bar U_1 - \mathbb{E}[\bar U_1] \Vert \leq \Vert X - \mathbb{E}[X] \Vert$. Thus concentrating $X$ implies the concentration of other orders of the IGT features. We apply the inequality (with $m=0$) from Proposition \ref{prop:concentrate} to conclude. 
\end{proof} \end{proposition} \iffalse \begin{lemma} \label{subg} Recall the definition of the Birnbaum–Orlicz norm : \begin{equation} \Vert X \Vert_{\psi_2} = inf\{ t>0 | \mathbb{E}[e^\frac{X^2}{t^2}] \leq 2\} \end{equation} If $\Vert X \Vert$ is subGaussian, $\Vert X - \mathbb{E}[X] \Vert$ is also subGaussian, with the Birnbaum–Orlicz norm. \begin{proof} According to \cite{hdp_book}, for any subGaussian variable $X$, then $X - \mathbb{E}[X]$ is sub-gaussian, too and there is $C$ such as : \begin{equation} \Vert X - \mathbb{E}[X] \Vert_{\psi_2} \leq C \Vert X \Vert_{\psi_2} \end{equation} Thus, \begin{align} \Vert \Vert X-\mathbb{E}[X] \Vert - \mathbb{E}[\Vert X-\mathbb{E}[X] \Vert] \Vert_{\psi_2 } & \leq C \Vert \Vert X-\mathbb{E}[X]\Vert \Vert_{\psi_2}\\ & \leq C(\Vert \Vert X\Vert \Vert_{\psi_2} + \Vert \Vert \mathbb{E}[X]\Vert \Vert_{\psi_2})\\ \end{align} If $Y$is a constant, $\Vert Y \Vert_{\psi_2} \leq \frac{|\mathbb{E}[Y]|}{\sqrt{ln(2)}} \leq \frac{\mathbb{E}[|Y|]}{\sqrt{ln(2)}} \leq \frac{\Vert Y \Vert_1}{\sqrt{ln(2)}} \leq C_2 \frac{\Vert Y \Vert_{\psi_2}}{\sqrt{ln(2)}}$\\ Hence, \begin{equation} \Vert \Vert X-\mathbb{E}[X] \Vert - \mathbb{E}[\Vert X-\mathbb{E}[X] \Vert] \Vert_{\psi_2 } \leq (C(1+\frac{C_2}{ln(2)})) \Vert \Vert X\Vert \Vert_{\psi_2} \end{equation} \end{proof} \end{lemma} \begin{proposition} Let's note $\alpha_n$ the mean probability of connection (in our model). 
Assume $X$ is norm-subGaussian with mean $\mathbb{E}[X]$ and parameter $\sigma^2$, and assume $\alpha_n \sim \frac{log (n)}{n}$.\\ Then, with probability $\geq 1-kn^{-t} - (\#_{commu})\sum\limits_{m=0}^{k-1} e^\frac{-t^2}{2\sigma_m^2}$, we have the following concentration : \begin{equation} \Vert S_k - \bar S_k \Vert \leq \sqrt{2} \sum\limits_{m=0}^{k-1} \Vert \epsilon_m \Vert + k\sqrt{2}nt + \sqrt{2}\mathbb{E}[\sum\limits_{m=0}^{k-1} \Vert \mathbb{E}[A]\bar U_m - \mathbb{E}[\bar U_m] \Vert] + \sqrt{\frac{2}{log(n)}} \sum\limits_{m=0}^{k-1} \Vert \bar U_m \Vert \end{equation} \begin{proof} Take $m \geq 0$ and a line $i$ from the matrix $\mathbb{E}[A] \bar U_m - \mathbb{E}[\bar U_m]$. Because we assume features follow the same law inside a community, line $i$ norm is the following: \begin{equation} \Vert \frac{1}{n_i} \sum\limits_{j \in C_i} (\bar U_m - \mathbb{E}[\bar U_m])_j + \epsilon_i \Vert \end{equation} Where $\epsilon$ is the matrix made of the sum of elements that does not belong to the community of node $i$. The normalization factor of line $i$ is $d_i = p n_i +(n-n_i)q$, and we assume $pn_i >> (n-n_i)q$. As a consequence, the sum of community members (with factor $p$) is leading the global sum with a normalization of $\frac{p}{d_i} \sim \frac{1}{n_i}$, and $\epsilon_i$ is a sum over non-members with a normalization of $\frac{q}{d_i}$ which is neglictable.\\ Thanks to Lemma \ref{subg}, we know that $\Vert (\bar U_m - \mathbb{E}[\bar U_m])_j \Vert$ is subGaussian with parameter $\sigma_m^2$. The Lemma \ref{subgaussian} gives us the subGaussianity of the sum (among the $n_i$ community neighbors of node $i$).\\ Not taking into account the $\epsilon$ matrix, we figure out that the matrix $\mathbb{E}[A] \bar U_m - \mathbb{E}[\bar U_m]$ has identical lines : for $l$ communities, we have $l$ blocks of size $n_l$. 
For a given block $l$ of identical subgaussian lines, we have with probability $\leq 2 e^\frac{t^2}{2\sigma_m^2}$, that $|block(l) - \mathbb{E}[block(l)| \geq n_l t$.\\ It is straitforward to point out that $\sum\limits_{l} n_l = n$. Hence, with probability $\leq 2 (\#_{communities})e^\frac{t^2}{2\sigma_m^2}$, we obtain $|\Vert \mathbb{E}[A] \bar U_m - \mathbb{E}[\bar U_m] \Vert - \mathbb{E}[\Vert \mathbb{E}[A] \bar U_m - \mathbb{E}[\bar U_m] \Vert]| \geq \sum\limits_{l} n_l t$.\\ \iffalse \begin{lemma} \label{lemma} Take $X_i$ a sub-gaussian variable with parameter $\sigma_i^2$. By definition, for all $t \geq 0$, $\mathcal{P}(|X_i| \geq t) \leq 2 e^{-\frac{t^2}{2 \sigma_i^2}}$. If $t \leq 1$, $X_i^2$ is also a sub-gaussian with parameter $\sigma_i^2$. And if $t > 1$, we have the following inequality $\sum\limits_{i=1}^{n} X_i^2 \geq nt^2$, with propablity $\leq 2 \sum\limits_{i=1}^{n} e^{-\frac{t^2}{2 \sigma_i^2}}$ \begin{proof} $\mathcal{P}(X_i^2 \geq t^2) \leq 2 e^{-\frac{t^2}{2 \sigma_i^2}}$, hence $\mathcal{P}(|X_i^2| \geq T) \leq 2 e^{-\frac{t^2}{2 \sigma_i^2}} \leq e^{-\frac{T^2}{2 \sigma_i^2}}$ if $t \leq 1$. This shows that $X_i^2$ is sub-gaussian for $t \leq 1$. When $t > 1$, we can only apply the concentration for each $i$ and sum it. We obtain a broad bound depending on $n$. \end{proof} \end{lemma} The following Proposition shows our IGT computation is concentrating around the E-IGT when the Graph (number of nodes) is large. Suppose $X_i$ are subgaussian and independant with mean $\mu_i$ and parameter $\sigma_i^2$, and note $a_{ij}$ the general term of the matrix $\mathbb{E}[A]$. 
We also assume that our Graph follows the model above.\\ Let's note \begin{equation} \epsilon_1 = e^\frac{-t^2}{2(\sum\limits_{i=1}^{n} \mu_i \sqrt{\sum\limits_{j=1}^{n} (a_{ij} \sigma_j)^2})^2} + e^\frac{-t^2}{2(\sum\limits_{i=1}^{n} \sqrt{\sum\limits_{j=1}^{n} (a_{ij} \sigma_j)^2})^2} + e^\frac{-t^2}{2 (\sum\limits_{i=1}^{n} (\sum\limits_{j=1}^{n} a_{ij} \mu_j)\sqrt{\sum\limits_{j=1}^{n} (a_{ij}\sigma_j)^2})^2} \end{equation} \begin{equation} \epsilon_2 = e^\frac{-t^2}{2(\sum\limits_{i=1}^{n} \mu_i \sqrt{\sum\limits_{j=1}^{n} (a_{ij} \sigma_j)^2})^2} + 2\sum\limits_{i=1}^{n} e^\frac{-t^2}{2\sum\limits_{j=1}^{n} (a_{ij} \sigma_j)^2} + e^\frac{-t^2}{2 (\sum\limits_{i=1}^{n} (\sum\limits_{j=1}^{n} a_{ij} \mu_j)\sqrt{\sum\limits_{j=1}^{n} (a_{ij}\sigma_j)^2})^2} \end{equation} \\ \\ Then if $0 \leq t \leq 1$ :\\ With proba $\geq 1-\epsilon_1$, \begin{equation} \Vert AX - \mathbb{E}[X] \Vert \leq \sqrt{5t + \Vert \mathbb{E}[X]-\mathbb{E}[A]\mathbb{E}[X] \Vert^2}+ \sqrt{\frac{1}{log(n)}}\Vert X \Vert\end{equation} Then if $t > 1$ :\\ With proba $\geq 1-\epsilon_2$, \begin{equation} \Vert S_k-\bar S_k \Vert_o \leq k ( \sqrt{8t+2nt^2+2\sum\limits_{i=1}^{n} (\mu_i - \sum\limits_{j=1}^{n} a_{ij} \mu_j)^2} + \sqrt{\frac{2}{log(n)}})\end{equation} $\Vert \mathbb{E}[A]X - \mathbb{E}[X] \Vert^2 = \sum\limits_{i=1}^{n} (\sum\limits_{j=1}^{n} a_{ij}X_j)^2 + \sum\limits_{i=1}^{n} \mu_i^2 -2 \sum\limits_{i=1}^{n} \mu_i \sum\limits_{j=1}^{n} a_{ij}X_j$\\ \\ First notice that $\sum\limits_{j=1}^{n} a_{ij}X_j$ is the sum of independent sub-gaussian variables that have the same law. Hence $\sum\limits_{j=1}^{n} a_{ij}X_j$ is also a sub-gaussian variable with mean $(\sum\limits_{j=1}^{n} a_{ij} \mu_j)$ and parameter $(\sum\limits_{j=1}^{n} (a_{ij} \sigma_j)^2)$. 
From Lemma \ref{subgaussian}, $\sum\limits_{i=1}^{n} -\mu_i \sum\limits_{j=1}^{n} a_{ij}X_j$ is a sub-gaussian variable with mean $(- \sum\limits_{i=1}^{n} \mu_i \sum\limits_{j=1}^{n} a_{ij} \mu_j)$, and parameter $((\sum\limits_{i=1}^{n} \mu_i \sqrt{\sum\limits_{j=1}^{n} (a_{ij} \sigma_j)^2})^2)$. Thanks to the Hoeffding Bound,\\ with proba $\leq e^\frac{-t^2}{2(\sum\limits_{i=1}^{n} \mu_i \sqrt{\sum\limits_{j=1}^{n} (a_{ij} \sigma_j)^2})^2}$, we have \begin{equation} 2\sum\limits_{i=1}^{n} -\mu_i \sum\limits_{j=1}^{n} a_{ij}X_j \geq 2t - 2\sum\limits_{i=1}^{n} \mu_i \sum\limits_{j=1}^{n} a_{ij} \mu_j. \end{equation}\\ \\ Then, $\sum\limits_{i=1}^{n} (\sum\limits_{j=1}^{n} a_{ij}X_j)^2 = \sum\limits_{i=1}^{n} Z_i^2 + \sum\limits_{i=1}^{n} (\sum\limits_{j=1}^{n} a_{ij} \mu_j)^2 + 2 \sum\limits_{i=1}^{n} (\sum\limits_{j=1}^{n} a_{ij} \mu_j) Z_i$; where $Z_i$ follows a sub-gaussian law (centered) of parameter $(\sum\limits_{j=1}^{n} (a_{ij} \sigma_j)^2)$. According to Lemma \ref{lemma} if $t \leq 1$, $Z_i^2$ is also a sub-gaussian variable (centered) of parameter $(\sum\limits_{j=1}^{n} (a_{ij} \sigma_j)^2)$, and with proba $\leq e^\frac{-t^2}{2(\sum\limits_{i=1}^{n} \sqrt{\sum\limits_{j=1}^{n} (a_{ij} \sigma_j)^2})^2}$, $\sum\limits_{i=1}^{n} Z_i^2 \geq t$.\\ If $t > 1$, we only have\\ with proba $\leq 2\sum\limits_{i=1}^{n} e^\frac{-t^2}{2\sum\limits_{j=1}^{n} (a_{ij} \sigma_j)^2}$, $\sum\limits_{i=1}^{n} Z_i^2 \geq nt^2$.\\ We also can apply the Lemma \ref{subgaussian} and the Hoeffding Bound to $\sum\limits_{i=1}^{n} (\sum\limits_{j=1}^{n} a_{ij} \mu_j) Z_i$, and we obtain \\ with proba $\leq e^\frac{-t^2}{2 (\sum\limits_{i=1}^{n} (\sum\limits_{j=1}^{n} a_{ij} \mu_j)\sqrt{\sum\limits_{j=1}^{n} (a_{ij}\sigma_j)^2})^2}$, that \begin{equation} 2\sum\limits_{i=1}^{n} (\sum\limits_{j=1}^{n} a_{ij} \mu_j) Z_i \geq 2t \end{equation}\\ Putting it all together (for $t \leq 1$), we have with proba $\leq \epsilon_1$, $\Vert \mathbb{E}[A]X - \mathbb{E}[X] \Vert^2 \geq 5t + 
\sum\limits_{i=1}^{n} (\mu_i - \sum\limits_{j=1}^{n} a_{ij} \mu_j)^2$. We do the same for $t > 1$, and evaluate the concentration of the inverted events.\\ \fi Let's note $\alpha_n$ the mean probability of connection (in the particular Bernouilli model). Sparse random graphs do not concentrate (when the expected degree is very low in comparison to $\log (n)$). But according to \cite{spectral_sbm}, in the relatively sparse case where $\alpha_n \sim \frac{log (n)}{n}$, the spectral concentration bound of the normalized adjacency matrix is (with probability $\geq 1-n^-t$) : \begin{equation} \Vert A - \mathbb{E}[A] \Vert \leq \frac{1}{\sqrt{\log(n)}} \end{equation} We basically have $\Vert AX - \mathbb{E}[X] \Vert \leq \Vert \mathbb{E}[A]X - \mathbb{E}[X] \Vert + \Vert AX - \mathbb{E}[A][X] \Vert$. Making use of a sub-multiplicative matrix norm (like the Frobenius one for eg), we can deduce $\Vert AX - \mathbb{E}[A][X] \Vert \leq \Vert A - \mathbb{E}[A] \Vert \Vert X \Vert$.\\ We make use of the operator norm and of Proposition \ref{prop:concentration} to conclude. \end{proof} \end{proposition} \iffalse Edouard: \begin{proposition} If $X$ is sub-gaussian, assuming $\Vert X_i\Vert\leq C$ then $AX$ is subgaussian too, $A,X$ are independant \end{proposition} \begin{proof} Assuming $(AX)_j=\frac{1}{\#\{i\rightarrow j}\sum_j X_j$. Write: $f(A,X)=(AX)_j$ has bounded variation, indeed: changing one $X_i$ changes by at most by $C$. Changing one vertex $\{i,j\}$ changes the sum by at most $C$ as well. Conclusion is obvious (see https://math.stackexchange.com/questions/3072363/show-that-x-is-a-sub-gaussian-random-vector-with-dependent-sub-gaussian-coordina for giht bounds) \end{proof} \begin{proposition} In the case of the model XXX, \begin{equation} \Vert \mathbb{E}[X]-\mathbb{E}[AX]\Vert \leq C ... 
\end{equation} \end{proposition} \begin{proposition} $i,j$-th node with features $X_i$ ind de $X_j$, then $A_{i,j}\sim \mathcal{B}_{\mathcal{C}_i,\mathcal{C}_j}$ and $C_i$ ind de $C_j$ et $X_i\sim p_{C_i}$, $i,j$ belongs to community $C_i,C_j$ following $p$. \Edouard{Ca doit découler du modèle }Assume that $\mathbb{E}[AX|\mathcal{C}_i]=A\mathbb{E}[X|\mathcal{C}_i]=\mathbb{E}[X|\mathcal{C}_i]$, init: $(x_1,x_2,x_3,x_4,x_5,x_6,x_7,x_8)$ $E_1=x_1+x_2+x_3+x_4$ $E_2=x_5+x_6+x_7+x_8$ $(E_1,E_1,E_1,E_1,E2,E_2,E_2,E_2)\approx (\mathbb{E}x_1,\mathbb{E}x_1,\mathbb{E}x_1,\mathbb{E}x_1,\mathbb{E}x_2,\mathbb{E}x_2,\mathbb{E}x_2,\mathbb{E}x_2)$ /////////////// and that the $(AX)_i$ are independent, and $(AX)_i \sim \mathcal{N}(\mathbb{E}[AX_i], 1)$. Let's take $t \in [0, 1]$, we get : \begin{equation} \mathcal{P}(\Vert AX-\mathbb{E}X\Vert \geq \sqrt{n(t+1)}) \leq 2e^{-\frac{nt^2}{8}} \end{equation} \end{proposition} \begin{proposition} Assume that $\mathbb{E}[AX]=\mathbb{E}X$, such that $X$ has variance $\sigma$. We get: \begin{equation} \mathbb{E}[\Vert AX-\mathbb{E}X\Vert]\leq \sigma \end{equation} \end{proposition} Following the Prop \ref{prop1}, we note that it is possible to employ concentration results along $AX$. Indeed, assume: \begin{equation} \mathbb{P}(\Vert AZ-\mathbb{E}Z\Vert \geq t)\leq e^{-\frac{t^2}{2\lambda^2}}\,, \end{equation} for some restrained $Z$ (why not $Z\sim \mathbb{E}[.|A]$), then we can plug this result in Eq. \ref{avg} and Eq. \ref{stab} which leads again to a concentration bounds. It is unclear to me yet if $S_n$ is lipschitz w.r.t. $A$. Then we need to figure out which result (e.g., https://www3.math.tu-berlin.de/numerik/csa2015/Slides/Vershynin.pdf) fits appropriately. E.g., assume that $\mathcal{G}$ has $n$ nodes, and each nodes is sampled via $p_{i,j}\sim \mathcal{B}_{i,j}$. Then if only keep the first eigenvectors, we get... and thus we get... and prop 3 writes... 
\Edouard{Again, introduce the E-IGT atm you find the appropriate representation.} Assumption: Furthermore, if $A$ is large and sparse (check nicolas keriven), then: \begin{equation} \mathbb{P}(\Vert A-\mathbb{E}(A)\Vert\geq \lambda)\leq e^{-\frac{\lambda^2}{\sigma^2}} \end{equation} \fi \fi \begin{proposition} If $W_m$ is unitary, and $A$ is positive, assume that $\mathbb{E}[AX]=\mathbb{E}X$, such that $X$ has variance $\sigma^2$. \begin{align} \mathbb{E}[\Vert S_n-\bar S_n\Vert^2 ] & \leq 2\mathbb{E}[\Vert X\Vert^2 ] -2 \mathbb{E}[\langle AX ; \mathbb{E}[X]\rangle ] \\ & \leq 2 \sigma^2 \end{align} \begin{proof} \begin{align} \mathbb{E}[\Vert S_n-\bar S_n\Vert^2 ] & = \mathbb{E}[\sum_{i=0}^n \Vert AU_i - \mathbb{E}[\bar U_i] \Vert^2 ] \\ & = \mathbb{E}[\sum_i^n \Vert AU_i \Vert^2 + \Vert \mathbb{E}[\bar U_i] \Vert^2 -2\sum_i^n \langle AU_i; \mathbb{E}[\bar U_i] \rangle] \\ & \leq \mathbb{E}[\sum_i^n \Vert AU_i \Vert^2] + \sum_i^n \Vert \mathbb{E}[\bar U_i] \Vert^2 -2 \mathbb{E}[\langle AX; \mathbb{E}[\bar X] \rangle] \\ \end{align} Because from $A$ positive, we have $\sum\limits_{i=1}^{n} \langle AU_i; \mathbb{E}[\bar U_i] \rangle \geq 0$. \\ From Equation \eqref{eq:finite_energy}, we have $\sum\limits_{m=0}^{n} \Vert AU_m\Vert^2\leq \Vert U_0\Vert^2$.\\ Similar to Proposition \ref{prop:lip}, $\bar S_n$ is contractive. Hence, $\sum\limits_{i=1}^{n} \Vert \mathbb{E}[\bar U_i] \Vert^2 \leq \mathbb{E}[\Vert \bar U_0\Vert^2] - \mathbb{E}[\Vert \bar U_{n+1} \Vert^2]$. \\ Notifying that $\bar U_0 = U_0 = X$ concludes the proof. \end{proof} \end{proposition} \fi \subsection{Optimization procedure} \label{sec:optim} We now describe the optimization procedure of each of our operators $\{W_n\}$, that consists in a greedy layer-wise procedure \cite{greedy}. Our goal is to specify $|W_n|$ such that it leads to a demodulation effect, as well as to have a fast energy decay. 
Demodulation means that the envelope of a signal should be smoother, whereas fast decay will allow the use of shallower networks. In practice, it means that at depth $n$, the energy along the direction of averaging should be maximized, which leads to consider: \begin{equation} \max_{W^TW=I} \Vert A_J| (I-A_J)U_{n}W|\Vert\,. \end{equation} As observed in \cite{oyallon2020interferometric}, because the extremal points of the $\ell^2$ ball are the norm preserving matrix, this optimization problem is equivalent to: \begin{equation} \max_{\Vert W\Vert_2\leq 1} \Vert A_J| (I-A_J)U_{n}W|\Vert\,. \end{equation} Note that this can be approximatively solved via a projected gradient procedure which projects the operator $W$ on the unit ball for the $\ell^2$-norm at each iteration. Furthermore, contrary to \cite{oyallon2020interferometric}, we might constrain $W$ to have a rank lower than the ambient space, that we denote by $k$: increasing $k$ as well as the order $N$ allows to potentially increase the capacity of our model, yet we as discussed in the next section, this wasn't necessary to obtain accuracies at the level of the state of the art. \section{Numerical Experiments} \label{sec:xp} We test our unsupervised IGT features on a synthetic example, and on challenging semi-supervised tasks, in various settings that appeared in the graph community labeling litterature: the \textbf{full } \cite{dropedge}, \textbf{predefined} \cite{kipf2016semi} and \textbf{random splits} \cite{kipf2016semi} of Cora, Citeseer, Pubmed, as well as the WikiCS dataset. \subsection{Synthetic example} \label{sec:synthetic_experiment} As GCNs progressivly apply a smoothing operator on subsequent layers, deeper features are less sensitive to intra-community variability. This progressive projection can have a big impact on datasets where discriminative features are close in average, yet have very different distributions over several communities. 
In order to underline this phenomenon, we propose to study the following synthetic example: following the model and notations of Sec. \ref{sec:model}, we consider two communities, with an equal number of samples in each and we assume that $P=1$, $J=1$, $p=0.001$ and $q=0$ and $n=10000$. Here, we assume the features are centered Gaussians with variance $\sigma_1=1$ for the first community and $\sigma_2=\sigma_1+\Delta \sigma$ for the second. In other words, $\Delta\sigma$ controls the relative spread of the community features. Our goal is to show numerically that an IGT representation is, by construction, more amenable to distinguish the two communities than a GCN. \begin{wrapfigure}{r}{0.5\textwidth} \begin{center} \centerline{\includegraphics[width=0.5\columnwidth]{toy1.pdf}} \caption{Accuracies of GCN against our method on a synthetic example, for several values of $\Delta \sigma$. }\label{fig:toy_plot}\end{center} \end{wrapfigure} As a training set, we randomly sample 20 nodes and use the remaining ones as a validation and test set. For IGT parameters, we pick $J=2$, $k=1$ and $N\in\{0,1,2\}$. On top of our standardized IGT features, our classification layer consists in a 1-hidden layer MLP of width 128. We train the IGT operators for 50 epochs. We compare our representation with a standard GCN \cite{kipf2016semi} that has two hidden layers and a hidden dimension of 128 for the sake of comparison. Both supervised architectures are trained during 200 epochs using Adam \cite{kingma2014adam} with a learning rate of 0.01 ($\epsilon=10^{-8},\beta_1=0.9,\beta_2=0.999$). We discuss next the accuracies averaged over 5 different seeds on Fig. \ref{fig:toy_plot} for various representations and values of $\Delta\sigma$. We observe that an order 0 IGT performs poorly for any values of $\Delta\sigma$, which is consistent because the linear smoothing will dilute important informations for node classification. 
However, non-linear model like IGT (of order larger than 0) or GCN outperforms this linear representation. The IGT outperforms the GCN for all values of $\Delta\sigma$ because as Sec. \ref{sec:IGT} shows, by construction, this representation extracts explicitly the high frequency of the graph, whereas a GCN can only smooth its features and thus will tend to lose in discriminability despite supervision. We note that orders 1 and 2 perform similarly, which is not surprising given the simplistic assumption of this model: all the informative variability is contained in the order 1 IGT and the order 2 is likely to only bring noisy features in this specific case. \subsection{Supervised community detection} \label{sec:supervised_experiments} First, we describe our experimental protocol on the datasets Cora, CiteSeer and PubMed. Each dataset consists in a set of bag-of-words vectors with citation links between documents. They are made of respectively 5.4k, 4.7k, 44k and 216k edges with features of size respectively 1.4k, 3.7k, 0.5k and 0.3k. For the three first datasets, we test our method in three semi-supervised settings, which consist in three different approaches to split the dataset into train, validation and test sets: at this stage, we would like to highlight we are one of the few methods to try its architecture on those three splits (which we discuss for clarity), which allows to estimate the robustness to various sample complexity. Each accuracy is averaged over 5 runs and we report the standard-deviation in the Appendix. The most standard split is the \textbf{predefined split} setting: each training set is provided by \cite{kipf2016semi} and consist in 20 training nodes per class, which represent a fraction 0.052, 0.036, and 0.003 of the data for Cora, CiteSeer and PubMed respectively. 500 and 1000 nodes are respectively used as a validation and test set. 
Then, we consider the \textbf{random split} setting introduced in \cite{kipf2016semi}, which is exactly as above except that we randomly extract 5 splits of the data, and we average the accuracies among those splits. Finally, we consider the \textbf{full split} setting which was used in \cite{dropedge} and employs 5 random splits of a larger training set: a fraction 0.45, 0.54 and 0.92 of the whole labeled datasets respectively. Note that each of those tasks is transductive yet our method would require minimal adaptation to fit an inductive pipeline. For WikiCS, we followed the only guideline of \cite{mernyei2020wiki}. Our architectures are designed as follow: an IGT representation only requires 4 hyper-parameters: an adjacency matrix $\mathbfcal{A}$, an output-size $k$ for each linear isometry, a smoothness parameter $J$ and an IGT order $N$. Given that the graphs are undirected, $\mathbfcal{A}$ satisfies the assumption described in Sec. \ref{sec:IGT}, yet it would be possible to symmetrize the adjacency matrix of a directed graph. This corresponds to our unsupervised graph representation that will be then fed to a supervised classifier. Sec. \ref{sec:model} shows that our IGT representation should concentrate around the E-IGT of their respective community, which means that they should be well separated by a Linear classifier. However, there might be more intra-class variability than the one studied from the lens of our model, thus we decided to use potentially deeper models, e.g., Multi Layer Perceptrons (MLPs) as well as Linear classifiers. We use the same fixed MLP architecture for every dataset: a single hidden layer with 128 features. Our linear model is simply a fully connected layer, and each model is fed to a cross-entropy loss. 
We note that our MLP is shallow, with few units, and does not involve the graph structure by contrast to semi-supervised GCNs: we thus refer to the combination of IGT and a MLP or a Linear layer as an unsupervised graph representation for node labeling. Note also that a MLP is a scalable classifier in the context of graphs: once the IGT representation is estimated, one can learn the weight of the MLP by splitting the training set in batches, contrary to standard GCNs. We now describe our training procedure as well as the regularizaton that we incorporated: it was identical for any splits of the data. We optimized our pipeline on Cora and applied it on Citeseer, Pubmed and WikiCS, unless otherwise stated. Each parameter was cross-validated on a validation set, and we report the test accuracy on a test set that was not used until the end. First, we learn each $\{W_m\}_{m\leq N}$ via Adam for 50 epochs and a learning rate of 0.01. Once computed, the IGT features are normalized and are fed to our supervised classifier, that we train again using Adam and a learning rate of 0.01 for at most 200 epochs, with a early stopping procedure and a patience of 30. A dropout ratio which belongs to $\{0, 0.2, 0.4, 0.5, 0.6, 0.8\}$ is potentially incorporated to the one hidden layer of the MLP. On CiteSeer and PubMed, our procedure selected 0.2, on WikiCS 0.8, whereas no-dropout was added on Cora. Furthermore, we incorporated an $\ell^2$-regularization with our linear layer which we tried amongst $\{0, 0.001, 0.005, 0.01\}$: we picked 0.005 via cross-validation. We discuss here WikiCS: by cross-validation, we used $J=1, N=1, k=150$ for the linear experiment and $J=2, N=1, k=35$ for the MLP experiment. For the other datasets and every splits, we used $N=2$ and $k=10$: we note that less capacity is needed compared to WikiCS, because those datasets are simpler. For the three other datasets, for both the \textbf{predefined} and \textbf{random splits}, we fix $J=4$. 
For the \textbf{full split}, we used $J=1$ for each dataset: we noticed that increasing $J$ degrades the performance, likely because less invariance is required and can be learned from the data, because more samples are available. This makes sense, as the amount of smoothing depends on the variability exhibited by the data. Thanks to the amount of available data, the supervised classifier can estimate the degree of invariance needed for the classification task, which was not possible if using only 20 samples per community. Tab. \ref{table:semi} reports the semi-supervised accuracy for each dataset, in various settings, and compares standard supervised~\cite{gao2019graph, kipf2016semi,dropedge,chen2018fastgcn,chen2020measuring,velivckovic2017graph} and unsupervised \cite{perozzi2014deepwalk,velickovic2019deep,garcia2017learning,qu2019gmnn,hamilton2017inductive} architectures. Note that each supervised model is trained in an end-to-end manner. The unsupervised models are built differently and we discuss them now briefly: for instance, EP~\cite{garcia2017learning}, uses a node embedding with a rather different architecture from GCNs. Also, DeepWalk~\cite{perozzi2014deepwalk} is analogous to a random walk, GraphSage~\cite{hamilton2017inductive} learns an embedding with a local criterion, DGI~\cite{velickovic2019deep} relies on a mutual information criterion and finally \cite{qu2019gmnn} relies on a random field model. Note that each of those models are significantly different from ours and they do not have the same theoretical foundations and properties as ours. As expected, accuracy in the \textbf{full} setting is higher than the others. We observe that in general, supervised models outperforms unsupervised models by a large margin except on WikiCS and Citeseer for the \textbf{random} and \textbf{predefined} splits, for which an IGT obtains better accuracy: it indicates that it has a better inductive bias for this dataset. 
Note that an IGT obtains competitive accuracies amongst unsupervised representations and this is consistent with the fact that those datasets, discussed above, are likely to satisfy the hypothesis described in Sec. \ref{sec:model}. In general, a MLP outperforms a linear layer (because it has better approximation properties), except on Citeseer for which the accuracy is similar, which seems to validate that the data of Citeseer follow the model that we introduced in \ref{sec:model} on Citeseer, that leads to linear separability. \if False \begin{table}[t] \caption{Classification accuracies for the full supervised settings on Cora, Citeseer and Pubmed datasets.} \label{table:full} \begin{center} \begin{small} \begin{sc} \begin{tabular}{lcccc} \toprule Method & Cora & Citeseer & Pubmed & Data augmentation\\ \midrule GCN \cite{dropedge} & 86.6 & 79.3 & 70.7 & $\times$ \\ Fastgcn \cite{fast_gcn} & 86.5 & - & 88.8 & $\times$\\ DropEdge \cite{dropedge} & 88.2 & 80.5 & 91.7 & $\surd$ \\ \cite{adapt} & 87.4 & 79.7 & 90.6 & $\surd$ \\ \midrule MLP on $S_J^N$ (ours) & 0 & 0 & 0 & $\times$\\ \bottomrule \end{tabular} \end{sc} \end{small} \end{center} \vskip -0.1in \end{table} \fi \begin{table}[t] \caption{Classification accuracies (in \%) for each splits of Cora, Citeseer, Pubmed as well as WikiCS.} \label{table:semi} \begin{center} \begin{small} \begin{tabular}{lcccccccccc} \toprule Method/Dataset & \multicolumn{3}{c}{Cora}&\multicolumn{3}{c}{Citeseer}&\multicolumn{3}{c}{Pubmed}&WikiCS\\%Cora & Cit. & Pub. 
\\ & Full& Rand& Pred& Full& Rand& Pred& Full& Rand& Pred\\ \midrule Supervised&\\ \midrule GAT \cite{velivckovic2017graph}& &&83.0 & & & 72.5 & & & 79.0&77.2 \\ GCN \cite{kipf2016semi}& &80.1&81.5 & & 67.9& 70.3 & &\textbf{78.9} & 79.0& \textbf{77.7}\\ Graph U-Net \cite{gao2019graph} & &&\textbf{84.4}&&&\textbf{73.2}&&&\textbf{79.6}&\\ DropEdge \cite{dropedge} & \textbf{88.2}&& & \textbf{80.5} &&& \textbf{91.7}&&&\\ FastGCN \cite{chen2018fastgcn}& 85.0&& & 77.6 &&& 88.0 &&&\\ OS \cite{chen2020measuring}& &\textbf{82.3} &&& \textbf{69.7}&&& 77.4& \\ \midrule Unsupervised\\ \midrule Raw \cite{velickovic2019deep,mernyei2020wiki}& &&47.9&&&49.3&&&69.1&72.0\\ DeepWalk \cite{perozzi2014deepwalk}& &&67.2&& & 43.2&& & 65.3& 74.4\\ IGT + MLP (ours) &\textbf{87.7} &\textbf{78.3}&80.3&\textbf{78.4} &67.6& \textbf{73.1 }&\textbf{88.2}&76.2&76.4& \textbf{77.2} \\ IGT + Lin. (ours) & 83.3 &77.6&77.4& \textbf{78.4} &\textbf{73.0}&\textbf{73.1} &88.1&74.5& 73.9&76.7\\ EP \cite{garcia2017learning}&&78.1&&&71.0&&&\textbf{79.6}&\\ GraphSage \cite{hamilton2017inductive}&82.2&&&71.4&&&87.1&&&\\ DGI \cite{velickovic2019deep}&&&82.3&&&71.8&&&76.8&75.4\\ GMNN \cite{qu2019gmnn}&&&\textbf{82.8}&&&71.5&&&\textbf{81.6}&\\ \bottomrule \end{tabular} \end{small} \end{center} \end{table} \subsection{Ablation experiments} \label{sec:ablation}\begin{wraptable}{r}{0.5\textwidth} \caption{Linear classification accuracies (in \%) for the \textbf{predefined split} on Cora's \textit{validation set}, for various values of $N,J$.\label{table:linear_classif}} \begin{center} \begin{small} \begin{sc} \begin{tabular}{c|cccc} \toprule \backslashbox{J}{N} & 0 & 1 & 2 & 3\\ \midrule 1 & 62.4 & 60.8 & 62.8 & 61.4\\ 2 & 68.6 & 70.6& 72.2& 68.6 \\ 3 & 71.4& 72.2& \textbf{74.6}& 72.6 \\ 4 & 72.4& 73.2& \textbf{74.6}& 73.0\\ \bottomrule \end{tabular} \end{sc} \end{small} \end{center} \end{wraptable} In order to understand better the IGT representation, we propose to study the accuracy of an IGT representation on 
Cora's validation set, as a function of the scale $J$ and the IGT order $N$. For the sake of simplicity, we consider a linear classifier. Each linear operator is learned with 40 epochs. We picked $k=10$ and train our basic model for 200 epochs with SGD, the validation accuracies are reported in Tab. \ref{table:linear_classif}. As $N,J$ increase, we feed the features to a linear classifier: in general, for $0\leq N\leq 2$, as $N$ grows the accuracy improves. However, the order 3 IGT decreases the accuracy: this is consistent because it conveys a noise which is amplified by the standardization. As $J$ increases, we smooth our IGT features on more neighbor nodes, which results in better performances for a fixed order $N$, and is also consistent with the finding of Sec. \ref{sec:IGT}. We performed a second ablation experiment in order to test the inductive bias of our architecture: we considered random $\{W_n\}$ at evaluation time and we obtained respectively on the full split of Cora, Citeseer and Pubmed some accuracy drops of respectively 6.3\%, 5.2\% and 5.6\%. This is relatively smaller drops than DGI~\cite{velickovic2019deep} which reports for instance some drops of about $10\%$: our architecture is likely to have a better inductive bias for this task. \section{Conclusion} In this work, we introduced the IGT which is an unsupervised and semi-supervised representation for community labeling. It consists in a cascade of linear isometries and point-wise absolute values. This representation is similar to a semi-supervised GCN, yet it is trained layer-wise, without using labels, and has strong theoretical fundations. Indeed, under a SBM assumption and a large graph hypothesis, we show that an IGT representation can discriminate communities of a graph from a single realization of this graph. 
It is numerically supported by a synthetic example based on Gaussian features, which shows that an IGT can estimate the community of a given node better than a GCN because it tends to alleviate the over-smoothing phenomenon. This is further supported by our numerical experiments on the standard, challenging datasets Cora, CiteSeer, PubMed and WikiCS: with shallow supervised classifiers, we obtain numerical accuracy which is competitive with semi-supervised approaches. Future directions could be to either refine our theoretical analysis by weakening our assumptions, or to test our method on inductive tasks. Furthermore, following \cite{oyallon2020interferometric}, one can also wonder if this type of approach could be extended to more complex data, in order to obtain stronger theoretical guarantees (e.g., manifold). Finally, future works could also be dedicated to scale our algorithms to very large graphs: this is a challenging task both in terms of memory and computations. \paragraph{Broader impact.} Graph Neural Networks can be used in many domains, like protein prediction, or network analysis to cite only a few, and could become even more prevalent tomorrow. Our work is thus included in a large literature whose societal impact and ethical considerations are to become more and more important. We provide here a new model aiming at learning unsupervised node representation in community graphs graphs. While its most natural application lies in community detection in social science, we hope that the provided theoretical guarantees could be used in the future to provide safer and more readable models toward more various directions. \section{Appendix} \subsection{Proofs} \setcounter{lemma}{1} \begin{lemma}\label{proj} If $0 \preccurlyeq A \preccurlyeq I$, then $\Vert AX\Vert^2+\Vert (I-A)X\Vert^2\leq \Vert X\Vert^2$ with equality iff $A^2=A$. 
\end{lemma} \begin{proof}We note that for any $x$, we get: \begin{equation} \Vert Ax\Vert^2+\Vert (I-A)x\Vert^2=\Vert Ax\Vert^2+\Vert x\Vert^2+\Vert Ax\Vert^2-2\langle x,Ax\rangle \end{equation} Yet, $\Vert Ax\Vert^2=\langle x,A^TAx\rangle \leq \langle x,Ax\rangle$ because $\text{Sp}(A)\subset[0,1]$. Thus, \begin{equation} 2(\Vert Ax\Vert^2-\langle x,Ax\rangle)+\Vert x\Vert^2\leq \Vert x\Vert^2\,, \end{equation} with equality $\forall x$ iff $A=A^2$. It is now enough to observe that $\{A,I-A\}$ inherits from those properties. \end{proof} The following proposition explains that our representation is non-expansive, and thus stable to noise: \begin{proposition} \label{prop:lip} For $N\in\mathbb{N}$, $S_J^NX$ is 1-Lipschitz leading to: \begin{equation} \Vert S_J^NX-S_J^NY\Vert\leq\Vert X-Y\Vert\,. \end{equation} and furthermore: \begin{equation} \Vert S_J^NX\Vert\leq\Vert X\Vert\,. \end{equation} \end{proposition} \begin{proof} For two feature matrices $X,Y$, let us consider $U_{i}$ and $\tilde U_{i}$ defined from Equation \eqref{eq:u}, with $U_{0}=X$ and $\tilde U_{0}=Y$. Because $|W_i|$ is a contractive and from Lemma \ref{proj}, \begin{align} \Vert U_{i+1} - \tilde U_{i+1}\Vert^2 & \leq \Vert U_{i} - \tilde U_{i}- A_J ( U_{i} -\tilde U_{i})\Vert^2 \\ &\leq\Vert U_{i} - \tilde U_{i} \Vert^2 - \Vert A_J ( U_{i} - \tilde U_{i})\Vert^2 \end{align} Hence, \begin{align} \sum_i^N \Vert A_J ( U_{i} - \tilde U_{i})\Vert^2 & \leq \Vert X-Y\Vert^2 - \Vert U_{n} - \tilde U_{n}\Vert^2 \\ & \leq \Vert X-Y\Vert^2 \end{align} Taking $X=0$ leads to the second part as then $SX=0$. \end{proof} \setcounter{lemma}{4} This Lemma shows that a cascade of IGT linear isometries preserve sub-Gaussianity: \begin{lemma}\label{op-subg} If each line of $X$ is $\sigma$-sub-Gaussian, then each (independent) line of $\bar U_m$ is $C^m\sigma$-sub-Gaussian for some universal constant $C$. 
\end{lemma} \begin{proof} Apply the Lemma \ref{subgausscontractivity} with $W=W_n$ for $n\leq m$ leads to the result. \end{proof} \setcounter{lemma}{5} In order to show the previous Lemma, we need to demonstrate that the modulus of a sub-Gaussian variable is itself sub-Gaussian, which is shown below: \begin{lemma}\label{subgausscontractivity} If $X\in\mathbb{R}^P$ is $\sigma$-sub-Gaussian, then $|(X-\mathbb{E}X)W|$ is $C\sigma$-sub-Gaussian for some absolute value $C$. \end{lemma} \begin{proof} If $X$ is $\sigma$-sub-Gaussian, then $X-\mathbb{E}X$ is $C'\sigma$-subGaussian by recentering \cite{high_dim_proba}. We note that as $W$ is unitary, thus $(X-\mathbb{E}X)W$ is also $C'\sigma$-subgaussian. Then, let $u\in\mathbb{R}^p$ an unit vector. We note that: \begin{align} &\mathbb{P}(\sum_{i=1}^pu_i|X_i|\geq t)\\ &\leq \sum_{\epsilon_i\in\{-1,1\}} \mathbb{P}(\{\epsilon_iX_i\geq 0\}\cap\{\sum_iu_i|X_i|\geq t\})\\ &=\sum_{\epsilon_i\in\{-1,1\}} \mathbb{P}(\{\epsilon_iX_i\geq 0\}\cap\{\sum_i\epsilon_iu_iX_i\geq t\})\\ &\leq 2^p e^{-\frac{t^2}{2C'^2\sigma^2}}=e^{p\ln 2-\frac{t^2}{2C'^2\sigma^2}}\,. \end{align} This leads to the conclusion by sub-Gaussian characterization. \end{proof} \setcounter{proposition}{2} \begin{proposition}\label{prop:boundigt} For any $X, N, J$, we get: \begin{equation} \Vert S_J^NX-\bar S^N X\Vert\leq \sqrt{2} \sum_{m=0}^{N}\Vert (A_J-\mathbb{E})\bar U_m\Vert \,. \end{equation} \end{proposition} \begin{proof} Here, write $V_J^m=|(X-A_JX)W_m|, V_J^0 X=X$, and define: \begin{align} Y_J^{n,m} X=&\{A_JV_J^n...V_J^{n-m+1}X,A_JV_J^{n-1}...V_J^{n-m+1}X\\ &,...,A_JV_J^{n-m+1}X,A_JX\}, \end{align} \paragraph{Lemma.} \textit{If $A_J$ is a unitary projector and each $W_i$ is unitary, then $Y_J^mX$ is 1-Lipschitz w.r.t. $X$.} \begin{proof} We can apply the proposition 2 with the operators $\{W_{n-m+1},...,W_n\}$, as this can be interprated as an IGT with different unitary operators. 
\end{proof} Here the idea is to take advantage of the tree structure of the IGT features. Thus when $Y_J^{n,m}$ is computing $S_J^n$ to orders limited in $[n, n-m+1]$, we chain the features with the order $n-m$ to recover $Y_J^{n,m-1}$. To do so, we introduce for $m\geq 1$ : \begin{align} \Delta_J^{n,m} X&=\{Y_J^{m}V_J^{n-m}X-Y_J^{m}\bar V^{n-m}X,A_JX-\mathbb{E}X\}\\ &=\{-Y_J^{m}\bar V^{n-m}X,-\mathbb{E}X\}+Y_J^{m-1}X\,,\label{eq:delta} \end{align} where $\bar V^n X=|(X-\mathbb{E}X)W_n|$, $\bar V^0X=X$ and $\{x,y\}$ stands for a concatenation. This implies that $\Delta_J^{n,m} X$ is a $(m+1)$-uplet (and the symbol $+$ in (\ref{eq:delta}) is thus a couple addition and the convention is that left corresponds to highest order of the couple), and $\Delta_J^{0,0}X=A_JX-\mathbb{E}X=-\mathbb{E}X+S_J^0X$.\\ The sum over $m$-uplet with different size is done such that the left elements are summed first. We then notice that: \begin{equation} \sum_{m=0}^{N} \Delta_J^{N,m}\bar V^{N-m-1}...\bar V_1X=S_J^N X-\bar S^N X\, \end{equation} because each term of the couple is a telescopic sum (again here, we chain the features with orders in $[n-m-1, 1]$ to obtain the telescopy).\\ As $Y_J^{n,m}$ is 1-Lipschitz w.r.t. $X$ and since a modulus is non expansive, $\Vert |(X-A_JX)W_n|-|(X-\mathbb{E}X)W_n|\Vert\leq \Vert \mathbb{E}X-A_JX\Vert$, combining those ingredients we get: \begin{align} \Vert \Delta_J^{n,m} X\Vert^2 = & \Vert A_JX-\mathbb{E}X\Vert^2+\\ & \Vert Y_J^{m-1}|(X-A_JX)W_n|-Y_J^{m-1}|(X-\mathbb{E}X)W_n\Vert^2\\ & \leq 2 \Vert A_J X-\mathbb{E}X\Vert^2\,. \end{align} Then, we further apply the triangular inequality to get the desired result. 
\iffalse with an operator norm: \begin{align} \Vert S_n-\bar S_n \Vert_o & \leq \sqrt{2}\Vert A-\mathbb{E}\Vert_o \sum_{m=0}^{n-1}\Vert \bar V_{n-m-1}...\bar V_1 \Vert_o\\ & = \sqrt{2}\Vert A-\mathbb{E}\Vert_o \sum_{m=0}^{n-1}\Vert \bar V_m...\bar V_1 \Vert_o\\ &=\sqrt{2}\Vert A-\mathbb{E}\Vert_o \sum_{m=0}^{n-1} \Vert \bar U_m\Vert_o \end{align} By symmetry, we finally get the desired result. \fi \end{proof} The following proposition shows that in case of exact ergodicity, the IGT and Expected-IGT representations have bounded moments of order 2: \begin{proposition} Assume that $\mathbb{E}[A_JX]=\mathbb{E}X$, and that $X$ has variance $\sigma^2=\mathbb{E}\Vert X \Vert^2- \Vert \mathbb{E}X \Vert^2$, then: \begin{align} \mathbb{E}[\Vert S_J^NX-\bar S^NX\Vert^2 ] & \leq 2 \sigma^2\,. \end{align} \begin{proof} \begin{align} & \mathbb{E} [\Vert S_J^NX-\bar S^NX \Vert^2 ]=\mathbb{E}[\Vert S_J^NX\Vert^2]+\mathbb{E}[\Vert \bar S^NX\Vert^2]\\ &-2\sum_{m=0}^N \mathbb{E}[\text{Tr}( (A_J U_m)^T \mathbb{E}[\bar U_m] )]\\ &\leq 2(\mathbb{E}\Vert X\Vert^2-\sum_{m=0}^N \mathbb{E}[\text{Tr}( (A_J U_m)^T \mathbb{E}[\bar U_m] )]) \end{align} The inequality follows from Prop. \ref{proj} and Prop. \ref{prop:lip}. Now, from Lemma \ref{lemma:positive}, $A_J, U_m, \bar U_m$ have positive coefficients, thus we get: $2\sum_{m=1}^N \mathbb{E}[\text{Tr}( (A_J U_m)^T \mathbb{E}[\bar U_m] )] \geq 0$. The first term allows to conclude as $\bar U_0=U_0=X$. \end{proof} \end{proposition} \setcounter{proposition}{5} \begin{proposition} \label{prop:concentrate_each} Assume that each line of $X$ is $\sigma$-sub-Gaussian. There exists $C>1,K>0,C'>0$ such that $\forall m,\delta>0$ with probability $1-8P\delta$, we have: \begin{align} \Vert \mathbb{E}&[\mathbfcal{A}]_{\text{norm}}\bar U_m-\mathbb{E}[\bar U_m]\Vert \\ & \leq K\sigma C^m\sqrt{\ln \frac{1}{\delta}}+\tau\sqrt{n}C'\Vert \mu^2_m-\mu^1_m\Vert\,. 
\end{align} \end{proposition} \begin{proof} Here, for the sake of simplicity, $X_p$ corresponds to the $p$-th row of $X$. We write $\mu_m^j$ the expected-IGT of the node distribution of community $j$. Here, we have for $t\leq n_1$ (note that the right does not depend on $t$): \begin{align} &[\mathbb{E}[\mathbfcal{A}]_{\text{norm}}\bar U_m]_t-\mathbb{E}[\bar U_m]_t\\ &=\frac{1}{n_1p+n_2q}\big(p\sum_{i=1}^{n_1}(\bar U^i_m-\mu_m^1)+q\sum_{i=n_1+1}^{n_1+n_2}(\bar U^i_m-\mu_m^2)\big)\\ &+\frac{n_2q}{n_1p+n_2q}(\mu^2_m-\mu^1_m)\,. \end{align} Now, we note that from Lemma \ref{op-subg}, $\{\bar U^i_m\}_{i\leq n}$ is a family of $\sigma C^m$-sub-Gaussian independant r.v. From Hoeffding lemma \cite{high_dim_stat}, we obtain that for any $\delta$, we have with probability $1-4P\delta$: \begin{align*} \Vert\sum_{i=1}^{n_1}(\bar U^i_m-\mu^1_m)\Vert\leq \sqrt{n_1}\sqrt{2}\sigma C^m\sqrt{\ln\frac{1}{\delta}} \,\,\,\text{ and }\\ \Vert\sum_{i=n_1+1}^{n_1+n_2}(\bar U^i_m-\mu^2_m)\Vert\leq \sqrt{n_2}\sqrt{2}\sigma C^m\sqrt{\ln\frac{1}{\delta}}\,. \end{align*} As if $n$ is large, by hypothesis $(\frac{p\sqrt{2n_1}+q\sqrt{2n_2}}{n_1p+n_2q})\sqrt{n}=\mathcal{O}(1)$. 
We perform the same for $n_1<t\leq n_1+n_2$ We then sum along $n$ and use that $\frac{n_1}{n_2+\tau n_1}+\frac{n_2}{n_1+\tau n_2}=\mathcal{O}(1)$ and $\sqrt{a+b}\leq\sqrt{a}+\sqrt{b}$.\end{proof} \subsection{Dataset statistics} \begin{table}[th] \label{dataset_statistics} \begin{center} \caption{Dataset Statistics} \begin{tabular}{ccccccc} \bf Datasets & \bf Nodes & \bf Edges & \bf Classes & \bf Features & \textit{full} \bf Train/Val/Test & \textit{semi} \bf Train/Val/Test \\ \hline \\ Cora & 2,708 & 5,429 & 7 & 1,433 & 1,208/500/1,000 & 140/500/1,000 \\ Citeseer & 3,327 & 4,732 & 6 & 3,703 & 1,812/500/1,000 & 120/500/1,000 \\ Pubmed & 19,717 & 44,338 & 3 & 500 & 18,217/500/1,000 & 60/500/1,000 \\ WikiCS & 11,701 & 216,123 & 10 & 300 &\multicolumn{2}{c}{20 canonical train/valid/test splits}\\ \end{tabular} \end{center} \end{table} \begin{table}[t] \caption{Standard deviations of classification accuracies for each splits of Cora, Citeseer, Pubmed as well as WikiCS.} \label{table:semi} \begin{center} \begin{small} \begin{tabular}{lcccccccccc} \toprule Method/Dataset & \multicolumn{3}{c}{Cora}&\multicolumn{3}{c}{Citeseer}&\multicolumn{3}{c}{Pubmed}&WikiCS\\%Cora & Cit. & Pub. \\ & Full& Rand& Pred& Full& Rand& Pred& Full& Rand& Pred\\ \midrule Unsupervised\\ \midrule IGT + MLP (ours) & 0.5 & 0.8 & 0.9 & 0.4 & 0.8 & 0.7 & 0.6 & 0.5 & 0.3 & 0.5 \\ IGT + Lin. (ours) & 0.1 & 0.8 & 0.2 & 0.3 & 0.7 & 0.5 & 0.1 & 0.2 & 0.1 & 0.5\\ \bottomrule \end{tabular} \end{small} \end{center} \end{table} \subsection{Code and Data availability} All the code is accessible in the folder given in the supplementary materials. \subsection{Training time} We informally noticed that the training of our isometry layers converges quickly. During the supervised training, no multiplication with the adjacency matrix is involved, which can speed up the training compared to GCNs. We further report wall-clock training time in seconds until convergence for our method and for GCNs. 
For the latter, we used an implementation provided by the authors and trained on the same hardware (with GPU) as our IGT model. For Cora, Citeseer and PubMed respectively, the training time of our IGT layers was 0.45s, 0.57s and 4.88s, whereas the training time of the classification head was 0.25s, 0.24s and 0.94s. By way of comparison, GCN training time was 0.86s, 1.82s, and 1.12s. We would like to highlight that our code works on limited resources and we used a total of 10 GPU hours for developing and benchmarking this project. \end{document}
\begin{document} \title{\parbox{\textwidth}{Optimal Detection of Rotations about Unknown Axes by Coherent and Anticoherent States}} \author{John Martin} \affiliation{Institut de Physique Nucléaire, Atomique et de Spectroscopie, CESAM, University of Liège, B-4000 Liège, Belgium} \email{[email protected]} \orcid{0000-0003-0804-959X} \author{Stefan Weigert} \affiliation{Department of Mathematics, University of York, UK-York YO10 5DD, United Kingdom} \email{[email protected]} \orcid{0000-0002-6647-3252} \author{Olivier Giraud} \affiliation{Université Paris-Saclay, CNRS, LPTMS, 91405 Orsay, France} \email{[email protected]} \maketitle \begin{abstract} Coherent and anticoherent states of spin systems up to spin $j=2$ are known to be optimal in order to detect rotations by a known angle but unknown rotation axis. These optimal quantum rotosensors are characterized by minimal fidelity, given by the overlap of a state before and after a rotation, averaged over all directions in space. We calculate a closed-form expression for the average fidelity in terms of anticoherent measures, valid for arbitrary values of the quantum number $j$. We identify optimal rotosensors (i) for arbitrary rotation angles in the case of spin quantum numbers up to $j=7/2$ and (ii) for small rotation angles in the case of spin quantum numbers up to $j=5$. The closed-form expression we derive allows us to explain the central role of anticoherence measures in the problem of optimal detection of rotation angles for arbitrary values of $j$. \end{abstract} \section{Introduction and main result} Historically, advances in measurement techniques often are the reason for physics to progress. Over time,\emph{ metrology} has developed as a subject of its own, especially in the context of defining standard units of measurement for physical quantities. 
Quantum theory provides new perspectives on measurements, ranging from fundamental limitations on measurements \cite{heisenberg27}, new opportunities \cite{gio11} as well as technical challenges and even philosophical quagmires \cite{busch+2016}. From a practical point of view, quantum information science requires ever better control of microscopic systems and, hence, measurements which are as accurate as possible. More specifically, quantum metrology \cite{Nawrocki19} aims at finding bounds on the achievable measurement precision and at identifying states which would be optimal for quantum measurements or other specific tasks. The optimal transmission of a Cartesian frame~\cite{Per01} or the efficient detection of inhomogeneous magnetic fields~\cite{Hak20} are typical examples. While the classical Cram\'er-Rao theorem \cite{Rao45, Cra46} provides a lower bound on the variance of random estimators by means of the Fisher information, its quantum-mechanical counterpart provides bounds for quantum parameter estimation theory \cite{Hel76}. The quantum Cram\'er-Rao bound is expressed as the inverse of the quantum Fisher information, which can be geometrically interpreted as the (Bures) distance between two quantum states differing by an infinitesimal amount in their parameter \cite{Hub92, Hub93}. It provides lower bounds on the variance of any quantum operator whose measurement aims at estimating the parameter. Optimal measurement is achieved by maximizing the quantum Fisher information over parameter-dependent states. The quantum Cram\'er-Rao bound was calculated for instance in the reference frame alignment problem \cite{Kol08}. This problem involves estimating rotations about unknown axes. It has been shown in~\cite{Goldberg18} that spin states with vanishing spin expectation value and isotropic variances of the spin components are valuable for estimating such rotations, as they saturate the quantum Cram\'er-Rao bound for \emph{any} axis. 
Also, recently, the problem of characterizing a rotation about an unknown direction encoded into a spin-$j$ state has been considered in~\cite{MoC19}. In this paper, we are interested to determine whether a quantum system has undergone a rotation $R_{\mathbf{n}}(\eta)$ by a \emph{known} angle $\eta$ about an \emph{unknown} axis $\mathbf{n}$. Suppose first that we apply the rotation by $\eta$ to an initial state $|\psi\rangle$ about a \emph{known} axis and perform a measurement of the projector $|\psi\rangle\langle\psi|$ in the rotated state $R_{\mathbf{n}}(\eta)\ket{\psi}$. The expectation value of the observable $|\psi\rangle\langle\psi|$ is given by \begin{equation} F_{|\psi\rangle}(\eta,\mathbf{n})=|\bra\psi R_{\mathbf{n}}(\eta)|\psi\rangle|^{2}\,,\label{eq: fidelity} \end{equation} i.e.\ by the fidelity between the initial state and the final state. The fidelity $F_{|\psi\rangle}(\eta,\mathbf{n})$ equals the probability to find the quantum system in the initial state after the rotation. Thus, the probability to detect that the rotation has occurred is given by the quantity $1-F_{|\psi\rangle}(\eta,\mathbf{n})$. Therefore, the measurement will be most sensitive if the rotation is applied to states $|\psi\rangle$ which \emph{minimize} the expression \eqref{eq: fidelity} for given angle and rotation axis. Next, suppose that only the rotation angle $\eta$ is well-defined while the rotation axis is not known, as described in \cite{ChrHer17}. This situation occurs, for example, when spins prepared in the state $\ket\psi$ are---during the measurement sequence---subjected to a magnetic field whose direction randomly fluctuates on a time scale much larger than the Larmor period. Measuring the observable $|\psi\rangle\langle\psi|$ on an ensemble of identically prepared systems will now produce a value of the fidelity \eqref{eq: fidelity} averaged over all possible spatial directions $\mathbf{n}$. 
Then, the most suitable quantum states $\ket\psi$---called \emph{optimal} \emph{quantum rotosensors} in \cite{ChrHer17}---are determined by the requirement that the \emph{average fidelity} \begin{equation} {\cal F}_{|\psi\rangle}(\eta)=\frac{1}{4\pi}\int_{\mathcal{S}^{2}}F_{|\psi\rangle}(\eta,\mathbf{n})\,d\mathbf{n}\,,\label{eq: probability} \end{equation} achieves its minimum, for a given value of the parameter $\eta$. The fidelity \eqref{eq: fidelity} and its average \eqref{eq: probability} also play a role when setting up experiments which aim to determine an unknown rotation angle as accurately as possible. This is explained in more detail in Appendix~\ref{Appendix_param}. For the spin values $j=1/2,1,3/2,2$, optimal quantum rotosensors have been identified \cite{ChrHer17}, using an approach which combines analytical and numerical methods. For rotation angles $\eta$ close to $\pi$, the average fidelity is minimized systematically by \emph{coherent} spin states. Coherent spin states are strongly localized in phase space and entirely specified by a spatial direction into which they point on the Bloch sphere \cite{Are72}. For small rotation angles $\eta$, the average fidelity is minimized by \emph{anticoherent} states, which are characterized by the fact that they do not manifest any privileged direction; in this respect, they are as distinct as possible from coherent states \cite{Zimba06}. The role of anticoherent states for optimal detection of rotations has also been observed and was subsequently quantified in terms of quantum Fisher information in~\cite{Goldberg18}. Between these two extreme cases of $\eta\sim0$ and $\eta\sim\pi$, optimal states are neither coherent nor anticoherent in general. From an experimental point of view, anticoherent and other non-classical spin states have been created using a variety of physical systems. 
For instance, anticoherent states of quantum light fields have been generated using orbital angular momentum states of single photons with their usefulness for quantum metrology being established in~\cite{Bou17}. Non-classical spin states---including Schrödinger cat states (c.f.\ Sec.~\ref{sec: Optimal-quantum-rotosensors})---of highly magnetic dysprosium atoms with spin quantum number $j=8$ have been created in order to enhance the precision of a magnetometer \cite{Cha18}. The main result of the present paper is a closed-form expression of the average fidelity ${\cal F}_{|\psi\rangle}(\eta)$, valid for arbitrary values of $j$. A rather general argument, based solely on the symmetries of the average fidelity ${\cal F}_{|\psi\rangle}(\eta)$, shows that it must be a linear combination of the form \begin{equation} {\cal F}_{|\psi\rangle}(\eta)=\varphi_{0}^{(j)}(\eta)+\sum_{t=1}^{\lfloor j\rfloor}\varphi_{t}^{(j)}(\eta)\,\mathcal{A}_{t}(|\psi\rangle),\label{PexpansionAC} \end{equation} as explained in detail in Sec.~\ref{sec: Tools and concepts}. In this expression, the ${\cal A}_{t}(\ket\psi)$ are the anticoherence measures of a state $\ket\psi$, introduced in \cite{Bag17} and given explicitly in Eq.~\eqref{ACR}, while the real-valued functions $\varphi_{t}^{(j)}(\eta)$ are trigonometric polynomials independent of $\ket{\psi}$, and $\lfloor j\rfloor$ is the largest integer smaller than or equal to $j$. The main challenge is to calculate the $\eta$-dependent coefficients $\varphi_{t}^{(j)}(\eta)$, which we do in Sec.~\ref{sec: Closed-form}. In earlier works, the average fidelity ${\cal F}_{|\psi\rangle}(\eta)$ had been expressed as a sum of functions of $\eta$ weighted by \emph{state-dependent} coefficients, upon representing the state in the polarization-tensor basis \cite{ChrHer17}. 
The advantage of relation \eqref{PexpansionAC} is that the average fidelity depends on the state under consideration only through its measures of anticoherence, and thus it directly relates to the degree of coherence or anticoherence of the state. Expression \eqref{PexpansionAC} allows us to identify optimal quantum rotosensors for spin quantum numbers up to $j=5,$ thereby confirming the role played by coherent and anticoherent states beyond $j=2$. Readers mainly interested in the optimal quantum rotosensors may want to directly consult Sec.~\ref{sec: Optimal-quantum-rotosensors}. Let us outline the overall argument leading to the expression of the average fidelity ${\cal F}_{|\psi\rangle}(\eta)$ in \eqref{PexpansionAC}. In Sec.~\ref{sec: Tools and concepts}, we introduce a number of tools and concepts feeding into the derivation of \eqref{PexpansionAC}: first, we discuss the symmetries built into the average fidelity ${\cal F}_{\ket\psi}(\eta)$, followed by a brief summary of the Majorana representation which enables us to interpret spin-$j$ states as completely symmetric states of $N=2j$ qubits. This perspective allows us to introduce, for $1\leqslant t \leqslant \lfloor j\rfloor$, the anticoherence measure $\mathcal{A}_{t}(\ket\psi)$, defined as the linear entropy of the $t$-qubit reduced density matrix of $\ket\psi\bra\psi$. To actually carry out the integration in Eq.~\eqref{eq: probability}, we will use a tensor representation (see Sec.~\ref{subsec:Tensor-representation}) of mixed spin-$j$ states generalizing the Bloch representation. In addition, this representation also enables us to exploit the symmetries of the average fidelity which can only depend on expressions invariant under $\mathrm{SU}(2)$ rotations. As shown in Sec.~\ref{subsec:-Invariants}, it is then possible to establish a linear relation between these invariants and the anticoherence measures $\mathcal{A}_{t}(\ket\psi)$, which finally leads to \eqref{PexpansionAC}. 
Section~\ref{sec: Closed-form} is dedicated to deriving explicit expressions for the functions $\varphi_{t}^{(j)}(\eta)$. This will be done in two ways: the first one is based on the fact that anticoherence measures are explicitly known for certain states, so that the functions $\varphi_{t}^{(j)}(\eta)$ appear as solutions of a linear system of equations. The second approach makes use of representations of the Lorentz group and allows us to obtain a general closed expression. In Sec.~\ref{sec: Optimal-quantum-rotosensors} we make use of this closed-form expression to identify the optimal quantum rotosensors. We conclude with a brief summary given in Sec. \ref{sec:Conclusion}. \section{Concepts and tools\label{sec: Tools and concepts}} In this section, we introduce the tools that will be needed to address the optimality problem described in the Introduction. \subsection{Notation} Quantum systems with integer or half-integer spin $j$ are described by states $\ket{\psi}$ of the Hilbert space $\mathbb{C}^{N+1}$ with $N=2j$, which carries a $(N+1)$-dimensional representation of the group SU$(2)$. The components of the angular momentum operator ${\bf J}$ satisfy $[J_{k},J_{\ell}]=i\varepsilon_{k\ell m}J_{m}$, $k,\ell,m\in\{x,y,z\}$, where $\varepsilon_{k\ell m}$ is the Levi-Civita symbol. Denoting unit vectors in $\mathbb{R}^{3}$ by \begin{equation} \mathbf{n}=\begin{pmatrix}\sin\theta\cos\phi\\ \sin\theta\sin\phi\\ \cos\theta \end{pmatrix}\,,\quad\theta\in[0,\pi]\,,\quad\phi\in[0,2\pi[\,,\label{eq: unit vector} \end{equation} the operator \begin{equation} R_{\mathbf{n}}(\eta)=e^{-i\eta\mathbf{J}\boldsymbol{\cdot}\mathbf{n}}\label{rot} \end{equation} describes a rotation by an angle $\eta\in[0,4\pi[$ about the direction $\mathbf{n}$. 
\subsection{Symmetries} By definition, the average fidelity in \eqref{eq: probability} is a positive function of the angle $\eta$ and of the state $\ket{\psi}$ and possesses three symmetries: it is $2\pi$-periodic in $\eta$, symmetric about $\eta=\pi$, and invariant under rotation of $\ket{\psi}$. Periodicity with period $2\pi$ comes from the fact that $R_{\mathbf{n}}(2\pi)=(-1)^{N}$. Symmetry about $\eta=\pi$ is equivalent to \begin{equation} {\cal F}_{\ket{\psi}}(\eta)={\cal F}_{\ket{\psi}}(2\pi-\eta)\,,\label{eq: reflection symmetry for angle eta} \end{equation} which can be shown using $R_{\mathbf{n}}(2\pi-\eta)=(-1)^{N}R_{-\mathbf{n}}(\eta)$ and the fact that the set of directions averaged over in \eqref{eq: probability} is the same irrespective of the sign of the unit vector ${\bf n}$ since the fidelity \eqref{eq: fidelity} is given by the \emph{squared} modulus of the overlap between the states $|\psi\rangle$ and $R_{\mathbf{n}}(\eta)\ket{\psi}$. Invariance under rotation of $\ket{\psi}$ can be understood in the following way. Let $R_{\mathbf{m}}(\chi)=e^{-i\chi\mathbf{J}\boldsymbol{\cdot}\mathbf{m}}$ be a unitary operator representing a rotation in $\mathbb{R}^{3}$ by an angle $\chi\in[0,4\pi[$ about the direction $\mathbf{m}$, acting on a state $|\psi\rangle\in\mathbb{C}^{N+1}$. Then the average fidelities ${\cal F}$ associated with the states $|\psi\rangle$ and $|\psi^R\rangle\equiv R_{\mathbf{m}}(\chi)|\psi\rangle$ are equal. 
Indeed, we have \begin{equation} F_{|\psi^R\rangle}(\eta,\mathbf{n})=|\bra{\psi}R_{\mathbf{m}}(\chi)^{\dagger}R_{\mathbf{n}}(\eta)R_{\mathbf{m}}(\chi)|\psi\rangle|^{2}\label{eq: trf of F under rotations} \end{equation} and \begin{align} R_{\mathbf{m}}(\chi)^{\dagger}R_{\mathbf{n}}(\eta)R_{\mathbf{m}}(\chi) & =e^{-i\eta (R_{\mathbf{m}}(\chi)^{\dagger}\mathbf{J}R_{\mathbf{m}}(\chi))\boldsymbol{\cdot}\mathbf{n}}\nonumber \\ & =e^{-i\eta(R\mathbf{J})\boldsymbol{\cdot}\mathbf{n}}=e^{-i\eta\mathbf{J}\boldsymbol{\cdot}\mathbf{n}^R}\,,\label{eq: trf of R under rotation} \end{align} with ${\bf n}^R\equiv R^{T}\mathbf{n}$ the vector obtained by the rotation $R\in$ SO$(3)$ associated with $R_{\mathbf{m}}(\chi)$. Due to the invariance under rotations of the unit sphere $\mathcal{S}^{2}$ appearing in \eqref{eq: probability} (invariance of the Haar measure used), the result of the integration will be the same, leading to \begin{align} {\cal F}_{|\psi^R\rangle}(\eta) & =\frac{1}{4\pi}\int_{\mathcal{S}^{2}}F_{|\psi^R\rangle}(\eta,\mathbf{n})\,d\mathbf{n}\nonumber \\ & =\frac{1}{4\pi}\int_{\mathcal{S}^{2}}F_{|\psi\rangle}(\eta,\mathbf{n})\,d\mathbf{n}={\cal F}_{|\psi\rangle}(\eta)\,.\label{eq: invariance of av fid} \end{align} This invariance of the fidelity can be seen in a geometrically appealing way by use of the Majorana representation, which we consider now. \subsection{Majorana representation of pure spin states \label{subsec:Majorana-representation}} The Majorana representation establishes a one-to-one correspondence between spin-$j$ states and $N=2j$-qubit states that are invariant under permutation of their constituent qubits (see e.g.~\cite{Bie81,Zyczkowski_book,Coe98}). It allows one to geometrically visualise a pure spin-$j$ state as $N$ points on the unit sphere associated with the Bloch vectors of the $N$ qubits. The Majorana points are often referred to as stars, and the whole set of Majorana points of a given state as its Majorana constellation. 
Considering a spin-$j$ state $\ket{\psi}$ as an $N$-qubit state, any local unitary (LU) operation $U=u^{\otimes N}$ with $u\in \mathrm{SU}(2)$ transforms $\ket{\psi}$ into a state whose Majorana constellation is obtained by the constellation of $\ket{\psi}$ rotated by the SO$(3)$ rotation associated with $u$. Spin-coherent states take a very simple form in the Majorana representation, as they can be seen as the tensor product $\ket{\phi}^{\otimes N}$ of some spin-$1/2$ state $\ket{\phi}$. Their constellation thus reduces to an $N$-fold degenerate point. The fidelity \eqref{eq: fidelity} is given by the squared modulus of the overlap between $|\psi\rangle$ and $R_{\mathbf{n}}(\eta)\ket{\psi}$. Since the Majorana constellation of $R_{\mathbf{n}}(\eta)\ket{\psi}$ is obtained by rigidly rotating that of $|\psi\rangle$, the fidelity \eqref{eq: fidelity} only depends on the relative positions of these two sets of points. The \emph{average} transition probability ${\cal F}_{|\psi\rangle}(\eta)$ is obtained by integrating over all possible constellations obtained by rigid rotations of the Majorana constellation of $|\psi\rangle$, and therefore it must be invariant under LU. In other words, the equality \eqref{eq: invariance of av fid} takes the form ${\cal F}_{|\psi\rangle}(\eta)={\cal F}_{u^{\otimes N}|\psi\rangle}(\eta)$. \subsection{Anticoherence measures \label{subsec:Anticoherence-measures}} An order-$t$ \emph{anticoherent} state $\ket\chi$ is defined by the property that $\langle\chi|(\mathbf{J}\boldsymbol{\cdot}\mathbf{n})^{k}|\chi\rangle$ is independent of the vector $\mathbf{n}$ for all $k=1,\ldots,t$. In the Majorana representation, it is characterized by the fact that its $t$-qubit reduced density matrix is the maximally mixed state in the symmetric sector~\cite{prl}. 
The degree of coherence or $t$-anticoherence of a spin-$j$ pure state $|\psi\rangle$ can be measured by the quantities $\mathcal{A}_{t}(|\psi\rangle)$, which are positive-valued functions of $|\psi\rangle$ \cite{Bag17}. Let $\rho_{t}=\mathrm{tr}_{\neg t}\left[|\psi\rangle\langle\psi|\right]$ be the $t$-qubit reduced density matrix of the state $|\psi\rangle$ interpreted as an $N$-qubit symmetric state with $N=2j$; it is obtained by taking the partial trace over all but $t$ qubits (it does not matter which qubits are traced over since $|\psi\rangle$ is a symmetric state). The measures $\mathcal{A}_{t}(|\psi\rangle)$ are defined as the rescaled linear entropies \begin{equation} \mathcal{A}_{t}(|\psi\rangle)=\frac{t+1}{t}\left(1-\mathrm{tr}\left[\rho_{t}^{2}\right]\right)\,,\label{ACR} \end{equation} where $\mathrm{tr}\left[\rho_{t}^{2}\right]$ is the purity of $\rho_{t}$. Thus, anticoherence measures are quartic in the state $|\psi\rangle$ and range from $0$ to $1$, and are invariant under SU$(2)$ rotations. Spin-coherent states are characterized by pure reduced states and thus are the only states such that $\mathcal{A}_{t}=0$. Anticoherent states to order $t$ are characterized by $\rho_t=\mathbb{1}/(t+1)$ and thus are the only states such that $\mathcal{A}_{t}=1$. In particular, if a state $|\psi\rangle$ is anticoherent to some order $t$, then it is necessarily anticoherent to all lower orders $t'=1,\ldots,t$ since reductions of the maximally mixed state are maximally mixed. While for any state we have $0\leqslant\mathcal{A}_{t}\leqslant1$, not all possible tuples $(\mathcal{A}_{1},\mathcal{A}_{2},\ldots)$ are realised by a physical state $|\psi\rangle$. For instance, since $\mathcal{A}_{t}=1$ implies that $\mathcal{A}_{t'}=1$ for all $t'\leqslant t$, the choice $\mathcal{A}_{2}=1$ and $\mathcal{A}_{1}<1$ cannot correspond to any state. We denote the domain of admissible values of the measures $\mathcal{A}_{t}$ by $\Omega$. 
\subsection{Tensor representation of mixed states \label{subsec:Tensor-representation}} We now introduce a tensor representation of an arbitrary (possibly mixed) spin-$j$ state $\rho$ acting on a $(N+1)$-dimensional Hilbert space with $N=2j$, following~\cite{prl}. Any state can be expanded as \begin{equation} \rho=\frac{1}{2^{N}}\,x_{\mu_{1}\mu_{2}\ldots\mu_{N}}S_{\mu_{1}\mu_{2}\ldots\mu_{N}}.\label{rhoarbitrary} \end{equation} Here and in what follows, we use Einstein summation convention for repeated indices, with Greek indices running from $0$ to $3$ and Latin indices running from $1$ to $3$. Here, the $S_{\mu_{1}\mu_{2}\ldots\mu_{N}}$ are $(N+1)\times(N+1)$ Hermitian matrices invariant under permutation of the indices. The $x_{\mu_{1}\mu_{2}\ldots\mu_{N}}$ are real coefficients also invariant under permutation of their indices, which enjoy what we call the tracelessness property \begin{equation} \sum_{a=1}^{3}x_{aa\mu_{3}\ldots\mu_{N}}=x_{00\mu_{3}\ldots\mu_{N}}\,,\quad\forall\;\mu_{3},\ldots,\mu_{N}.\label{traceless} \end{equation} Whenever $x_{\mu_{1}\mu_{2}\ldots\mu_{N}}$ has some indices equal to $0$, we take the liberty to omit them, so that e.g.~for a spin-$3$ state $x_{110200}$ may be written $x_{112}$ (recall that the order of the indices does not matter). In the case of a spin-coherent state given by its unit Bloch vector $\mathbf{n}=(n_{1},n_{2},n_{3})$, the coefficients in \eqref{rhoarbitrary} are simply given by $x_{\mu_{1}\mu_{2}\ldots\mu_{N}}=n_{\mu_{1}}n_{\mu_{2}}\ldots n_{\mu_{N}}$, with $n_{0}=1$. In the following, we will make use of two essential properties of the tensor representation. Namely, let us consider a state $\rho$ with coordinates $x_{\mu_{1}\mu_{2}\ldots\mu_{N}}$ in the expansion \eqref{rhoarbitrary}. Then, the tensor coordinates of the $t$-qubit reduced state $\rho_{t}$ in the expansion \eqref{rhoarbitrary} are simply given by $x_{\mu_{1}\mu_{2}\ldots\mu_{t}}=x_{\mu_{1}\mu_{2}\ldots\mu_{t}0...0}$. 
Thus, since we omit the zeros in the string $\mu_{1}\mu_{2}\ldots\mu_{N}$, the tensor coordinates of $\rho_{t}$ and $\rho$ coincide for any string of $k\leqslant t$ nonzero indices. The second property we use is that for states $\rho$ and $\rho'$ in the form \eqref{rhoarbitrary} with tensor coordinates respectively $x_{\mu_{1}\mu_{2}\ldots\mu_{N}}$ and $x'_{\mu_{1}\mu_{2}\ldots\mu_{N}}$ we have \begin{equation} \mathrm{tr}\left[\rho\rho'\right]=\frac{1}{2^{N}}\sum_{\mu_{1},\mu_{2},...,\mu_{N}}x_{\mu_{1}\mu_{2}\ldots\mu_{N}}x'_{\mu_{1}\mu_{2}\ldots\mu_{N}}. \end{equation} Note that this equality holds despite the fact that the $S_{\mu_{1}\mu_{2}\ldots\mu_{N}}$ are not orthogonal; this property follows from the fact that these matrices form a $2^N$-tight frame, see~\cite{prl}. In particular, for a pure state $\rho=\ket{\psi}\bra{\psi}$, the equality $\mathrm{tr}\rho^{2}=1$ translates into \begin{equation} \sum_{\mu_{1},\mu_{2},...,\mu_{N}}x_{\mu_{1}\mu_{2}\ldots\mu_{N}}^{2}=2^{N},\label{forpure} \end{equation} while the purity of the reduced density matrix $\rho_{t}$ reads \begin{equation} \mathrm{tr}\left[\rho_{t}^{2}\right]=\frac{1}{2^{t}}\sum_{\mu_{1},\mu_{2},...,\mu_{t}}x_{\mu_{1}\mu_{2}\ldots\mu_{t}}^{2}\,.\label{trrt2} \end{equation} The normalization condition $\mathrm{tr}\left[\rho\right]=1$ imposes $x_{00\ldots0}=1$. A consequence of \eqref{traceless} is then that $\sum_{a=1}^3x_{aa}=1$. 
\subsection{SU$(2)$-Invariants \label{subsec:-Invariants}} If $u\in$ SU(2) and $R\in$ SO(3) is the corresponding rotation matrix, then the tensor coordinates of $U\rho U^{\dagger}$ with $U=u^{\otimes N}$ are the $\mathsf{R}_{\mu_{1}\nu_{1}}\ldots\mathsf{R}_{\mu_{N}\nu_{N}}x_{\nu_{1}\ldots\nu_{N}}$ where $\mathsf{R}$ is the $4\times4$ orthogonal matrix \begin{equation} \mathsf{R}=\left(\begin{array}{c|c} \begin{array}{c} 1\end{array} & \begin{array}{c} 0\end{array}\\ \hline \begin{array}{c} 0\end{array} & \begin{array}{c} R\end{array} \end{array}\right).\label{matrixR} \end{equation} That is, $x_{\mu_{1}\mu_{2}\ldots\mu_{N}}$ transforms as a tensor. Under such transformations, $x_{\mu}x_{\mu}$ goes into $\mathsf{R}_{\mu\nu}\mathsf{R}_{\mu\nu'}x_{\nu}x_{\nu'}=(\mathsf{R}^{T}\mathsf{R})_{\nu'\nu}x_{\nu}x_{\nu'}=x_{\nu}x_{\nu}$, where the last equality comes from orthogonality of $\mathsf{R}$. Thus $x_{\mu}x_{\mu}$ is an SU(2) invariant. Similarly, $x_{\mu}x_{\mu\nu}x_{\nu}$ and, more generally, any product of the $x_{\mu_{1}\mu_{2}\ldots\mu_{N}}$ such that all indices are contracted (i.e.\ summed from $0$ to $3$), are invariant under SU(2) action on $\rho$. One can then show by induction that products of terms $x_{a_{1}a_{2}\ldots a_{k}}$ with $k\leqslant N$ where all indices appear in pairs and are summed from $1$ to $3$ are also SU(2) invariant. For instance, $x_{a}x_{a}$, $x_{ab}x_{ab}$, $x_{ab}x_{bc}x_{ca}$, $x_{a}x_{ab}x_{b}$ are such invariants. Invariants of degree 1 in $x$ are of the form $x_{a_{1}a_{2}\ldots a_{2k}}$, where the $a_{i}$ appear in pairs. Since the order of indices is not relevant, these invariants are in fact of the form $x_{a_{1}a_{1}a_{2}a_{2}\ldots a_{k}a_{k}}$. Because of Eq.~\eqref{traceless}, each pair can be replaced by zeros in the string, so that $x_{a_{1}a_{1}a_{2}a_{2}\ldots a_{k}a_{k}}=x_{00\ldots0}=1$. Therefore, there is no invariant of degree 1. 
The invariants of degree 2 are products of the form $x_{a_{1}a_{2}\ldots a_{k}}x_{b_{1}b_{2}\ldots b_{k'}}$ where indices appear in pairs and are summed from $1$ to $3$. If the two indices of a pair appear in the same index string ($a_{1}a_{2}\ldots a_{k}$ or $b_{1}b_{2}\ldots b_{k'}$), then from Eq.~\eqref{traceless}, they can again be replaced by zeros and discarded. Thus the invariants of degree 2 are $\kappa_{1}=x_{a}x_{a}$, $\kappa_{2}=x_{ab}x_{ab}$, and more generally, for $1\leqslant r\leqslant N$, \begin{equation} \kappa_{r}=x_{a_{1}a_{2}...a_{r}}x_{a_{1}a_{2}...a_{r}}.\label{defkappa} \end{equation} Using \eqref{ACR} and \eqref{trrt2} one can express the invariants $\kappa_{r}$ in terms of a linear combination of the $\mathcal{A}_{t}$. Indeed, grouping together terms with the same number of nonzero indices in \eqref{trrt2} yields \begin{equation} \mathrm{tr}\left[\rho_{t}^{2}\right]=\frac{1}{2^{t}}\sum_{\mu_{1},\mu_{2},...,\mu_{t}}x_{\mu_{1}\mu_{2}\ldots\mu_{t}}^{2}=\frac{1}{2^{t}}\sum_{r=0}^{t}\binom{t}{r}\kappa_{r}\,.\label{trbis} \end{equation} Inverting that relation via the binomial inversion formula, we obtain \begin{equation} \kappa_{r}=\sum_{t=0}^{r}(-1)^{t+r}\,2^{t}\binom{r}{t}\mathrm{tr}\left[\rho_{t}^{2}\right]\,,\label{invrel} \end{equation} and by use of \eqref{ACR} we finally can express the $\mathrm{SU}(2)$-invariants in terms of anticoherence measures, \begin{equation} \kappa_{r}=\sum_{t=0}^{r}(-1)^{t+r}\,2^{t}\binom{r}{t}\left(1-\frac{t}{t+1}\mathcal{A}_{t}\right)\label{kappar} \end{equation} for $r=1,\ldots,N$. \subsection{General form of the average fidelity} Let us now explain why the average fidelity ${\cal F}_{|\psi\rangle}(\eta)$ given in Eq.~\eqref{PexpansionAC} is a linear combination of the lowest $\left\lfloor j\right\rfloor $ anticoherence measures $\mathcal{A}_{t}$. 
Due to its rotational symmetry, the average fidelity ${\cal F}_{|\psi\rangle}(\eta)$---when considered as a function of the tensor coordinates $x_{\mu_{1}\mu_{2}\ldots\mu_{N}}$---can only involve invariants constructed from these coordinates. With ${\cal F}_{|\psi\rangle}(\eta)$ being quadratic in $\rho=\ket{\psi}\bra{\psi}$, it must also be quadratic in $x$. As there is no invariant of degree 1, the only invariants that can appear in the expression of ${\cal F}_{|\psi\rangle}(\eta)$ are the invariants $\kappa_{r}$ defined in \eqref{defkappa}. Since the quantity ${\cal F}_{|\psi\rangle}(\eta)$ is quadratic it must be a linear combination of the coefficients $\kappa_{r}$ which, according to Eq.~\eqref{kappar}, implies that ${\cal F}_{|\psi\rangle}(\eta)$ is also a linear combination of the $\mathcal{A}_{t}$. Furthermore, the identity \begin{equation} \mathrm{tr}\left[\rho_{t}^{2}\right]=\mathrm{tr}\left[\rho_{N-t}^{2}\right]\,,\label{eq: t vs N-t symmetry} \end{equation} which holds for any pure state, means that the anticoherence measures $\mathcal{A}_{t}$ for $t>N/2$ can be expressed in terms of the measures $\mathcal{A}_{t}$ for $t< N/2$. Therefore, \eqref{PexpansionAC} is the most general form the fidelity ${\cal F}_{|\psi\rangle}(\eta)$ can take, with the dependence in $\eta$ being only in the coefficients of the measures $\mathcal{A}_{t}$. \subsection{Generalizations} It is worth stressing that the form \eqref{PexpansionAC} for the average fidelity also holds for more general types of average fidelity \begin{equation}\label{genfid} \frac{1}{4\pi}\int_{S^{2}}|\langle\psi|U_{\mathbf{n}}(\eta)|\psi\rangle|^{2}\,d\mathbf{n} \end{equation} between a state $|\psi\rangle$ and its image under the unitary \begin{equation} U_{\mathbf{n}}(\eta)=e^{-i\eta\, f(\mathbf{J}\boldsymbol{\cdot}\mathbf{n})}, \end{equation} where $f$ is an arbitrary real analytic function, ensuring that $f(\mathbf{J}\boldsymbol{\cdot}\mathbf{n})$ is an Hermitian operator. 
Indeed, from an argument similar to that of Sec.~\ref{subsec:-Invariants}, the generalized fidelity \eqref{genfid} can be expressed as a function of the $\kappa_r$ and hence of the $\mathcal{A}_t$. An interesting case is when $U_{\mathbf{n}}(\eta)$ is a spin-squeezing operator, which corresponds to choosing $f(\mathbf{J}\boldsymbol{\cdot}\mathbf{n})=(\mathbf{J}\boldsymbol{\cdot}\mathbf{n})^2$. Moreover, if we now consider the quantities \begin{equation}\label{genfid2} \frac{1}{4\pi}\int_{S^{2}}|\langle\psi|U_{\mathbf{n}}(\eta)|\psi\rangle|^{2k}\,d\mathbf{n} \end{equation} with integer $k\geqslant 2$, the same arguments show that they are linear combinations of higher-order invariants, leading to generalizations of the relation \eqref{kappar}. \section{Closed form of the average fidelity \label{sec: Closed-form}} In this section we derive the angular functions $\varphi_{t}^{(j)}(\eta)$, which characterize the fidelity through \eqref{PexpansionAC}, in two different ways. The first method (subsection \ref{closed1}) is based on the fact that anticoherence measures can be evaluated explicitly for Dicke states. The second method (subsection \ref{closed2}) exploits a tensor representation of spin states \cite{prl} which uses Feynman rules from relativistic spin theory. These approaches are independent and we checked, for all integers and half-integers $j$ up to $26$, that as expected they yield the same angular functions. Technical detail is delegated to appendices in both cases. \subsection{Derivation based on anticoherence measures for Dicke states \label{closed1} } In the following, we will work in the standard angular momentum basis of $\mathbb{C}^{N+1}$, for positive integer or half-integer value of $j=N/2$. It consists of the Dicke states $\left\{ \ket{j,m},\left|m\right|\leqslant j\right\} $ given by the common eigenstates of $\mathbf{J}^{2}$, the square of the angular momentum operator $\mathbf{J}$, and of its $z$-component $J_{z}$. 
In this basis, any spin-$j$ state $|\psi\rangle$ can be expanded as \begin{equation} |\psi\rangle=\sum_{m=-j}^{j}c_{m}\,\ket{j,m}\,,\label{eq:jm_decomp} \end{equation} with $c_{m}\in\mathbb{C}$ and $\sum_{m=-j}^{j}|c_{m}|^{2}=1$. The first derivation is based on the fact that both the measures of $t$-anticoherence $\mathcal{A}_{t}(|j,m\rangle)$ and the average fidelities ${\cal F}_{|j,m\rangle}(\eta)$ can be determined explicitly for Dicke states. Their measures of $t$-anticoherence are given by \begin{equation} \mathcal{A}_{t}(|j,m\rangle)=\frac{t+1}{t}\left[1-\frac{\sum_{\ell=0}^{t}\binom{j+m}{t-\ell}^{2}\binom{j-m}{j-m-\ell}^{2}}{\binom{2j}{t}^{2}}\right]. \label{ACRDicke} \end{equation} They can readily be obtained from the purities $\mathrm{tr}\left[\rho_{t}^{2}\right]$ for a state of the form \eqref{eq:jm_decomp}, which were calculated in \cite{Bag17} in terms of the coefficients $c_{m}$ and read \begin{equation} \mathrm{tr}\left[\rho_{t}^{2}\right]=\sum_{q,\ell=0}^t\left| \sum_{k=0}^{2j-t} c_{j-k-\ell}^* \, c_{j-k-q} \,\Gamma_k^{\ell q} \right|^2 \label{puritiescm} \end{equation} with \begin{equation} \Gamma_k^{\ell q}=\frac{\sqrt{\binom{2j-k-q}{t-q}\binom{2j-k-\ell}{t-\ell}\binom{k+q}{k}\binom{k+\ell}{k}}}{\binom{2j}{t}}. \end{equation} As for the fidelity, the calculation is done in Appendix \ref{sec: appendix C (Dicke)} and yields \begin{equation} {\cal F}_{|j,m\rangle}(\eta)=\frac{1}{(2j+1)^{2}}\sum_{\ell=0}^{2j}(2\ell+1)(C_{jm\ell0}^{jm}\,\chi_{\ell}^{j}(\eta))^{2}\,,\label{PDicke} \end{equation} with Clebsch-Gordan coefficients $C_{jm\ell0}^{jm}$ and the functions $\chi_{\ell}^{j}(\eta)$ defined in Eqs.~\eqref{chilj}--\eqref{chij}. 
The angular functions $\varphi_{t}^{(j)}(\eta)$ are then solutions of the system of linear equations \begin{equation} \left\{ \begin{array}{l} {\cal F}_{|j,m\rangle}(\eta)=\varphi_{0}^{(j)}(\eta)+\sum_{t=1}^{\lfloor j\rfloor}\varphi_{t}^{(j)}(\eta)\,\mathcal{A}_{t}(|j,m\rangle)\\[8pt] \mathrm{for}\;\,m=j,j-1,\ldots,j-\lfloor j\rfloor. \end{array}\right.\label{syseq} \end{equation} This system can easily be solved for the lowest values of $j$. A general (but formal) solution can then be obtained by inverting the system \eqref{syseq}. \subsection{Derivation based on relativistic Feynman rules and tensor representation of spin states \label{closed2}} The second approach allows us to derive a closed-form expression for the functions $\varphi_{t}^{(j)}(\eta)$. It is based on an expansion of the operator \begin{equation} \Pi^{(j)}(q)\equiv(q_{0}^{2}-|{\mathbf q}|^{2})^{j}\,e^{-2\theta_{q}\,\hat{{\bf q}}\boldsymbol{\cdot}{\bf J}},\label{lorentzboost} \end{equation} with $\tanh\theta_{q}=-|{\mathbf q}|/q_{0}$ and $\hat{{\bf q}}={\mathbf q}/|{\mathbf q}|$, as a multivariate polynomial in the variables $q_{0},q_{1} ,q_{2},q_{3}$. This operator is a $(N+1)$-dimensional representation (with $N=2j$) of a Lorentz boost in the direction of the 4-vector $q=(q_{0},{\mathbf q})=(q_{0},q_{1},q_{2},q_{3})$. As shown in \cite{Wei64}, it can be written as \begin{equation} \Pi^{(j)}(q)=(-1)^{2j}q_{\mu_{1}}q_{\mu_{2}}\ldots q_{\mu_{2j}}S_{\mu_{1}\mu_{2}\ldots\mu_{2j}}.\label{egaliteweinberg} \end{equation} The identification of Eqs.~\eqref{lorentzboost} and \eqref{egaliteweinberg} defines the $(N+1)\times(N+1)$ matrices $S_{\mu_{1}\ldots\mu_{N}}$ appearing in \eqref{rhoarbitrary} (see~\cite{prl} for detail). 
Taking \begin{equation} q_{0}=i\cot(\eta/2)\quad\mbox{and}\quad q_{i}=n_{i}\,,\quad i=1,2,3\,,\label{eq: q0 + qi} \end{equation} in \eqref{lorentzboost}, we see that $\Pi^{(j)}(q)$ reduces to a rotation operator, \begin{equation} R_{\mathbf{n}}(\eta)=e^{-i\eta\mathbf{J}\boldsymbol{\cdot}\mathbf{n}}=\frac{\Pi^{(j)}(q)}{m^{N}}\label{eq: rot as Pi} \end{equation} with \begin{equation} m^{2}=q_{0}^{2}-|{\mathbf q}|^{2}=-\frac{1}{\sin^{2}(\eta/2)}.\label{eqm} \end{equation} Moreover, for a state $\rho$ given by \eqref{rhoarbitrary} we have \begin{equation} \mathrm{tr}\left[\rho\,\Pi^{(j)}(q)\right]=(-1)^{N}x_{\mu_{1}\mu_{2}\ldots\mu_{N}}q_{\mu_{1}}\ldots q_{\mu_{N}},\label{eq:xq-1} \end{equation} according to Eq.~(24) of \cite{prl}, which holds for any 4-vector $q$. Thus, with $\rho=\ket{\psi}\bra{\psi}$, using the identity \eqref{eq: rot as Pi} and the expansion \eqref{egaliteweinberg} for the rotation operator in \eqref{eq: fidelity} allows us to explicitly perform the integral in Eq.~\eqref{eq: probability}, resulting in \begin{equation} \begin{aligned} {\cal F}_{|\psi\rangle}(\eta) ={}& \frac{1}{4\pi}\int_{\mathcal{S}^{2}}|\bra{\psi}R_{\mathbf{n}}(\eta)\ket{\psi}|^{2}\,d\mathbf{n}\\ ={}& \frac{1}{4\pi}\int_{\mathcal{S}^{2}}\left|\textrm{tr}\left[\rho\frac{\Pi^{(j)}(q)}{m^{N}}\right]\right|^{2}d\mathbf{n}\\ ={}& (-1)^{N}\frac{x_{\mu_{1}\ldots\mu_{N}}x_{\nu_{1}\ldots\nu_{N}}}{4\pi}\\ & \times \int_{\mathcal{S}^{2}}\frac{q_{\mu_{1}}\ldots q_{\mu_{N}}q_{\nu_{1}}^{*}\ldots q_{\nu_{N}}^{*}}{m^{2N}}\,d\mathbf{n}, \end{aligned} \label{integxx-1} \end{equation} where $*$ denotes complex conjugation (which acts on $q_{0}$ only because of the choice \eqref{eq: q0 + qi} and using $|m|^{2}=-m^{2}$). 
Each term $q_{\mu_{1}}\ldots q_{\nu_{N}}^{*}$ with $2(N-k)$ indices equal to 0 is proportional to \begin{equation} \frac{q_{0}^{2(N-k)}}{m^{2N}}=(-1)^{k}\sin^{2k}\left(\frac{\eta}{2}\right)\cos^{2(N-k)}\left(\frac{\eta}{2}\right).\label{q0k-1} \end{equation} For the remaining $2k$ nonzero indices, we have from \eqref{eq: q0 + qi} that $q_{i}=n_{i}$, so that \eqref{integxx-1} involves an integral of the form \begin{equation} \frac{1}{4\pi}\int_{\mathcal{S}^{2}}n_{a_{1}}n_{a_{2}}\ldots n_{a_{2k}}\,d\mathbf{n}\,,\qquad1\leqslant a_{i}\leqslant3 \,.\label{ints-1} \end{equation} These integrals are performed in Appendix \ref{appexplicit}. The integrals \eqref{ints-1} are in fact precisely given by the tensor coordinates $x_{a_{1}a_{2}\ldots a_{2k}}^{(0)}$ of the maximally mixed state, whose expression is explicitly known. One can therefore rewrite \eqref{integxx-1} as \begin{equation} \begin{aligned} & {\cal F}_{|\psi\rangle}(\eta)=\sum_{k=0}^{N}(-1)^{N}\frac{q_{0}^{2(N-k)}}{m^{2N}}\\ & \times\hspace{-0.5cm}\sum_{\genfrac{}{}{0pt}{1}{\boldsymbol{\mu}{,}\boldsymbol{\nu}}{2(N-k)\textrm{ zeros}}}\hspace{-0.5cm}(-1)^{\textrm{nr of 0 in }\boldsymbol{\nu}}x_{\mu_{1}\ldots\mu_{N}\nu_{1}\ldots\nu_{N}}^{(0)}x_{\mu_{1}\ldots\mu_{N}}x_{\nu_{1}\ldots\nu_{N}}\,, \end{aligned} \label{Ptot-1} \end{equation} where the sum over $\boldsymbol{\mu}{,}\boldsymbol{\nu}$ runs over all strings of indices (between 0 and 3) containing $2(N-k)$ zeros. 
An explicit expression for this sum is derived in Appendix~\ref{appexplicit}, leading to the compact expression \begin{equation} {\cal F}_{|\psi\rangle}(\eta)=\sum_{k=0}^{N}\sin^{2k}\left(\frac{\eta}{2}\right)\cos^{2(N-k)}\left(\frac{\eta}{2}\right)\sum_{t=0}^{N}a_{t,k}^{(j)}\;\mathrm{tr}\left[\rho_{t}^{2}\right]\,,\label{Ptotmain} \end{equation} with numbers \begin{equation} a_{t,k}^{(j)}=\frac{4^{t}(-1)^{k+t}\binom{2N}{2k}\binom{k}{t}\binom{2N-2t}{N-t}}{(2k+1)\binom{2N}{N}}.\label{ajtk} \end{equation} Note that the sum over $k$ in \eqref{Ptotmain} can start at $k=t$ because the factor $\binom{k}{t}$ in $a_{t,k}^{(j)}$ implies that $a_{t,k}^{(j)}=0$ for $t>k$. Using the symmetry $\mathrm{tr}\left[\rho_{t}^{2}\right]=\mathrm{tr}\left[\rho_{N-t}^{2}\right]$ we may rewrite \eqref{Ptotmain} as \begin{equation} \begin{aligned}{\cal F}_{|\psi\rangle}(\eta)={} & \sum_{k=0}^{N}\sin^{2k}\left(\frac{\eta}{2}\right)\cos^{2(N-k)}\left(\frac{\eta}{2}\right)\\ & \times\sum_{t=0}^{\lfloor j\rfloor}\left(a_{t,k}^{(j)}+a_{N-t,k}^{(j)}\right)\left(1-\frac{\delta_{jt}}{2}\right)\mathrm{tr}\left[\rho_{t}^{2}\right]. \end{aligned} \label{Ptotmainsym} \end{equation} From \eqref{ACR} we obtain a relation between $\mathcal{A}_{t}$ and $\mathrm{tr}\left[\rho_{t}^{2}\right]$, namely $\mathrm{tr}\left[\rho_{t}^{2}\right]=1-\frac{t}{t+1}\mathcal{A}_{t}$, which yields the explicit expression of the polynomials $\varphi_{t}^{(j)}(\eta)$ in Eq.~\eqref{PexpansionAC} as \begin{equation} \varphi_{t}^{(j)}(\eta)=\sum_{k=t}^{N}b_{t,k}^{(j)}\,\sin^{2k}\left(\frac{\eta}{2}\right)\cos^{2(N-k)}\left(\frac{\eta}{2}\right),\label{Phimain} \end{equation} with coefficients \begin{equation} b_{t,k}^{(j)}=\left\{ \begin{array}{ll} {\displaystyle -\frac{t}{t+1}\left(a_{t,k}^{(j)}+a_{N-t,k}^{(j)}\right)\left(1-\frac{\delta_{jt}}{2}\right)} & t\neq0\\ {\displaystyle \frac{\binom{N}{k}}{2k+1}} & t=0\,. 
\end{array}\right.\label{btk} \end{equation} Note that although $q_{0}$ and $m$ are not well-defined for $\eta=0$, the ratio in \eqref{q0k-1} always is, so that the expression above is valid over the whole range of values of $\eta$. For spin-coherent states, all $\mathcal{A}_{t}$ vanish and thus ${\cal F}_{|\psi\rangle}(\eta)=\varphi_{0}^{(j)}(\eta)$ from Eq.~\eqref{PexpansionAC}, which coincides with the expression obtained in~\cite{ChrHer17}. For the smallest values of $j$, we recover the functions obtained in Section \ref{closed1}. In the following section, we will use the functions $\varphi_{t}^{(j)}(\eta)$ given in \eqref{Phimain} to identify optimal quantum rotosensors. \section{Optimal quantum rotosensors \label{sec: Optimal-quantum-rotosensors}} \subsection{Preliminary remarks} We now address the question of finding the states $|\psi\rangle$ which minimize the average fidelity ${\cal F}_{|\psi\rangle}(\eta)$ for fixed rotation angles $\eta$. According to Eq.~\eqref{PexpansionAC}, the fidelity is a \emph{linear} function of the anticoherence measures $\mathcal{A}_{t}$ with $1\leqslant t\leqslant\lfloor j\rfloor$. Linearity, when combined with the fact that the domain $\Omega$, over which the measures $\mathcal{A}_{t}$ vary, is \emph{bounded}, implies that the fidelity must attain its minimum on the boundary. The minimization problem thus amounts to characterizing this domain $\Omega$. Unfortunately, even for the smallest values of $j$, no simple descriptions of this domain are known. We will first determine the states minimizing the $2\pi$-periodic average fidelity for values of $j$ up to $j=7/2$, with the rotation angle taking values in the interval $\eta\in[0,\pi]$ (which is sufficient due to the symmetry \eqref{eq: reflection symmetry for angle eta}). Then we will examine the limiting case of angles $\eta$ close to $0$ for arbitrary values of the quantum number $j$. 
Throughout this section, we will expand arbitrary states with spin $j$ in terms of the Dicke states, as shown in Eq.~\eqref{eq:jm_decomp}. For spins up to $j=2$ the states minimizing the average fidelity ${\cal F}_{\ket\psi}(\eta)$ are known \cite{ChrHer17}. In Sec.~\ref{subsec:jupto2}, we show that our approach based on the expression \eqref{PexpansionAC} correctly reproduces these results. Then, in Sec.~\ref{subsec:ju5o2pto7o2}, we consider the minimization problem for spin quantum numbers up to $j=7/2$, mainly identifying the optimal rotosensors within various ranges of the rotation angle $\eta$ by numerical techniques. More specifically, for a fixed angle $\eta$, ${\cal F}_{\ket\psi}(\eta)$ is a function of the $\mathcal{A}_{t}$ which can be parametrized by the complex coefficients $c_{m}$ entering the expansion \eqref{eq:jm_decomp} of the state $|\psi\rangle$ in the Dicke basis (see Eq.~\eqref{puritiescm}). We search numerically for the minimum value of ${\cal F}_{\ket\psi}(\eta)$ with respect to the $c_{m}$, taking into account the normalization condition $\sum_{m}|c_{m}|^{2}=1$. In most cases this numerical search converges towards states which have simple analytic expressions which are the ones that we give. For each value of $j$, we performed this search at about 1000 evenly spaced values of $\eta$ in order to explore the whole range of rotation angles. Whenever we find a region of values of $\eta$ in which $|\psi_{1}\rangle$ is the optimal state adjacent to a region where $|\psi_{2}\rangle$ is optimal, at the critical angle separating these two regions, one should have ${\cal F}_{|\psi_{1}\rangle}(\eta)={\cal F}_{|\psi_{2}\rangle}(\eta)$ because the average fidelity ${\cal F}_{|\psi\rangle}(\eta)$ is a continuous function of $|\psi\rangle$. 
Therefore, the critical angle is a solution of the equation \begin{equation}\label{eqcritical} \sum_{t=1}^{\lfloor j\rfloor}\varphi_{t}^{(j)}(\eta)\,\mathcal{A}_{t}(|\psi_{1}\rangle)=\sum_{t=1}^{\lfloor j\rfloor}\varphi_{t}^{(j)}(\eta)\,\mathcal{A}_{t}(|\psi_{2}\rangle). \end{equation} \subsection{Rotosensors for arbitrary rotation angles $\eta$ and $j\protect\leqslant2$ \label{subsec:jupto2}} \subsubsection{$j=1/2$} For a spin $1/2$, all pure states are coherent: each state $\ket\psi$ can be obtained by a suitable rotation of the state $|\tfrac{1}{2},\tfrac{1}{2}\rangle$. Since the fidelity is invariant under rotation, all states are equally sensitive to detect rotations for any angle $\eta$. \subsubsection{$j=1$} For $j=1$, the expansion \eqref{PexpansionAC} takes the form \begin{equation} {\cal F}_{|\psi\rangle}(\eta)=\varphi_{0}^{(1)}(\eta)+\varphi_{1}^{(1)}(\eta)\,\mathcal{A}_{1}\,, \end{equation} with \begin{equation} \begin{aligned}\varphi_{0}^{(1)}(\eta)={} & \frac{1}{15}\big(6\cos(\eta)+\cos(2\eta)+8\big),\\ \varphi_{1}^{(1)}(\eta)={} & -\frac{1}{15}\big(2\cos(\eta)-3\cos(2\eta)+1\big). \end{aligned} \end{equation} The first strictly positive zero of $\varphi_{1}^{(1)}(\eta)$ is given by $\eta_{0}=\arccos(-2/3)$. In the interval $\eta\in[0,\eta_{0}[$, where $\varphi_{1}^{(1)}(\eta)$ is negative, the fidelity ${\cal F}_{|\psi\rangle}(\eta)$ is minimized by states with $\mathcal{A}_{1}=1$, i.e. by $1$-anticoherent states. For $\eta=\eta_{0}$, the fidelity takes the same value for all states $|\psi\rangle$, namely ${\cal F}_{|\psi\rangle}(\eta_{0})=\varphi_{0}^{(1)}(\eta_{0})=7/27$. For rotation angles in the remaining interval, $\eta\in]\eta_{0},\pi]$, where $\varphi_{1}^{(1)}(\eta)$ is positive, ${\cal F}_{|\psi\rangle}(\eta)$ is minimized for states with $\mathcal{A}_{1}=0$, i.e.\ coherent states. Thus, we indeed recover the results obtained in \cite{ChrHer17}. 
\subsubsection{$j=3/2$} In this case, the average fidelity \eqref{PexpansionAC} reads \begin{equation} {\cal F}_{|\psi\rangle}(\eta)=\varphi_{0}^{(3/2)}(\eta)+\varphi_{1}^{(3/2)}(\eta)\,\mathcal{A}_{1}\,, \end{equation} with \begin{equation} \begin{aligned}\varphi_{0}^{(3/2)}(\eta)={} & \frac{1}{70}\big(\cos(3\eta)+8\cos(2\eta)+29\cos(\eta)+32\big),\\ \varphi_{1}^{(3/2)}(\eta)={} & \frac{3}{70}\big(3\cos(3\eta)+3\cos(2\eta)-4\cos(\eta)-2\big). \end{aligned} \end{equation} The situation is basically the same as for $j=1$. The first strictly positive zero of the coefficient $\varphi_{1}^{(3/2)}(\eta)$ is found to be $\eta_{0}=\arccos(\frac{-9+\sqrt{21}}{12})$. Hence, in the interval $\eta\in[0,\eta_{0}[$ where $\varphi_{1}^{(3/2)}(\eta)$ is negative, the fidelity ${\cal F}_{|\psi\rangle}(\eta)$ is minimal for $1$-anticoherent states. At the value $\eta=\eta_{0}$, the fidelity takes the same value for all states $|\psi\rangle$, namely, ${\cal F}_{|\psi\rangle}(\eta_{0})=\varphi_{0}^{(3/2)}(\eta_{0})=(33+2\sqrt{21})/80$. Otherwise, ${\cal F}_{|\psi\rangle}(\eta)$ is minimized for coherent states, thereby reproducing earlier results \cite{ChrHer17}. \subsubsection{$j=2$} For $j=2$, the fidelity \eqref{PexpansionAC} is a linear combination of three terms, \begin{equation} {\cal F}_{|\psi\rangle}(\eta)=\varphi_{0}^{(2)}(\eta)+\varphi_{1}^{(2)}(\eta)\,\mathcal{A}_{1}+\varphi_{2}^{(2)}(\eta)\,\mathcal{A}_{2}\,,\label{Petaj2} \end{equation} with the angular functions $\varphi_{k}^{(2)},k=0,1,2$, displayed in Appendix~\ref{Appendix_phi}. They all take negative values in the interval $\eta\in[0,\eta_{0}]$, with $\eta_{0}\approx1.2122$ the first strictly positive zero of $\varphi_{1}^{(2)}(\eta)$. 
The tetrahedron state \begin{equation} \ket{\psi^{\mathrm{tet}}}=\frac{1}{2}\left(\ket{2,-2}+i\sqrt{2}\,\ket{2,0}+\ket{2,2}\right), \label{s2} \end{equation} whose Majorana points lie at the vertices of a regular tetrahedron, is $2$-anticoherent, and for $j=2$ it is the only state (up to LU) with $\mathcal{A}_{1}=\mathcal{A}_{2}=1$~\cite{Bag14}; hence it provides the optimal rotosensor for angles in the interval $\eta\in[0,\eta_{0}]$. For larger angles of rotation between $1.68374$ and $2.44264$, we find numerically that an optimal state is the Schrödinger cat state \begin{equation} \ket{\psi^{\mathrm{cat}}}=\frac{1}{\sqrt{2}}\left(\ket{2,-2}+\ket{2,2}\right)\,,\label{s2GHZ} \end{equation} which is only 1-anticoherent, with $\mathcal{A}_{1}=1$ and $\mathcal{A}_{2}=3/4$. For values $\eta\gtrsim 2.44264$, the optimal state is a coherent state. We thus obtain numerically three intervals with three distinct optimal states corresponding to $(\mathcal{A}_1,\mathcal{A}_2)=(1,1), (1,3/4)$, and $(0,0)$, respectively. In order to find the critical angles, we solve Eq.~\eqref{eqcritical}. The angle $\eta_{1}$ separating the first two regions is a solution of $\varphi_{2}^{(2)}(\eta)=0$. The first positive zero of $\varphi_{2}^{(2)}(\eta)$ is $\eta_{1}=2\arctan(\sqrt{9-2\sqrt{15}})\approx 1.68374$, which coincides with the numerically obtained value. The angle $\eta_{2}$ at which the second and third regions touch is a zero of $\varphi_{1}^{(2)}(\eta)+\tfrac{3}{4}\,\varphi_{2}^{(2)}(\eta)$. Its first strictly positive zero is given by \begin{equation} \eta_{2}=2\arctan\left(\sqrt{-\frac{a+102b}{a-38b}}\right)\,, \end{equation} with $a=19\cdot 6^{2/3}+\sqrt[3]{6}\left(223-35\sqrt{7}\right)^{2/3}$ and $b=\sqrt[3]{223-35\sqrt{7}}$, and we have indeed $\eta_{2}\approx 2.44264$. The results we obtained are summarized in Fig.~\ref{figj2}; they agree with the findings of~\cite{ChrHer17}. 
It is noteworthy that the state \eqref{s2GHZ} is not the only state with anticoherence measures $\mathcal{A}_{1}=1$ and $\mathcal{A}_{2}=3/4$. For instance, any state of the form \begin{equation} \ket{\psi}=\frac{c_1\ket{2,-1}+c_2\ket{2,0}-c_1^*\ket{2,1}}{\sqrt{2|c_1|^2+|c_2|^2}}\, \end{equation} with $c_1\in\mathbb{C}$ and $c_2\in\mathbb{R}$ comes with the same measures of anticoherence, as readily follows from Eq.~\eqref{puritiescm}. These states are thus also optimal in the interval $\eta\in[\eta_{1},\eta_{2}]$, thereby removing the uniqueness of optimal rotosensors observed for $j=1$ and $j=3/2$. \begin{figure} \caption{Average fidelity ${\cal F}_{|\psi\rangle}(\eta)$ for $j=2$.} \label{figj2} \end{figure} \subsection{Rotosensors for $5/2\protect\leqslant j\protect\leqslant7/2$ \label{subsec:ju5o2pto7o2}} \subsubsection{$j=5/2$} For $j=5/2$, there is no anticoherent state of order $2$ but only of order $1$~\cite{Kol08}. Numerical optimization shows that the optimal state for small angles of rotation is the $1$-anticoherent state with the largest measure of $2$-anticoherence, which is given by \begin{equation} \ket{\psi}=\frac{1}{\sqrt{2}}\left(\ket{\tfrac{5}{2},-\tfrac{3}{2}}+\ket{\tfrac{5}{2},\tfrac{3}{2}}\right),\label{s52} \end{equation} and has $\mathcal{A}_{1}=1$ and $\mathcal{A}_{2}=99/100$. This state is found to be optimal up to a critical angle $\eta_{1}\approx1.49697$, which is obtained from Eq.~\eqref{eqcritical} and coincides with the first strictly positive zero of $\varphi_{2}^{(5/2)}(\eta)$. It is worth noting that the optimal state \eqref{s52} was also found to be the most non-classical spin state for $j=5/2$, both in the sense that it maximizes the quantumness~\cite{Gir10} and that it minimizes the cumulative multipole distribution~\cite{Bjo15,BjoGra15}. 
The Majorana constellation of this state defines a triangular bipyramid, which is a spherical $1$-design~\cite{Del77,sloane}, thus corresponding to the arrangement of point charges on the surface of a sphere which minimizes the Coulomb electrostatic potential energy (solution to Thomson's problem for 5 point charges, see~\cite{Sch13}). For larger angles of rotation ranging between $\eta_{1}$ and $\eta_{2}\approx2.2521$, we find that an optimal state is \begin{equation} \ket{\psi^{\mathrm{cat}}}=\frac{1}{\sqrt{2}}\left(\ket{\tfrac{5}{2},-\tfrac{5}{2}}+\ket{\tfrac{5}{2},\tfrac{5}{2}}\right)\,;\label{s5/2GHZ} \end{equation} unlike in the case $j=2$, we found this state for $j=5/2$ to be the only state (up to LU) with $\mathcal{A}_{1}=1$ and $\mathcal{A}_{2}=3/4$. For $\eta\in[\eta_{2},\pi]$, we find that coherent states are optimal. The transition occurs at the first strictly positive zero $\eta_{2}$ of $\varphi_{1}^{(5/2)}(\eta)+\tfrac{3}{4}\,\varphi_{2}^{(5/2)}(\eta)$. Our results are summarized in Fig.~\ref{fig5o2}. \begin{figure} \caption{Average fidelity ${\cal F}_{|\psi\rangle}(\eta)$ for $j=5/2$.} \label{fig5o2} \end{figure} \subsubsection{$j=3$} Anticoherent states of order $3$ do exist for $j=3$. They are all connected by rotation to the octahedron state \begin{equation} \ket{\psi^{\mathrm{oct}}}=\frac{1}{\sqrt{2}}\left(\ket{3,-2}+\ket{3,2}\right),\label{s3} \end{equation} whose Majorana points lie at the vertices of a regular octahedron. Therefore, the state \eqref{s3} is, at small $\eta$, the unique optimal quantum rotosensor (up to LU) for $j=3$. 
Numerical optimization shows that the octahedron state is optimal up to a critical angle $\eta_{1}\approx1.3635$ coinciding with the first strictly positive zero of $\tfrac{1}{4}\,\varphi_{2}^{(3)}(\eta)+\tfrac{1}{3}\,\varphi_{3}^{(3)}(\eta)$, and that, for larger angles, the state \begin{equation} \ket{\psi^{\mathrm{cat}}}=\frac{1}{\sqrt{2}}\left(\ket{3,-3}+\ket{3,3}\right)\label{s3GHZ} \end{equation} with $\mathcal{A}_{1}=1$, $\mathcal{A}_{2}=3/4$ and $\mathcal{A}_{3}=2/3$ is optimal up to a critical angle $\eta_{2}\approx2.04367$ coinciding with the first strictly positive zero of $\varphi_{1}^{(3)}(\eta)+\tfrac{3}{4}\,\varphi_{2}^{(3)}(\eta)+\tfrac{2}{3}\,\varphi_{3}^{(3)}(\eta)$. We found that this is the only spin-$3$ state (up to LU) with $\mathcal{A}_{1}=1$, $\mathcal{A}_{2}=3/4$ and $\mathcal{A}_{3}=2/3$. Coherent states are found to be optimal for angles of rotation in the ranges $[\eta_{2},\eta_{3}]$ and $[\eta_{4},\pi]$ with $\eta_{3}\approx2.35881$ and $\eta_{4}\approx 2.65576$ coinciding with the second and third strictly positive zeros of $\varphi_{1}^{(3)}(\eta)+\varphi_{2}^{(3)}(\eta)+\varphi_{3}^{(3)}(\eta)$. In the range $[\eta_{3},\eta_{4}]$, the octahedron state \eqref{s3} becomes again optimal (although the three functions $\varphi_k^{(3)}$ for $k=1,2,3$ are not simultaneously negative in that range). Our results are displayed in Fig.~\ref{figj3}. \begin{figure} \caption{Average fidelity ${\cal F}_{|\psi\rangle}(\eta)$ for $j=3$.} \label{figj3} \end{figure} \subsubsection{$j=7/2$} This is the smallest spin quantum number for which a smooth variation of the optimal state with $\eta$ is observed, resulting in the complex behaviour displayed in Figs.~\ref{figj72} and \ref{figj72bis}. There are no anticoherent states to order $3$ for $j=7/2$, but there exist anticoherent states to order $2$. The optimal state for small angles of rotation (by which we mean here $\eta\to0$) turns out to be one of those. 
Numerical optimization yields the state \begin{equation} \ket{\psi}=\sqrt{\tfrac{2}{9}}\,\ket{\tfrac{7}{2},-\tfrac{7}{2}}-\sqrt{\tfrac{7}{18}}\,\ket{\tfrac{7}{2},-\tfrac{1}{2}}-\sqrt{\tfrac{7}{18}}\,\ket{\tfrac{7}{2},\tfrac{5}{2}}\label{s72AC} \end{equation} with measures of anticoherence $\mathcal{A}_{1}=\mathcal{A}_{2}=1$ and $\mathcal{A}_{3}=1198/1215$. This is not the state with the highest measure of $3$-anticoherence, as the state \begin{equation} \ket{\psi}=\frac{1}{\sqrt{2}}\left(\ket{\tfrac{7}{2},-\tfrac{5}{2}}+\ket{\tfrac{7}{2},\tfrac{5}{2}}\right), \end{equation} has measures of anticoherence $\mathcal{A}_{1}=1$, $\mathcal{A}_{2}=195/196$ and $\mathcal{A}_{3}=146/147>1198/1215$. The latter state is found to be optimal for $\eta\in[\eta_{1},\eta_{2}]$ with $\eta_{1}\approx0.71718$ (not identified) and $\eta_{2}\approx1.24169$ coinciding with the first strictly positive zero of $\tfrac{12}{49}\,\varphi_{2}^{(7/2)}(\eta)+\tfrac{16}{49}\,\varphi_{3}^{(7/2)}(\eta)$. The state \begin{equation} \ket{\psi^{\mathrm{cat}}}=\frac{1}{\sqrt{2}}\left(\ket{\tfrac{7}{2},-\tfrac{7}{2}}+\ket{\tfrac{7}{2},\tfrac{7}{2}}\right)\label{s72GHZ} \end{equation} with $\mathcal{A}_{1}=1$, $\mathcal{A}_{2}=3/4$ and $\mathcal{A}_{3}=2/3$ is found to be optimal for $\eta\in[\eta_{2},\eta_{3}]$ and $\eta\in[\eta_{4},\eta_{5}]$ with $\eta_{3}\approx1.60141$ and $\eta_{4}\approx1.88334$ coinciding with the third and fourth strictly positive zeros of $\varphi_{1}^{(7/2)}(\eta)$ and $\eta_{5}\approx2.41684$ with the first strictly positive zero of $\varphi_{1}^{(7/2)}(\eta)+\tfrac{3}{4}\,\varphi_{2}^{(7/2)}(\eta)+\tfrac{2}{3}\,\varphi_{3}^{(7/2)}(\eta)$. In the interval $[\eta_{5},\pi]$, coherent states are found to be optimal. 
\begin{figure} \caption{Average fidelity ${\cal F}_{|\psi\rangle}(\eta)$ for $j=7/2$.} \label{figj72} \end{figure} \begin{figure} \caption{Measures of anticoherence ${\cal A}_{t}$ for $j=7/2$.} \label{figj72bis} \end{figure} \subsection{Rotosensors for small rotation angles $\eta$ and arbitrary values of $j$ \label{subsec: Rotosensors small eta any j}} \subsubsection{Angular functions at small angles} According to Secs.~\ref{subsec:jupto2} and \ref{subsec:ju5o2pto7o2}, optimal rotosensors for integer values of spin ($j=1,2,3$) are given by $j$-anticoherent states while for half-integer spin ($j=3/2,5/2,7/2$) the fidelity is optimized by states which are anticoherent of order $t=1,1,2$, respectively, and possess large anticoherence measures ${\cal A}_{t}$ for values of $t$ up to $t=\left\lfloor j\right\rfloor $. This fact can be understood quite generally through the behaviour of the functions $\varphi_{t}^{(j)}(\eta)$ at small $\eta$ for arbitrary values of $j$. In the vicinity of $\eta=0$, the functions $\varphi_{t}^{(j)}(\eta)$ given in Eq.~\eqref{Phimain} take the form \begin{equation} \varphi_{t}^{(j)}(\eta)=\frac{b_{t,t}^{(j)}}{2^{2t}}\,\eta^{2t}+\mathcal{O}(\eta^{2t+2}),\label{phiseries} \end{equation} with coefficients $b_{t,t}^{(j)}$ given by Eq.~\eqref{btk}. These coefficients are strictly negative for all $t\geqslant 1$ and all $j=N/2$, since $a_{t,t}^{(j)}>0$ and $a_{N-t,t}^{(j)}$ is either $0$ for $t<N/2$ or positive for $t=N/2$. This implies that all functions $\varphi_{t}^{(j)}(\eta)$ are negative in some interval around $\eta=0$. Thus, the fidelity ${\cal F}_{|\psi\rangle}(\eta)$ is a linear combination of the $\mathcal{A}_{t}$ with negative coefficients in that interval. Since $0\leqslant\mathcal{A}_{t}\leqslant1$, it follows that if there exists a state with $\mathcal{A}_{t}=1$ for all $t\leqslant\lfloor j\rfloor$---that is, an anticoherent state to order $\lfloor j\rfloor$---then this state provides an optimal quantum rotosensor for $\eta$ in that interval. 
This interval can be made more specific, at least for the lowest values of $j$. Let $\eta_{0}$ denote the first zero of $\varphi_{1}^{(j)}(\eta)$. Numerical results up to $j=85$ indicate that all functions $\varphi_{t}^{(j)}(\eta)$ for $t=1,\ldots,\lfloor j\rfloor$ are negative for $\eta\in[0,\eta_{0}]$, so that an anticoherent state to order $\lfloor j\rfloor$ (if it exists) is optimal in the whole interval $[0,\eta_{0}]$. As shown in Fig.~\ref{etamin}, $\eta_{0}$ is found to scale as $3\pi/(4j)$ for large $j$. A simple explanation for this is that the expansion of the function $\varphi_{1}^{(j)}(\eta)$ as $\sum_{k}a_{k}\cos(k\eta)$ is dominated by the term $a_{2j}\cos(2j\eta)$ (note however that $\eta_0$ is even better approximated by $9/(4j)$). Conversely, the states maximizing ${\cal F}_{|\psi\rangle}(\eta)$ for small angles of rotation are the states with $\mathcal{A}_{t}=0$ for all $t$, i.e.~coherent states. \begin{figure} \caption{First zero $\eta_{0}$ of $\varphi_{1}^{(j)}(\eta)$ as a function of $j$.} \label{etamin} \end{figure} To see whether any general pattern emerges, we now identify optimal small-angle rotosensors for the next few values of the spin quantum numbers. \subsubsection{$j=4$} For $j=4$, there is no anticoherent state to order $t=4$. We find that the optimal state for small angles of rotation is the $3$-anticoherent state \begin{equation} \ket{\psi}=\sqrt{\tfrac{5}{24}}\,\ket{4,-4}-\sqrt{\tfrac{7}{12}}\,\ket{4,0}-\sqrt{\tfrac{5}{24}}\,\ket{4,4}, \end{equation} with $\mathcal{A}_{1}=\mathcal{A}_{2}=\mathcal{A}_{3}=1$ and $\mathcal{A}_{4}=281/288$. \subsubsection{$j=9/2$} For $j=9/2$, there is no anticoherent state to order $t\geqslant3$. The anticoherent states of order $t=2$ with the largest $\mathcal{A}_{3}$ are found to be of the form \begin{equation} \ket{\psi}=\tfrac{\sqrt{13}}{8}\,\ket{\tfrac{9}{2},-\tfrac{9}{2}}+e^{i\chi}\sqrt{\tfrac{15}{32}}\,\ket{\tfrac{9}{2},-\tfrac{1}{2}}-\tfrac{\sqrt{21}}{8}\,\ket{\tfrac{9}{2},\tfrac{7}{2}}, \end{equation} with $\chi\in[0,\pi/2]$. 
Their measures of anticoherence are $\mathcal{A}_{1}=\mathcal{A}_{2}=1$, $\mathcal{A}_{3}=2347/2352$ and $\mathcal{A}_{4}=5\left(355609+175\sqrt{273}\cos(2\chi)\right)/1806336$. Among these states, the one with $\chi=0$ has the largest value of $\mathcal{A}_{4}$ and numerical results suggest that this is the optimal state for small angles of rotation. \subsubsection{$j=5$} For $j=5$, there is no anticoherent state to order $t\geqslant4$. We find that the optimal state for small angles is the $3$-anticoherent state \begin{equation} \ket{\psi}=\sqrt{\tfrac{5}{16}}\,\ket{5,-4}+\sqrt{\tfrac{3}{8}}\,\ket{5,0}-\sqrt{\tfrac{5}{16}}\,\ket{5,4}, \end{equation} with $\mathcal{A}_{1}=\mathcal{A}_{2}=\mathcal{A}_{3}=1$, $\mathcal{A}_{4}=895/896$ and $\mathcal{A}_{5}=1097/1120$. \begin{table*} \begin{centering} \begin{tabular}{|c|c|c|c|} \hline $j$ & $\ket{\psi^{\mathrm{optimal}}}$ & $\mathcal{A}_{t}$ & Interval \tabularnewline \hline \hline $1$ & $\begin{array}{c} \ket{\psi^{\mathrm{cat}}}\\ \mathrm{any~state}\\ |j,j\rangle \end{array}$ & $\begin{array}{c} \mathcal{A}_{1}=1\\ 0\leqslant\mathcal{A}_{1}\leqslant 1\\ \mathcal{A}_{1}=0 \end{array}$ & $\begin{array}{c} \eta\in [0,\eta_{0}[ \\ \eta=\eta_{0} \\ \eta\in [\eta_{0},\pi] \end{array}$ \tabularnewline \hline $3/2$ & $\begin{array}{c} \ket{\psi^{\mathrm{cat}}}\\ \mathrm{any~state}\\ |j,j\rangle \end{array}$ & $\begin{array}{c} \mathcal{A}_{1}=1\\ 0\leqslant\mathcal{A}_{1}\leqslant 1\\ \mathcal{A}_{1}=0 \end{array}$ & $\begin{array}{c} \eta\in [0,\eta_{0}[ \\ \eta=\eta_{0} \\ \eta\in [\eta_{0},\pi] \end{array}$ \tabularnewline \hline $2$ & $\begin{array}{c} \ket{\psi^{\mathrm{tet}}}\\ \ket{\psi^{\mathrm{cat}}}\\ |j,j\rangle \end{array}$ & $\begin{array}{c} \mathcal{A}_{1}=\mathcal{A}_{2}=1 \\ \mathcal{A}_{1}=1,\mathcal{A}_{2}=3/4\\ \mathcal{A}_{1}=\mathcal{A}_{2}=0 \end{array}$ & $\begin{array}{c} \eta\in [0,\eta_{1}], \eta_{1}\approx 1.68374\\ \eta\in [\eta_{1},\eta_{2}]\\ \eta\in [\eta_{2},\pi], 
\eta_{2}\approx 2.44264 \end{array}$\tabularnewline \hline $5/2$ & $\begin{array}{c} \mathrm{Eq}.~\eqref{s52}\\ \ket{\psi^{\mathrm{cat}}}\\ |j,j\rangle \end{array}$ & $\begin{array}{c} \mathcal{A}_{1}=1, \mathcal{A}_{2}=99/100 \\ \mathcal{A}_{1}=1,\mathcal{A}_{2}=3/4\\ \mathcal{A}_{1}=\mathcal{A}_{2}=0 \end{array}$ & $\begin{array}{c} \eta\in [0,\eta_{1}], \eta_{1}\approx 1.49697\\ \eta\in [\eta_{1},\eta_{2}]\\ \eta\in [\eta_{2},\pi], \eta_{2}\approx 2.2521 \end{array}$\tabularnewline \hline $3$ & $\begin{array}{c} \ket{\psi^{\mathrm{oct}}} \\ \ket{\psi^{\mathrm{cat}}}\\ |j,j\rangle \end{array}$ & $\begin{array}{c} \mathcal{A}_{1}=\mathcal{A}_{2}=\mathcal{A}_{3}=1 \\ \mathcal{A}_{1}=1,\mathcal{A}_{2}=3/4 ,\mathcal{A}_{3}=2/3\\ \mathcal{A}_{1}=\mathcal{A}_{2}=\mathcal{A}_{3}=0 \end{array}$ & $\begin{array}{c} \eta\in [0,\eta_{1}]\cup [\eta_{3},\eta_{4}], \eta_{3}\approx 2.35881\\ \eta\in [\eta_{1},\eta_{2}], \eta_{1}\approx 1.3635, \eta_{2}\approx 2.04367\\ \eta\in [\eta_{2},\eta_{3}]\cup [\eta_{4},\pi], \eta_{4}\approx 2.65576 \end{array}$\tabularnewline \hline $7/2$ & $\begin{array}{c} \mathrm{Eq}.~\eqref{s72AC} \\ - \\ \ket{\psi^{\mathrm{cat}}}\\ - \\ |j,j\rangle \end{array}$ & $\begin{array}{c} \mathcal{A}_{1}=\mathcal{A}_{2}=1, \mathcal{A}_{3}=1198/1215 \\ \tfrac{195}{196}\leqslant\mathcal{A}_{2}\leqslant 1, \tfrac{1198}{1215}\leqslant\mathcal{A}_{3}\leqslant \tfrac{146}{147},\, \mathrm{see~Fig.}~\ref{figj72bis} \\ \mathcal{A}_{1}=1,\mathcal{A}_{2}=3/4 ,\mathcal{A}_{3}=2/3\\ \mathrm{see~Fig.}~\ref{figj72} \\ \mathcal{A}_{1}=\mathcal{A}_{2}=\mathcal{A}_{3}=0 \end{array}$ & $\begin{array}{c} \eta \to 0 \\ \eta\in [0,\eta_{1}],\eta_{1}\approx 0.71718\\ \eta\in [\eta_{2},\eta_{3}]\cup [\eta_{4},\eta_{5}], \eta_{2}\approx 1.24169\\ \eta\in [\eta_{3},\eta_{4}], \eta_{3}\approx 1.60141, \eta_{4}\approx 1.88334\\ \eta\in [\eta_{5},\pi], \eta_{5}\approx 2.41684 \end{array}$\tabularnewline \hline \end{tabular} \caption{Summary of the results of 
Secs.~\ref{subsec:jupto2} and \ref{subsec:ju5o2pto7o2} on optimal states for $1\leqslant j \leqslant 7/2$. Here, $\eta_{0}$ denotes the first strictly positive zero of $\varphi_{1}^{(j)}(\eta)$, $\ket{\psi^{\mathrm{tet}}}$ defined for $j=2$ is given by Eq.~\eqref{s2}, $\ket{\psi^{\mathrm{oct}}}$ defined for $j=3$ is given by Eq.~\eqref{s3}, and $\ket{\psi^{\mathrm{cat}}}=\frac{1}{\sqrt{2}}\left(\ket{j,-j}+\ket{j,j}\right)$ for any $j$. The state $|j,j\rangle$ has been taken as an example of coherent state. Note that optimal states given here are not necessarily unique (states not related by a rotation can have the same $\mathcal{A}_t$). \label{tab}} \par\end{centering} \end{table*} \subsubsection{Arbitrary values of $j$} As was mentioned earlier, if an anticoherent state to order $\lfloor j\rfloor$ exists for a given $j$, then this state gives rise to an optimal quantum rotosensor for $\eta\in[0,\eta_{0}]$. This applies to values $j=1,3/2,2$ and $j=3$, which are the only cases where existence of anticoherent states to order $t=\lfloor j\rfloor$ has been established (see e.g.~\cite{Bag15,Bag17}). The situation is less straightforward if such a state is not known to exist from the outset. The only general conclusion one can draw is that minimizing the average fidelity ${\cal F}_{|\psi\rangle}(\eta)$ for a fixed angle $\eta\in[0,\eta_{0}]$ corresponds to maximizing the measures $\mathcal{A}_{t}$ within the domain $\Omega$ (by definition, $\Omega$ is the set of all reachable $\mathcal{A}_{t}$ so that by changing $|\psi\rangle$, we will remain within $\Omega$). In this sense, the more anticoherent a state is, the more sensitive it will be as a quantum rotosensor. In general, varying $|\psi\rangle$ will change all anticoherence measures simultaneously. The challenge is to determine whether a state with given values of the measures $\mathcal{A}_{t}$ exists and, if it does, to identify it. 
The maximal order of anticoherence that a spin-$j$ state can display is generally much smaller than $\lfloor j\rfloor$, typically $t\sim2\sqrt{j}$ for large spins $j$~\cite{Bag15}. Numerical results for $j\lesssim100$ seem to suggest that the pairs $(t,j)$ for which a $t$-anticoherent spin-$j$ state exists coincide with those for which a $2j$-points spherical $t$-design exists in three dimensions~\cite{Grashttp}. The latter have been tabulated up to $j=50$ \cite{sloane}. For example, the first pairs $(t,j)$ for $j\leqslant4$ are given by $(1,1)$, $(1,3/2)$, $(2,2)$, $(1,5/2)$, $(3,3)$, $(2,7/2)$, $(3,4)$. \section{Summary and conclusions \label{sec:Conclusion}} The main result of this work is a closed-form expression \eqref{PexpansionAC} for the fidelity ${\cal F}_{|\psi\rangle}(\eta)$ between a state and its image under a rotation by an angle $\eta$ about an axis ${\bf n}$, averaged over all rotation axes. The expression takes the form of a linear combination of anticoherence measures $\mathcal{A}_{t}$, with explicit $\eta$-dependent coefficients. It follows that not only spin-$j$ states which are related by a global rotation of the axes come with the same average fidelity, but more generally all states with identical purities of their reduced density matrices (calculated for any subset of their $2j$ constituent spin-$1/2$ in the Majorana representation). This gives an explanation for the observation of~\cite{ChrHer17} that optimal states are not necessarily unique. Moreover, since the fidelity is linear in the anticoherence measures, optimal states correspond to values of $\mathcal{A}_{t}$ on the boundary of the domain $\Omega$ of admissible values. This shows the relevance of characterizing the domain $\Omega$. The expression \eqref{PexpansionAC} allows us to characterize states which optimally detect rotations by their degree of coherence or anticoherence. 
At small angles $\eta\leqslant\eta_{0}$, where the coefficients of the measures $\mathcal{A}_{t}$ are all negative, optimality of detection of rotations goes hand in hand with high degrees of anticoherence. For angles close to $\eta=\pi$, however, numerical results support the claim that optimality is achieved throughout by spin coherent states. We also performed a systematic investigation of states minimizing the average fidelity for small values of $j$, for all integers and half-integers from $j=1/2$ to $j=5$. Table~\ref{tab} summarizes our findings for the lowest values of $j$. At small rotation angle, all optimal states were found to have a maximal lowest anticoherence measure: $\mathcal{A}_{1}=1$. These states, which are anticoherent to order $1$, exist for any value of $j$, and one may conjecture that they should, in fact, be optimal for arbitrary values of $j$. More generally, for all values of $j$ investigated and for $\eta\leqslant\eta_{0}$, the optimal states turned out to have, for each $t>1$, the largest admissible anticoherence measure $\mathcal{A}_{t}$ compatible with fixed values of the lower measures $\mathcal{A}_{1},\mathcal{A}_{2},\ldots,\mathcal{A}_{t-1}$. Whether this property holds in general remains an open question. Note that natural generalizations of this problem, such as maximization of the average fidelity, can also be addressed by our approach. For instance, for small rotation angles $\eta\in [0,\eta_0]$, where all $\varphi_t^{(j)}(\eta)$ with $t\geqslant 1$ are negative, the average fidelity is maximal for coherent states. For rotation angles close to $\eta=\pi$, numerical results indicate that the $1$-anticoherent state $\ket{\psi^{\mathrm{cat}}}=\frac{1}{\sqrt{2}}\left(\ket{j,-j}+\ket{j,j}\right)$ is optimal for all $j$ up to $17/2$. \begin{acknowledgments} OG and SW thank the hospitality of the University of Liège, where this work has been initiated. 
\end{acknowledgments} \appendix \section{Fidelity in parameter estimation theory of rotations} \label{Appendix_param} It was shown in~\cite{Goldberg18} that minimizing the uncertainty in the measurement of an \emph{unknown} angle about a \emph{known} rotation axis is equivalent to identifying the states which minimize the fidelity $F_{|\psi\rangle}(\eta,\mathbf{n})$, assuming small rotation angles and using parameter estimation theory. To see this, first expand the fidelity as \begin{equation} |\bra{\psi}R_{\mathbf{n}}(\eta)\ket{\psi}|^{2}=1-\eta^{2}(\Delta J_{\mathbf{n}})^{2}+{\cal O}(\eta^{4})\,,\label{eq: Goldberg fidelity} \end{equation} where $(\Delta J_{\mathbf{n}})^{2}$ is the variance of $J_{\mathbf{n}}\equiv\mathbf{J}\boldsymbol{\cdot}\mathbf{n}$ in the state $|\psi\rangle$. Solving \eqref{eq: Goldberg fidelity} for the angle $\eta$ will, upon measuring the fidelity in any state $|\psi\rangle$, result in an estimate of $\eta$. Second, the accuracy of this value depends on the initial state $|\psi\rangle$: using error propagation, one finds that the variance of the estimator is approximately given by \begin{equation} \left(\Delta\eta\right){}^{2}\approx\frac{1}{\left(2\Delta J_{{\bf n}}\right)^{2}}\,.\label{eq: angle variance} \end{equation} Thus, states $|\psi\rangle$ with \emph{large} values of the variance \emph{$\left(\Delta J_{{\bf n}}\right)^{2}$} are seen to minimize the uncertainty of the angle $\eta$. According to Eq.~\eqref{eq: Goldberg fidelity}, these states also \emph{minimize} the fidelity $F_{|\psi\rangle}(\eta,\mathbf{n})$. Let us generalize the argument to the case in which the rotation axis is \emph{unknown}. We will see that the states producing the most reliable results---i.e.\ the smallest variance in the angle estimator---are those which minimize the average fidelity ${\cal F}_{|\psi\rangle}(\eta)$. 
It is convenient to describe the randomness in the rotation axis in terms of a quantum channel (see for instance \cite{Sidhu+2019} for the use of channels in quantum estimation theory). Suppose we prepare the pure initial state $\rho_{0}=\ket\psi\bra\psi$ and send it through the $\eta$-dependent channel $\Lambda_{\eta}(\cdot)$, \begin{equation} \rho_{\eta}=\Lambda_{\eta}(\rho_{0})=\frac{1}{4\pi}\int_{{{\cal S}^2}}R_{\mathbf{n}}(\eta)\rho_{0}R_{\mathbf{n}}^{\dagger}(\eta)\,d{\bf n}\,,\label{eq: rho eta} \end{equation} which describes rotations by $\eta$ about all possible rotation axes. Next, we measure the projector $\rho_{0}=\ket\psi\bra\psi$. Assuming the rotation angle to be small, $\eta\ll1$, the probability to still find the propagated state $\rho_{\eta}$ in the initial state $\rho_{0}$ is given by \begin{equation} \langle\rho_{0}\rangle_{\eta}=\mathrm{tr}\left[\rho_{0}\rho_{\eta}\right]=1-\eta^{2}V+{\cal O}(\eta^{4})\,,\label{eq: mean of rho_0} \end{equation} where $V$ is the variance of $J_{\mathbf{n}}\equiv\mathbf{J}\boldsymbol{\cdot}\mathbf{n}$, averaged over all directions, \begin{equation} V=\frac{1}{4\pi}\int_{{\cal S}^2}\left(\bra\psi J_{\mathbf{n}}^{2}\ket\psi-\bra\psi J_{\mathbf{n}}\ket\psi^{2}\right)\,d{\bf n}\,.\label{eq: average variance} \end{equation} Using $\rho_{0}^{2}=\rho_{0}$ and the relation \eqref{eq: mean of rho_0}, the variance of the measurement outcomes is found to be \begin{equation} \left(\Delta\rho_{0}\right)_{\eta}^{2}=\mathrm{tr}\left[\rho_{0}^{2}\rho_{\eta}\right]-\left(\mathrm{tr}\left[\rho_{0}\rho_{\eta}\right]\right)^{2}=\eta^{2}V+{\cal O}(\eta^{4})\,.\label{eq: variance of rho_0} \end{equation} Now using again the error propagation formula instrumental in the derivation of Eq.~\eqref{eq: angle variance} about \emph{known }rotation axes\emph{, }its generalization to \emph{unknown }axes is given by \begin{equation} 
\left(\Delta\eta\right){}^{2}=\frac{\left(\Delta\rho_{0}\right)_{\eta}^{2}}{|\partial\langle\rho_{0}\rangle_{\eta}/\partial\eta|^{2}}+{\cal O}(\eta^{2})\approx\frac{1}{4V}\,.\label{eq: eta variance (unknown axes)} \end{equation} This result concludes the argument we wish to provide: it is of physical interest to minimize the average fidelity \begin{equation} {\cal F}_{|\psi\rangle}(\eta)\equiv\mathrm{tr}\left[\rho_{0}\rho_{\eta}\right]\approx1-\eta^{2}V\,,\label{eq: average fidelity (alternative)} \end{equation} since the states which do so are those states which allow one to most accurately estimate a (small) rotation angle about unknown axes. \section{Average fidelity for Dicke states\label{sec: appendix C (Dicke)}} For Dicke states $|j,m\rangle$ (common eigenstates of $\mathbf{J}^{2}$ and $J_{z}$), the average fidelity \eqref{eq: probability} reads \begin{equation} \begin{aligned}{\cal F}_{|j,m\rangle}(\eta) & {}=\frac{1}{4\pi}\int_{\mathcal{S}^{2}}|\bra{j,m}R_{\mathbf{n}}(\eta)\ket{j,m}|^{2}\,d\mathbf{n}\\ & {}=\frac{1}{4\pi}\int_{\mathcal{S}^{2}}|U_{mm}^{j}(\eta,\mathbf{n})|^{2}\,d\mathbf{n} \end{aligned} \end{equation} with $U_{mm}^{j}(\eta,\mathbf{n})\equiv U_{mm}^{j}$ a matrix element of the rotation operator in the angle-axis parametrization given by \begin{equation} U_{mm}^{j}=\frac{\sqrt{4\pi}}{2j+1}\sum_{\lambda,\mu}(-i)^{\lambda}\sqrt{2\lambda+1}\chi_{\lambda}^{j}(\eta)C_{jm\lambda\mu}^{jm}Y_{\lambda}^{m}(\mathbf{n})\label{Umm} \end{equation} where $C_{jm\lambda\mu}^{jm}$ are Clebsch-Gordan coefficients, $Y_{\lambda}^{m}(\mathbf{n})$ are spherical harmonics and $\chi_{\lambda}^{j}(\eta)$ are the generalized characters of order $\lambda$ of the irreducible representations of rank $j$ of the rotation group~\cite{Var88}. 
These are defined by \begin{equation} \chi_{\lambda}^{j}(\eta)=\sqrt{\tfrac{(2j+1)(2j-\lambda)!}{(2j+\lambda+1)!}}\sin^{\lambda}\left(\tfrac{\eta}{2}\right)\left(\tfrac{d}{d\cos\left(\tfrac{\eta}{2}\right)}\right)^{\lambda}\chi^{j}(\eta)\label{chilj} \end{equation} with the characters \begin{equation} \chi^{j}(\eta)=\frac{(4j+2)!!}{2(4j+1)!!}\,P_{2j}^{\big(\tfrac{1}{2},\tfrac{1}{2}\big)}\left(\cos\left(\tfrac{\eta}{2}\right)\right) \label{chij} \end{equation} where $P_{n}^{(\alpha,\beta)}$ are Jacobi polynomials. Taking the modulus squared of \eqref{Umm} and integrating over all directions by using orthonormality of the spherical harmonics, we readily get Eq.~\eqref{PDicke}. \section{Explicit calculation of the angular functions $\varphi_{t}^{(j)}(\eta)$ \label{appexplicit}} \subsection{Matrices $S_{\mu_{1}\mu_{2}\ldots\mu_{N}}$} The matrices $S_{\mu_{1}\mu_{2}\ldots\mu_{N}}$ with $N=2j$ appearing in the expansion \eqref{rhoarbitrary} can be obtained by expanding the $(j,0)$ representation of a Lorentz boost, \begin{equation} \Pi^{(j)}(q)\equiv(q_{0}^{2}-|{\mathbf q}|^{2})^{j}\,e^{-2\theta_{q}\,\hat{{\bf q}}\boldsymbol{\cdot}{\bf J}},\label{Aplorentzboost} \end{equation} with $\theta_{q}=\mathrm{arctanh}(-|{\mathbf q}|/q_{0})$ and $\hat{{\bf q}}={\mathbf q}/|{\mathbf q}|$. This expansion takes the form of a multivariate polynomial in the variables $q_{0},q_{1},q_{2},q_{3}$, \begin{equation} \Pi^{(j)}(q)=(-1)^{2j}q_{\mu_{1}}q_{\mu_{2}}\ldots q_{\mu_{2j}}S_{\mu_{1}\mu_{2}\ldots\mu_{2j}}, \label{Apegaliteweinberg} \end{equation} where the coefficients are the $(N+1)\times(N+1)$ matrices $S_{\mu_{1}\mu_{2}\ldots\mu_{N}}$~\cite{prl}. \subsection{Tensor coordinates of the maximally mixed state} The maximally mixed state $\rho_{0}=\mathbb{1}/(N+1)$ can be expanded along \eqref{rhoarbitrary} with coefficients $x_{\mu_{1}\mu_{2}\ldots\mu_{N}}^{(0)}$. 
The coherent state decomposition of the maximally mixed state, $\rho_{0}=\frac{1}{4\pi}\int_{\mathcal{S}^{2}}\ket{\mathbf{n}}\bra{\mathbf{n}}\,d\mathbf{n}$, yields the identity \begin{equation} x_{\mu_{1}\mu_{2}\ldots\mu_{N}}^{(0)}=\frac{1}{4\pi}\int_{\mathcal{S}^{2}}n_{\mu_{1}}n_{\mu_{2}}\ldots n_{\mu_{N}}\,d\mathbf{n}.\label{identiterho0} \end{equation} Using our convention not to write indices when they are equal to 0, we have, irrespective of spin size, $x_{0}^{(0)}=1$, $x_{aa}^{(0)}=1/3$, $x_{aaaa}^{(0)}=1/5$ and $x_{aabb}^{(0)}=1/15$ for $a\neq b$. More generally, the coefficients of the maximally mixed state are given by the polynomial identity (cf. Eq.~(27) of \cite{prl}) \begin{equation} x_{\mu_{1}\mu_{2}\ldots\mu_{N}}^{(0)}q_{\mu_{1}}\ldots q_{\mu_{N}}=\sum_{k=0}^{j}\frac{\binom{N}{2k}}{2k+1}q_{0}^{N-2k}|{\mathbf q}|^{2k}\,,\label{coorid} \end{equation} which leads to \begin{equation} x_{a_{1}a_{2}\ldots a_{N}}^{(0)}=\frac{1}{N+1}\frac{\binom{j}{p_{1}/2,p_{2}/2,p_{3}/2}}{\binom{N}{p_{1},p_{2},p_{3}}}\,,\label{coorid2} \end{equation} where $p_{i}$ denotes the number of $i$ in $\{a_{1},a_{2},\ldots,a_{N}\}$ and the terms in the fraction are multinomial coefficients (by convention the right-hand side evaluates to zero if some $p_{i}$ is not even). \subsection{Average fidelity in terms of tensor coordinates} According to Eq.~\eqref{Ptot-1}, the average fidelity can be written as a double sum, \begin{equation} \begin{aligned} & {\cal F}_{|\psi\rangle}(\eta)=\sum_{k=0}^{N}(-1)^{N}\frac{q_{0}^{2(N-k)}}{m^{2N}}\\ & \times\hspace{-0.5cm}\sum_{\genfrac{}{}{0pt}{1}{\boldsymbol{\mu}\bm{,}\boldsymbol{\nu}}{2(N-k)\textrm{zeros}}{}}\hspace{-0.5cm}(-1)^{\textrm{nr of 0 in }\boldsymbol{\nu}}x_{\mu_{1}\ldots\mu_{N}\nu_{1}\ldots\nu_{N}}^{(0)}x_{\mu_{1}\ldots\mu_{N}}x_{\nu_{1}\ldots\nu_{N}}\,. 
\end{aligned} \label{Ptot} \end{equation} We now wish to show that the second sum which runs over all strings of indices (between 0 and 3) containing $2(N-k)$ zeros can be evaluated explicitly, leading to the simpler form for ${\cal F}_{|\psi\rangle}(\eta)$ given in Eq.~\eqref{Ptot6} at the end of this section. The sum runs over terms containing $2(N-k)$ zeros, that is, $2k$ non-zero indices. We split it into terms containing $r$ nonzero indices in $\boldsymbol{\mu}$ and $2k-r$ in $\boldsymbol{\nu}$. At fixed $k$ we have \begin{align} & \sum_{\genfrac{}{}{0pt}{1}{\boldsymbol{\mu}\bm{,}\boldsymbol{\nu}}{2(N-k)\textrm{zeros}}{}}\hspace{-0.5cm}(-1)^{\textrm{nr of 0 in }\boldsymbol{\nu}}x_{\mu_{1}\ldots\mu_{N}}x_{\nu_{1}\ldots\nu_{N}}x_{\mu_{1}\ldots\mu_{N}\nu_{1}\ldots\nu_{N}}^{(0)}\nonumber \\ & =\sum_{r=2k-N}^{N}(-1)^{N-2k+r}\binom{N}{r}\binom{N}{2k-r}\times\nonumber \\ & \qquad\times\sum_{a_{i},b_{i}}x_{a_{1}...a_{r}}x_{b_{1}...b_{2k-r}}x_{a_{1}...a_{r}b_{1}...b_{2k-r}}^{(0)}.\label{eq: fixed k sum} \end{align} We now evaluate the sums $\sum_{a_{i},b_{i}}x_{a_{1}...a_{r}}x_{b_{1}...b_{2k-r}}x_{a_{1}...a_{r}b_{1}...b_{2k-r}}^{(0)}$. We may suppose that $r\leqslant 2k-r$. Using \eqref{coorid2}, we see that the nonzero indices $a_{i}$ and $b_{i}$ must occur in pairs. Indices $a_{i}$ are either paired with indices $a_{k}$ or indices $b_{k}$. We can then split the sum according to the number of pairings of the form $(a_{i},b_{i})$ (all other pairings are then within the $a_{i}$ or within the $b_{i}$). Let us first consider the case $k=r$. 
We are going to show that \begin{equation} \begin{aligned} \sum_{a_{i},b_{i}} &{} x_{a_{1}...a_{r}}x_{b_{1}...b_{r}}x_{a_{1}...a_{r}b_{1}...b_{r}}^{(0)} =\\ &{} \lambda_{0}\sum_{a_{i}}x_{a_{1}...a_{r}}^{2} \\ &{} + \lambda_{1}\sum_{a_{i}}\left(\sum_{b}x_{a_{1}...a_{r-2}bb}\right)^{2} \\ &{} + \lambda_{2}\sum_{a_{i}}\left(\sum_{b_{1},b_{2}}x_{a_{1}...a_{r-4}b_{1}b_{1}b_{2}b_{2}}\right)^{2}+\ldots \end{aligned}\label{kequalr} \end{equation} with \begin{equation} \lambda_{q}=\frac{2^{r-2q}r!^{2}}{(2r+1)!}\binom{r}{r-2q,q,q}. \end{equation} We first use the explicit expression \eqref{coorid2} of $x_{a_{1}...a_{r}b_{1}...b_{r}}^{(0)}$ to get an equation equivalent to \eqref{kequalr}, namely \begin{align} & \sum_{c_{i}}x_{c_{1}...c_{r}}x_{c_{r+1}...c_{2r}}\frac{\binom{2r}{r}\binom{r}{p_{1}/2\,p_{2}/2\,p_{3}/2}}{\binom{2r}{p_{1}\,p_{2}\,p_{3}}}=2^{r}\sum_{a_{i}}x_{a_{1}...a_{r}}^{2}\nonumber \\ & +2^{r-2}\binom{r}{r-2,1,1}\sum_{a_{i}}\left(\sum_{b}x_{a_{1}...a_{r-2}bb}\right)^{2}+\cdots\nonumber \\ & +2^{r-2q}\binom{r}{r-2q,q,q}\sum_{a_{i}}\left(\sum_{b}x_{a_{1}...a_{r-2q}b_{1}b_{1}...b_{q}b_{q}}\right)^{2}\nonumber \\ & +\cdots,\label{kequalr2} \end{align} where $p_{i}$ is the number of $i$ in $\{c_{1},c_{2},\ldots,c_{2r}\}$ and terms with $p_{i}$ odd are zero. In order to prove Eq.~\eqref{kequalr2}, we just observe that it represents two different ways of counting the same quantity. Indeed, let $\eta_{i}=\{a_{i},\epsilon_{i},\epsilon_{i}'\}$ for $1\leqslant i\leqslant r$ be triplets with $1\leqslant a_{i}\leqslant3$ and $0\leqslant\epsilon_{i},\epsilon_{i}'\leqslant1$. To a given set $\bm{\eta}=\{\eta_{1},\ldots,\eta_{r}\}$ we associate a term of the form $x_{c_{1}...c_{r}}y_{c_{r+1}...c_{2r}}$ where the $c_{i}$ occur in pairs $(a_{1},a_{1}),(a_{2},a_{2}),\ldots,(a_{r},a_{r})$. 
In a pair $(a_{i},a_{i})$, the first $a_{i}$ is assigned to be an index of $x$ if $\epsilon_{i}=0$, of $y$ if $\epsilon_{i}=1$ (and similarly the second $a_{i}$ in the pair is an index of $x$ if $\epsilon_{i}'=0$, of $y$ otherwise). For instance, $\eta=(a,0,0)$ corresponds to a term $x_{aa\ldots}y_{\ldots}$ and $\eta=(a,0,1)$ corresponds to a term $x_{a\ldots}y_{a\ldots}$. In order that $x$ and $y$ have the same number $r$ of indices we need to have $\sum_{i}(\epsilon_{i}+\epsilon_{i}')=r$, so that among the $\epsilon_{i},\epsilon_{i}'$ there are $r$ 0's and $r$ 1's. Each $\bm{\eta}=\{\eta_{1},\ldots,\eta_{r}\}$ such that $\sum_{i}(\epsilon_{i}+\epsilon_{i}')=r$ then corresponds to a unique term of the form $x_{c_{1}...c_{r}}y_{c_{r+1}...c_{2r}}$. Consider now, for some $q\leqslant r$, all $\bm{\eta}$ with $\sum_{i}(\epsilon_{i}+\epsilon_{i}')=r$ for which $\epsilon_{i}=\epsilon_{i}'=0$ for exactly $q$ values of $i$. These correspond to terms $x_{c_{1}...c_{r}}y_{c_{r+1}...c_{2r}}$ such that exactly $q$ pairs $(a_i,a_i)$ appear as indices of $x$, $q$ pairs appear as indices of $y$, and $r-2q$ are distributed over $x$ and $y$, i.e.~terms of the form $x_{a_{1}a_2...a_{r-2q}b_1b_1 b_2b_2\ldots b_qb_q}y_{a_{1}a_2...a_{r-2q}c_1c_1 c_2c_2\ldots c_qc_q}$. Replacing $y$ by $x$, all these terms are those appearing in the right-hand side of \eqref{kequalr2}. In fact, each sum on the right-hand side of \eqref{kequalr2} can be interpreted as the sum over all $\eta_{i}$ such that $\sum_{i}(\epsilon_{i}+\epsilon_{i}')=r$ and $\epsilon_{i}=\epsilon_{i}'=0$ for exactly $q$ values of $i$. For instance the first term on the right-hand side of \eqref{kequalr2} corresponds to terms $q=0$, where all pairs $(a_{i},a_{i})$ are distributed over the two different strings of indices (and then of course replacing $y$ by $x$). 
The prefactors correspond to the ways of choosing the positions of a given set of pairs: the multinomial coefficient corresponds to the choice of positions of the indices among the $r$ indices of $x_{a_{1}a_2...a_{r-2q}b_1b_1 b_2b_2\ldots b_qb_q}$. The factor $2^{r-2q}$ corresponds to choosing between $x$ and $y$ for the $r-2q$ indices $a_i$ which are distributed over $x$ and $y$. The same sum can be expressed as the left-hand side of \eqref{kequalr2} if we now first sum over all strings $c_{1}\leqslant c_{2}\leqslant\cdots\leqslant c_{2r}$, which implies dividing by the number of permutations $\binom{2r}{p_{1},p_{2},p_{3}}$, then consider all possible positions of the $a_{i}$ over the $r$ pairs, which implies multiplying by the number of permutations of the pairs $\binom{r}{p_{1}/2,p_{2}/2,p_{3}/2}$, and finally choose the $r$ entries among the $\epsilon_{i}$ and $\epsilon_{i}'$ that will take the value 0, hence the factor $\binom{2r}{r}$. Thus \eqref{kequalr2} holds, which proves \eqref{kequalr}. The tracelessness condition \eqref{traceless} then allows us to reduce the sums over $b$ in \eqref{kequalr} to invariants $\kappa_{r}$. 
We simply get \begin{equation} \begin{aligned} & \sum_{a_{i},b_{i}}x_{a_{1}...a_{r}}x_{b_{1}...b_{r}}x_{a_{1}...a_{r}b_{1}...b_{r}}^{(0)}\\ & \qquad=\lambda_{0}\kappa_{r}+\lambda_{1}\kappa_{r-2}+\lambda_{2}\kappa_{r-4}+\ldots \end{aligned} \label{eq: reduce sum} \end{equation} Following exactly the same procedure from \eqref{kequalr} to \eqref{eq: reduce sum} in the case where the strings of indices of $x$ and $y$ have different lengths, we obtain the more general expression \begin{align} & \sum_{a_{i},b_{i}}x_{a_{1}...a_{r}}x_{b_{1}...b_{2k-r}}x_{a_{1}...a_{r}b_{1}...b_{2k-r}}^{(0)}\nonumber \\ & =\frac{r!(2k-r)!}{(2k+1)!}\sum_{q=0}^{\lfloor\frac{r}{2}\rfloor}2^{r-2q}\binom{k}{r-2q,q,q+k-r}\kappa_{r-2q}.\label{kequalrgen} \end{align} From \eqref{Ptot} we finally get \begin{widetext} \begin{equation} \begin{aligned} {\cal F}_{|\psi\rangle}(\eta)={}&\sum_{k=0}^{N}(-1)^{k}\sin^{2k}\left(\frac{\eta}{2}\right)\cos^{2(N-k)}\left(\frac{\eta}{2}\right)\sum_{r=0}^{2k}(-1)^{r}\frac{N!^{2}}{(N-r)!(N-2k+r)!(2k+1)!}\\ & \times \sum_{q=0}^{\lfloor\frac{r}{2}\rfloor}2^{r-2q}\binom{k}{r-2q,q,q+k-r}\kappa_{r-2q}. \end{aligned}\label{Ptot2} \end{equation} Changing the summation over $r$ to a summation over $s=r-2q$, we get \begin{equation} \begin{aligned} {\cal F}_{|\psi\rangle}(\eta)={}&\sum_{k=0}^{N}(-1)^{k}\sin^{2k}\left(\frac{\eta}{2}\right)\cos^{2(N-k)}\left(\frac{\eta}{2}\right) \sum_{s=0}^{2k}(-2)^{s}\kappa_{s} \\ & \times\sum_{q=0}^{k-\lfloor\frac{s+1}{2}\rfloor} \binom{N}{s+2q} \binom{N}{2k-2q-s} \frac{(s+2q)!(2k-2q-s)!}{(2k+1)!}\binom{k}{s,q,k-q-s}. 
\end{aligned}\label{Ptot2b} \end{equation} Because of the multinomial coefficient at the end of \eqref{Ptot2b}, the sum over $s$ can be restricted to $s\leqslant k$ and the sum over $q$ to $q\leqslant k-s$, yielding \begin{equation} \begin{aligned} {\cal F}_{|\psi\rangle}(\eta)={}&\sum_{k=0}^{N}(-1)^{k}\sin^{2k}\left(\frac{\eta}{2}\right)\cos^{2(N-k)}\left(\frac{\eta}{2}\right)\frac{N!^{2}k!}{(2k+1)!}\\ & \times \sum_{s=0}^{k}\frac{(-2)^{s}}{s!(2N-2k)!(k-s)!}\sum_{q=0}^{k-s}\binom{2N-2k}{N-s-2q}\binom{k-s}{q}\kappa_{s}. \end{aligned}\label{Ptot3} \end{equation} Grouping the $\kappa_{s}$ together by changing the order of the sum we get \begin{equation} \begin{aligned} {\cal F}_{|\psi\rangle}(\eta)={}& N!^{2}\sum_{s=0}^{N}\frac{(-2)^{s}\kappa_{s}}{s!}\sum_{k=s}^{N}(-1)^{k}\sin^{2k}\left(\frac{\eta}{2}\right)\cos^{2(N-k)}\left(\frac{\eta}{2}\right)\frac{k!}{(2k+1)!}\\ & \times \sum_{q=0}^{k-s}\frac{1}{(N-s-2q)!(N-2k+s+2q)!(k-s-q)!q!}. \end{aligned}\label{Ptot4} \end{equation} Because of the sum over $q$ from 0 to $k-s$, we can make the sum over $k$ start at 0. We then use \eqref{invrel} to express the $\kappa_{s}$ in terms of $\mathrm{tr}\left[\rho_{t}^{2}\right]$. This gives \begin{equation} \begin{aligned} {\cal F}_{|\psi\rangle}(\eta) ={}& N!^{2}\sum_{t=0}^{N}\frac{(-2)^{t}}{t!}\mathrm{tr}\left[\rho_{t}^{2}\right]\sum_{k=0}^{N}(-1)^{k}\sin^{2k}\left(\frac{\eta}{2}\right)\cos^{2(N-k)}\left(\frac{\eta}{2}\right)\frac{k!}{(2k+1)!}\\ & \times\sum_{s=t}^{N}\frac{2^{s}}{(s-t)!}\sum_{q=0}^{k-s}\frac{1}{(N-s-2q)!(N-2k+s+2q)!(k-s-q)!q!}. \end{aligned}\label{Ptot5} \end{equation} It turns out that the sums in the second line of this expression can be performed. Indeed, the identity \begin{equation} \sum_{s=t}^{N}\frac{2^{s}}{(s-t)!}\sum_{q=0}^{k-s}\frac{1}{(N-s-2q)!(N-2k+s+2q)!(k-s-q)!q!}=\frac{2^{t}(2N-2t)!}{(N-t)!^{2}(k-t)!(2N-2k)!}\label{idcompliquee} \end{equation} holds for arbitrary $N,t,k$. This can be proved as follows. 
First change variables $N\to N-t$, $k\to k-t$ and $s\to s-t$, so that showing \eqref{idcompliquee} amounts to showing \begin{equation} \sum_{s=0}^{k}\frac{2^{s}}{s!}\sum_{q=0}^{k-s}\frac{1}{(N-s-2q)!(N-2k+s+2q)!(k-s-q)!q!}=\frac{(2N)!}{N!^{2}k!(2N-2k)!}\label{idcompliquee2} \end{equation} (the upper bound of the sum over $s$ can be changed from $N$ to $k$ since terms $s>k$ do not contribute). Equation~\eqref{idcompliquee2} can be rewritten \begin{equation} \sum_{s=0}^{k}\sum_{q=0}^{k-s}2^{s}\binom{k}{s}\binom{k-s}{q}\binom{2N-2k}{N-s-2q}=\binom{2N}{N}.\label{idcompliquee3} \end{equation} Such an identity can be proven by writing $(1+x)^{2N}=(1+2x+x^{2})^{k}(1+x)^{2N-2k}$ for any $k$ and any $x$, and expanding the first factor using multinomial coefficients and the second one using binomial coefficients: \begin{align*} (1+x)^{2N}= & (1+2x+x^{2})^{k}(1+x)^{2N-2k}\\ & =\sum_{s,q}\binom{k}{s,q,k-s-q}(2x)^{s}(x^{2})^{q}\sum_{u}\binom{2N-2k}{u}x^{u}\\ & =\sum_{s,q,u}2^{s}\binom{k}{s}\binom{k-s}{q}\binom{2N-2k}{u}x^{u+s+2q} \end{align*} (the boundaries of the sums are taken care of by the binomial coefficients which vanish outside a certain range of parameters). Identifying the coefficients of the term in $x^{N}$ readily gives \eqref{idcompliquee3}. 
Using \eqref{idcompliquee}, Eq.~\eqref{Ptot5} finally reduces to \begin{equation} {\cal F}_{|\psi\rangle}(\eta)=\frac{1}{2N+1}\frac{1}{\binom{2N}{N}}\sum_{t=0}^{N}(-4)^{t}\binom{2N-2t}{N-t}\mathrm{tr}\left[\rho_{t}^{2}\right]\sum_{k=0}^{N}(-1)^{k}\sin^{2k}\left(\frac{\eta}{2}\right)\cos^{2(N-k)}\left(\frac{\eta}{2}\right)\binom{2N+1}{2k+1}\binom{k}{t}.\label{Ptot6} \end{equation} \section{Angular functions for $j=2$} \label{Appendix_phi} Evaluating the expression \eqref{Phimain} for $j=2$ leads to these three angular functions: \begin{equation} \begin{aligned} \varphi_{0}^{(2)}(\eta)={} & \frac{1}{315}\left(130\cos(\eta)+46\cos(2\eta)+10\cos(3\eta)+\cos(4\eta)+128\right),\\ \varphi_{1}^{(2)}(\eta)={} & -\frac{4}{315}\left(10\cos(\eta)-11\cos(2\eta)+16\cos(3\eta)-20\cos(4\eta)+5\right),\\ \varphi_{2}^{(2)}(\eta)={} & -\frac{64}{105}\sin^{4}\left(\frac{\eta}{2}\right)(10\cos(\eta)+5\cos(2\eta)+6)\,. \end{aligned} \label{varphij2} \end{equation} \section{Sample code} \label{code} We give here a short sample code written in Mathematica\textsuperscript{\texttrademark} to find an optimal state for $j=5/2$ and $\eta=0.5$.\vspace*{2pt} \begin{lstlisting}[language=Mathematica, mathescape] (* Angular functions, see Eqs. (42), (45) and (44) *) a[n_,t_,k_]:=((-1)^(k+t)*4^t)/(2*k+1)*(Binomial[2*n,2*k]*Binomial[2*n-2*t,n-t] Binomial[k,t])/Binomial[2*n,n]; b[n_,t_,k_]:=If[t==0,Binomial[n,k]/(1+2*k),-(t/(t+1))*If[t==n/2,1/2,1]*(a[n,t,k]+a[n,n-t,k])]; phi[n_,t_,eta_]:=Sum[Sin[eta/2]^(2*k)*Cos[eta/2]^(2*(n-k))*b[n,t,k],{k,0,n}]; (* Measures of anticoherence of a pure state of the form (25), see Eqs. (10) and (27) *) A[cm__,n_,t_]:=(t+1)/t*(1-Sum[Abs[Sum[Conjugate[cm[[k+l+1]]]*cm[[k+q+1]]*Sqrt[Binomial[k+l,k]*Binomial[n-k-l,t-l]*Binomial[k+q,k]*Binomial[n-k-q,t-q]]/Binomial[n,t],{k,0,n-t}]^2],{q,0,t},{l,0,t}]) (* Average fidelity, see Eq. 
(3) *) F[cm__,n_,eta_]:=phi[n,0,eta]+Sum[phi[n,t,eta]*A[cm,n,t],{t,1,Floor[n/2]}]; (* Normalized state expressed in the Dicke basis for j=5/2 *) j=5/2; n=2*j; cm=Normalize@(Array[r,n+1,0]+I*Array[i,n+1,0]); (* Search for an optimal state for eta=0.5 *) eta=0.5; f=Simplify[ComplexExpand[F[cm,n,eta]]]; sol=NMinimize[f,Array[r,n+1,0]~Join~Array[i,n+1,0],AccuracyGoal->25,PrecisionGoal->25]; (* Minimal average fidelity *) Re@sol[[1]] (* Optimal state in the Dicke basis *) cmsol=cm/.sol[[2]] (* Measures of anticoherence of order 1 and 2 *) A[cmsol,n,1] A[cmsol,n,2] \end{lstlisting} \end{widetext} \noindent The evaluation of the code with Mathematica 12.0 yields the output \begin{lstlisting}[language=Mathematica, mathescape] 0.453337 {0.189461+0.48194 I, -0.155904-0.0488534 I, 0.0828666+0.00440845 I, 0.374917+0.583967 I, 0.257018-0.0504367 I, -0.165557+0.347373 I} 1. 0.99 \end{lstlisting} The state that is found, with measures of anticoherence $\mathcal{A}_{1}=1$ and $\mathcal{A}_{2}=99/100$, can be shown to be related by a rotation to the state \eqref{s52}. \end{document}
\begin{document} \title { \large \textbf{Burrows-Wheeler transform for terabases} } \author{ Jouni Sirén\thanks{This work was supported by the Wellcome Trust grant [098051].} \\[0.5em] {\small\begin{minipage}{\linewidth}\begin{center} \begin{tabular}{c} Wellcome Trust Sanger Institute \\ Wellcome Genome Campus \\ Hinxton, Cambridge, CB10 1SA, UK \\ \url{[email protected]} \end{tabular} \end{center}\end{minipage}} } \maketitle \thispagestyle{empty} \begin{abstract} In order to avoid the reference bias introduced by mapping reads to a reference genome, bioinformaticians are investigating reference-free methods for analyzing sequenced genomes. With large projects sequencing thousands of individuals, this raises the need for tools capable of handling terabases of sequence data. A key method is the Burrows-Wheeler transform (BWT), which is widely used for compressing and indexing reads. We propose a practical algorithm for building the BWT of a large read collection by merging the BWTs of subcollections. With our 2.4 Tbp datasets, the algorithm can merge 600 Gbp/day on a single system, using 30 gigabytes of memory overhead on top of the run-length encoded BWTs. \end{abstract} \Section{Introduction} The decrease in the cost of DNA sequencing has flooded the world with \emph{sequence data}. The \emph{1000 Genomes Project} \cite{1000GP2015} sequenced the genomes of over 2500 humans, and there are other projects that are similar or greater in scale. A sequencing machine produces a large number of \emph{reads} (short sequences) that cover the genome many times over. For a 3~Gbp human genome, the total length of the reads is often 100~Gbp or more. \emph{De novo assembly} of sequenced genomes is still too difficult to be routinely done. As a practical alternative, bioinformaticians usually align the reads to a \emph{reference genome} of the same species. 
Because most reference genomes come from the genomes of a small number of individuals, this introduces \emph{reference bias}, which may adversely affect the results of subsequent analysis. Switching from reference sequences to \emph{reference graphs} can reduce the bias, but such a transition will likely take years \cite{Church2015}. Preprocessing large datasets can take weeks. It is often not feasible to rebuild everything when new methods of analysis require new functionalities. Structures based on the \emph{Burrows-Wheeler transform} (\textsf{BWT}) are often useful due to their versatility. A \emph{run-length encoded} \textsf{BWT}{} compresses repetitive sequence collections quite well \cite{Maekinen2010}, while the similarities to the suffix tree and the suffix array make \textsf{BWT}-based indexes suitable for many \emph{pattern matching} and \emph{sequence analysis} tasks \cite{Ohlebusch2013,Maekinen2015}. The \emph{Read Server} project at the Sanger Institute develops tools for large-scale \emph{reference-free} genome analysis, avoiding reference bias. Unique reads are compressed and indexed using the \textsf{BWT}, while metadata databases contain information on the original reads. Initially, the project works with the low-coverage and exome data from phase 3 of the 1000 Genomes Project. After error correction and trimming the reads to either 73~bp or 100~bp, the 922~billion original reads (86~Tbp) are reduced to 53.0~billion unique sequences (4.88~Tbp). These sequences are stored in 16 \textsf{BWT}-based indexes \cite{Ferragina2005a} taking a total of 561.5~gigabytes. The unique reads are partitioned between the \textsf{BWT}{}s by the last two bases. Every query must be repeated in all 16 indexes. The \textsf{BWT}{}s also require more space, as we cannot compress the similarities between the reads in different indexes. Reducing the number of indexes would improve both memory usage and query performance. 
This requires \textsf{BWT}{} construction algorithms that can handle terabases of data. There are four often contradictory requirements for large-scale \textsf{BWT}{} construction: \textbf{Speed.} Larger datasets require faster algorithms. As a rough guideline, an algorithm processing 1~Mbp/s is good for up to 100~Gbp, while remaining somewhat useful until 1~Tbp of data. \textbf{Memory.} We may have to process $n$~bp datasets on systems with less than $n$~bits of memory. \textbf{Hardware.} A single node in a typical computer cluster has tens of CPU cores, from tens to hundreds of gigabytes of memory, a limited amount of local disk space, and access to shared disk space with no performance guarantees. Algorithms using a GPU or a large amount of fast disk space require special-purpose hardware. \textbf{Efficiency.} Large \textsf{BWT}{}s can be built by doing a lot of redundant work on multiple nodes. As most computer clusters do not have large amounts of unused capacity, such inefficient algorithms are not suitable for repeated use. The most straightforward approach to \textsf{BWT}{} construction is to build a \emph{suffix array} using a fast general-purpose algorithm \cite{Mori2008,Nong2011}, and then derive the \textsf{BWT}{} from the suffix array. These algorithms cannot be used with large datasets, as they require much more memory than the sequences themselves. Suffix arrays can be built on disk \cite{Gonnet1992}, but even the fastest algorithms cannot index the data faster than 1\nobreakdash--2~Mbp/s \cite{Bingmann2013,Kaerkkaeinen2014a,Nong2014,Nong2015,Kaerkkaeinen2015a,Liu2015a}. There are many \emph{direct} \textsf{BWT}{} construction algorithms that do not need the suffix array. Some require a limited amount of working space on top of the \textsf{BWT}{} \cite{Hon2007,Kaerkkaeinen2007,Siren2009,Okanohara2009}, while others use the disk as additional working space \cite{Ferragina2012,Beller2013}. 
These general-purpose algorithms rarely exceed 1\nobreakdash--2~Mbp/s. \emph{Specialized algorithms} for DNA sequences achieve better time/space trade-offs. Some can index 5\nobreakdash--10~Mbp/s using ordinary hardware, with their memory usage becoming the bottleneck after about 1~Tbp \cite{Bauer2013,Li2014a}. GPU-based algorithms are even faster, but their memory usage is also higher \cite{Liu2014,Pantaleoni2014}. Distributing the \textsf{BWT}{} construction to multiple nodes can remove the obvious bottlenecks, at the price of using more resources for the construction \cite{Wang2015}. In this paper, we propose a practical algorithm for building the \textsf{BWT}{} for terabases of sequence data. The algorithm is based on dividing the sequence collection into a number of subcollections, building the \textsf{BWT}{} for each subcollection, and \emph{merging} the \textsf{BWT}{}s into a single structure \cite{Siren2009}. The merging algorithm is faster than \textsf{BWT}{} construction for the subcollections, while having a relatively small memory overhead on top of the final \textsf{BWT}-based index. As the index must be loaded in memory for use, it can be built on the same system as it is going to be used. \Section{Background} A \emph{string} $S[1,n] = s_{1} \dotsm s_{n}$ is a sequence of \emph{characters} over an \emph{alphabet} $\Sigma = \set{1, \dotsc, \sigma}$. For indexing purposes, we consider \emph{text} strings $T[1,n]$ terminated by an endmarker $T[n] = \$ = 0$ not occurring elsewhere in the text. \emph{Binary} sequences are strings over the alphabet $\set{0, 1}$. A \emph{substring} of string $S$ is a sequence of the form $S[i,j] = s_{i} \dotsm s_{j}$. We call substrings of the type $S[1,j]$ and $S[i,n]$ \emph{prefixes} and \emph{suffixes}, respectively. The \emph{suffix array} (\textsf{SA}) \cite{Manber1993} is a simple full-text index. 
Given a text $T$, its suffix array $\ensuremath{\mathsf{SA}}_{T}[1,n]$ is an array of pointers to the suffixes of the text in \emph{lexicographic order}.\footnote{If the text is evident from the context, we will omit the subscript and write just \textsf{SA}{}, \textsf{BWT}{}, etc.} We can build the suffix array in $\ensuremath{\mathsf{O}}(n)$ time using $2n$ bits of working space on top of the text and the suffix array \cite{Nong2011}. Given a \emph{pattern} $P$, we can find the \emph{lexicographic range} $[sp,ep]$ of suffixes prefixed by the pattern in $\ensuremath{\mathsf{O}}(\abs{P} \log n)$ time. The range of pointers $\ensuremath{\mathsf{SA}}[sp,ep]$ lists the \emph{occurrences} of the pattern in the text. The suffix array requires several times more memory than the original text. For large texts, this can be a serious drawback. We can use the \emph{Burrows-Wheeler transform} (\textsf{BWT}) \cite{Burrows1994} as a more space-efficient alternative to the suffix array. The \textsf{BWT}{} is an easily reversible permutation of the text with a similar combinatorial structure to the suffix array. Given a text $T[1,n]$ and its suffix array, we can easily produce the \textsf{BWT}{} as $\ensuremath{\mathsf{BWT}}[i] = T[\ensuremath{\mathsf{SA}}[i]-1]$ (with $\ensuremath{\mathsf{BWT}}[i] = T[n]$, if $\ensuremath{\mathsf{SA}}[i] = 1$). If $X \le Y$ in lexicographic order, we also have $cX \le cY$ for any character $c$. If $\ensuremath{\mathsf{BWT}}[i] = c$ is the $j$\nobreakdash-th occurrence of $c$ in the \textsf{BWT}{} and $\ensuremath{\mathsf{SA}}[i]$ points to suffix $X$, suffix $cX$ is the $j$\nobreakdash-th suffix starting with $c$ in lexicographic order. Let $\ensuremath{\mathsf{C}}[c]$ be the number of suffixes starting with a character smaller than $c$, and let $S.\ensuremath{\mathsf{rank}}(i,c)$ be the number of occurrences of $c$ in the prefix $S[1,i]$. 
We define \textsf{LF}-\emph{mapping} as $\ensuremath{\mathsf{LF}}(i,c) = \ensuremath{\mathsf{C}}[c] + \ensuremath{\mathsf{BWT}}.\ensuremath{\mathsf{rank}}(i, c)$ and $\ensuremath{\mathsf{LF}}(i) = \ensuremath{\mathsf{LF}}(i,\ensuremath{\mathsf{BWT}}[i])$. The general form $\ensuremath{\mathsf{LF}}(i,c)$ is the number of suffixes $X$ of text $T$ with $X \le cT[\ensuremath{\mathsf{SA}}[i],n]$. This is known as the \emph{lexicographic rank} $\ensuremath{\mathsf{rank}}(cT[\ensuremath{\mathsf{SA}}[i],n],T)$ of text $cT[\ensuremath{\mathsf{SA}}[i],n]$ among the suffixes of text $T$. The specific form $\ensuremath{\mathsf{LF}}(i)$ gives the lexicographic rank of the previous suffix ($\ensuremath{\mathsf{SA}}[\ensuremath{\mathsf{LF}}(i)] = \ensuremath{\mathsf{SA}}[i]-1$, if $\ensuremath{\mathsf{SA}}[i] > 1$, and $\ensuremath{\mathsf{SA}}[\ensuremath{\mathsf{LF}}(i)] = n$ otherwise). The \emph{FM-index} (\textsf{FMI}) \cite{Ferragina2005a} is a full-text index based on the \textsf{BWT}{}. We use \emph{backward searching} in the FM-index to find the lexicographic range $[sp,ep]$ matching pattern $P$. Let $[sp_{i},ep_{i}]$ be the range of suffixes of text $T$ matching suffix $P[i, \abs{P}]$ of the pattern. We find $[sp_{i-1},ep_{i-1}]$ as $[\ensuremath{\mathsf{LF}}(sp_{i}-1, P[i-1]) + 1, \ensuremath{\mathsf{LF}}(ep_{i}, P[i-1])]$. By starting from $[sp_{\abs{P}}, ep_{\abs{P}}] = [\ensuremath{\mathsf{C}}[P[\abs{P}]]+1, \ensuremath{\mathsf{C}}[P[\abs{P}]+1]]$, we can find the lexicographic range of suffixes starting with the pattern in $\ensuremath{\mathsf{O}}(\abs{P} \cdot t_{r})$ time, where $t_{r}$ is the time required to answer \textsf{rank}{} queries on the \textsf{BWT}. In practice, the time complexity ranges from $\ensuremath{\mathsf{O}}(\abs{P})$ to $\ensuremath{\mathsf{O}}(\abs{P} \log n)$, depending on the encoding of the \textsf{BWT}{}. The FM-index \emph{samples} some suffix array pointers, including the one to the beginning of the text. 
When unsampled pointers are needed, they are derived by using \textsf{LF}\nobreakdash-mapping. If $\ensuremath{\mathsf{SA}}[i]$ is not sampled, the FM-index proceeds to $\ensuremath{\mathsf{LF}}(i)$ and continues from there. If $\ensuremath{\mathsf{SA}}[\ensuremath{\mathsf{LF}}^{k}(i)]$ is the first sample encountered, $\ensuremath{\mathsf{SA}}[i] = \ensuremath{\mathsf{SA}}[\ensuremath{\mathsf{LF}}^{k}(i)] + k$. Depending on the way the samples are selected, we may need a binary sequence to mark the pointers that have been sampled. Assume that we have an ordered \emph{collection} of texts $\ensuremath{\mathcal{A}} = (T_{1}, \dotsc, T_{m})$ of total length $n = \abs{\ensuremath{\mathcal{A}}} = \sum_{i} \abs{T_{i}}$. We want to build a (generalized) \textsf{BWT}{} for the collection. The usual way is to make all endmarkers distinct, giving the one at the end of text $T_{i}$ character value $(0,i)$. This also makes all suffixes of the collection distinct. To save space, we still encode each endmarker as a $0$ in the \textsf{BWT}{}. Because of this, \textsf{LF}\nobreakdash-mapping does not work with $c = 0$, and we cannot match patterns spanning text boundaries. When the texts are short (e.g.~reads), there are more space-efficient alternatives to sampling. Because all endmarkers have distinct values during sorting, we know that $\ensuremath{\mathsf{SA}}[i]$ with $i \le m$ points to the end of text $T_{i}$. To find the end, we iterate $\Psi(i) = \ensuremath{\mathsf{BWT}}.\ensuremath{\mathsf{select}}(i - \ensuremath{\mathsf{C}}[c], c)$, where $c$ is the largest value with $\ensuremath{\mathsf{C}}[c] < i$ and $S.\ensuremath{\mathsf{select}}(i,c)$ finds the $i$\nobreakdash-th occurrence of character $c$ in string $S$. If $k \ge 0$ is the smallest value for which $j = \Psi^{k}(i) \le m$, we know that $\ensuremath{\mathsf{SA}}[i]$ points to offset $\abs{T_{j}} - k$ in text $T_{j}$. 
We can \emph{extract} text $T_{i}$ in $\ensuremath{\mathsf{O}}(\abs{T_{i}} \cdot t_{r})$ time by using \textsf{LF}\nobreakdash-mapping \cite{Burrows1994}. We start from the endmarker at $\ensuremath{\mathsf{BWT}}[i]$ and extract the text backwards as $T_{i}[\abs{T_{i}} - j] = \ensuremath{\mathsf{BWT}}[\textsf{LF}^{j-1}(i)]$, for $1 \le j \le \abs{T_{i}}$. As $\ensuremath{\mathsf{SA}}[\textsf{LF}^{j}(i)]$ points to suffix $T_{i}[\abs{T_{i}}-j, \abs{T_{i}}]$, we also find the lexicographic ranks of all suffixes of text $T_{i}$ in the process. \Section{Space-efficient \textsf{BWT}{} construction} The FM-index was introduced as a more space-efficient alternative to the suffix array. If we need the suffix array to build the FM-index, a large part of this benefit is lost, and index construction becomes the bottleneck. To overcome the bottleneck, we can use \emph{incremental construction algorithms} that build the FM-index directly. Some of them use an adjustable amount of working space on top of the FM-index, making it possible to index text collections larger than the size of the memory. Assume that we have built the \textsf{BWT}{} of text $T$, and we want to \emph{transform} the \textsf{BWT}{} into that of text $cT$, where $c$ is a character \cite{Hon2007}. We find the pointer $\ensuremath{\mathsf{SA}}[i]$ to the beginning of text $T$ (where $\ensuremath{\mathsf{BWT}}[i] = 0$). Then we determine the lexicographic rank $j = \ensuremath{\mathsf{rank}}(cT, T) = \ensuremath{\mathsf{C}}[c] + \ensuremath{\mathsf{BWT}}.\ensuremath{\mathsf{rank}}(i, c)$ of text $cT$ among the suffixes of text $T$. Finally we \emph{replace} the endmarker at $\ensuremath{\mathsf{BWT}}[i]$ with the inserted character $c$ and \emph{insert} a new endmarker between $\ensuremath{\mathsf{BWT}}[j]$ and $\ensuremath{\mathsf{BWT}}[j+1]$. We can use the transformation for \textsf{BWT}{} construction in several ways. 
We can use \emph{batch updates} and transform the \textsf{BWT}{} of text $T$ into that of text $XT$, where $X$ is a string \cite{Hon2007}. We can start with the \textsf{BWT}{}s of text collections $\ensuremath{\mathcal{A}}$ and $\ensuremath{\mathcal{B}}$, and \emph{merge} them into the \textsf{BWT}{} of collection $\ensuremath{\mathcal{A}} \cup \ensuremath{\mathcal{B}}$ \cite{Siren2009}. We can also \emph{extend} multiple texts at once by inserting a new character to the beginning of each of them \cite{Bauer2013}. In all cases, we can use either \emph{static} or \emph{dynamic} \cite{Chan2007} structures for the \textsf{BWT}. Dynamic representations increase the size of the \textsf{BWT}{} (e.g.~by around 1.5x in \textsf{RopeBWT}2{} \cite{Li2014a}), while static representations require more space overhead for buffering the updates. \begin{figure} \caption{Merging the \textsf{BWT}{}s of two text collections.}\label{fig:merge} \end{figure} Assume that we want to merge the \textsf{BWT}{}s of two text collections $\ensuremath{\mathcal{A}}$ and $\ensuremath{\mathcal{B}}$ of total length $n_{\ensuremath{\mathcal{A}}}$ and $n_{\ensuremath{\mathcal{B}}}$, respectively \cite{Siren2009}. We store the \textsf{BWT}{}s in two-level arrays, where the first level contains pointers to $b$\nobreakdash-bit \emph{blocks}. If a \textsf{BWT}{} takes $x$ bits, the space overhead from the array is $\frac{x}{b} \log x + \ensuremath{\mathsf{O}}(b)$ bits. This becomes $\ensuremath{\mathsf{O}}(\sqrt{x \log x})$ bits with $b = \sqrt{x \log x}$. The merging algorithm has three phases: search, sort, and merge. It uses $\ensuremath{\mathsf{O}}(n_{\ensuremath{\mathcal{A}}} + n_{\ensuremath{\mathcal{B}}} t_{r})$ time and $\min(n_{\ensuremath{\mathcal{B}}} \log n_{\ensuremath{\mathcal{A}}}, n_{\ensuremath{\mathcal{A}}} + n_{\ensuremath{\mathcal{B}}}) + \ensuremath{\mathsf{O}}(\sqrt{x \log x})$ bits of working space in addition to the \textsf{BWT}{}s and the structures required to use them as FM-indexes. 
See Figure~\ref{fig:merge} for an example with two texts. \smallbreak\noindent\textbf{Search.} We search for all texts of collection $\ensuremath{\mathcal{B}}$ in $\ensuremath{\mathsf{BWT}}_{\ensuremath{\mathcal{A}}}$, and output the lexicographic rank $\ensuremath{\mathsf{rank}}(X, \ensuremath{\mathcal{A}})$ for each suffix $X$ of $\ensuremath{\mathcal{B}}$. This takes $\ensuremath{\mathsf{O}}(n_{\ensuremath{\mathcal{B}}} t_{r})$ time. We either need the collection in plain form, or extract the texts from $\ensuremath{\mathsf{BWT}}_{\ensuremath{\mathcal{B}}}$ in the same asymptotic time. \smallbreak\noindent\textbf{Sort.} We build the \emph{rank array} (\textsf{RA}) of $\ensuremath{\mathcal{B}}$ relative to $\ensuremath{\mathcal{A}}$ by sorting the ranks. The rank array is defined as $\ensuremath{\mathsf{RA}}_{\ensuremath{\mathcal{B}} \mid \ensuremath{\mathcal{A}}}[i] = \ensuremath{\mathsf{rank}}(X, \ensuremath{\mathcal{A}})$, where $\ensuremath{\mathsf{SA}}_{\ensuremath{\mathcal{B}}}[i]$ points to suffix $X$. The array requires $n_{\ensuremath{\mathcal{B}}} \log n_{\ensuremath{\mathcal{A}}}$ bits of space, and we can build it in $\ensuremath{\mathsf{O}}(sort(n_{\ensuremath{\mathcal{B}}}, n_{\ensuremath{\mathcal{A}}}))$ time, where $sort(n, u)$ is the time required to sort $n$ integers from universe $[0,u]$. If we extracted the texts from $\ensuremath{\mathsf{BWT}}_{\ensuremath{\mathcal{B}}}$, we can write the ranks directly into the rank array, making this phase trivial. We can also encode the rank array as a binary sequence $B_{\ensuremath{\mathcal{A}} \cup \ensuremath{\mathcal{B}}}$ of length $n_{\ensuremath{\mathcal{A}}} + n_{\ensuremath{\mathcal{B}}}$. This \emph{interleaving bitvector} is built by setting $B_{\ensuremath{\mathcal{A}} \cup \ensuremath{\mathcal{B}}}[i + \ensuremath{\mathsf{RA}}_{\ensuremath{\mathcal{B}} \mid \ensuremath{\mathcal{A}}}[i]] = 1$ for $1 \le i \le n_{\ensuremath{\mathcal{B}}}$. 
If $B_{\ensuremath{\mathcal{A}} \cup \ensuremath{\mathcal{B}}}[j] = 1$, we know that $\ensuremath{\mathsf{SA}}_{\ensuremath{\mathcal{A}} \cup \ensuremath{\mathcal{B}}}[j]$ points to a suffix of $\ensuremath{\mathcal{B}}$. \smallbreak\noindent\textbf{Merge.} We interleave $\ensuremath{\mathsf{BWT}}_{\ensuremath{\mathcal{A}}}$ and $\ensuremath{\mathsf{BWT}}_{\ensuremath{\mathcal{B}}}$ according to the rank array. If $\ensuremath{\mathsf{RA}}_{\ensuremath{\mathcal{B}} \mid \ensuremath{\mathcal{A}}}[i] = j$, the merged \textsf{BWT}{} will have $j$ characters from $\ensuremath{\mathsf{BWT}}_{\ensuremath{\mathcal{A}}}$ before $\ensuremath{\mathsf{BWT}}_{\ensuremath{\mathcal{B}}}[i]$. This phase takes $\ensuremath{\mathsf{O}}(n_{\ensuremath{\mathcal{A}}} + n_{\ensuremath{\mathcal{B}}})$ time. By reusing the blocks of $\ensuremath{\mathsf{BWT}}_{\ensuremath{\mathcal{A}}}$ and $\ensuremath{\mathsf{BWT}}_{\ensuremath{\mathcal{B}}}$ for $\ensuremath{\mathsf{BWT}}_{\ensuremath{\mathcal{A}} \cup \ensuremath{\mathcal{B}}}$, we can merge the \textsf{BWT}{}s almost in-place. The total working space is $\ensuremath{\mathsf{O}}(\sqrt{x \log x})$ bits, where $x$ is the maximum of the sizes of $\ensuremath{\mathsf{BWT}}_{\ensuremath{\mathcal{A}}}$ and $\ensuremath{\mathsf{BWT}}_{\ensuremath{\mathcal{B}}}$ in bits. \Section{Large-scale \textsf{BWT}{} merging} If we split a text collection $\ensuremath{\mathcal{A}}$ of total length $n$ into $p$ \emph{subcollections} of equal size, we can build $\ensuremath{\mathsf{BWT}}_{\ensuremath{\mathcal{A}}}$ incrementally by merging the \textsf{BWT}{}s of the subcollections. This takes $\ensuremath{\mathsf{O}}((p+t_{r})n)$ time and uses essentially $\min(\frac{n}{p} \log n, n)$ bits of working space. When the collection is large, the space overhead of the construction algorithm often determines whether we can build the \textsf{BWT}{}. Even if a static encoding of the \textsf{BWT}{} fits in memory, a dynamic encoding may already be too large. 
The space overhead from the rank array or the interleaving bitvector (or their equivalents in the other space-efficient algorithms) may also be too much. We can make the rank array fit in memory by increasing the number of subcollections, but that can make the construction too slow. We can reduce the overhead by writing the lexicographic ranks to \emph{disk}. If we sort the ranks on disk, we just need to scan the rank array once during the merge phase. We can also \emph{compress} the ranks before writing them to disk and \emph{interleave} the sorting with the search and merge phases. We now describe the key ideas for fast and space-efficient \textsf{BWT}{} construction. \smallbreak\noindent\textbf{Search.} Instead of searching for every text in collection $\ensuremath{\mathcal{B}}$ separately, we can search for the \emph{reverse trie} of the collection. Assume that there are $m_{\ensuremath{\mathcal{A}}}$ texts in collection $\ensuremath{\mathcal{A}}$ and $m_{\ensuremath{\mathcal{B}}}$ texts in collection $\ensuremath{\mathcal{B}}$. The \emph{root} of the trie corresponds to suffix $\$$, which has lexicographic rank $m_{\ensuremath{\mathcal{A}}}$ in $\ensuremath{\mathcal{A}}$ and corresponds to lexicographic range $[1,m_{\ensuremath{\mathcal{B}}}]$ in $\ensuremath{\mathcal{B}}$. Assume that we have a \emph{node} of the trie corresponding to suffix $X$, lexicographic rank $r$, and lexicographic range $[sp,ep]$. As suffix $X$ occurs $ep+1-sp$ times in collection $\ensuremath{\mathcal{B}}$, we can output a \emph{run} of ranks $(r, ep+1-sp)$. Afterwards, we proceed to the \emph{children} of the node. For each character $c \in \Sigma$, we create a node corresponding to suffix $cX$, rank $\ensuremath{\mathsf{LF}}_{\ensuremath{\mathcal{A}}}(r,c)$, and range $[\ensuremath{\mathsf{LF}}_{\ensuremath{\mathcal{B}}}(sp-1, c) + 1, \ensuremath{\mathsf{LF}}_{\ensuremath{\mathcal{B}}}(ep, c)]$. 
Searching the branches of the trie can be done in parallel using multiple \emph{threads}. \smallbreak\noindent\textbf{Buffering.} To reduce disk I/O and space usage, we buffer and compress the lexicographic ranks before writing them to disk. Each thread has two buffers: a \emph{run buffer} and a \emph{thread buffer}. The run buffer stores the runs as pairs of integers $(r, \ell)$. Once the run buffer becomes full, we sort the runs by \emph{run heads} $r$, use \emph{differential encoding} for the run heads, and encode the differences and run lengths with a \emph{prefix-free code}. The compressed run buffer is then merged with the similarly compressed thread buffer. Once the thread buffer becomes full, we merge it with the global \emph{merge buffers}. There are $k$ merge buffers $M_{1}$ to $M_{k}$, with buffer $M_{i}$ containing $2^{i-1}$ thread buffers. The merging starts from $M_{1}$. If $M_{i}$ is empty, the thread swaps its thread buffer with the empty buffer and returns to the search phase. Otherwise it merges $M_{i}$ with its thread buffer, clearing $M_{i}$, and proceeds to $M_{i+1}$. If a thread reaches $M_{k+1}$, it writes its thread buffer to disk and returns to work. \smallbreak\noindent\textbf{Merge.} The ranks are stored in sorted order in multiple files on disk. For interleaving the \textsf{BWT}{}s, we need to merge the files and to scan through the rank array. We can also use multiple threads here. One thread reads the files and performs a \emph{multiway merge} using a priority queue, producing a stream of lexicographic ranks. Another thread consumes the stream and uses it to interleave the \textsf{BWT}{}s. If the disk is fast enough, we may want to use multiple threads for the multiway merge. \Section{Implementation} We have implemented the improved merging algorithm as a tool for merging the \textsf{BWT}{}s of large read collections. 
The tool, \textsf{BWT}merge{}, is written in C++, and the source code is available on GitHub.\footnote{\url{https://github.com/jltsiren/bwt-merge}} The implementation uses the \emph{SDSL library} \cite{Gog2014b} and the new features in C++11. As a result, it needs a fairly recent C++ compiler to compile. We have successfully built \textsf{BWT}merge{} on Linux and OS~X using g++. The target environment of \textsf{BWT}merge{} is a \emph{single node} of a \emph{computer cluster}. The system should have tens of CPU cores, hundreds of gigabytes of memory, and hundreds of gigabytes of local disk space for temporary files. The number of search threads is equal to the number of CPU cores, while the merge phase uses just one producer thread and one consumer thread. \textsf{BWT}merge{} can be adapted to many other environments by adjusting the number and the size of the buffers. The internal alphabet of \textsf{BWT}merge{} is \texttt{012345}, which corresponds to either \texttt{\$ACGTN} or \texttt{\$ACGNT}, depending on where the \textsf{BWT}{}s come from. \textsf{BWT}{}s using different alphabetic orders cannot be merged. We use simple byte-level codes for \emph{run-length encoding} the \textsf{BWT}{}s. The encoding of run $(c, \ell)$, where $c$ is the character value and $\ell$ is the length, depends on the length of the run. If $\ell \le 41$, the run is encoded in a single byte as $6 \cdot (\ell-1) + c$. Longer runs start with byte $6 \cdot 41 + c$, followed by the encoding of $\ell-42$. The remaining run length is encoded as a sequence of bytes, with the low 7 bits containing data and the high bit telling whether the encoding continues in the next byte. The compressed buffers use the same 7+1\nobreakdash-bit code for both the differentially encoded run heads and the run lengths. For \textsf{rank}/\textsf{select}{} support, we divide the \textsf{BWT}{}s into 64\nobreakdash-byte blocks of compressed data, ensuring that the runs do not cross block boundaries. 
For each block $i$, we store the total number of characters in blocks $1$ to $i-1$ as $n_{i}$, as well as the cumulative character counts $c_{i} = \ensuremath{\mathsf{BWT}}.\ensuremath{\mathsf{rank}}(n_{i},c)$ for $0 \le c \le 5$. These increasing sequences are stored using the \emph{sdarray} encoding \cite{Okanohara2007}. To compute $\ensuremath{\mathsf{BWT}}.\ensuremath{\mathsf{rank}}(j,c)$, we start with a \textsf{rank}{} query on the $n_{i}$ sequence to find the block. A \textsf{select}{} query on the same sequence transforms $j$ into a block offset, while a \textsf{select}{} query on the $c_{i}$ sequence gives the rank up to the beginning of the block. We then decompress the block to answer the query. \textsf{select}{} queries and accessing the \textsf{BWT}{} work in a similar way. There are also optimizations for e.g.~computing $\ensuremath{\mathsf{rank}}(i,c)$ for all characters $c$, and for finding the children of a reverse trie node corresponding to a short lexicographic range. We use two-level arrays with 8\nobreakdash-megabyte blocks to store the \textsf{BWT}{}s and the compressed buffers, managing the blocks using \texttt{mmap()} and \texttt{munmap()}. This reduces the space overhead by tens of gigabytes over using \texttt{malloc()} and \texttt{free()}. When multiple threads allocate memory in small enough blocks, the multithreaded \emph{glibc} implementation of \texttt{malloc()} creates a number of additional heaps that will never grow larger than 64~MB. Each thread tries to reuse the heap it used for the last allocation. If a heap is full or the thread cannot acquire the mutex, it moves to the next heap. With our workload of tens of threads allocating and freeing hundreds of gigabytes of memory, this created thousands of heaps with holes in the middle. Most \texttt{free()} calls did not release the memory back to the operating system, while it took a while before any thread could reuse the holes created by the \texttt{free()} calls. 
\Section{Experiments} We used a system with two 16\nobreakdash-core AMD Opteron 6378 processors and 256 gigabytes of memory. The system was running Ubuntu 12.04 on Linux kernel 3.2.0. We used a development version of \textsf{BWT}merge{} equivalent to v0.3, and the versions of the other tools that were available on GitHub in October 2015. All software was compiled with gcc/g++ version 4.9.2. We stored the input/output files on a distributed Lustre file system and used a local 0.5~TB disk for temporary files. \textsf{BWT}merge{} used 32 threads, while the other \textsf{BWT}{} construction tools were limited by design to 4 or 5 threads. The merging times include verification by querying the \textsf{BWT}{}s with 2~million $32$\nobreakdash-mers. \begin{table}[t!] \begin{center} \caption{Datasets. The amount of sequence data, the number of reads, and the size of the \textsf{BWT}{} in the native format and in the Read Server format. RLO indicates that the reads are sorted in reverse lexicographic order. 
The numbers in parentheses are estimates.}\label{table:datasets} { \renewcommand{\arraystretch}{1}\footnotesize \begin{tabular}{c|cc|cc|cc} \hline & \multicolumn{2}{c|}{\textbf{Data}} & \multicolumn{2}{c|}{\textbf{Native \textsf{BWT}}} & \multicolumn{2}{c}{\textbf{Read Server}} \\ \textbf{Dataset} & \textbf{Size} & \textbf{Reads} & \textbf{Unsorted} & \textbf{RLO} & \textbf{\textsf{BWT}} & \textbf{\textsf{FMI}} \\ \hline \textsf{C}EU: All & 771 Gbp & 7.63G & 136 GB & 65.9 GB & -- & -- \\ NA12878 & 284 Gbp & 2.81G & 50.3 GB & 25.5 GB & -- & -- \\ NA12891 & 242 Gbp & 2.40G & 42.4 GB & 19.7 GB & -- & -- \\ NA12892 & 245 Gbp & 2.42G & 43.8 GB & 20.7 GB & -- & -- \\ Merged & 771 Gbp & 7.63G & 129 GB & 58.9 GB & -- & -- \\ \hline \textsf{RS}: AA, TT, AT, TA & 1.49 Tbp & 16.2G & -- & 136 GB & 140 GB & 170 GB \\ AA & 433 Gbp & 4.69G & -- & 38.5 GB & 39.9 GB & 48.3 GB \\ TT & 432 Gbp & 4.68G & -- & 38.7 GB & 40.0 GB & 48.4 GB \\ AT & 275 Gbp & 2.98G & -- & 26.6 GB & 26.9 GB & 32.6 GB \\ TA & 355 Gbp & 3.84G & -- & 32.7 GB & 33.5 GB & 40.6 GB \\ Merged & 1.49 Tbp & 16.2G & -- & 117 GB & 126 GB & (152 GB) \\ \hline \textsf{RS}: *A, *C & 2.45 Tbp & 26.5G & -- & 225 GB & 232 GB & 281 GB \\ Merged & 2.45 Tbp & 26.5G & -- & 181 GB & 197 GB & (239 GB) \\ \hline \textsf{RS}: *G, *T & 2.44 Tbp & 26.5G & -- & 226 GB & 232 GB & 281 GB \\ Merged & 2.44 Tbp & 26.5G & -- & 180 GB & 197 GB & (238 GB) \\ \hline \end{tabular}} \end{center} \end{table} Our datasets come from phase 3 of the \emph{1000 Genomes Project} \cite{1000GP2015}. \textsf{C}EU{} contains 101~bp reads from high-coverage sequencing of the \emph{CEU trio} (individuals NA12878, NA12891, and NA12892). We downloaded the gzipped FASTQ files (run accessions SRR622457, SRR622458, and SRR622459). For each individual, we concatenated the files and corrected the sequencing errors with BFC \cite{Li2015} (\texttt{bfc -s 3g -t 16}). \textsf{RS}{} is from the \emph{Read Server} project, which uses all low-coverage and exome data from the phase 3. 
There are 53.0~billion unique reads for a total of 4.88~Tbp. The reads are in 16 run-length encoded \textsf{BWT}{}s built by using the \emph{String Graph Assembler} (SGA) \cite{Simpson2012}, partitioned by the last two bases. See Table~\ref{table:datasets} for further details on the datasets. \smallbreak\noindent\textbf{Parameters.} For testing different parameter values, we took four \textsf{BWT}{} files (AA, TT, AT, and TA) containing a total of 1.49~Tbp from the \textsf{RS}{} dataset, and converted them to the \emph{native format} of \textsf{BWT}merge. This format includes the \textsf{rank}/\textsf{select}{} structures required by the FM-index. We then merged the \textsf{BWT}{}s (in the given order). We used 128~MB or 256~MB run buffers and 256~MB or 512~MB thread buffers. The number of merge buffers was chosen so that the files on disk were always merged from either 8~GB or 16~GB of thread buffers. \begin{figure} \caption{Time/space trade-offs. Left: merging four \textsf{BWT}{}s from the \textsf{RS}{} dataset with different parameter values. Right: \textsf{BWT}{} construction for the \textsf{C}EU{} dataset.}\label{fig:benchmark} \end{figure} The results can be seen in Figure~\ref{fig:benchmark} (left). The average speed for inserting 1.06~Tbp into file AA ranged from 8.27~Mbp/s to 9.40~Mbp/s, depending on the parameter values. Memory overhead was 21.1~GB to 41.5~GB on top of the 124.2~GB required by the last pair of \textsf{BWT}{}s. The temporary files used 287 to 306~gigabytes of disk space. Thread buffer size and the number of merge buffers were the most important parameters. The larger the individual files are, the more memory the search phase uses and the faster the merge phase is. Increasing run buffer size to 256~MB made the search phase faster with 512~MB thread buffers and slower with 256~MB thread buffers. For the further experiments, we chose 128~MB run buffers, 256~MB thread buffers, and 6 merge buffers (overhead 30.8~GB). 
\smallbreak\noindent\textbf{Comparison.} In the next experiment, we compared \textsf{BWT}merge{} to the fastest \textsf{BWT}{} construction tools on general hardware \cite{Li2014a}. We built the \textsf{BWT}{} of the \textsf{C}EU{} dataset using \textsf{RopeBWT}{} \cite{Li2011-2013} with parameters \texttt{-btORf -abcr} and \textsf{RopeBWT}2{} \cite{Li2014a} with parameters \texttt{-bRm10g}. We also built individual \textsf{BWT}{}s using \textsf{RopeBWT}{} and merged them with \textsf{BWT}merge. All tools were set to write the \textsf{BWT}{}s in their preferred formats. The results are in Figure~\ref{fig:benchmark} (right). When the reads are in the original order, \textsf{BWT}merge{} is 1.85x slower and 1.46x more space-efficient than \textsf{RopeBWT}. \textsf{RopeBWT}2{} ran out of memory just before finishing. It would have been about 1.2x faster and 1.5x less space-efficient than \textsf{BWT}merge. The running time of \textsf{BWT}merge{} was split evenly between \textsf{BWT}{} construction and merging. When \textsf{RopeBWT}{} and \textsf{RopeBWT}2{} sort the reads in \emph{reverse lexicographic order} (RLO) to improve compression, all tools improve their performance. \textsf{BWT}merge{} becomes 1.70x slower and 2.12x more space-efficient than \textsf{RopeBWT}{}, and 1.21x \emph{faster} and 1.09x more space-efficient than \textsf{RopeBWT}2. Again, \textsf{BWT}merge{} spent around half of the time building the individual \textsf{BWT}{}s and another half merging them. Note that \textsf{BWT}merge{} builds a \textsf{BWT}{} for the concatenation of three input files that are in RLO, while the \textsf{BWT}{}s produced by the other tools are completely in RLO. Maintaining RLO during merging would reduce the size of the final \textsf{BWT}{} from 58.9~GB to 54.4~GB. 
\smallbreak\noindent\textbf{Read Server.} In the last experiment, we merged the 16 \textsf{BWT}{} files in the \textsf{RS}{} dataset into two files (AA, CA, TA, GA, AC, CC, GC, and TC into the first file; TT, GT, CT, AT, TG, GG, CG, AG into the second one). Merging the \textsf{BWT}{}s took 81.3~hours and 83.0~hours, required 221~GB and 219~GB of memory, and used 297~GB and 300~GB of disk space, respectively. This reduced the size of the FM-indexes from around 560~GB to 480~GB. By converting the \textsf{BWT}{}s to the native format of \textsf{BWT}merge{}, we further reduced the size of the indexes to 360~GB. This makes it possible to host the indexes on two servers instead of the original three. \Section{Conclusions} We have proposed an improved \textsf{BWT}{} merging algorithm for large read collections. Our implementation of the algorithm in the \textsf{BWT}merge{} tool is fast enough to be used with terabases of sequence data. It requires only 30~gigabytes of memory on top of the \textsf{BWT}{}s to be merged. As \textsf{BWT}-based indexes access large arrays in a random fashion, they must reside in memory in most applications. Hence \textsf{BWT}merge{} can build the index on the same system as it is going to be used. \textsf{BWT}merge{} can be used as a part of a \textsf{BWT}{} construction algorithm. We split the read collection into subcollections, build the \textsf{BWT}{}s of the subcollections, and merge the results. The resulting algorithm is typically slower but more space-efficient than the existing algorithms. The most important feature of our algorithm is its low memory usage. With it, we can build the \textsf{BWT}{}s of much larger read collections than before on commonly available hardware. As a concrete example, we merged the 16 Read Server \textsf{BWT}{} files into two files. This reduced the number of servers required to host the indexes from three to two, and also improved the query performance of the servers. 
In the future, we are going to extend \textsf{BWT}merge{} to support different \emph{text orders}, and to optionally \emph{remove duplicate texts} from the merged collection. The current algorithm maintains the existing order by inserting the texts from $\ensuremath{\mathsf{BWT}}_{\ensuremath{\mathcal{B}}}$ after the texts in $\ensuremath{\mathsf{BWT}}_{\ensuremath{\mathcal{A}}}$. This makes it easy to determine the original text identifiers without having to store a permutation. Other text orders are useful for different purposes. Read Server stores the reads in \emph{reverse lexicographic order} to improve compression \cite{Cox2012}. We can maintain this order with a few changes to the search phase \cite{Li2014a}. Sorting the reads by their \emph{reverse complements} also improves compression in a similar way. In this order, $\ensuremath{\mathsf{SA}}[\ensuremath{\mathsf{BWT}}.\ensuremath{\mathsf{select}}(i,0)]$ points to the beginning of the reverse complement of read $T_{i}$, if the collection includes the reverse complement of every read \cite{Li2014a}. With \emph{lexicographic order}, we can determine $\ensuremath{\mathsf{SA}}[i]$ without samples by using $\ensuremath{\mathsf{LF}}$ instead of $\Psi$, which is often faster in practice. We can also sort the reads by their likely positions in a reference genome. This \emph{position order} is useful for both compression and storing the \emph{pairing information} for the reads. Consider a graph with reads as its nodes and edges between paired reads. If we sort the reads in position order, most edges will be close to the diagonal of the edge matrix. Such matrices are very easy to compress. \Section{References} \end{document}
\begin{document} \title[Quantum states and space-time causality]{ Quantum states and space-time causality} \author[D.~C.~Brody]{Dorje C. Brody} \address{Blackett Laboratory, Imperial College, London SW7 2BZ, UK} \email{[email protected]} \urladdr{http://www.imperial.ac.uk/people/d.brody} \author[L.~P.~Hughston]{Lane P. Hughston} \address{Department of Mathematics, King's College London, London WC2R 2LS, UK} \email{[email protected]} \urladdr{http://www.mth.kcl.ac.uk/staff/l$\_$hughston.html} \maketitle \begin{abstract} Space-time symmetries and internal quantum symmetries can be placed on equal footing in a hyperspin geometry. Four-dimensional classical space-time emerges as a result of a decoherence that disentangles the quantum and the space-time degrees of freedom. A map from the quantum space-time to classical space-time that preserves the causality relations of space-time events is necessarily a density matrix. \end{abstract} \section{Introduction} This article presents a programme for the unification of space-time and internal quantum symmetries. An important role is played in this theory by certain higher-dimensional analogues of spinors. In four-dimensional space-time there is a local isomorphism between the Lorentz group $SO(1,3)$ and the spin transformation group $SL(2,{\mathbb C})$. In higher dimensions, however, this relation breaks down and we are left with two concepts of spinors---one for the groups $SO(N,{\mathbb C})$, and one for the groups $SL(r,{\mathbb C})$. The spinors associated with $SO(N,{\mathbb C})$ are the so-called Cartan spinors. The study of Cartan spinors has a long history, and there is a beautiful geometry associated with these spinors. The spinors associated with $SL(r,{\mathbb C})$, called `hyperspinors', have the advantage of being more directly linked with quantum mechanics. In fact, a relativistic model for hyperspin arises when one considers `multiplets' of two-component spinors, i.e. 
expressions of the form $\xi^{{\bf A}i}$ and $\eta^{{\bf A}^\prime}_i$, where ${\bf A},{\bf A}'$ are standard spinor indices and $i=1,2, \ldots,n$ is an `internal' index. In the general case ($n=\infty$) we can think of $\xi^{{\bf A}i}$ as an element of the tensor product space ${\mathbb S}^{{\bf A}i} ={\mathbb S}^{\bf A}\otimes{\mathbb H}^i$, where ${\mathbb S}^{\bf A}$ is the space of two-component spinors, and ${\mathbb H}^i$ is an infinite-dimensional complex Hilbert space. The theory of hyperspin constitutes a natural starting place for building up a theory of quantum geometry or, as we shall call it here, \emph{quantum space-time}. The hyperspinor route has the virtue that the resulting higher-dimensional space-time has a rich causal structure associated with it, and as a consequence is well-positioned to form the geometrical basis of a physical theory. \section{Relativistic causality} Let us review briefly the role of two-component spinors in the description of four-dimensional Minkowski space. We use bold upright Roman letters to denote two-component spinor indices, and we adopt the standard conventions for the algebra of two-component spinors~\cite{penrose}. Then we have the following correspondence between two-by-two Hermitian matrices $x^{{\bf AA}^\prime}~({\bf A},{\bf A}^\prime=1,2)$ and the positions $x^{\rm a}~({\rm a}=0,1,2,3)$ of space-time points relative to some origin. More explicitly, in a standard basis this correspondence is given by \begin{eqnarray} \frac{1}{\sqrt{2}} \left( \begin{array}{ll} t+z & x+{\rm i} y \\ x-{\rm i} y & t-z \end{array} \right) \quad \longleftrightarrow \quad (t,x,y,z). \label{eq:2} \end{eqnarray} We thus obtain the fundamental relation $2\det(x^{{\bf AA}^\prime}) = t^2-x^2-y^2-z^2$. It follows that two-component spinors are connected both with quantum mechanics and with the causal structure of space-time. 
It is a peculiar aspect of relativistic physics that there is a link of this nature between the spin degrees of freedom of spin one-half particles, and the causal geometry of four-dimensional space-time. For the interval between a pair of points $x^{{\bf AA}^\prime}$ and $y^{{\bf AA}^\prime}$ in space-time we write $r^{{\bf AA}^\prime} = x^{{\bf AA}^\prime}-y^{{\bf AA}^\prime}$. It follows that $2\det(r^{{\bf AA}^\prime}) = \epsilon_{\bf AB}\epsilon_{{\bf A}^\prime{\bf B}^\prime} r^{{\bf AA}^\prime} r^{{\bf BB}^\prime}$, where $\epsilon_{\bf AB}$ is the antisymmetric spinor. If we adopt the `index clumping' convention and write ${\rm a}={\bf AA}'$, ${\rm b}={\bf BB}'$, and so on, whereby a pair of spinor indices, one primed and the other unprimed, corresponds to a space-time vector index, then we can write $\epsilon_{\bf AB}\epsilon_{{\bf A}^\prime{\bf B}^\prime} r^{{\bf AA}^\prime} r^{{\bf BB}^\prime} = g_{\rm ab} r^{\rm a} r^{\rm b}$ for the corresponding squared space-time interval. Thus we identify $g_{\rm ab} = \epsilon_{\bf AB}\epsilon_{{\bf A}^\prime{\bf B}^\prime}$ as the metric of Minkowski space. There are three different situations that can arise for the interval $r^{\rm a}$. The first case is $g_{\rm ab}r^{\rm b}=0$; the second case is $g_{\rm ab}r^{\rm b}\neq0$ and $g_{\rm ab}r^{\rm a}r^{\rm b}=0$; and the third case is $g_{\rm ab}r^{\rm a}r^{\rm b}\neq0$. 
Each of these cases gives rise to a canonical form for the interval $r^{{\bf AA}^\prime}$, with various sub-cases, which can be summarised as follows: (i) $g_{\rm ab}r^{\rm b}=0$ implies $r^{{\bf AA}^\prime}=0$ (zero separation); (ii) $g_{ab} r^a r^b=0$ implies $r^{{\bf AA}^\prime}=\alpha^{\bf A} {\bar\alpha}^{{\bf A}^\prime}$ (future-pointing null separation) or $r^{{\bf AA}^\prime}=-\alpha^{\bf A} {\bar\alpha}^{{\bf A}^\prime}$ (past-pointing null separation); (iii) $g_{ab} r^a r^b\neq0$ implies $r^{{\bf AA}^\prime}= \alpha^{\bf A} {\bar\alpha}^{{\bf A}^\prime} + \beta^{\bf A}{\bar\beta}^{{\bf A}^\prime}$ (future-pointing time-like separation), $r^{{\bf AA}^\prime} = \alpha^{\bf A} {\bar\alpha}^{{\bf A}^\prime} - \beta^{\bf A}{\bar\beta}^{{\bf A}^\prime}$ (space-like separation), or $r^{{\bf AA}^\prime}=-\alpha^{\bf A}{\bar\alpha}^{{\bf A}^\prime} - \beta^{\bf A}{\bar\beta}^{{\bf A}^\prime}$ (past-pointing time-like separation). Once the canonical form for $r^{{\bf AA}^\prime}$ is specified, so is the causal relationship that it determines. \section{Hyperspin spaces and quantum space-time} In the case of hyperspinors (introduced by Finkelstein~\cite{finkelstein1}, cf. also \cite{finkelstein2,holm,honeycutt}) we replace the two-component spinors of four-dimensional space-time with $r$-component spinors. We can regard hyperspin space as the vector space ${\mathbb C}^r$ with some extra structure. In particular, in addition to the primary hyperspin space we have three other vector spaces---the dual hyperspin space, the complex-conjugate hyperspin space, and the dual complex-conjugate hyperspin space. Let us write ${\mathbb S}^{A}$ and ${\mathbb S}^{A'}$ for the complex $r$-dimensional vector spaces of unprimed and primed hyperspinors. For hyperspinors we use italic indices to distinguish them from the boldface indices used for two-component spinors. 
We assume that ${\mathbb S}^{A}$ and ${\mathbb S}^{A'}$ are related by an anti-linear isomorphism under complex conjugation. Thus if $\alpha^{A}\in{\mathbb S}^{A}$, then $\alpha^{A}\to {\bar\alpha}^{A'}$ under complex conjugation, where ${\bar\alpha}^{A'} \in {\mathbb S}^{A'}$. The dual spaces associated with ${\mathbb S}^{A}$ and ${\mathbb S}^{A'}$ are denoted ${\mathbb S}_{A}$ and ${\mathbb S}_{A'}$. If $\alpha^{A}\in{\mathbb S}^{A}$ and $\beta_{A}\in{\mathbb S}_{A}$, then their inner product is denoted $\alpha^{A}\beta_{A}$; if $\gamma^{\,A'}\in {\mathbb S}^{A'}$ and $\delta_{A'}\in {\mathbb S}_{A'}$ then their inner product is $\gamma^{\,A'}\delta_{A'}$. We also introduce the totally antisymmetric hyperspinors of rank $r$ associated with ${\mathbb S}^{A}$, ${\mathbb S}_{A}$, ${\mathbb S}^{A'}$, and ${\mathbb S}_{A'}$. These are denoted $\varepsilon^{AB\cdots C}$, $\varepsilon_{AB\cdots C}$, $\varepsilon^{A'B'\cdots C'}$, and $\varepsilon_{A'B'\cdots C'}$, and satisfy the relations $\varepsilon^{AB\cdots C} \varepsilon_{AB\cdots C}=r!$, $\varepsilon^{A'B'\cdots C'} \varepsilon_{A'B'\cdots C'}=r!$, and $\varepsilon_{A'B'\cdots C'}= {\bar\varepsilon}_{A'B'\cdots C'}$. Next we introduce the complex matrix space ${\mathbb C}^{AA'} ={\mathbb S}^{A}\otimes{\mathbb S}^{A'}$. An element $x^{\,AA'}\in{\mathbb C}^{AA'}$ is said to be \emph{real} if it satisfies the (weak) Hermitian property $x^{\,AA'}= {\bar x}^{A'A}$. We shall have more to say about weak versus strong Hermiticity in connection with the idea of symmetry breaking. We denote the vector space of real elements of ${\mathbb C}^{AA'}$ by ${\mathbb R}^{AA'}$. The points of ${\mathbb R}^{AA'}$ constitute what we call the quantum space-time ${\mathcal H}^{r^2}$ of dimension $r^2$. We regard $\mathcal{C\!H}^{r^2}={\mathbb C}^{AA'}$ as the complexification of ${\mathcal H}^{r^2}$. Many problems in ${\mathcal H}^{r^2}$ are best first approached as problems in $\mathcal{C\!H}^{r^2}$. 
Let $x^{\,AA'}$ and $y^{\,AA'}$ be points in ${\mathcal H}^{r^2}$, and write $r^{\,AA'}=x^{\,AA'} -y^{\,AA'}$ for their separation vector, which is independent of the choice of origin. Using the index-clumping convention we set $x^{\rm a}=x^{\,AA'}$, $y^{\rm a}=y^{\,AA'}$, $r^{\rm a} = r^{\,AA'}$, and for the separation of $x^{\rm a}$ and $y^{\rm a}$ in ${\mathcal H}^{r^2}$ we write $r^{\rm a}=x^{\rm a}- y^{\rm a}$. There is a natural causal structure induced on such intervals by Finkelstein's \emph{chronometric tensor}~\cite{finkelstein1}, defined by the relation $g_{{\rm ab\cdots c}} = \varepsilon_{AB\cdots C}\, \varepsilon_{A'B'\cdots C'}$. The chronometric tensor is of rank $r$, is totally symmetric and is nondegenerate in the sense that $v^{\rm a}g_{\rm ab \cdots c}\neq0$ for any vector $v^{\rm a}\neq0$. We say that $x^{\rm a}$ and $y^{\rm a}$ in ${\mathcal H}^{r^2}$ have a `degenerate' separation if the chronometric form $\Delta(r)=g_{{\rm ab\cdots c}}r^{\rm a}r^{\rm b}\cdots r^{\rm c}$ vanishes for $r^{\rm a}=x^{\rm a}- y^{\rm a}$. Degenerate separation is equivalent to the vanishing of the determinant of $r^{\,AA'}$. If the hyperspin space has dimension $r=2$, this condition reduces to the case where $x^{\rm a}$ and $y^{\rm a}$ are null-separated in Minkowski space. For $r>2$ the situation is more complicated since various degrees of degeneracy can arise between two points of a quantum space-time. In the case $r=3$, for example, the quantum space-time has dimension nine, and the chronometric form is $\Delta=g_{\rm abc}r^{\rm a}r^{\rm b}r^{\rm c}$. 
The different possibilities that can arise for the separation vector are as follows: (i) $g_{\rm abc}r^{\rm c}=0$ implies $r^{\,AA'}=0$ (zero separation); (ii) $g_{\rm abc}r^{\rm b}r^{\rm c}=0$ and $g_{\rm abc} r^{\rm c}\neq0$ implies $r^{\,AA'}= \alpha^A {\bar\alpha}^{A'}$ (future-pointing null separation) or $r^{\,AA'} =-\alpha^A {\bar\alpha}^{A'}$ (past-pointing null separation); (iii) $\Delta=0$ and $g_{\rm abc}r^{\rm b}r^{\rm c} \neq 0$ implies $r^{\,AA'}=\alpha^A{\bar\alpha}^{A'} + \beta^A{\bar\beta}^{A'}$ (degenerate time-like future-pointing separation), $r^{\,AA'} = \alpha^A{\bar\alpha}^{A'} - \beta^A {\bar\beta}^{A'}$ (degenerate space-like separation), or $r^{\,AA'}=-\alpha^A{\bar\alpha}^{A'} - \beta^A{\bar\beta}^{A'}$ (degenerate time-like past-pointing separation); (iv) $\Delta\neq0$ and $g_{\rm abc}r^{\rm b}r^{\rm c}\neq0$ implies $r^{\,AA'}=\alpha^A{\bar\alpha}^{A'} + \beta^A{\bar\beta}^{A'} + \gamma^{\,A}{\bar\gamma}^{\,A'}$ (future-pointing time-like separation), $r^{\,AA'} = \alpha^A {\bar\alpha}^{A'} + \beta^A {\bar\beta}^{A'} - \gamma^{\,A} {\bar\gamma}^{\,A'}$ (future semi-space-like separation), $r^{\,AA'}=\alpha^A {\bar\alpha}^{A'} - \beta^A{\bar\beta}^{A'}- \gamma^{\,A} {\bar\gamma}^{\,A'}$ (past semi-space-like separation), or $r^{\,AA'}=-\alpha^A {\bar\alpha}^{A'} - \beta^A{\bar\beta}^{A'}- \gamma^{\,A} {\bar\gamma}^{\,A'}$ (past-pointing time-like separation). When the separation of two points of a quantum space-time is degenerate, we define the `degree' of degeneracy by the rank of the matrix $r^{\,AA'}$. Null separation is the case for which the degeneracy is of the first degree, i.e. $r^{\,AA'}$ is of rank one, and thus satisfies a system of quadratic relations of the form $g_{{\rm ab\cdots c}}r^{\rm a}r^{\rm b}=0$. This implies $r^{\,AA'}=\pm \alpha^{A}{\bar\alpha}^{A'}$ for some $\alpha^A$. 
In the case of degeneracy of the second degree, $r^{\,AA'}$ is of rank two and satisfies a set of cubic relations given by $g_{{\rm abc\cdots d}} r^{\rm a} r^{\rm b}r^{\rm c}=0$. In this situation $r^{\,AA'}$ can be put into one of the following canonical forms: (a) $r^{\,AA'}=\alpha^{A} {\bar\alpha}^{A'}+ \beta^A{\bar\beta}^{A'}$, (b) $r^{\,AA'}= \alpha^A{\bar\alpha}^{A'}- \beta^{A} {\bar\beta}^{A'}$, and (c) $r^{\,AA'}= -\alpha^{A}{\bar\alpha}^{A'}- \beta^{A} {\bar\beta}^{A'}$. In case (a), the point $x^{\rm a}$ lies to the future of the point $y^{\rm a}$, and $r^{\rm a}$ can be thought of as a degenerate future-pointing time-like vector. In case (b), $r^{\rm a}$ is a degenerate space-like vector. In case (c), $x^{\rm a}$ lies to the past of $y^{\rm a}$, and $r^{\rm a}$ is a degenerate past-pointing time-like vector. A similar analysis can be applied to degenerate separations of other intermediate degrees. If the determinant of the $r$-by-$r$ weakly Hermitian matrix $r^{\,AA'}$ is nonvanishing, and $r^{\,AA'}$ is thus of maximal rank, then the chronometric form is nonvanishing. In that case $r^{\,AA'}$ can be represented in the following canonical form: \begin{eqnarray} r^{\,AA'}=\pm \alpha^{A}{\bar\alpha}^{A'} \pm \beta^{A} {\bar\beta}^{A'} \pm \cdots \pm \gamma^{\,A}{\bar\gamma}^{\,A'}, \end{eqnarray} with the presence of $r$ nonvanishing terms, where the $r$ hyperspinors $\alpha^{A},\beta^{A}, \cdots, \gamma^{\,A}$ are linearly independent. Let us write $(p,q)$ for the numbers of plus and minus signs appearing in the canonical form for $r^{\,AA'}$. We call $(p,q)$ the `signature' of $r^{\,AA'}$. When the signature is $(r,0)$ or $(0,r)$ we say that $r^{\,AA'}$ is future-pointing time-like or past-pointing time-like, respectively. Then we define the proper time interval between the events $x^{\rm a}$ and $y^{\rm a}$ by the formula $\|x-y\| = |\Delta|^{1/r}$. In the case $r=2$ we recover the Minkowskian proper-time interval. 
A remarkable feature of the causal structure of a quantum space-time is that many of the physical features of the causal structure of Minkowski space are preserved. In particular, the space of future-pointing time-like vectors forms a convex cone. The same is true for the structure of the associated momentum space, from which it follows that we can give a good definition of `positive energy'. \section{Equations of motion} Let $\lambda\mapsto x^{\,AA'}(\lambda)$ define a smooth curve $\gamma$ in ${\mathcal H}^{r^2}$ for $\lambda \in [a,b] \subset{\mathbb R}$. Then $\gamma$ is said to be time-like if the tangent vector $v^{AA'}(\lambda) = {\rm d} x^{\,AA'}(\lambda) /{\rm d}\lambda$ is time-like and future-pointing. In that case we define the proper time $s$ elapsed along $\gamma$ by \begin{eqnarray} s = \int_a^b \left[ g_{{\rm ab}\cdots{\rm c}} v^{{\rm a}} v^{\rm b}\cdots v^{\rm c} \right]^{1/r} {\rm d} \lambda . \label{eq:21} \end{eqnarray} In the case of a very small time interval, we can write this in the pseudo-Finslerian form $({\rm d} s)^r = g_{{\rm ab}\cdots{\rm c}}{\rm d} x^{\rm a} {\rm d} x^{\rm b} \cdots {\rm d} x^{\rm c}$. For $r=2$ this reduces to the standard pseudo-Riemannian expression for the line element. Now consider the condition $\gamma$ must satisfy in order to be a geodesic in ${\mathcal H}^{r^2}$. In the case of a time-like curve, we can choose the proper time as the parameter along the curve. The equation of motion for a time-like geodesic is obtained by an application of the calculus of variations to formula (\ref{eq:21}). We assume the variation vanishes at the endpoints. Writing $L$ for the integrand in (\ref{eq:21}), we can use a standard argument to show that $x^{\rm a} (s)$ describes a geodesic only if the velocity vector $v^{\rm a}$ satisfies the Euler-Lagrange equation ${\rm d}(\partial L/\partial v^{\rm a})/{\rm d} s= 0$. 
It follows that if $y^{\rm a}$ and $z^{\rm a}$ are quantum space-time points such that $y^{\rm a}-z^{\rm a}$ is time-like and future-pointing, then the affinely parametrised geodesic $\gamma$ connecting these points in ${\mathcal H}^{r^2}$ is given by (cf. Busemann~\cite{busemann}) \begin{eqnarray} x^{\rm a}(s) = z^{\rm a} + \frac{y^{\rm a}-z^{\rm a}}{[\Delta (y,z)]^{1/r}}\, s, \end{eqnarray} for $s\in(-\infty,\infty)$, where $\Delta(y,z)=g_{\rm ab\cdots c}(y^{\rm a}-z^{\rm a})(y^{\rm b}-z^{\rm b})\cdots (y^{\rm c}-z^{\rm c})$. \section{Conserved quantities} The chronometric form for the separation between two points is invariant when the points of ${\mathcal H}^{r^2}$ are subjected to transformations of the form \begin{eqnarray} x^{\,AA'} \to \lambda^{A}_{B} {\bar\lambda}^{A'}_{B'} x^{BB'} + \beta^{AA'} . \label{eq:33} \end{eqnarray} Here $\beta^{AA'}$ is a translation in the quantum space-time, and $\lambda^{A}_{B}$ is an element of $SL(r,{\mathbb C})$. The relation of this group of transformations to the Poincar\'e group in the case $r=2$ is clear. Indeed, one of the attractive features of the extension of space-time geometry that we are presenting is that the hyper-Poincar\'e group admits such a description, which has a number of important physical consequences. More generally, the proper hyper-Poincar\'e group preserves the signature of any space-time interval, whether or not the interval is degenerate, and hence preserves the causal relations between events. If $L^{\rm a}_{\rm b}= \lambda^{A}_{B} {\bar\lambda}^{A'}_{B'}$ for some $\lambda^{A}_{B} \in SL(r,{\mathbb C})$, we refer to a map of the form $r^{\rm a}\to L^{\rm a}_{\rm b}r^{\rm b}$ as a \emph{hyper-Lorentz transformation}. The real dimension of the hyper-Lorentz group is $2r^2-2$, and hence the real dimension of the hyper-Poincar\'e group is $3r^2-2$. The dimension of the hyper-Poincar\'e group thus grows linearly with the dimension of the quantum space-time. 
This can be contrasted with the dimension of the group arising if we endow ${\mathbb R}^{r^2}$ with a Lorentzian metric with signature $(1,r^2-1)$. In that case the associated pseudo-orthogonal group has dimension $\frac{1}{2} r^2(r^2-1)$, which together with the translation group gives a total dimension of $\frac{1}{2}r^2(r^2+1)$. The comparatively low dimensionality of the hyper-Poincar\'e group arises from the fact that it preserves a more elaborate system of causal relations than what one has in the Lorentzian case. In Minkowski space the symmetries of the Poincar\'e group are associated with a ten-parameter family of Killing vectors. Thus for $r=2$ we have the Minkowski metric $g_{\rm ab}$, and the Poincar\'e group is generated by vector fields $\xi^{\rm a}$ on ${\mathcal H}^{4}$ that satisfy ${\mathcal L}_\xi g_{\rm ab}=0$, where ${\mathcal L}_\xi$ denotes the Lie derivative. For any vector field $\xi^{\rm a}$ and any symmetric tensor field $g_{\rm ab}$ we have ${\mathcal L}_\xi g_{\rm ab} = \xi^{\rm c} \nabla_{\!\rm c}g_{\rm ab} + 2g_{{\rm c}({\rm a}} \nabla_{\!{\rm b})} \xi^{\rm c}$. If $g_{\rm ab}$ is the metric and $\nabla_{\!\rm a}$ denotes the torsion-free covariant derivative satisfying $\nabla_{\!\rm a}g_{\rm bc}=0$, we obtain the Killing equation $\nabla_{\!({\rm a}}\xi_{{\rm b})}=0$, where $\xi_{\rm a}=g_{\rm ab}\xi^{\rm b}$. The condition ${\mathcal L}_\xi g_{\rm ab}=0$ therefore implies that $\xi^{\rm a}$ is a Killing vector. For $r>2$ the usual relations between symmetries and Killing vectors are lost. Instead, we obtain a system of higher-rank Killing tensors. More specifically, to generate a symmetry of the quantum space-time the vector field $\xi^{\rm a}$ has to satisfy ${\mathcal L}_\xi g_{\rm ab\cdots c} = 0$, where $g_{\rm ab\cdots c}$ is the chronometric tensor. 
For a vector field $\xi^{\rm a}$ and a symmetric tensor field $g_{\rm ab\cdots c}$ we have \begin{eqnarray} {\mathcal L}_\xi g_{\rm ab\cdots c} = \xi^{\rm d} \nabla_{\!\rm d} g_{\rm ab\cdots c} + r\,g_{{\rm d}({\rm a\cdots b}} \nabla_{\!{\rm c})} \xi^{\rm d} . \end{eqnarray} In the case of the quantum space-time ${\mathcal H}^{r^2}$ we let $\nabla_{\!{\rm a}}$ be the natural flat connection for which $\nabla_{\!{\rm a}} g_{\rm bc\cdots d}=0$. Then to generate a symmetry of the chronometric structure of ${\mathcal H}^{r^2}$ the vector field $\xi^{\rm a}$ has to satisfy $g_{{\rm d}({\rm a\cdots b}} \nabla_{\!{\rm c})} \xi^{\rm d} = 0$. This equation can be written in a suggestive form if we define a symmetric tensor $K_{\rm ab\cdots c}$ of rank $r-1$ by setting $K_{\rm ab\cdots c} = g_{\rm ab\cdots cd}\xi^{\rm d}$. Then it follows that $K_{\rm ab\cdots c}$ satisfies the conditions for a symmetric Killing tensor: $\nabla_{\!({\rm a}}K_{\rm bc\cdots d)} = 0$. We thus see that ${\mathcal H}^{r^2}$ provides a symmetry group generated by a family of Killing tensors. The symmetries of the quantum space-time are generated by a system of $3r^2-2$ irreducible symmetric Killing tensors of rank $r-1$. The significance of Killing tensors is that they are associated with conserved quantities. In particular, if the vector field $v^{\rm a}$ satisfies the geodesic equation, which on a quantum space-time of dimension $r^2$ is given by $g_{\rm abc\cdots d}\left( v^{\rm e}\nabla_{\!\rm e}v^{\rm b}\right) v^{\rm c}\cdots v^{\rm d} = 0$, and if $K_{\rm ab\cdots c}$ is the Killing tensor of rank $r-1$, then we have the following conservation law: $v^{\rm e}\nabla_{\!\rm e}\left( K_{\rm ab\cdots c} v^{\rm a} v^{\rm b}\cdots v^{\rm c}\right) = 0$. In other words, $K_{\rm ab\cdots c} v^{\rm a}v^{\rm b}\cdots v^{\rm c}$ is a constant of the motion. 
\section{Hyper-relativistic mechanics} In higher-dimensional quantum space-times the conservation laws and symmetry principles of relativistic physics remain intact. In particular, the conservation of hyper-relativistic momentum and angular momentum for a system of interacting particles can be formulated by use of principles similar to those of the Minkowskian case. For this purpose we introduce the idea of an `elementary system' in hyper-relativistic mechanics. Such a system is defined by its momentum and angular momentum. The hyper-relativistic momentum of an elementary system is given by a momentum covector $P_{\rm a}$. The associated mass $m$ is given (cf. \cite{finkelstein1}) by: $m=( g^{\rm ab\cdots c}P_{\rm a}P_{\rm b}\cdots P_{\rm c})^{1/r}$. The hyper-relativistic angular momentum is given by a tensor $L^{\rm b}_{\rm a}$ of the form $L^{\rm b}_{\rm a}=l^{B}_{A} \delta^{B'}_{A'}+{\bar l}^{B'}_{A'} \delta^{B}_{A}$, where the hyperspinor $l^{B}_{A}$ is trace-free: $l^{A}_{A}=0$. The angular momentum is defined with respect to a choice of origin. Under a change of origin defined by a shift vector $\beta^{\rm a}$ we have $l^{B}_{A}\to l^B_A + P_{AC'}\beta^{BC'}$. In the case $r=2$ these formulae reduce to the usual expressions for momentum and angular momentum in a Minkowskian setting. The real covector $S_{AA'} = {\rm i} m^{-1} ( l^B_A P_{A'B} - {\bar l}^{B'}_{A'} P_{AB'})$ is invariant under a change of origin, and can be interpreted as the intrinsic spin of the elementary system. The magnitude of the spin is $S=|g^{\rm ab\cdots c} S_{\rm a}S_{\rm b}\cdots S_{\rm c}|^{1/r}$. In the case of a set of interacting hyper-relativistic systems we require that the total momentum and angular momentum should be conserved. This implies conservation of the total mass and spin. We thus see that the idea of relativistic mechanics carries through to the case of a general quantum space-time. 
We shall see later, once we introduce the idea of symmetry breaking, that hyper-momentum can be interpreted as the momentum operator for a relativistic quantum system. Conservation of hyper-momentum then can be thought of as conservation of four-momentum, in relativistic quantum mechanics, in the Heisenberg representation. \section{Weak and strong Hermiticity} As a prelude to our discussion of symmetry breaking in a quantum space-time, we digress briefly to review the notions of weak and strong Hermiticity. This material is relevant to the origin of unitarity in quantum mechanics. Intuitively speaking, when the weak Hermiticity condition is imposed on a hyperspinor $x^{\,AA'}$, then $x^{\,AA'}$ belongs to the real subspace ${\mathbb R}^{AA'}$. The hyper-relativistic symmetry of a quantum space-time is not affected by the imposition of this condition. If, however, we break the symmetry by selecting a preferred time-like direction, then we can speak of a stronger reality condition whereby \emph{an isomorphism is established between the primed and unprimed hyperspin spaces}. We begin with the weak Hermitian property. Let ${\mathbb S}^{A}$ denote, as before, an $r$-dimensional complex vector space. We also introduce the spaces ${\mathbb S}_{A}$, ${\mathbb S}^{A'}$, and ${\mathbb S}_{A'}$. In general, there is no natural isomorphism between ${\mathbb S}^{A'}$ and ${\mathbb S}_{A}$, and there is no natural matrix multiplication law or trace operation defined for elements of ${\mathbb S}^{A}\otimes{\mathbb S}^{A'}$. Certain matrix operations are well defined. For example, the determinant of a generic element $\mu^{AA'}$ is given by $r!\,\det(\mu) = \varepsilon_{AB\cdots C}\, \varepsilon_{A'B'\cdots C'}\, \mu^{AA'}\mu^{BB'}\cdots \mu^{CC'}$. The weak Hermitian property is also well-defined: if ${\bar\mu}^{A'A}$ is the complex conjugate of $\mu^{AA'}$, then we say that $\mu^{AA'}$ is weakly Hermitian if $\mu^{AA'}={\bar\mu}^{A'A}$. 
Next we consider the strong Hermitian property. In some situations there may exist a natural map ${\mathbb S}^{A'}\to{\mathbb S}_{A}$ defined by the context of the problem. Such a map is called a Hermitian correlation. In this case, the complex conjugate of an element $\alpha^{A}\in{\mathbb S}^{A}$ determines an element ${\bar\alpha}_{A}\in{\mathbb S}_{A}$. For any element $\mu^{A}_{B}\in {\mathbb S}^{A}\otimes{\mathbb S}_{B}$ we define the operations of determinant, matrix multiplication, and trace in the usual manner. The determinant is $r!\, \det(\mu) = \varepsilon_{AB\cdots C}\, \varepsilon^{PQ\cdots R}\, \mu^{A}_{P}\mu^{B}_{Q} \cdots \mu^{C}_{R}$, and the Hermitian conjugate of $\mu^{A}_{\ B}$ is ${\bar\mu}_{A}^{\ B}$. The Hermitian correlation is given by the choice of a preferred element $t_{AA'}\in {\mathbb S}_{A} \otimes{\mathbb S}_{A'}$. Then we write ${\bar\alpha}_{A} = t_{AA'}{\bar\alpha}^{A'}$, where ${\bar\alpha}_{A}$ is now called the complex conjugate of $\alpha^{A}$. When there is a Hermitian correlation ${\mathbb S}^{A'} \leftrightarrow{\mathbb S}_{A}$, we call the condition $\mu^{A}_{\ B}= {\bar\mu}^{A}_{\ B}$ the strong Hermitian property. \section{Symmetry breaking and quantum entanglement} We proceed to introduce a mechanism for symmetry breaking in a quantum space-time. We shall make the point that the breaking of symmetry in a quantum space-time is intimately linked to the notion of quantum entanglement. According to this point of view, the introduction of symmetry-breaking in the early stages of the universe can be understood as a \emph{sequence of phase transitions}, the ultimate consequence of which is an approximate disentanglement of a four-dimensional `classical' space-time. The breaking of symmetry is represented by an `index decomposition'. In particular, if the dimension $r$ of the hyperspin space is not a prime number, then a natural method of breaking the symmetry arises by consideration of the decomposition of $r$ into factors. 
The specific assumption we make is that the dimension of the hyperspin space ${\mathbb S}^{A}$ is \emph{even}. Then we write $r=2n$, where $n=1,2,\ldots$, and set ${\mathbb S}^{A}={\mathbb S}^{{\bf A}i}$, where ${\bf A}$ is a two-component spinor index, and $i$ will be called an `internal' index $(i=1,2, \ldots,n)$. Thus we can write ${\mathbb S}^{{\bf A}i}={\mathbb S}^{{\bf A}} \otimes{\mathbb H}^{i}$, where ${\mathbb S}^{{\bf A}}$ is a standard spin space of dimension two, and ${\mathbb H}^{i}$ is a complex vector space of dimension $n$. The breaking of symmetry amounts to the fact that we can identify the hyperspin space with the tensor product of these two spaces. We shall assume that ${\mathbb H}^{i}$ is endowed with a strong Hermitian structure, i.e. that there is a canonical anti-linear isomorphism between the complex conjugate of the internal space ${\mathbb H}^{i}$ and the dual space ${\mathbb H}_{i}$. If $\psi^i\in{\mathbb H}^{i}$, then we write ${\bar\psi}_i$ for the complex conjugate of $\psi^i$, where ${\bar\psi}_i\in{\mathbb H}_{i}$. We see that ${\mathbb H}^{i}$ is a complex Hilbert space---and indeed, although here we consider mainly the case for which $n$ is finite, one should have in mind also the infinite dimensional situation. For the other hyperspin spaces we write ${\mathbb S}_{A}={\mathbb S}_{{\bf A}i}$, ${\mathbb S}^{A'}={\mathbb S}^{{\bf A}'}_{\ \ i}$, and ${\mathbb S}_{A'}={\mathbb S}_{{\bf A}'}^{\ \ i}$, respectively. These identifications preserve the duality between ${\mathbb S}^{A}$ and ${\mathbb S}_{A}$, and between ${\mathbb S}^{A'}$ and ${\mathbb S}_{A'}$; and at the same time are consistent with the complex conjugation relations between ${\mathbb S}^{A}$ and ${\mathbb S}^{A'}$, and between ${\mathbb S}_{A}$ and ${\mathbb S}_{A'}$. 
Hence if $\alpha^{{\bf A}i} \in{\mathbb S}^{{\bf A}i}$ then under complex conjugation we have $\alpha^{{\bf A}i}\to {\bar\alpha}^{{\bf A}'}_{\ \ i}$, and if $\beta_{{\bf A}i}\in{\mathbb S}_{{\bf A}i}$ then $\beta_{{\bf A}i}\to{\bar\beta}_{{\bf A}'}^{\ \ i}$. In the case of a quantum space-time vector $r^{\,AA'}$ we have a corresponding structure induced by the identification $r^{\,AA'}=r^{{\bf AA}'i}_{\ \ \ \ \ j}$. When the quantum space-time vector is real, the weak Hermitian structure on $r^{\,AA'}$ is manifested in the form of a weak Hermitian structure on the two-component spinor index pair, together with a strong Hermitian structure on the internal index pair. In other words, the Hermitian condition on the space-time vector $r^{\,AA'}$ is given by ${\bar r}^{{\bf A}'{\bf A}i}_{\ \ \ \ \ j} = r^{{\bf AA}'i}_{\ \ \ \ \ j}$. One consequence of these relations is that we can interpret each point in a quantum space-time as being a \emph{space-time-point-valued operator}. The ordinary classical space-time then `sits' inside the quantum space-time in a canonical manner---namely, as the locus of those points of quantum space-time that factorise into the product of a space-time point $x^{{\bf AA}'}$ and the identity operator on the internal space: $x^{{\bf AA}'i}_{\ \ \ \ \ j} = x^{{\bf AA}'} \delta^{i}_{\,j}$. Thus, in situations where special relativity is a satisfactory theory, we regard the relevant physical events as taking place on or in the immediate neighbourhood of the embedding of Minkowski space in ${\mathcal H}^{4n^2}$. This picture can be presented in more geometric terms as follows. We introduce the notion of a \emph{hypertwistor} as a pair of hyperspinors $(\omega^A, \pi_{A'})$ with the pseudo-norm $\omega^A{\bar\pi}_{A} + \pi_{A'}{\bar\omega}^{A'}$. The projective hypertwistor space ${\mathbb P}^{2r-1}$ in the case $r=2n$ admits a Segr\'e embedding of the form ${\mathbb P}^3 \times {\mathbb P}^{n-1}\subset{\mathbb P}^{4n-1}$. 
Many such embeddings are possible, though they are all equivalent under the action of the symmetry group $U(2n,2n)$. If the symmetry is broken and one such embedding is selected out, then we can introduce homogeneous coordinates and write $Z^{\alpha i}$ for the generic hypertwistor. Here the Greek letter ${\alpha}$ denotes an `ordinary' twistor index $({\alpha}=0,1,2,3)$ and $i$ denotes an internal index $(i=1,2,\ldots,n)$. These two indices, when clumped together, constitute a hypertwistor index. The Segr\'e embedding consists of those points in ${\mathbb P}^{4n-1}$ for which we have a product decomposition given by $Z^{\alpha i}= Z^{\alpha}\psi^i$. Once symmetry breaking takes place---and this may happen in stages, corresponding to a successive factorisation of the underlying hypertwistor space---then one can think of ordinary four-dimensional space-time as becoming more or less disentangled from the rest of the universe, and behaving to some extent autonomously. Nonetheless, we might expect its global dynamics, on a cosmological scale, to be affected by the distribution of mass and energy elsewhere in the quantum space-time; and thus we obtain a possible model for `dark energy'. The idea of symmetry breaking being put forward here is related to the notion of \emph{disentanglement} in quantum mechanics \cite{brody1,gibbons}. That is to say, at the unified level the degrees of freedom associated with space-time symmetry are quantum mechanically entangled with the internal degrees of freedom associated with microscopic physics. The phenomena responsible for the breakdown of symmetry are thus analogous to the mechanisms of decoherence through which quantum entanglements are gradually diminished. There is also in this connection an interesting relation to the so-called \emph{twistor internal symmetry groups} (see, e.g., \cite{hughston}). Let us now examine the implications of our symmetry breaking mechanism for fields defined on a quantum space-time. 
For example, let $\phi(x^{\,AA'})$ be a scalar field on a quantum space-time. After we break the symmetry by writing $x^{\,AA'}= x^{{\bf AA}'i}_{\ \ \ \ \ j}$, we consider an expansion of the field around the embedded Minkowski space. More specifically, for such an expansion we have \begin{eqnarray} \phi(x^{\,AA'}) = \phi^{(0)}(x^{{\bf AA}'}) + \phi^{(1)\ \ i}_{{\bf AA}'\,j}(x^{\,{\bf AA}'})\left( x^{{\bf AA}'j}_{\ \ \ \ \ i} - x^{{\bf AA}'}\delta^j_{\ i} \right) + \cdots, \end{eqnarray} where $\phi^{(0)}(x^{{\bf AA}'}) =\phi(x)|_{x=x^{{\bf AA}'} \delta^{j}_{\,i}}$, and $\phi^{(1)\ \ i}_{{\bf AA}'\,j} (x^{\,{\bf AA}'}) = (\partial\phi(x) / \partial x) |_{x=x^{{\bf AA}'} \delta^{j}_{\,i}}$. Therefore the order-zero term defines a classical field on Minkowski space, and the first-order term can be interpreted as a `multiplet' of fields, transforming according to the adjoint representation of $U(n)$. Alternatively, if the internal space is infinite-dimensional, we can think of the first-order term as a field operator. \begin{figure} \caption{\label{fig:2}} \end{figure} \section{Emergence of quantum states} The embedding of Minkowski space in the quantum space-time ${\mathcal H}^{4n^2}$ implies a corresponding embedding of the Poincar\'e group in the hyper-Poincar\'e group. This can be seen as follows. The standard Poincar\'e group in ${\mathcal H}^{4}$ consists of transformations of the form $x^{{\bf AA}'} \longrightarrow\, \lambda^{\bf A}_{\bf B}\, \bar{\lambda}^{{\bf A}'}_{{\bf B}'}\, x^{{\bf BB}'} + \beta^{{\bf AA}'}$. This action lifts naturally to a corresponding action on ${\mathcal H}^{4n^2}$ given by $x^{{\bf AA}'i}_{\ \ \ \ \ j}\longrightarrow\, \lambda^{\bf A}_{\bf B}\, \bar{\lambda}^{{\bf A}'}_{{\bf B}'}\, x^{{\bf BB}'i}_{\ \ \ \ \ j} + \beta^{{\bf AA}'}\delta^i_{\,j}$. 
On the other hand, the general hyper-Poincar\'e transformation in the broken symmetry phase can be expressed in the form \begin{eqnarray} x^{{\bf AA}'i}_{\ \ \ \ \ j}\longrightarrow\, L^{{\bf A}i}_{{\bf B}k}\, \bar{L}^{{\bf A}'l}_{{\bf B}'j}\, x^{{\bf BB}'k}_{\ \ \ \ \ l} + \beta^{{\bf AA}'i}_{\ \ \ \ \ j}. \label{eq:12.3} \end{eqnarray} Thus the embedding of the Poincar\'e group as a subgroup of the hyper-Poincar\'e group is given by $L^{{\bf A}i}_{{\bf B}j} = \lambda^{\bf A}_{\bf B}\delta^i_{\,j}$ and $\beta^{{\bf AA}'i}_{\ \ \ \ \ j} = \beta^{{\bf AA}'} \delta^i_{\,j}$. Bearing this in mind, we construct a class of maps from the quantum space-time ${\mathcal H}^{4n^2}$ to Minkowski space ${\mathcal H}^{4}$. Under rather general physical assumptions, such maps are necessarily of the form \begin{eqnarray} x^{{\bf AA}'i}_{\ \ \ \ \ j}\longrightarrow\, x^{{\bf AA}'} = \rho^j_i\,x^{{\bf AA}'i}_{\ \ \ \ \ j}, \label{eq:12.5} \end{eqnarray} where $\rho^j_i$ is a \emph{density matrix}. By a density matrix we mean, as usual, a positive semi-definite strongly Hermitian matrix with unit trace. The maps thus arising here can be regarded as quantum expectations. In particular, let $\rho:\ {\mathcal H}^{4n^2}\to {\mathcal H}^4$ satisfy the following conditions: {\rm (i)} $\rho$ is linear and maps the origin of ${\mathcal H}^{4n^2}$ to the origin of ${\mathcal H}^{4}$; {\rm (ii)} $\rho$ is Poincar\'e invariant; and {\rm (iii)} $\rho$ preserves causal relations. Then $\rho$ is given by a density matrix on the internal space (we refer the reader to \cite{brody2} for a proof). This result shows how the causal structure of quantum space-time is linked with the probabilistic structure of quantum mechanics. The concept of a quantum state emerges when we ask for consistent ways of `averaging' over the geometry of quantum space-time in order to obtain a reduced description of physical phenomena in terms of the geometry of Minkowski space. 
We see that a probabilistic interpretation of the map from a general quantum space-time to Minkowski space arises as a consequence of elementary causality requirements. We can thus view the space-time events in ${\mathcal H}^{4n^2}$ as representing space-time-point-valued quantum observables, the totality of which constitute a `fuzzy space-time' (see Figure~\ref{fig:2}), and the expectations of these fuzzy space-time points correspond to points of ${\mathcal H}^{4}$. Finally, we remark that the space of density matrices itself is endowed with a natural Finslerian metric induced from the ambient pseudo-Finslerian structure, as illustrated in Figure~\ref{fig:3} (cf. \cite{wojtkowski}). \begin{figure} \caption{}\label{fig:3} \end{figure} \end{document}
\begin{document} \title[]{The Stein Str\"omberg Covering Theorem in metric spaces} \author{J. M. Aldaz} \address{Instituto de Ciencias Matem\'aticas (CSIC-UAM-UC3M-UCM) and Departamento de Matem\'aticas, Universidad Aut\'onoma de Madrid, Cantoblanco 28049, Madrid, Spain.} \email{[email protected]} \email{[email protected]} \thanks{2010 {\em Mathematics Subject Classification.} 42B25} \thanks{The author was partially supported by Grant MTM2015-65792-P of the MINECO of Spain, and also by ICMAT Severo Ochoa project SEV-2015-0554 (MINECO)} \begin{abstract} In \cite{NaTa} Naor and Tao extended to the metric setting the $O(d \log d)$ bounds given by Stein and Str\"omberg for Lebesgue measure in $\mathbb{R}^d$, deriving these bounds first from a localization result, and second, from a random Vitali lemma. Here we show that the Stein-Str\"omberg original argument can also be adapted to the metric setting, giving a third proof. We also weaken the hypotheses, and additionally, we sharpen the estimates for Lebesgue measure. \end{abstract} \maketitle \markboth{J. M. Aldaz}{Stein Str\"omberg covering theorem} \section {Introduction} In \cite{StSt}, Stein and Str\"omberg proved that for Lebesgue measure in $\mathbb{R}^d$, and with balls defined by an arbitrary norm, the centered maximal function has weak type (1,1) bounds of order $O(d \log d)$, which is much better than the exponential bounds obtained via the Vitali covering lemma. Naor and Tao extended the Stein-Str\"omberg result to the metric setting in \cite{NaTa}. There, a localization result is proven (using the notion of microdoubling, which basically entails a very regular growth of balls) from which the Stein-Str\"omberg bounds are obtained (using the notion of strong microdoubling, which combines microdoubling with local comparability). Also, a second argument is given, via a random Vitali Theorem that has its origin in \cite{Li}. 
Here we note that the Stein-Str\"omberg original proof, which is shorter and conceptually simpler, can also be used in the metric setting, yielding a slightly more general result. We will divide the Stein-Str\"omberg argument into two parts, one with radii separated by large gaps, and the second, with radii inside an interval, bounded away from 0 and $\infty$. This will allow us to obtain more precise information about which hypotheses are needed in each case. We shall see that under the same hypotheses used by Naor and Tao, the Stein-Str\"omberg covering theorem for sparse radii (cf. Theorem \ref{StSt1} below) suffices to obtain the $d\log d$ bounds in the metric setting. But Theorem \ref{StSt1} itself is presented in a more general version. In particular, one does not need to assume that the metric space is geometrically doubling. We also show that the Stein-Str\"omberg method, applied to balls with no restriction in the radii, yields the $O(d \log d)$ bounds in the metric context, for doubling measures where the growth of balls can be more irregular than is allowed by the microdoubling condition. Finally, we lower the known weak type (1,1) bounds in the case of Lebesgue measure: For $d$ lacunary sets of radii, from $(e^2 + 1) (e + 1)$ to $(e^{1/d} + 1) (1 + 2 e^{1/d})$ (to 6 in the specific case of $\ell_\infty$ balls), and for unrestricted radii, from $e^2 (e^2 + 1) (1 + o(1)) d \log d$ to $(2 + 3 \varepsilon) d \log d$, where $\varepsilon > 0$ and $d = d(\varepsilon)$ is sufficiently large. \section {Notation and background material} Some of the definitions here come from \cite{A2}; we refer the interested reader to that paper for motivation and additional explanations. We will use $B(x,r) := \{y\in X: d(x,y) < r\}$ to denote open balls, $\overline{B(x,r)}$ to denote their topological closure, and $B^{cl}(x,r) := \{y\in X: d(x,y) \le r\}$ to refer to closed balls. 
Recall that in a general metric space, a ball $B$, considered as a set, can have many centers and many radii. When we write $B(x,r)$ we mean to single out $x$ and $r$, speaking respectively of the center and the radius of $B(x,r)$. \begin{definition} A Borel measure is {\em $\tau$-smooth} if for every collection $\{U_\alpha : \alpha \in \Lambda\}$ of open sets, $\mu (\cup_\alpha U_\alpha) = \sup \mu(\cup_{i=1}^nU_{\alpha_i})$, where the supremum is taken over all finite subcollections of $\{U_\alpha : \alpha \in \Lambda\}$. We say that $(X, d, \mu)$ is a {\em metric measure space} if $\mu$ is a Borel measure on the metric space $(X, d)$, such that for all balls $B(x,r)$, $\mu (B(x,r)) < \infty$, and furthermore, $\mu$ is $\tau$-smooth. \end{definition} The assumption of $\tau$-smoothness does not represent any real restriction, since it is consistent with standard set theory (Zermelo-Fraenkel with Choice) that in every metric space, every Borel measure which assigns finite measure to balls is $\tau$-smooth (cf. \cite[Theorem (a), pg. 59]{Fre}). \begin{definition}\label{maxfun} Let $(X, d, \mu)$ be a metric measure space and let $g$ be a locally integrable function on $X$. For each $x\in X$, the centered Hardy-Littlewood maximal operator $M_{\mu}$ is given by \begin{equation}\label{HLMFc} M_{\mu} g(x) := \sup _{\{r : 0 < \mu (B(x, r))\}} \frac{1}{\mu (B(x, r))} \int _{B(x, r)} |g| d\mu. \end{equation} \end{definition} Maximal operators can be defined using closed balls instead of open balls, and this does not change their values, as can be seen by an approximation argument. When the measure is understood, we will omit the subscript $\mu$ from $M_\mu$. A sublinear operator $T$ satisfies a weak type $(1,1)$ inequality if there exists a constant $c > 0$ such that \begin{equation}\label{weaktype} \mu (\{T g > s \}) \le \frac{c \|g\|_{L^1(\mu)}}{s }, \end{equation} where $c=c(T, \mu)$ depends neither on $g\in L^1 (\mu)$ nor on $s > 0$. 
The lowest constant $c$ that satisfies the preceding inequality is denoted by $\|T\|_{L^1\to L^{1, \infty}}$. \begin{definition} A Borel measure $\mu$ on $(X,d)$ is {\em doubling} if there exists a $C> 0 $ such that for all $r>0 $ and all $x\in X$, $\mu (B(x, 2 r)) \le C\mu(B(x,r)) < \infty$. \end{definition} \begin{definition} \label{geomdoub} A metric space is {\it $D$-geometrically doubling} if there exists a positive integer $D$ such that every ball of radius $r$ can be covered with no more than $D$ balls of radius $r/2$. \end{definition} If a metric space supports a doubling measure, then it is geometrically doubling. Regarding weak type inequalities for the maximal operator, in order to estimate $\mu \{M f > s\}$, one considers balls $B(x,r)$ over which $|f|$ has average larger than $s$. Now, while in the uncentered case any such ball is contained in the corresponding level set, this is not so for the centered maximal function. Thus, using the balls $B(x,r)$ to cover $\{M f > s\}$ can be very inefficient. A key ingredient in the Stein-Str\"omberg proof is to cover $\{M f > s\}$ by the much smaller balls $B(x,t r)$, $0 < t <<1$, something that leads to the ``microdoubling'' notion of Naor and Tao. We slightly modify their notation, using $1/n$-microdoubling to denote what these authors call $n$-microdoubling. \begin{definition} (\cite[p. 735]{NaTa}) Let $0 < t < 1$ and let $K\ge 1$. Then $\mu$ is said to be $t$-microdoubling with constant $K$ if for all $x \in X$ and all $r >0$, we have $$ \mu B\left(x,\left(1 + t \right) r \right) \le K \mu B(x,r). $$ \end{definition} The next property is mentioned in \cite{NaTa}, and more extensively studied in \cite{A2}. 
\begin{definition} \label{loccomp} A measure $\mu$ satisfies a {\it local comparability condition} if there exists a constant $C\in[1, \infty)$ such that for all pairs of points $x,y\in X$, and all $r >0$, whenever $d(x,y) < r$, we have $$\mu(B(x,r))\le C \mu(B(y,r)).$$ We denote the smallest such $C$ by $C(\mu)$ or $C_\mu$. \end{definition} \begin{remark} \label{doublingandmicro} If $\mu$ is doubling with constant $K_1$ then it is microdoubling and satisfies a local comparability condition with the same constant $K_1$, while if it is $t$-microdoubling with constant $K_2$ and $2 \le (1 + t)^M$, then $\mu$ is doubling and satisfies a local comparability condition with constant $K_2^M$. Thus, the difference between doubling and microdoubling lies in the size of the constants; it is quantitative, not qualitative: The microdoubling condition adds something new only when $K_2 < K_1$, in which case it entails a greater regularity in the growth of the measure of balls, as the radii increase. Likewise, bounds of the form $\mu B(x, T r) \le K \mu B(x, r)$ for $T > 2$, allow a greater irregularity in the growth of balls than standard doubling ($T = 2$) or than microdoubling. We mention that while local comparability is implied by doubling, it is a uniformity condition, not a growth condition. Thus, it is compatible with the failure of doubling, and even for doubling measures, it is compatible with any rate of growth for the volume of balls. Consider, for instance, the case of $d$-dimensional Lebesgue measure $\lambda^d$: A doubling constant is $2^d$, a $1/d$-microdoubling constant is $e$, and the smallest local comparability constant is $C(\lambda^d) = 1$. \end{remark} The next definition combines the requirement that the microdoubling and the local comparability constants be ``small'' simultaneously. \begin{definition} (\cite[p. 737]{NaTa}) Let $0 < t < 1$ and let $K\ge 1$. 
Then $\mu$ is said to be strong $t$-microdoubling with constant $K$ if for all $x \in X$, all $r > 0$, and all $y\in B(x,r)$, $$ \mu B\left(y,\left(1 + t \right)r\right) \le K \mu B(x,r). $$ \end{definition} Thus, if $\mu$ is strong $t$-microdoubling with constant $K$, then $C(\mu) \le K$. Also, local comparability is the same as strong $0$-microdoubling. To get a better understanding of how bounds depend on the different constants, it is useful to keep separate $C(\mu)$ and $ K$. \begin{definition} Given a set $S$ we define its $s$-{\em blossom} as the enlarged set \begin{equation} \label{altblossom} Bl(S, s):= \cup_{x\in S}B(x,s), \end{equation} and its {\em uncentered $s$-blossom} as the set \begin{equation} \label{altublossom} Blu(S, s):= \cup_{x\in S}\cup\{B(y, s): x\in B(y, s)\}. \end{equation} When $S= B(x,r)$, we simplify the notation and write $Bl(x,r, s)$, instead of $Bl(B(x,r), s)$, and likewise for uncentered blossoms. We say that $\mu$ {\em blossoms boundedly} if there exists a $K\ge 1$ such that for all $r>0 $ and all $x\in X$, $\mu (Blu(x, r, r)) \le K \mu(B(x,r)) < \infty$. \end{definition} Blossoms can be defined using closed instead of open balls, in an entirely analogous way. To help understand the relationship between blossoms and balls, we include the following definitions and results. \begin{definition} A metric space has the {\it approximate midpoint property} if for every $\varepsilon > 0$ and every pair of points $x,y$, there exists a point $z$ such that $d(x,z), d(z,y) < \varepsilon + d(x,y)/2$. \end{definition} \begin{definition} \label{quasi}A metric space $X$ is {\it quasiconvex} if there exists a constant $C\ge 1$ such that for every pair of points $x,y$, there exists a curve with $x$ and $y$ as endpoints, such that its length is bounded above by $C d(x,y)$. If for every $\varepsilon > 0$ we can take $C=1 + \varepsilon$, then we say that $X$ is a {\it length space}. 
\end{definition} It is well known that for a complete metric space, having the approximate midpoint property is equivalent to being a length space. \begin{example} \label{blossomsballs} The $s$-blossom of an $r$-ball may fail to contain a strictly larger ball, even in quasiconvex spaces. For instance, let $X \subset \mathbb{R}^2$ be the set $\{0\} \times [0,1] \cup [0,1]\times \{0\}$ with metric defined by restriction of the $\ell_\infty$ norm; then we can take $C = 2$. Now $B((1,0), 1) = (0,1]\times \{0\}$, while for every $r > 1$, $B((1,0), r) = X$, which is not contained in $Blu((1,0), 1, 1/6)$. Furthermore, neither $Blu((1,0), 1, 1/6)$ nor $Bl((1,0), 1, 1/6)$ are balls, i.e., given any $x\in X$ and any $r > 0$, we have that $B(x, r) \ne Blu((1,0), 1, 1/6)$ and $B(x, r) \ne Bl((1,0), 1, 1/6)$. On the other hand, if a metric space $X$ has the approximate midpoint property, then blossoms and balls coincide (as we show next) so in this case considering blossoms gives nothing new. \end{example} \begin{theorem} \label{equiv} Let $(X, d)$ be a metric space. The following are equivalent: a) $X$ has the approximate midpoint property. b) For all $x\in X$, and all $r, s >0$, $Bl(x, r, s) = B(x, r + s).$ c) For all $x\in X$, and all $r >0$, $Bl(x, r, r) = B(x, 2 r).$ \end{theorem} \begin{proof} Suppose first that $X$ has the approximate midpoint property. Since $Bl(x, r, s) \subset B(x, r + s),$ to prove b) it is enough to show that if $y\in B(x, r + s),$ then $y \in Bl(x, r, s)$, or equivalently, that there is a $z\in X$ such that $d(x, z) < r$ and $d(z, y) < s$. If either $d(x, y) < s$ or $d(x, y) < r$ we can take $z = x$ and there is nothing to prove, so assume otherwise. Let $(\hat X, \hat d)$ be the completion of $(X, d)$; then $\hat X$ is a length space, since it has the approximate midpoint property. Let $\Gamma :[0,1] \to \hat X$ be a curve with $\Gamma (0) = x$, $\Gamma (1) = y$, and length $\ell (\Gamma) < r + s$. 
Then $\Gamma([0, 1]) \subset B(x, r) \cup B(y, s)$, for if there is a $w \in [0,1]$ with $\Gamma (w) \notin B(x, r) \cup B(y, s)$, then $\ell (\Gamma) \ge r + s$. Now let $c \in [0,1]$ be the time of first exit of $\Gamma (t)$ from $B(x,r)$, that is, for all $t < c$, $\Gamma (t) \in B(x,r)$ and $\Gamma (c) \notin B(x,r)$ . Then $\Gamma (c) \in B(y,s)$, so by continuity of $\Gamma$, there is a $\delta \in [0, c) $ such that $\Gamma (\delta) \in B(y,s)$. Thus, the open set $ B(x, r) \cap B(y, s) \ne \emptyset$ in $\hat X$. But $X$ is dense in $\hat X$, so there exists a $z\in X$ such that $d(x, z) < r$ and $d(z, y) < s$, as we wanted to show. Part c) is a special case of part b). From part c) we obtain a) as follows. Let $x, y \in X$, and let $r >0$ be such that $d(x,y) < 2 r$. By hypothesis, $y\in Bl(x, r, r) = B(x, 2r)$, so there is a $z\in X$ such that $d(x, z) < r$ and $d(z, y) < r$. Thus, $X$ has the approximate midpoint property. \end{proof} \begin{example} \label{chordal} Let $X$ be the unit sphere (unit circumference) in the plane, with the chordal metric, that is, with the restriction to $X$ of the Euclidean metric in the plane. While this space does not have the approximate midpoint property, blossoms are nevertheless geodesic balls. However, the equality $Bl(x, r, s) = B(x, r + s)$ no longer holds. For instance, $Bl((1, 0), 1, 1) \ne Bl((1, 0), \sqrt2, \sqrt2 ) = B((1, 0), 2) = X \setminus \{(-1,0)\}$. \end{example} \section{Microblossoming and related conditions} \begin{definition} \label{bmicroblu} Let $0 < t < 1$ and let $K\ge 1$. Then $\mu$ is said to $t$-microblossom boundedly with constant $K$, if for all $x \in X$ and all $r >0$, we have \begin{equation} \mu (Blu \left(x, r, t r \right)) \le K \mu B(x,r). \end{equation} \end{definition} We shall say $\mu$ is a measure that $(t,K)$-microblossoms, instead of using the longer expression. 
\begin{example} \label{bmicrobluex} Microblossoming (even together with doubling) is more general than microdoubling, in a quantitative sense. Consider $(\mathbb{Z}^d, \ell_\infty, \mu)$, where $\mu$ is the counting measure. Then $\mu$ is doubling, and ``microdoubling in the large", since for large radii ($r > d$), $\mu$ can be regarded as a discrete approximation to Lebesgue measure. However, $\mu B(0,1) = 1$, and for every $t > 0$, $\mu B(0,1 + t ) \ge 3^d$, no matter how small $t$ is. Thus, the measure $\mu$ is not $(t,K)$-microdoubling, for any $K < 3^d$, $0 < t << 1$. However, $\mu$ is $1/d$-microblossoming, since for $r > d$, $\mu$ behaves as a microdoubling measure, and for $r \le d$, $Blu(x, r, r/d) = B(x,r)$. A less natural but stronger example is furnished by the measure $\mu$ given by \cite[Theorem 5.9]{A2}. Since $\mu$ satisfies a local comparability condition, and is defined in a geometrically doubling space, it blossoms boundedly, so it microblossoms boundedly (at least with the blossoming constant). But $\mu$ is not doubling, and hence it is not microdoubling. \end{example} \begin{example} \label{bmicroblunonblu} While $(t,K_1)$-microdoubling entails $(2, K_2)$-doubling for some $K_2 \ge K_1$, the analogous statement is not true for microblossoming. The following example shows that $(1/2,1)$-microblossoming does not entail local comparability. Let $X = \{0, 1, 3\}$ with the inherited metric from $\mathbb{R}$, and let $\mu = \delta_3$. Then $B(0,3) \cap B(3, 3) = \{1\}$, but $\mu B(0,3) = 0$ while $\mu B(3,3) = 1$, so local comparability fails. Since bounded blossoming implies local comparability, all we have to do is to check that $\mu$ is $(1/2,1)$-microblossoming. For $t\le 3$, $B(0,t) \subset Blu(0, t, t/2) \subset \{0,1\}$, so $\mu B(0,t) = \mu Blu(0, t, t/2)= 0$, and for $t > 3$, $B(0,t) = Blu(0, t, t/2) = X$. 
Likewise, for $t\le 2$, $B(1,t) = Blu(1, t, t/2) \subset \{0,1\}$, so $\mu B(1,t) = \mu Blu(1, t, t/2)= 0$, and for $t > 2$, $B(1,t) = Blu(1, t, t/2) = X$. \end{example} \begin{definition} Given a metric measure space $(X, d, \mu)$, and denoting the support of $\mu$ by $supp(\mu)$, the {\em relative increment function} of $\mu$, $ri_{\mu}: supp(\mu)\times (0,\infty)\times [1,\infty) \to [1,\infty)$, is defined as \begin{equation} \label{ri} ri_{\mu}(x, r, t) := \frac{\mu B(x, tr)}{\mu B(x, r)}, \end{equation} and the {\em maximal relative increment function}, as \begin{equation} \label{mri} mri_{\mu}(r, t) := \sup_{x \in supp(\mu)}\frac{\mu B(x, tr)}{\mu B(x, r)}. \end{equation} When $\mu$ is understood we will simply write $ri$ and $mri$. \end{definition} This notation allows one to unify different conditions that have been considered regarding the boundedness of maximal operators. For instance, on $supp(\mu)$ the doubling condition simply means that there is a constant $C\ge 1$ such that for all $r > 0$, $mri_{\mu}(r, 2) \le C$, and the $d^{-1}$-microdoubling condition, that for all $r > 0$, $mri_{\mu}(r, 1 + d^{-1}) \le C$. Note that by $\tau$-smoothness, the complement of the support of $\mu$ has $\mu$-measure zero, so the relative increment function is defined for almost every $x$. \begin{example} \label{macrodoub} The interest of considering values of $t >2$ in the preceding definition comes from the fact that, under the additional assumption of microblossoming, it will allow a much more irregular growth of balls than microdoubling or plain doubling, without a comparable worsening of the estimates for the weak type (1,1) bounds. To fix ideas, consider the right hand side $ C(\mu) \ K_1 K \left(2 + \frac{\log K_2}{\log K}\right)$ of formula (\ref{sum}) below. This bound is related to the centered maximal operator when the supremum is restricted to radii $R$ between $r$ and $T r$, $T > 1$. The constant $K_2$ depends on $T$, as it must satisfy $ mri_{\mu}(r, T) \le K_2$. 
For Lebesgue measure on $\mathbb{R}^d$ with the $\ell_\infty$-norm, $ C(\lambda^d) = 1$. If we set $T=2$, then we can take $K_2 = 2^d$, while $K_2 = d^d$ for $T=d$, a choice which yields bounds of order $O( d \log d)$. A $1/d$-microdoubling constant is $K_1 = e$ ($\mathbb{R}^d$ has the approximate midpoint property, and in fact it is a geodesic space, so microdoubling is the same as microblossoming in this case) and $K := \max\{K_1, e\} = e$. Returning to Example \ref{bmicrobluex}, by a rescaling argument it is clear that the situation for $(\mathbb{Z}^d, \ell_\infty, \mu)$ cannot be much worse than for $(\mathbb{R}^d, \ell_\infty, \lambda^d)$, and in fact it is easy to see that the same argument of Stein and Str\"omberg (which will be presented in greater generality below) yields the $O( d \log d)$ bounds. Now suppose we modify the measure so that at one single point it is much smaller. Clearly, this will have little impact in the weak type (1,1) bounds, since for $d >>1$, $x \in \mathbb{Z}^d,$ and $r>1$, the measure of $B(x,r)$ will be changed by little or not at all, while for $ r \le 1$, balls with distinct centers do not intersect. For definiteness, set $\nu = \mu$ on $\mathbb{Z}^d \setminus \{0\}$, and $\nu \{0\} = d ^{-d}$. Then the doubling constant, and the $(t,K)$-microdoubling constant, for any $t > 0$, is at least $d ^{d} (3 ^{d} -1) \le K = K_2$, much larger than the corresponding constants for $\mu$. However, the local comparability constant is still very close to 1, since intersecting balls of the same radius must contain at least $3^d$ points each, and a $1/d$-microblossoming constant can be taken to be very close to $e$. Setting $T = d$, we get $K_2 \le d^d (2d + 1)^d$, so $\log K_2$ in this case is comparable to the constant obtained when $T = 2$. \end{example} \begin{remark} One might define $(T, K)$-macroblossoming, with $T > 1$, by analogy with Definition \ref{bmicroblu}. 
However, since $B(x, T r) \subset Blu(x, r, T r)$, assuming directly that $mri_{\mu}(r, T) \le K$ is not stronger than $(T, K)$-macroblossoming. \end{remark} \section{The Stein-Str\"omberg covering theorem} Next, we present the Stein-Str\"omberg argument using the terminology of blossoms. Note that the next theorem does not require $X$ to be geometrically doubling. Given an ordered sequence of sets $A_1, A_2, \dots$, we denote by $D_1, D_2, \dots$ its sequence of disjointifications, that is $D_1 = A_1$, and $D_{n + 1} = A_{n + 1}\setminus \cup_1^n A_i$. We shall avoid reorderings and relabelings of collections of balls, as this may lead to confusion regarding the meaning of $D_j$. The unfortunate downside of this choice is an inflation of subindices. \begin{theorem}\label{StSt1} {\bf Stein-Str\"omberg covering theorem for sparse radii.} Let $(X, d, \mu)$ be a metric measure space, where $\mu$ satisfies a $C(\mu)$ local comparability condition, and let $R:= \{r_n: n\in \mathbb{Z}\}$ be a $T$-lacunary sequence of radii, i.e., $r_n >0$ and $r_{n+1}/r_n \ge T > 1$. Suppose there exists a $t > 0$ such that $T t \ge 1$ and $\mu$ $t$-microblossoms boundedly with constant $K$. Let $\{B(x_i, s_i): s_i \in R, 1 \le i \le M\}$ be a finite collection of balls with positive measure, ordered by non-increasing radii. Set $U:= \cup_{i = 1}^M B(x_i, t s_i)$. Then there exists a subcollection $\{B(x_{i_1}, s_{i_1}), \dots, B(x_{i_N}, s_{i_N})\}$, such that, denoting by $D_{i_j}$ the disjointifications of the reduced balls $B(x_{i_j}, t s_{i_j})$, \begin{equation}\label{set} \mu U \le (K + 1) \mu \cup_{j=1}^N B(x_{i_j}, t s_{i_j}), \end{equation} and \begin{equation}\label{bound} \sum_{j=1}^N \frac{\mu D_{i_j}}{\mu B(x_{i_j}, s_{i_j})}\mathbf{1}_{B(x_{i_j}, s_{i_j})} \le C(\mu) \ K + 1. \end{equation} \end{theorem} \begin{proof} We use the Stein-Str\"omberg selection algorithm. 
Let $B(x_{i_1}, s_{i_1}) = B(x_{1}, s_{1})$ and suppose that the balls $B(x_{i_1}, s_{i_1}), \dots ,B(x_{i_k}, s_{i_k})$ have already been selected. If $$ \sum_{j=1}^k \frac{\mu D_{i_j}}{\mu B(x_{i_j}, s_{i_j})}\mathbf{1}_{Bl(x_{i_j}, s_{i_j}, t s_{i_j})} (x_{i_{k} + 1}) \le 1, $$ accept $B(x_{i_{k + 1}}, s_{i_{k + 1}}) := B(x_{i_{k} + 1}, s_{i_{k} + 1})$ as the next ball in the subcollection. Otherwise, reject it. Repeat till we run out of balls. Let $\mathcal{C}$ be the collection of all rejected balls. Then $\mu$ a.e., $$ \mathbf{1}_{\cup \mathcal{C}} < \sum_{j=1}^N \frac{\mu D_{i_j}}{\mu B(x_{i_j}, s_{i_j})}\mathbf{1}_{Blu(x_{i_j}, s_{i_j}, t s_{i_j})}. $$ Integrating both sides and using microblossoming we conclude that $\mu \cup \mathcal{C} \le K \sum_{j=1}^N \mu D_{i_j} = K \mu \cup_{j=1}^N B(x_{i_j}, t s_{i_j})$, whence $\mu U \le (K + 1) \ \mu \cup_{j=1}^N B(x_{i_j}, t s_{i_j})$. Next we show that $$ \sum_{j=1}^N \frac{\mu D_{i_j}}{\mu B(x_{i_j}, s_{i_j})}\mathbf{1}_{B(x_{i_j}, s_{i_j})} \le C(\mu) \ K + 1. $$ Suppose $\sum_{j=1}^N \frac{\mu D_{i_j}}{\mu B(x_{i_j}, s_{i_j})}\mathbf{1}_{B(x_{i_j}, s_{i_j})}(z) > 0$. Let $\{B(x_{i_{k_1}}, s_{i_{k_1}}),$ $ \dots, B(x_{i_{k_n}}, s_{i_{k_n}})\}$ be the collection of all balls containing $z$ (keeping the original ordering by decreasing radii). Then each $B(x_{i_{k_j}}, s_{i_{k_j}})$ has radius either equal to or (substantially) larger than $s_{i_{k_n}}$. We separate the contributions of these balls into two sums. Suppose $B(x_{i_{k_1}}, s_{i_{k_1}}), \dots , B(x_{i_{k_m}}, s_{i_{k_m}})$ all have radii larger than $s_{i_{k_n}}$, while $B(x_{i_{k_{m} + 1}}, s_{i_{k_{m} + 1}}), \dots , B(x_{i_{k_n}}, s_{i_{k_n}})$ have radii equal to $s_{i_{k_{n}}}$. 
Now for $1 \le j \le m$, by $T$ lacunarity and the fact that $T t \ge 1$, we have $s_{i_{k_n}} \le t s_{i_{k_j}}$, so $z\in B(x_{i_{k_j}}, s_{i_{k_j}})$ implies that $x_{i_{k_n}}\in Bl(x_{i_{k_j}}, s_{i_{k_j}}, t s_{i_{k_j}})$, whence $$ \sum_{j=1}^m \frac{\mu D_{i_{k_j}}}{\mu B(x_{i_{k_j}}, s_{i_{k_j}})} \mathbf{1}_{Bl(x_{i_{k_j}}, s_{i_{k_j}}, t s_{i_{k_j}})} (x_{i_{k_n}}) \le 1, $$ and thus $$ \sum_{j=1}^m \frac{\mu D_{i_{k_j}}}{\mu B(x_{i_{k_j}}, s_{i_{k_j}})} \mathbf{1}_{B(x_{i_{k_j}}, s_{i_{k_j}})} (z) \le 1. $$ Next, note that the sets $D_{i_{k_{m} + 1}}, \dots , D_{i_{k_{n}}}$ are all disjoint and contained in $Bl(z, s_{i_{k_{n}}}, t s_{i_{k_{n}}})$. By microblossoming and local comparability, for $j = m + 1, \dots, n$ we have $$ \mu \cup_{j= m+1}^n D_{i_{k_{j}}}\le \mu Bl(z, s_{i_{k_{n}}}, t s_{i_{k_{n}}}) \le K \mu B(z, s_{i_{k_{n}}})\le K \ C(\mu) \ \mu B(x_{i_{k_{j}}}, s_{i_{k_{n}}}). $$ It follows that $$ \sum_{j= m +1}^n \frac{\mu D_{i_{k_{j}}}}{\mu B(x_{i_{k_{j}}}, s_{i_{k_{n}}})}\mathbf{1}_{B(x_{i_{k_{j}}}, s_{i_{k_{n}}})} (z) \le \frac{ C(\mu) \ \mu Bl(z, s_{i_{k_{n}}}, t s_{i_{k_{n}}})}{\mu B(z, s_{i_{k_{n}}})} \le C(\mu) \ K. $$ \end{proof} Denote by $M_R$ the centered Hardy-Littlewood maximal operator, with the additional restriction that the supremum is taken over radii belonging to the subset $R\subset (0,\infty)$ (cf. \cite[p. 735]{NaTa}). We mention that under the hypotheses of the next corollary, it is not known whether the centered Hardy-Littlewood maximal operator $M$ (with no restriction on the radii) is of weak type (1,1). \begin{corollary}\label{MR} Let $(X, d, \mu)$ be a metric measure space, where $\mu$ satisfies a $C(\mu)$ local comparability condition, and let $R:= \{r_n: n\in \mathbb{Z}\}$ be a $T$-lacunary sequence of radii. Suppose there exists a $t > 0$ with $T t \ge 1$ such that $\mu$ $(t, K)$-microblossoms boundedly. Then $\|M_R\|_{L^1-L^{1,\infty}} \le (K + 1) \ (C(\mu) \ K + 1)$. \end{corollary} The proof is standard. 
We present it to keep track of the constants. \begin{proof} Fix $\varepsilon > 0$, let $a > 0$, and let $f\in L^1(\mu)$. For each $x\in \{M_R f > a\}$ select $B(x,r)$ with $r\in R$, such that $a \mu B(x,r) < \int_{B(x,r)} |f|$. Then the collection of ``small" balls $\{ B(x, tr) : x\in \{M_R f > a\}\}$ is a cover of $\{M_R f > a\}$. By the $\tau$-smoothness of $\mu$, there is a finite subcollection $\{B(x_i, t s_i): s_i \in R, 1 \le i \le M\}$ of balls with positive measure, ordered by non-increasing radii, such that $$ (1 - \varepsilon) \mu \{M_R f > a\} \le (1 - \varepsilon) \mu \cup \{ B(x, tr) : x\in \{M_R f > a\}\} < \mu \cup_{i= 1}^M B(x_i, t s_i). $$ Next, let $\{B(x_{i_1}, s_{i_1}), \dots, B(x_{i_N}, s_{i_N})\}$ be the subcollection given by the Stein-Str\"omberg covering theorem for sparse radii. Then we have $$ \mu \cup_{i= 1}^M B(x_i, t s_i) \le (K + 1) \mu \cup_{j=1}^N B(x_{i_j}, t s_{i_j}) = (K + 1) \sum_{j=1}^N \mu D_{i_j} $$ $$ = (K + 1) \sum_{j=1}^N \frac{\mu D_{i_j} }{\mu B(x_{i_j}, s_{i_j})}\int \mathbf{1}_{B(x_{i_j}, s_{i_j})} \le (K + 1) \frac{1}{a }\int |f| \sum_{j=1}^N \frac{\mu D_{i_j} }{\mu B(x_{i_j}, s_{i_j})}\mathbf{1}_{B(x_{i_j}, s_{i_j})} $$ $$\le (K + 1) \ (C(\mu) \ K + 1) \frac{1}{a }\int |f| . $$ \end{proof} In the specific case of $d$-dimensional Lebesgue measure $\lambda^d$, $C(\lambda^d) = 1$. Choosing $t = 1/d$ and $T = d$, $K$ above can be taken to be $e^2$, so the constant obtained is $(e^2+1)^2$, which is worse than the constant $(e^2 + 1) (e + 1)$ yielded by the Stein-Str\"omberg argument. This discrepancy is due to the fact that our definition of microblossoming uses the uncentered blossom instead of the blossom, so from the assumption $\mu (Blu \left(x, r, t r \right)) \le K \mu B(x,r)$ we get the same bound $\mu (Bl \left(x, r, t r \right)) \le K \mu B(x,r)$ for the potentially smaller centered blossom. 
Of course, we could strengthen the definition, using blossoms, to obtain the same constant as in the Stein-Str\"omberg proof, but in the case of Lebesgue measure we prefer to consider it separately, using different values of $(t, K)$ to lower the known bounds. We do this in the next section. While Corollary \ref{MR} follows from the proof of the Stein-Str\"omberg covering theorem, it was not stated there but in \cite[Lemma 4]{MeSo} for Lebesgue measure, and in the microdoubling case, in \cite[Corollary 1.2]{NaTa}. A source of interest for this result comes from the fact that under $(t,K)$-microblossoming, the maximal operator defined by a $(1 + t)$-lacunary set of radii $R$ is controlled by the sum of $N$ maximal operators with lacunarity $1/t$, where $N$ is the least integer such that $(1 + t)^N \ge 1/t$. Thus, the bound $\|M_R\|_{L^1-L^{1,\infty}} \le N (K + 1) \ (C(\mu) \ K + 1)$ follows. Under the additional assumption of $(t, K^{1/2})$-microdoubling, the maximal operator defined by taking suprema of radii in $[a, (1 + t) a)$ is controlled by $ K^{1/2}$ times the averaging operator of radius $(1 + t) a$. Putting these estimates together, and using the better bound for $\mu Bl(x, r, tr)\le K^{1/2} \mu B(x,r)$, the following result due to Naor and Tao (cf. \cite[Corollary 1.2]{NaTa}) is obtained. Of course, in this case $\mu$ is doubling and $X$, geometrically doubling. \begin{corollary} Let $(X, d, \mu)$ be a metric measure space, where $\mu$ satisfies a $C(\mu)$ local comparability condition and is $(t, K^{1/2})$-microdoubling. If $N$ is the least integer such that $(1 + t)^N \ge 1/t$, then $$\|M\|_{L^1-L^{1,\infty}} \le N \ K^{1/2} \ (K + 1) \ (C(\mu) \ K^{1/2} + 1).$$ \end{corollary} This shows that the Stein-Str\"omberg covering theorem for sparse radii in metric spaces suffices to prove the Naor-Tao bounds, but no greater generality is achieved in either the spaces or the measures, since microdoubling is used in the last step. 
A second approach, which yields a slightly more general version of the result and gives better constants, consists in going back to the original Stein-Str\"omberg argument. Recall that when defining $(t,K_1)$-microblossoming, we set $0 < t < 1$ and $K_1\ge 1$. In the proof of the next result $K:= \max\{K_1, e\}$ is used to determine the size of the steps. For convenience we take $K \ge e$, but $e$ is just one possible choice. Note that the condition on $ mri(r, T)$ below entails that $\mu$ is doubling on its support, and hence $supp(\mu)$ is geometrically doubling. \begin{theorem}\label{StSt2} {\bf Stein-Str\"omberg covering theorem for bounded radii.} Let $(X, d, \mu)$ be a metric measure space such that $\mu$ satisfies a $C(\mu)$ local comparability condition, and is $(t,K_1)$-microblossoming. Set $K = \max\{K_1 , e\}$. Let $ r > 0$, and suppose there exists a $T > 1$ such that $K_2:= mri(r,T) <\infty$. Let $\{B(x_i, s_i): r \le s_i < T r, 1 \le i \le M\}$ be a finite collection of balls with positive measure, given in any order, and let $D_1 = B(x_1, t s_1), \dots, D_{M} = B(x_{M}, t s_{M})\setminus \cup_1^{M-1} B(x_{i}, t s_{i})$ be the disjointifications of the $t$-reduced balls. Then \begin{equation}\label{sum} \sum_{i=1}^M\frac{\mu D_i}{\mu B(x_{i}, s_{i})}\mathbf{1}_{B(x_{i}, s_{i})} \le C(\mu) \ K_1 K \left(2 + \frac{\log K_2}{\log K}\right). \end{equation} \end{theorem} Since the big $d\log d$ part in the estimates for the maximal operator (in $\mathbb{R}^d$ with Lebesgue measure) comes from this case, which does not require any particular ordering nor any choice of balls, it is natural to enquire whether some additional selection process can lead to an improvement in the bounds. In general metric spaces this cannot be done, by \cite[Theorem 1.4]{NaTa}, but it might be possible in $\mathbb{R}^d$. However, I have not been able to find such a new selection argument. 
In the statement above, $T$ is not assumed to be close to 1, and in fact it could be much larger than 2 (recall Example \ref{macrodoub}). From the viewpoint of the proof, the difference between $T >> 2$ and the assumption of $t$-microdoubling lies in the fact that the size of the steps will vary depending on the growth of balls, rather than having increments given by the constant factor $1 + t$ at every step. But the total number of steps will be determined by $K$ and $K_2$, not by whether the factors are all equal to $1 + t$ or not. \begin{proof} Suppose $$\sum_{i=1}^M\frac{\mu D_i}{\mu B(x_{i}, s_{i})}\mathbf{1}_{B(x_{i}, s_{i})} (y) > 0. $$ Let $s = \min \{s_i: 1 \le i \le M \mbox{ and } y\in B(x_{i}, s_{i})\}$. Then $r \le s < Tr$. Select $$h_1 = \sup \{h > 0 : \mu B(y , (1 + h) s) \le K \mu B^{cl} (y ,s) \mbox{ \ and \ } (1 + h) s \le T r\}. $$ This is always possible since $ \lim_{h\downarrow 0}\mu B(y , (1 + h) s) = \mu B^{cl} (y ,s) $. Now either $(1 + h_1) s = T r$, in which case the process finishes in one step, and then it could happen that $\mu B^{cl}(y , (1 + h_1) s) < K \mu B^{cl} (y ,s)$, or $(1 + h_1) s < T r$, in which case $\mu B(y , (1 + h_1) s) \le K \mu B^{cl} (y ,s) \le \mu B^{cl}(y , (1 + h_1) s)$ (the last inequality must hold, since otherwise we would be able to select a larger value for $h_1$). If $h_1, \dots, h_m$ have been chosen, let $$h_{m + 1} := \sup \{h > 0 : \mu B(y , s (1 + h)\Pi_{i=1}^m (1 + h_i)) \le K \mu B^{cl} (y , s \Pi_{i=1}^m (1 + h_i) ) \mbox{ \ and \ } $$ $$ s (1 + h)\Pi_{i=1}^m (1 + h_i) \le T r\}.$$ Since $\mu B (y , Tr) < \infty$, the process stops after a finite number of steps, so there is an $N \ge 0$ (assigning value 1 to the empty product) such that $ s \Pi_{i=1}^{N + 1} (1 + h_i) = T r$ and $$ \mu B^{cl} (y , s \Pi_{i=1}^N (1 + h_i) ) \le \mu B (y , Tr) \le K \mu B^{cl} (y , s \Pi_{i=1}^N (1 + h_i) ). 
$$ To estimate $N$, note that since $r \le s$, $$ \mu B (y , T r) \le K_2 \mu B (y , s) \le \frac{K_2}{K} \mu B^{cl} (y ,(1 + h_1) s) $$ $$ \le \dots \le \frac{K_2}{K^N} \mu B^{cl} (y , s \Pi_{i=1}^N (1 + h_i) ) \le \frac{K_2}{K^N} \mu B (y , T r). $$ Hence $K^N \le K_2$ and thus $N\le \log K_2/ \log K$. The remaining part of the argument is a variant of what was done in Stein-Str\"omberg for sparse radii, when considering the contribution of balls with the same radius as the smallest ball. Here we arrange the balls containing $y$ into $N + 2$ ``scales" (instead of just one) depending on whether their radii $R$ are equal to $s$, or $s \Pi_{i=1}^m (1 + h_i) < R \le s \Pi_{i=1}^{m +1} (1 + h_i) $, or $ s \Pi_{i=1}^N (1 + h_i) < R \le T r$. For the first scale, consider all balls $B(x_{i_{1,1}}, s), \dots, B(x_{i_{1,k_1}}, s)$ containing $y$. Since for $1\le j \le k_1$, $x_{i_{1,j}} \in B(y, s)$, it follows that the disjoint sets $D_{i_{1,j}}$ are all contained in $Bl(y, s, t s).$ By microblossoming and local comparability we have, for $j = 1, \dots, k_1$, $$ \mu \cup_{j= 1}^{k_1} D_{i_{1,j}}\le \mu Bl(y, s, t s) \le K_1 \mu B(y, s)\le K_1 \ C(\mu) \ \mu B(x_{i_{1,j}}, s), $$ so $$ \sum_{j= 1}^{k_1} \frac{\mu D_{i_{1,j}}}{\mu B(x_{i_{1,j}}, s_{i_{1,j}})}\mathbf{1}_{B(x_{i_{1,j}}, s_{i_{1,j}})} (y) \le \frac{ C(\mu) \ \mu Bl(y, s, t s)}{\mu B(y, s)} \le C(\mu) \ K_1. $$ The contributions of all the other scales are estimated in the same way as the second one, which is presented next. Again, consider all balls $B(x_{i_{2,1}}, s_{i_{2,1}}), \dots, B(x_{i_{2,k_2}}, s_{i_{2,k_2}})$ containing $y$ and with radii $s_{i_{2,j}}$ in the interval $(s, (1 + h_1) s]$. Then all the sets $D_{i_{2,j}}$ are contained in $$ Bl(y, (1 + h_1) s, t (1 + h_1) s). 
$$ Using microblossoming, the choice of $h_1$, and the local comparability of $\mu$, for $j = 1, \dots, k_2$ we have \begin{equation}\label{minus} \mu \cup_{j= 1}^{k_2} D_{i_{2,j}} \le \mu Bl(y, (1 + h_1) s, t (1 + h_1) s) \end{equation} $$ \le K_1 \mu B (y, (1 + h_1) s) \le K_1 K \mu B^{cl}(y, s) \le K_1 K \ C(\mu) \ \mu B(x_{i_{2,j}}, s_{i_{2,j}}), $$ so $$ \sum_{j= 1}^{k_2} \frac{\mu D_{i_{2,j}}}{\mu B(x_{i_{2,j}}, s_{i_{2,j}})}\mathbf{1}_{B(x_{i_{2,j}}, s_{i_{2,j}})} (y) \le \frac{ C(\mu) \ \mu Bl(y, (1 + h_1) s, t (1 + h_1) s)}{\mu B^{cl}(y, s)} \le C(\mu) \ K_1 K. $$ Adding up over the $N + 2$ scales we get (\ref{sum}). \end{proof} Next we put together the two parts of the Stein-Str\"omberg covering theorem. This helps to see why the original argument gives better bounds than domination by several sparse operators. \begin{theorem}\label{StSt3} {\bf Stein-Str\"omberg covering theorem.} Let $(X, d, \mu)$ be a metric measure space, where $\mu$ satisfies a $C(\mu)$ local comparability condition, and is $(t,K_1)$-microblossoming. Set $K = \max\{K_1 , e\}$, and suppose $K_2:= \sup_{r > 0} mri(r,1/t) <\infty$. Let $\{B(x_i, s_i): s_i \in R, 1 \le i \le M\}$ be a finite collection of balls with positive measure, ordered by non-increasing radii, and let $U:= \cup_{i = 1}^M B(x_i, t s_i)$. Then there exists a subcollection $\{B(x_{i_1}, s_{i_1}), \dots, B(x_{i_N}, s_{i_N})\}, $ such that, denoting by $D_{i_1} = B(x_{i_1}, t s_{i_1}), \dots, D_{i_N} = B(x_{i_N}, t s_{i_N}) \setminus \cup_1^{N-1} B(x_{i_j}, t s_{i_j})$, we have \begin{equation}\label{set2} \mu U \le (K_1 + 1) \mu \cup_{j=1}^N B(x_{i_j}, t s_{i_j}), \end{equation} and \begin{equation} \label{bound2} \sum_{j=1}^N \frac{\mu D_{i_j}}{\mu B(x_{i_j}, s_{i_j})}\mathbf{1}_{B(x_{i_j}, s_{i_j})} \le 1 + C(\mu) \ K_1 K \left(2 + \frac{\log K_2}{\log K}\right). 
\end{equation} \end{theorem} \begin{proof} The selection process is the same as in the proof of Theorem \ref{StSt1}, yielding the desired subcollection, with (\ref{set2}) being the same as (\ref{set}). As for the right hand side of (\ref{bound2}) the 1 comes from the contribution of balls with very large radii, as in (\ref{bound}), while $C(\mu) \ K_1 K \left(2 + \frac{\log K_2}{\log K}\right)$ is the bound from (\ref{sum}). \end{proof} The same argument given for Corollary \ref{MR} now yields \begin{corollary}\label{M} Under the assumptions and with the notation of the preceding result, the centered maximal function satisfies the weak type (1,1) bound $$\|M\|_{L^1-L^{1,\infty}} \le (K_1 + 1) \left( 1 + C(\mu) \ K_1 K \left(2 + \frac{\log K_2}{\log K}\right)\right).$$ \end{corollary} For Lebesgue measure on $\mathbb{R}^d$, with balls defined by an arbitrary norm and $t = d^{-1}$, this is worse (by a factor of $e^2$) than the bound $(1 + e^2) (1 + o(1)) e^2 d \log d$ obtained by Stein and Str\"omberg. Regarding lower bounds, currently it is known that for the centered maximal function defined using $\ell^\infty$-balls (cubes) the numbers $\|M\|_{L^1-L^{1,\infty}}$ diverge to infinity (cf. \cite{A}) at a rate at least $O(d^{1/4})$ (cf. \cite{IaSt}). No information is available for other balls. In particular, the question (asked by Stein and Str\"omberg) as to whether or not the constants $\|M\|_{L^1-L^{1,\infty}}$ diverge to infinity with $d$, for euclidean balls, remains open. \section{Sharpening the bounds for Lebesgue measure} Here we revisit the original case studied by Stein and Str\"omberg, Lebesgue measure $\lambda^d$ on $\mathbb{R}^d$, with metric (and hence, with maximal function) defined by an arbitrary norm. Since $\lambda^d$ is $(t, (1 + t)^d)$-microdoubling for every $t > 0$, values of $t\ne 1/d$ can be used to obtain improvements on the size of the constants. 
\begin{theorem}\label{SSLebesgue} Consider $\mathbb{R}^d$ with Lebesgue measure $\lambda^d$ and balls defined by an arbitrary norm. Let $R:= \{r_n: n\in \mathbb{Z}\}$ be a $d$-lacunary sequence of radii, and let $M_R$ be the corresponding (sparsified) Hardy-Littlewood maximal operator. Then $\|M_R\|_{L^1-L^{1,\infty}} \le (e^{1/d} + 1) (1 + 2 e^{1/d})$. Furthermore, if the maximal function is defined using the $\ell_\infty$-norm, so balls are cubes with sides perpendicular to the coordinate axes, then $\|M_R\|_{L^1-L^{1,\infty}} \le 6.$ \end{theorem} As we noted above, using the original argument from \cite{StSt} one obtains $\|M_R\|_{L^1-L^{1,\infty}} \le (e^2 + 1) (e + 1)$. \begin{proof} Suppose, for simplicity in the writing, that $r_{n + 1} = d r_n$ (the case $r_{n + 1} \ge d r_n$ is proven in the same way). We apply the Stein Str\"omberg selection process with $t =1/d^2$ and microdoubling constant $K = (1 + 1/d^2)^{d} < e^{1/d}$. As before, given $0 \le f \in L^1$ and $a >0$, we cover the level set $\{M_R f > a\}$ almost completely, by a finite collection of ``small" balls $\{B(x_i, t s_i): s_i \in R, 1 \le i \le M\}$ ordered by non-increasing radii, and such that $a \mu B(x_i, s_i) < \int_{B(x_i, s_i)} f$. From this collection we extract a subcollection $\{B(x_{i_1}, t s_{i_1}), \dots, B(x_{i_N}, t s_{i_N})\}$ satisfying $$ \mu \cup_{i= 1}^M B(x_i, t s_i) \le (e^{1/d} + 1) \mu \cup_{j=1}^N B(x_{i_j}, t s_{i_j}) = (e^{1/d} + 1) \sum_{j=1}^N \mu D_{i_j}. $$ Next, we obtain the bound $$ \sum_{j=1}^N \frac{\mu D_{i_j}}{\mu B(x_{i_j}, s_{i_j})}\mathbf{1}_{B(x_{i_j}, s_{i_j})} \le 2 e^{1/d} + 1, $$ by considering $z$ such that $\sum_{j=1}^N \frac{\mu D_{i_j}}{\mu B(x_{i_j}, s_{i_j})}\mathbf{1}_{B(x_{i_j}, s_{i_j})}(z) > 0$. Select the ball $B$ with largest index that contains $z$. 
Since $B$ belongs to the subcollection obtained by the Stein-Str\"omberg method, all balls containing $z$ and with radii $\ge d^2 r(B)$ (where $r(B)$ denotes the radius of $B$), contribute at most 1 to the sum. Next we have to consider two more scales, all the balls with radius $r(B)$, and all the balls with radius $d r(B)$. By the usual argument (as in the proof of Theorem \ref{StSt1}) each of these scales contributes at most $e^{1/d}$ to the sum, so $\|M_R\|_{L^1-L^{1,\infty}} \le (e^{1/d} + 1) (1 + 2 e^{1/d})$ follows. The result for cubes is obtained by letting $d\to\infty$, since in this case it is known that the weak type (1,1) norms increase with the dimension (cf. \cite[Theorem 2]{AV}). \end{proof} \begin{theorem}\label{SS2Lebesgue} Consider $\mathbb{R}^d$ with Lebesgue measure $\lambda^d$ and balls defined by an arbitrary norm. If $\varepsilon > 0$, then $\|M\|_{L^1-L^{1,\infty}} \le (2 + 3 \varepsilon) d \log d$ for all $d = d(\varepsilon)$ sufficiently large. \end{theorem} The bound from the proof of \cite[Theorem 1]{StSt} is $\|M\|_{L^1-L^{1,\infty}} \le e^2 (e^2 + 1) (1 + o(1)) d \log d.$ \begin{proof} Fix $\varepsilon \in (0,1)$. Since $(1 + d^{-1 - \varepsilon})^d = 1 + d^{ - \varepsilon} + O(d^{ -2 \varepsilon})$, it follows that $\lambda^d$ is $(d^{-1 - \varepsilon}, 1 + d^{ - \varepsilon} + O(d^{ -2 \varepsilon}))$-microdoubling. Note that if a ball $B$ contains the center of a second ball of radius $1$, and the latter ball is contained in $(1 + d^{-1 - \varepsilon}) B$, then the radius $r_B$ of $B$ must satisfy $r_B \ge d^{ 1 + \varepsilon}$. Let $L$ be any natural number such that $(1 + d^{-1 - \varepsilon})^L \ge d^{1 + \varepsilon}$. 
Taking logarithms to estimate $L$, and using $\log(1 + x) > x - x^2$ for $x$ sufficiently close to $0$, we see that it is enough, for the preceding inequality to hold, to choose $L$ satisfying $L (d^{-1 - \varepsilon} - d^{-2 - 2\varepsilon}) \ge (1 + \varepsilon) \log d$, or, $L \ge (1 + o(d^{-1}))(1 + \varepsilon) d^{1 + \varepsilon} \log d$. For the least such integer we will have $$ L \le 1 + (1 + o(d^{-1}))(1 + \varepsilon) d^{1 + \varepsilon} \log d. $$ Again we apply the Stein Str\"omberg selection process with $t = d^{-1 - \varepsilon}$, covering a given level set $\{M f > a\}$ almost completely (up to a small $\delta > 0$) by a finite collection of small balls $\{B(x_i, t s_i): s_i \in R, 1 \le i \le k\}$ ordered by non-increasing radii, and such that $a \mu B(x_i, s_i) < \int_{B(x_i, s_i)} |f|$. Using the Stein Str\"omberg algorithm, we extract a subcollection $$ \{B(x_{i_1}, t s_{i_1}), \dots, B(x_{i_N}, t s_{i_N})\} $$ satisfying \begin{equation} \label{SSsum1} (1 - \delta) \mu \{M f > a\} \le (2 + d^{ - \varepsilon} + O(d^{ -2 \varepsilon})) \sum_{j=1}^N \mu D_{i_j}, \end{equation} where the sets $D_{i_j}$ denote the disjointifications determined by the above subcollection. To sharpen the usual uniform bound for \begin{equation*} \sum_{j=1}^N \frac{\mu D_{i_j}}{\mu B(x_{i_j}, s_{i_j})}\mathbf{1}_{B(x_{i_j}, s_{i_j})}, \end{equation*} we use the fact that the sets $D_i$ are disjoint across different steps, and not just within the same step. More precisely, let $z$ satisfy \begin{equation} \label{SSsum} \sum_{j=1}^N \frac{\mu D_{i_j}}{\mu B(x_{i_j}, s_{i_j})}\mathbf{1}_{B(x_{i_j}, s_{i_j})}(z) > 0. \end{equation} Select the ball $B$ with largest index that contains $z$. Since $B$ belongs to the subcollection obtained by the Stein-Str\"omberg method, all balls containing $z$ and with radii $\ge d^{ 1 + \varepsilon} r(B)$ contribute at most 1 to the sum. 
Next we consider the first two scales, since for all the others, the argument is the same as for the second. Take all the balls with radii equal to $r_B$. In order to bound (\ref{SSsum}) from above, we suppose that $(1 + d^{ - 1 - \varepsilon}) B$ is completely filled up with the sets $D_i$ associated to balls with radii $r_B$, and hence, no $D_j$ associated to a ball with larger radius intersects $(1 + d^{ -1 - \varepsilon}) B$. When we consider the sum (\ref{SSsum}), but just for the balls with radius $r_B$, we obtain the upper bound $(1 + d^{ - 1 - \varepsilon})^d$. For the second level, we consider all balls in the subcollection with radii in $(r_B, (1 + d^{ - 1 - \varepsilon}) r_B]$, and as before, we suppose that $(1 + d^{ - 1 - \varepsilon})^2 B \setminus (1 + d^{ - 1 - \varepsilon}) B$ is completely filled up with the sets $D_j$ associated to these balls. The estimate we obtain for this second level is $(1 + d^{ - 1 - \varepsilon})^d - 1 = d^{ - \varepsilon} + O(d^{ -2 \varepsilon})$. For balls with radii in $((1 + d^{ - 1 - \varepsilon})^k r_B, (1 + d^{ - 1 - \varepsilon})^{k + 1} r_B]$, $0 \le k < L$, we use the same estimate. Adding up over all scales we obtain $$ \sum_{j=1}^N \frac{\mu D_{i_j}}{\mu B(x_{i_j}, s_{i_j})}\mathbf{1}_{B(x_{i_j}, s_{i_j})}(z) \le 1 + 1 + d^{ - \varepsilon} + O(d^{ -2 \varepsilon}) $$ $$ + (1 + (d^{ - \varepsilon} + O(d^{ -2 \varepsilon})) (1 + o(d^{-1}))(1 + \varepsilon) d^{1 + \varepsilon} \log d) \le (1 + O(d^{-\varepsilon}))(1 + \varepsilon) d \log d. $$ Multiplying this bound with the bound from (\ref{SSsum1}) and adding an $\varepsilon$ to absorb the big Oh terms, for $d$ large enough we obtain $\|M\|_{L^1-L^{1,\infty}} \le (2 + 3 \varepsilon) d \log d$. \end{proof} \end{document}
\begin{document} \begin{abstract} We describe a generalization of Hashimoto and Kurano's Cauchy filtration for divided powers algebras. This filtration is then used to provide a cellular structure for generalized Schur algebras associated to an arbitrary cellular algebra, $A$. Applications to the cellularity of wreath product algebras $A\wr \Si_d$ are also considered. \end{abstract} \maketitle \section{Introduction} Let $\Bbbk$ be a noetherian integral domain and suppose $A$ is a cellular $\Bbbk$-algebra \cite{GL}. Then Geetha and Goodman \cite{GG} showed that the wreath product algebra \[A\wr \Si_d = A^{\otimes d} \rtimes \Bbbk\Si_d\] is cellular, provided that all of the cell ideals of $A$ are cyclic. On the other hand, the generalized Schur algebras $S^A(n,d)$ were defined by Evseev and Kleshchev \cite{EK1, EK2} in order to prove the Turner double conjecture. These algebras are related to wreath product algebras by a generalized Schur-Weyl duality established in \cite{EK1}. \smallskip In this paper, we describe a cellular structure for the generalized Schur algebra $S^A(n,d)$ for an arbitrary cellular algebra $A$ and for all integers $n,d\geq 0$. This extends some results of Kleshchev and Muth \cite{KM1, KM2, KM3}. It follows, for example, from results of \cite{KM3} that the algebra $S^A(n,d)$ is cellular for certain algebras $A$ which are both cellular and quasi-hereditary. We note that for such algebras, the cell ideals are automatically cyclic. The method used in this paper, however, does not require any additional assumptions on the cellular algebra. \smallskip Our approach is motivated by that of \cite{Krause1}, where Krause used the Cauchy decomposition of divided powers \cite{ABW, HK} to describe the highest weight structure of categories of strict polynomial functors. 
As Krause mentions, this leads to an alternate proof of the fact that classical Schur algebras $S^\Bbbk(n,d)$ are quasi-hereditary, which follows by a Morita equivalence. As we will see, this approach can similarly be used to describe cellular structure. \smallskip We begin by constructing a generalized Cauchy filtration for the divided powers $\Gamma^d J$ of a given $\Bbbk$-module, $J$, which we assume is equipped with a filtration \begin{equation*} 0 = J_1 \subset \cdots \subset J_r = J \end{equation*} such that $J_j/J_{j-1} \cong U_j\otimes_\Bbbk V_j$, for some free $\Bbbk$-modules $U_j, V_j$ of finite rank. Our first main result is a generalized Cauchy decomposition formula (Theorem \ref{thm:gen_Cauchy}), which provides a filtration of $\Gamma^d J$ such that the associated graded object is a direct sum of modules of the form \[\bigoplus_{{\boldsymbol \lambda} \in \boldsymbol \L} \U_{\boldsymbol \lambda} \otimes_\Bbbk \V_{\boldsymbol \lambda},\] where $\U_{\boldsymbol \lambda}, \V_{\boldsymbol \lambda}$ are {\em generalized Weyl modules} defined in Section \ref{ss:gen_Weyl} and $\boldsymbol{\Lambda}$ denotes a set of $r$-multipartitions. \smallskip The generalized Schur algebra $S^A(n,d)$ may be identified as the $d$-th divided power $\Gamma^d \mathrm{M}_n(A)$, where $\mathrm{M}_n(A)$ is the algebra of size $n$ matrices over $A$. We are thus able to use the above decomposition, together with K\"onig and Xi's characterization of cellular algebras in \cite{KX}, to prove our second main result (Theorem \ref{thm:cellular}) which shows that generalized Schur algebras are cellular. 
In Example \ref{ex:zig}, we describe a corresponding cellular basis explicitly for a particular case, $S^Z(1,2)$, where $Z$ is a zig-zag algebra (considered as an ordinary algebra rather than a superalgebra, as in \cite{KM3}). \smallskip As a consequence of generalized Schur-Weyl duality, Corollary \ref{wreath} shows that the wreath product algebras $A\wr \Si_d$ are cellular for an arbitrary cellular algebra $A$. This provides an alternate proof of the main result in \cite{GG}, for the case where $A$ is cyclic cellular, and a more recent result of Green \cite{RGr}, for the general case where $A$ is an arbitrary cellular algebra. \section{Preliminaries} Assume throughout that $\Bbbk$ is a commutative ring, unless mentioned otherwise. The notation $\sharp$ is used for the cardinality of a set. \subsection{Weights, partitions, and sequences} \label{ss:definition} Write $\mathbb{N}$ and $\mathbb{N}_0$ to denote the sets of positive and nonnegative integers, respectively, with the usual total order. More generally, suppose that $\B$ is a countable totally ordered set which is bounded below. Any elements $a,b \in \B$ determine an interval \[ \set{a,b} := \{c\in \B \mid a \leq c \leq b\} \] which is empty unless $a\leq b$. \smallskip A {\em weight} ({\em on $\B$}) is a sequence of nonnegative integers $\mu = (\mu_b)_{b\in \B}$ such that $\mu_b =0$ for almost all $b$. Let $\L(\B)$ denote the set of all weights on $\B$. A {\em partition} ({\em on $\B$}) is a weight $\lambda \in \L(\B)$ such that \[b<c \ \text{ implies }\ \lambda_b \geq \lambda_c, \ \, ^\forall b,c\in \B.\] The subset of partitions is denoted $\L^+(\B) \subset \L(\B)$. The size of a weight $\mu$ is the integer $|\mu| := \sum_{b} \mu_b$. 
Let $\L_d(\B)$ denote the set of all weights of size $d$ and write \[\L^+_d(\B) := \L^+(\B) \cap \L_d(\B) \] for each $d\in \mathbb{N}_0$. \smallskip \begin{remark}\label{notation} In this notation and elsewhere, we will use the convention of replacing an argument of the form $\set{1,n}$ by ``$n$'' for any $n\in \mathbb{N}_0$, so that for example $\Lambda(n)$ denotes the set $\Lambda(\set{1,n})$ of weights of the form $\mu=(\mu_1, \dots, \mu_n)$. \end{remark} We also identify each set $\L(n)$ as a subset of $\L(\mathbb{N})$ in the obvious way and write \[ l(\mu) := \mathrm{min}\{n\in \mathbb{N}_0\mid \mu \in \L(n)\} \] to denote the length of a weight $\mu \in \L(\mathbb{N})$. For example, the length $l(\lambda)$ of a partition $\lambda= (\lambda_1, \lambda_2, \dots)$ in $\L^+(\mathbb{N})$ equals the number of positive parts, $\lambda_i\in \mathbb{N}$. \begin{definition}\label{lex1} Let $d\in \mathbb{N}_0$. Recall that the {\em lexicographic ordering} on $\L_d(\mathbb{N})$ is the total order defined by setting $\lambda \leq \mu$ if $\lambda_j \leq \mu_j$ whenever $\lambda_i=\mu_i$ for all $i<j$. We use the notation $\preceq$ to denote the restriction of $\leq$ to the subset $\Lambda^+_d(\mathbb{N})$ of partitions of size $d$. \end{definition} \smallskip Now fix $d\in \mathbb{N}$, and write $\seq^{d}(\B)$ to denote the set of all functions \[\mathtt{b}: \set{1,d} \to \B.\] We identify $\seq^{d}(\B)$ with $\B^d$ by setting $\mathtt b = (b_1, \dots, b_d)$, with $b_i= \mathtt b(i)$ for all $i\in \set{1,d}$. The symmetric group $\Si_d$ of permutations of $\set{1,d}$ acts on $\seq^{d}(\B)$ from the right via composition. We write $\mathtt b \sim \mathtt c$ if there exists $\sigma \in \Si_d$ with $\mathtt c = \mathtt b \sigma$. 
\smallskip The {\em weight} of a sequence $\mathtt{b}\in \seq^d(\B)$ is the element of $\Lambda_d(\B)$ defined by \begin{equation*} \mu(\mathtt{b}) := (\mu_{c})_{c\in \B}, \quad \text{where}\quad \mu_{c} = \sharp \{i \mid b_i =c\}\ \ ^\forall c\in \B. \end{equation*} We note the following elementary result. \begin{lemma}\label{mu} The map $\mu: \seq^{d}(\B) \to \L_d(\B)$, sending $\mathtt b \mapsto \mu({\mathtt b})$, induces a bijection: $\seq^d(\B)/\Si_d \ \simeq\ \L_d(\B).$ \end{lemma} \begin{proof} We may assume that $\B$ is nonempty. Since $\B$ is bounded below, it is possible to write the elements explicitly in the form \begin{equation}\label{setB} \B = \{b^{\B}_1 < b^{\B}_2 < \dots\ \}. \end{equation} To show that the map $\mathtt b \mapsto \mu({\mathtt b})$ is surjective, note that a right inverse is given by \begin{equation*} \L_d(\B) \to \seq^{d}(\B): \ \mu \mapsto \mathtt b_\mu := (b^\B_1, \dots, b^\B_1, b^\B_2, \dots, b^\B_2, \dots) \end{equation*} where $b^\B_1$ occurs with multiplicity $\mu_{b^\B_1}$, etc. Finally, it is easy to see that $\mathtt b \sim \mathtt c$ if and only if $\mu(\mathtt b)= \mu(\mathtt c)$, which completes the proof. \end{proof} \smallskip Suppose more generally that $\B_1, \dots, \B_r$ is a collection of bounded below, totally ordered sets. We again consider the product $\B=\B_1\times \dots\times \B_r$ as a bounded below, totally ordered set via the lexicographic ordering. The symmetric group $\Si_d$ acts diagonally on the following product \begin{equation*} \seq^d(\B_1, \dots, \B_r) := \seq^d(\B_1) \times \dots \times \seq^d(\B_r). 
\end{equation*} Notice that the bijection \[\theta: \seq^d(\B_1, \dots, \B_r) \simeq \seq^d(\B)\] defined by \[\theta(\mathtt{b}^{(1)}, \dots, \mathtt{b}^{(r)}):\ i \mapsto (b^{(1)}_i, \dots, b^{(r)}_i), \quad ^\forall i\in \set{1,d},\] is $\Si_d$-equivariant. It thus follows as an immediate consequence of Lemma \ref{mu} that there is a bijection \begin{equation}\label{mu2} \seq^d(\B_1, \dots, \B_r)/\Si_d \, \simeq \, \L_d(\B), \end{equation} where $\seq^d(\B_1, \dots, \B_r)/\Si_d$ denotes the set of diagonal $\Si_d$-orbits. \subsection{Multipartitions} Suppose $d\in \mathbb{N}_0$ and let $\B_1, \dots, \B_r$ be as above. Then we use the following notation for the product \[ \L^+(\B_1, \dots, \B_r) := \, \L^+(\B_1) \times \dots \times \L^+(\B_r). \] whose elements are called {\em $r$-multipartitions} and denoted ${\boldsymbol \lambda} = (\lambda^{(1)}, \dots, \lambda^{(r)})$. The {\em weight} of an $r$-multipartition $\boldsymbol \lambda$ is the element of $\L(r)$ defined by \[ |\boldsymbol \lambda| := (|\lambda^{(1)}|, \dots, |\lambda^{(r)}|).\] We call $||\boldsymbol \lambda|| := \sum |\lambda^{(j)}|$ the {\em total weight} (or {\em size}) of $\boldsymbol \lambda$. \smallskip Given $\mu\in \Lambda(r)$ and $d\in \mathbb{N}_0$, we write \[\L^+_{\mu}(\B_1, \dots, \B_r) \, := \, \L^+_{\mu_1}(\B_1) \times \dots \times \L_{\mu_r}^+(\B_r)\] and \[\L^+_d(\B_1, \dots, \B_r) \, := \bigsqcup_{\nu \in \L_d(r)} \L^+_{\nu}(\B_1, \dots, \B_r) \] to denote the subset of $r$-multipartitions of weight $\mu$, resp.~total weight $d$. 
\smallskip In the special case where $\B_j=\mathbb{N}$ for $j\in \set{1,r}$, note that \[\L^+(\mathbb{N}, \dots, \mathbb{N}) = \L^+(\mathbb{N})^r.\] We then use the following notation \[\L^+_d(\mathbb{N})^r := \L_d^+(\mathbb{N}, \dots, \mathbb{N}), \qquad \L^+_{\mu}(\mathbb{N})^r :=\L_\mu^+(\mathbb{N}, \dots, \mathbb{N})\] for $d\in \mathbb{N}_0$ and $\mu\in \L_d(r)$, respectively. \smallskip The next definition describes a total order on the set of $r$-multipartitions of a fixed total weight. \begin{definition}\label{lex2} Suppose $d,r\in \mathbb{N}$. Then $\Lambda_d^+(\mathbb{N})^r$ has a total order $\preceq$ defined as follows. For $r$-multipartitions $\boldsymbol\mu, \boldsymbol\lambda \in \L^{+}_\nu(\mathbb{N})^r$ of weight $\nu\in \Lambda_d(r)$, we set $\boldsymbol \lambda \preceq \boldsymbol \mu$ if \[\lambda^{(j)} \preceq \mu^{(j)},\text{ whenever } \lambda^{(i)} = \mu^{(i)} \text{ for all } i<j.\] We then extend $\preceq$ to all of $\L^{+}_d(\mathbb{N})^r$ by setting $\boldsymbol\lambda \prec \boldsymbol \mu$ whenever $|\boldsymbol\lambda| < |\boldsymbol \mu|$ in the lexicographic ordering on $\L_d(r)$. \end{definition} Suppose $n_1, \dots, n_r\in \mathbb{N}_0$ and $d\in \mathbb{N}$. Recalling the notation from Remark \ref{notation}, we identify the set of $r$-multipartitions \[ \L^+(n_1, \dots, n_r) := \L^+(\set{1,n_1}, \dots, \set{1,n_r}) \] as a subset of $\L^+(\mathbb{N})^r$ and view $\preceq$ as a total order on $\L^+_d(n_1, \dots, n_r)$ by restriction. \subsection{Finitely generated projective modules} Let $\M_\Bbbk$ denote the category of all $\Bbbk$-modules and $\Bbbk$-linear maps. The full subcategory of finitely generated projective $\Bbbk$-modules is denoted $\P_\Bbbk$. 
Given $M,N \in \M_\Bbbk$, we write $M\otimes N= M\otimes_\Bbbk N$ and $\mathrm{Hom}(M,N) = \mathrm{Hom}_\Bbbk(M,N)$. Also write $\mathrm{End}(M)$ to denote the $\Bbbk$-algebra $\mathrm{Hom}(M,M)$. If $M\in \P_\Bbbk$, we let $M^\vee= \mathrm{Hom}(M,\Bbbk)$ denote the $\Bbbk$-linear dual. For any $M, M',N, N'\in \P_\Bbbk$, there is an isomorphism \begin{equation}\label{eq:isom} \mathrm{Hom}(M\otimes N, M'\otimes N') \cong \mathrm{Hom}(M, M')\otimes \mathrm{Hom}(N, N') \end{equation} which is natural with respect to composition. \subsection{Divided and symmetric powers} Let $d\in \mathbb{N}$. Given $M\in \P_\Bbbk$, there is a right action of the symmetric group $\Si_d$ on the tensor power $M^{\otimes d}$ given by permuting tensor factors. We define the {\em $d$-th divided power} of $M$ to be the invariant submodule \[ \G^dM := (M^{\otimes d})^{\Si_d}.\] Similarly, the coinvariant module is denoted \[ \Sym_d M := (M^{\otimes d})_{\Si_d}\] and called the {\em $d$-th symmetric power} of $M$. It follows by definition that \begin{equation}\label{symmetric} \Gamma^d(M)^\vee \cong \mathrm{Sym}_d(M^\vee). \end{equation} We also set $\Gamma^0M= \mathrm{Sym}_0M= \Bbbk$. Note that the isomorphism (\ref{symmetric}) is usually taken as the definition of $\Gamma^d M$ (cf. \cite{ABW}), while we have used the equivalent definition from \cite{Krause1} in terms of symmetric tensors. \subsection{The divided powers algebra} The category $\M_\Bbbk$ (resp.~$\P_\Bbbk$) is a symmetric monoidal category with symmetry isomorphism \begin{equation}\label{symmetry} \tw: M\otimes N \xrightarrow{\sim} N\otimes M \end{equation} defined by $x\otimes y \mapsto y\otimes x$, for all $x\in M, y\in N$. \smallskip Suppose $M\in \P_\Bbbk$. 
Then \[\Gamma (M) := \bigoplus_{d\in \mathbb{N}_0} \Gamma^dM\] is an ($\mathbb{N}_0$-graded) commutative algebra called the {\em divided powers algebra}, with multiplication defined on homogeneous components via the shuffle product: for $x \in \Gamma^{d} M$ and $y \in \Gamma^{e}M$, define \begin{align*} x \ast y := \sum_{\sigma\in \Si_{d+ e}^{d, e}} (x\otimes y)\sigma \end{align*} where $\Si_{d+e}^{d,e}$ is the quotient group $\Si_{d+e}/\Si_{d}\times \Si_{e}$. For example, we have $x^{\otimes d} \ast x^{\otimes e} = \binom{d+e}{d}\, x^{\otimes(d+e)}$ for any $x \in M$. \smallskip There is also a comultiplication, $\Delta: \G (M) \to \G (M) \otimes \G (M)$, which is the $\mathbb{N}_0$-homogeneous map whose graded components \[\Delta: \Gamma^d M \to \Gamma^{d-c}M \otimes \Gamma^{c}M\] are defined as the inclusions \[(M^{\otimes d})^{\Si_d} \hookrightarrow (M^{\otimes d})^{\Si_{d-c} \times \Si_{c}}\] induced by the embeddings $\Si_{d-c}\times \Si_{c}\hookrightarrow \Si_d$, for $c\in \set{0,d}$. These maps, together with the unit, $\Bbbk=\Gamma^0M \hookrightarrow \Gamma (M)$, and the counit, $\Gamma(M) \twoheadrightarrow \Gamma^0M$ (projection onto degree 0), make $\Gamma (M)$ into a bialgebra. \subsection{Decompositions} The {\em symmetric algebra} $S(M)$ is defined as the free commutative $\Bbbk$-algebra generated by $M$ and has a decomposition \[ S(M) = \bigoplus_{d\in \mathbb{N}_0} \mathrm{Sym}_d M. \] It follows that $S(-)$ defines a functor from $\P_\Bbbk$ to the category of all commutative $\Bbbk$-algebras, which preserves coproducts. Hence $S(M)\otimes S(N) \cong S(M\oplus N)$, and by the duality \eqref{symmetric} there is an isomorphism \begin{equation}\label{eq:expon} \G (M) \otimes \G (N) \simeq \G(M\oplus N). 
\end{equation} The isomorphism (\ref{eq:expon}) is given explicitly by restricting the multiplication map $x\otimes y \mapsto x \ast y$, where $\G(M)$, $\G( N)$ are considered as subalgebras of $\G(M\oplus N)$. It follows that for each $d\in \mathbb{N}_0$ there is a decomposition \begin{equation}\label{eq:expon2} \Gamma^d(M\oplus N) = \bigoplus_{0\leq c\leq d} \Gamma^c(M)\ast \Gamma^{d-c}(N) \end{equation} where $\G^c (M)\ast\G^{d-c}(N)$ denotes the image of $\Gamma^c (M)\otimes \Gamma^{d-c}(N)$ under (\ref{eq:expon}). \smallskip Note that $\Gamma^d \Bbbk \cong \Bbbk$ for all $d\in \mathbb{N}_0$. Thus, given a free $\Bbbk$-module $V$ of finite rank, it follows by induction from (\ref{eq:expon2}) that the divided power $\Gamma^d V$ is again a free $\Bbbk$-module of finite rank. For example, suppose $V$ has a finite ordered $\Bbbk$-basis $\{x_b\}_{b\in \B}$. Then $\Gamma^d V$ has the following $\Bbbk$-basis \begin{equation}\label{basis} \Big\{ x^{\mu} := \prod_{b\in \B}x_b^{\otimes \mu_b} \ \Big|\ \mu \in \L_d(\B) \Big\} \end{equation} where the product denotes multiplication in $\Gamma(V)$. \smallskip The basis \eqref{basis} can also be parameterized by elements of $\seq^d(\B)$. First notice that the tensor power $V^{\otimes d}$ has the following basis \[ \{x_{\otimes\mathtt{b}} := x_{b_1}\otimes \dots \otimes x_{b_d} \, \Big|\ \mathtt{b} \in \seq^d(\B)\}. \] Given $\mathtt{b}\in \seq^d(\B)$, we then define $x_\mathtt{b} :=\sum_{{\tt b \sim c}} x_{\otimes{\tt c}}$. Notice that $x_\mathtt{b} = x_{\mu(\tt b)}$.
It then follows from Lemma \mathtt{r}ef{mu} that the set \mathtt{b}egin{equation}\label{basis2} \{x_\mathtt{b} \mid \mathtt{b} \mathtt{i}n \mathtt{s}eq^d(\B)/\Si_d\}\end{equation} is also a basis of $\Gamma^d V$, indexed by any complete set of orbit representatives. \mathtt{s}mallskip \mathtt{s}ubsection{Polynomial functors}\label{ss:functor} We recall the definitions of some well known polynomial endofunctors on the category $\P_\Bbbk$ along with their associated natural transformations. \mathtt{s}mallskip Let $d\mathtt{i}n \mathbb{N}_0$. Then recall the functor $\otimes^d: \P_\Bbbk \to \P_\Bbbk$ sending $M \mapsto M^{\otimes d}$, whose action on morphisms is defined by \[ \otimes^d_{M,N}(\varphi) :=\, \varphi \otimes \mathtt{c}dots \otimes \varphi: M^{\otimes d} \to N^{\otimes d} \] for any $\varphi\mathtt{i}n \mathrm{Hom}(M,N)$. \mathtt{s}mallskip It follows easily from \eqref{eq:expon2} that the divided power $\G^d M$ of a finitely-generated, projective $\Bbbk$-module $M\mathtt{i}n \P_\Bbbk$ is again finitely-generated and projective. This yields a functor $\G^d: \P_\Bbbk \to \P_\Bbbk$ which is a subfunctor of $\otimes^d$. In particular, the action of $\G^d$ on morphisms is defined by restriction \[\G^d_{M,N}(\varphi) := (\varphi^{\otimes d})|_{\G^d M}: \Gamma^dM \to \Gamma^d N\] for any $\varphi\mathtt{i}n \mathrm{Hom}(M,N)$. \mathtt{s}mallskip Now let $S,T:\P_\Bbbk\to \P_\Bbbk$ be an arbitrary pair of functors. 
Then the tensor product $-\otimes - $ induces the following bifunctors \[S\boxtimes T, \ \ T(-\otimes-) \ :\ \P_\Bbbk\times \P_\Bbbk \to \P_\Bbbk\] which are respectively defined by \[S\boxtimes T := (-\otimes-)\circ (S\times T), \ \qquad T(-\otimes-) := T\circ (-\otimes -).\] We also have the ``object-wise'' tensor product $S\otimes T: \P_\Bbbk \to \P_\Bbbk$ defined by \begin{equation}\label{tensor} S\otimes T := (S\boxtimes T)\circ \delta \end{equation} where $\delta:\P_\Bbbk\to \P_\Bbbk\times \P_\Bbbk$ denotes the diagonal embedding: $M\mapsto (M,M)$. \smallskip Now suppose $M, N\in \P_\Bbbk$. As in \cite{Krause1}, define $\psi^d=\psi^d(M,N)$ to be the unique map which makes the following square commute: \begin{equation}\label{commute} \begin{tikzcd}[row sep=large] \Gamma^d M \otimes \Gamma^d N \ar[d, tail ] \ar[r, "\psi^d"] & \Gamma^d(M\otimes N) \ar[d, tail ]\\ M^{\otimes d} \otimes N^{\otimes d} \ar[r, "\sim"] & (M \otimes N)^{\otimes d} \end{tikzcd} \end{equation} The following lemma is easy to check. \smallskip \begin{lemma}\label{psi} \begin{enumerate} \item The maps $\psi^d(M,N)$ form a natural transformation of bifunctors \[\psi^d: \Gamma^d\boxtimes \Gamma^d \to \Gamma^d(-\otimes-).\] \item If $M,N\in \P_\Bbbk$, then the following diagram commutes \begin{equation*} \begin{tikzcd}[column sep=huge] \Gamma^d M \otimes \Gamma^d N \ar[r, "{\psi^d(M,N)}"] \ar[d, "\tw"' ] & \Gamma^d(M\otimes N) \ar[d, "\Gamma^d(\tw)"] \\ \Gamma^d N \otimes \Gamma^d M \ar[r, "{\psi^d(N,M)}"] & \Gamma^d(N\otimes M) \end{tikzcd} \end{equation*} where $\tw$ permutes tensor factors as in (\ref{symmetry}).
\end{enumerate} \end{lemma} \section{Generalized Schur Algebras} After recalling the definition of generalized Schur algebras \cite{EK1} associated to a $\Bbbk$-algebra $A$, we introduce corresponding standard homomorphisms between certain modules of divided powers. \subsection{Associative $\Bbbk$-algebras} Suppose that $R,S$ are associative algebras in the category $\M_\Bbbk$. Recall that the tensor product $R\otimes S$ is the algebra in $\M_\Bbbk$ with multiplication $m_{R\otimes S}$ defined by \[ R\otimes S \otimes R \otimes S \xrightarrow{\, 1\otimes \tw \otimes 1\, } R\otimes R \otimes S \otimes S \xrightarrow{\, m_R \otimes m_S\, } R\otimes S. \] Given $d\in \mathbb{N}$, the tensor power $R^{\otimes d}$ is an associative algebra in $\M_\Bbbk$ in a similar way. If $R$ is unital, then $R^{\otimes d}$ has unit $1_R^{\otimes d}$. \smallskip In the remainder, the term {\em $\Bbbk$-algebra} will always refer to a unital, associative algebra in the category $\P_\Bbbk$. Let $A\in \P_\Bbbk$ be a $\Bbbk$-algebra. Then $A\text{-mod}$ (resp.~$\text{mod-}A$) denotes the subcategory of $\P_\Bbbk$ consisting of all left (right) $A$-modules, $M\in \P_\Bbbk$, and $A$-module homomorphisms. Write $\mathrm{Hom}_A(M,N)\in \P_\Bbbk$ to denote the set of all $A$-homomorphisms from $M$ to $N$ for $M,N \in A\text{-mod}$ (resp.~$\text{mod-}A$). We also write $\rho_M:A\otimes M \to M$ (resp.~$\rho_M:M\otimes A \to M$) to denote the induced linear map corresponding to a left (right) $A$-module. \smallskip If $M\in A\text{-mod}$ (resp.~$\text{mod-}A$) and $N\in B\text{-mod}$ (resp.~$\text{mod-}B$), the tensor product $M\otimes N$ is a left (resp.~right) $A\otimes B$-module, with corresponding module map: $\rho_{M\otimes N} = (\rho_M\otimes \rho_N)\circ(1\otimes \tw\otimes 1)$.
\subsection{The algebra $\Gamma^d A$} Suppose $A$ is a $\Bbbk$-algebra. Then $\Gamma^d A$ is a $\Bbbk$-algebra with multiplication $m_{\Gamma^dA}$ defined via the composition \[ \Gamma^d A\otimes \Gamma^d A \xrightarrow{\psi^d} \Gamma^d(A\otimes A) \xrightarrow{\Gamma^d(m_A)} \Gamma^d A, \] where the second map denotes the functorial action of $\Gamma^d$ on $m_A$. It follows that $\Gamma^d A$ is a unital subalgebra of $A^{\otimes d}$. \begin{example}[The Schur algebra] Suppose $n\in \mathbb{N}$, and let $\mathrm{M}_n(\Bbbk)$ denote the algebra of all $n\times n$-matrices in $\Bbbk$. Then $\Gamma^d \mathrm{M}_n(\Bbbk)$ is isomorphic to the classical {\em Schur algebra}, $S(n,d)$, defined by Green \cite[Theorem 2.6c]{Green}. We view this isomorphism as an identification. \end{example} We now have two distinct multiplications on the direct sum $\Gamma(A) = \bigoplus_{d \in \mathbb{N}_0} \Gamma^d A$. In order to distinguish them, we sometimes refer to the shuffle product \[\nabla: \Gamma^{d} A \otimes \Gamma^{e} A \to \Gamma^{d+e} A: \, x\otimes y \mapsto x\ast y \] as {\em outer} multiplication in $\Gamma(A)$, while {\em inner} multiplication refers to the map defined as multiplication in $\G^d A$ on diagonal components \[ m_{\Gamma^d A}: \Gamma^{d} A \otimes \Gamma^{d} A\to \Gamma^d A:\, x\otimes y \mapsto x y \] and then extended by zero to other components. \subsection{Generalized Schur algebras} \label{ss:Schur} Given a $\Bbbk$-algebra $A$, write $\mathrm{M}_n(A)$ for the algebra of $n\times n$-matrices in $A$. We identify $\mathrm{M}_n(A)$ with $\mathrm{M}_n(\Bbbk) \otimes A$ via \[\mathrm{M}_n(A)\, \xrightarrow{\,\sim\,}\, \mathrm{M}_n(\Bbbk) \otimes A:\, (a_{ij})\mapsto \sum_{i,j} E_{ij} \otimes a_{ij},\] where $E_{ij}$ are elementary matrices in $\mathrm{M}_n(\Bbbk)$.
Next, suppose $V$ is any left (resp.~right) $\mathrm{M}_n(\Bbbk)$-module, and let $M\in A\text{-mod}$ ($\text{mod-}A$). Then write $V(M) := V\otimes M$ to denote the corresponding $\mathrm{M}_n(A)$-module. \begin{definition} Suppose $A$ is an algebra, and let $n\in\mathbb{N}$, $d\in \mathbb{N}_0$. Then the {\em generalized Schur algebra} $S^A(n,d)$ is the algebra $\Gamma^d \mathrm{M}_n(A)$. \end{definition} Using the notation of \cite{EK1}, notice that $\mathrm{M}_n(A)$ is spanned by the elements $\xi^a_{i,j} := E_{ij} \otimes a$, for all $a\in A$ and $i,j \in \set{1,n}$. Now suppose that $A$ is free as a $\Bbbk$-module with finite ordered basis $\{x_b\}_{b\in\B}$. Then $\mathrm{M}_n(A)$ has a corresponding basis \[\{\xi_{i,j,b} := \xi^{x_b}_{i,j} \mid i,j\in \set{1,n},\, b\in \B \}.\] We view $\mathrm{M}_n(\Bbbk)$ as a subalgebra of $\mathrm{M}_n(A)$ by identifying $E_{ij} = \xi^{1}_{i,j}$. Notice that the classical Schur algebra $S(n,d)$ is thus a (unital) subalgebra of $S^A(n,d)$. \smallskip For each triple $(\mathtt{i}, \mathtt{j}, \mathtt{b}) \in \seq^d(n,n,\B)$, there is a corresponding element of $S^A(n,d)$ denoted by \[\xi_{\mathtt{i},\mathtt{j}, \mathtt{b}}:= \sum_{(\mathtt{i},\mathtt{j}, \mathtt{b}) \sim ({\tt r},{\tt s}, \mathtt{c})} \xi_{r_1,s_1, c_1} \otimes \cdots \otimes \xi_{r_d,s_d, c_d},\] where the sum is over all triples $ ({\tt r},{\tt s}, \mathtt{c}) $ in the same diagonal $\Si_d$-orbit as $(\mathtt{i},\mathtt{j}, \mathtt{b})$. It thus follows from \eqref{mu2}, \eqref{basis} and \eqref{basis2} that the set \[ \{\xi_{\mathtt{i},\mathtt{j},\mathtt{b}} \mid (\mathtt{i},\mathtt{j},\mathtt{b}) \in \seq^d(n,n,\B)/\Si_d\}\] forms a basis of $S^A(n,d)$.
In a similar way, the subalgebra $S(n,d)$ has a basis given by \[\{\xi_{\mathtt{i}, \mathtt{j}} := \mathtt{s}um_{(\mathtt{i},\mathtt{j}) \mathtt{s}im (\mathtt{r},\mathtt{s})} \xi^{1}_{r_1,s_1} \otimes \mathtt{c}dots \otimes \xi^{1}_{r_d,s_d}\mid (\mathtt{i}, \mathtt{j}) \mathtt{i}n \mathtt{s}eq^d(n,n)/\Si_d\}. \] For each weight $\mu \mathtt{i}n \L_d(n)$, we write \[\xi_\mu := \xi_{\mathtt{i}_\mu, \mathtt{i}_\mu}\] to denote the corresponding idempotent in $S(n,d) \mathtt{s}ubset S^A(n,d)$. \mathtt{s}ubsection{Standard homomorphisms} Let us fix an algebra $A$ throughout the remainder of the section. Given $M\mathtt{i}n A\text{-mod}$, it follows from (\mathtt{r}ef{commute}) that $\Gamma^d M$ is a left $\Gamma^d A$-module with module map $\mathtt{r}ho_{\Gamma^d M}$ determined by the composition \[ \Gamma^d A\otimes \Gamma^d M \xrightarrow{\psi^d} \Gamma^d(A\otimes M) \xrightarrow{\Gamma^d(\mathtt{r}ho_M)} \Gamma^d(M), \] where the second map denotes the functorial action of $\Gamma^d$ on $\mathtt{r}ho_M$. \mathtt{b}egin{lemma}\label{lem:hom} Suppose $M,N\mathtt{i}n A\text{-mod}$, and let $\varphi: M\to N$ be an $A$-module homomorphism. Then the functorial map \[\Gamma^d(\varphi): \Gamma^d M \to \Gamma^d N\] is a homomorphism of $\Gamma^d A$-modules. Moreover, if $\varphi$ is injective (resp.~surjective) then so is $\Gamma^d(\varphi)$. \end{lemma} \mathtt{b}egin{proof} The map $\varphi^{\otimes d}: M^{\otimes d} \to N^{\otimes d}$ is a homomorphism of $A^{\otimes d}$-modules, and if $\varphi$ is injective (resp.~surjective) then so is $\varphi^{\otimes d}$. The statements for $\Gamma^d(\varphi)$ follow by restriction. \end{proof} Suppose $d, e\mathtt{i}n \mathbb{N}_0$ and $M, N \mathtt{i}n A\text{-mod}$. Notice that the homogeneous component of comultiplication \mathtt{b}egin{equation}\label{comult} \Delta: \Gamma^{d+e} A \mathtt{r}ightarrow \Gamma^{d} A \otimes \Gamma^{e} A \end{equation} is an injective (unital) map of $\Bbbk$-algebras. 
It follows that $\Gamma^{d} M \otimes \Gamma^{e} N$ has a corresponding $\Gamma^d A$-module structure, defined by restriction along \eqref{comult}. In the particular case $M=N$, we note that each of the following maps is a $\Gamma^d A$-module homomorphism: \mathtt{b}egin{align}\label{eq:homom} \Delta: \Gamma^{d+e} M \to \Gamma^{d} M \otimes \Gamma^{e} M, \qquad & \qquad \nabla:\Gamma^{d} M \otimes \Gamma^{e} M \to \Gamma^{d+e} M, \nonumber \\[.1cm] \tw: \Gamma^{d} M \otimes \Gamma^{e} M\, &\xrightarrow{\mathtt{s}im}\, \Gamma^{e} M \otimes \Gamma^{d} M, \end{align} where $\nabla$ (resp.~$\Delta$) are components of (co)multiplication in the bialgebra $\Gamma(M)$. Setting $A=\Bbbk$ then gives the following. \mathtt{s}mallskip \mathtt{b}egin{lemma}\label{lem:natural} Let $d, e\mathtt{i}n \mathbb{N}$. Then there are natural transformations \mathtt{b}egin{align*} \Delta: \Gamma^{d+e} \to \Gamma^{d} \otimes \Gamma^{e}, \qquad & \qquad \nabla:\Gamma^{d} \otimes \Gamma^{e} \to \Gamma^{d+e} \end{align*} of functors $\P_\Bbbk\to \P_\Bbbk$ induced by setting $\Delta(M)$ (resp.~$\nabla(M)$) equal to (co)multiplication in $\Gamma(M)$, for each $M\mathtt{i}n \P_\Bbbk$. \end{lemma} Now suppose $r\mathtt{i}n \mathbb{N}$ and $\mu \mathtt{i}n \L(r)$. Given $M, N_1, \dots, N_r \mathtt{i}n \P_\Bbbk$, we write \[\Gamma^{(\mu)}(N_1, \dots, N_r) :=\, \Gamma^{\mu_1}N_1 \otimes \mathtt{c}dots \otimes \Gamma^{\mu_r} N_r\] and set \[ \Gamma^\mu M := \Gamma^{(\mu)}(M, \dots, M). \] If $M_1, \dots, M_r \mathtt{i}n A\text{-mod}$, then we consider $\Gamma^{(\mu)}(M_1, \dots, M_r)$ as a left $\Gamma^d A$-module by restriction along the corresponding inclusion, $\Delta: \Gamma^d A \to \Gamma^\mu A$, of $\Bbbk$-algebras. \mathtt{s}mallskip Suppose that $\gamma= (\gamma_{ij})\mathtt{i}n \L_d(\mathbb{N}\times \mathbb{N})$ is a (semi-infinite) matrix whose entries sum to $d$. 
Then let $\lambda, \mu \mathtt{i}n \L_d(\mathbb{N})$ be weights such that $\lambda_i= \mathtt{s}um_j \gamma_{ij}$ and $\mu_j= \mathtt{s}um_i \gamma_{ij}$ for all $i,j\mathtt{i}n\mathbb{N}$. Slightly abusing notation, for a given $N\mathtt{i}n \P_\Bbbk$, we also write $\gamma = \gamma(N)$ to denote the corresponding {\em standard homomorphism:} \[\gamma: \Gamma^\mu N \mathtt{r}ightarrow \Gamma^{\lambda}N\] defined by the composition \mathtt{b}egin{align*} \mathtt{b}igotimes_j\Gamma^{\mu_j} N \xrightarrow{ \Delta \otimes \dots \otimes \Delta} \mathtt{b}igotimes_i \mathtt{b}igotimes_j \Gamma^{\gamma_{ij}} N \xrightarrow{\,\sim\,} \mathtt{b}igotimes_j \mathtt{b}igotimes_i \Gamma^{\gamma_{ij}}N \xrightarrow{\nabla\otimes \dots \otimes \nabla} \mathtt{b}igotimes_i \Gamma^{\lambda_i}N, \end{align*} where each $\nabla$ (resp.~$\Delta$) denotes an appropriate component of (co)multiplication in the bialgebra $\Gamma(N)$, and where the second map rearranges the tensor factors. \mathtt{s}mallskip If $M\mathtt{i}n A\text{-mod}$, then it follows from (\mathtt{r}ef{eq:homom}) that $\gamma(M): \Gamma^\mu M \to \Gamma^\lambda M$ is a homomorphism of $\Gamma^d A$-modules. In the same way, we obtain homomorphisms of $S^A(n,d)$-modules corresponding to any given $M \mathtt{i}n \mathrm{M}_n(A) \hyphen \mathrm{mod}$. \mathtt{s}mallskip \mathtt{s}ubsection{Quotient modules} \label{ss:quotient} Suppose $M\mathtt{i}n \P_\Bbbk$. Then we write $\langle L\rangle \mathtt{s}ubset M^{\otimes d}$ to denote the $\Si_d$-submodule generated by a subset $L \mathtt{s}ubset M^{\otimes d}$. For example if $L_1, \dots, L_d \mathtt{s}ubset M$ are $\Bbbk$-submodules and $L= L_1 \otimes \mathtt{c}dots \otimes L_d$, then \[\langle L \rangle = \mathtt{s}um_{\mathtt{s}igma \mathtt{i}n \Si_d} L_{1\mathtt{s}igma} \otimes \mathtt{c}dots \otimes L_{d\mathtt{s}igma}, \] where $i\mathtt{s}igma := \mathtt{s}igma^{-1}(i)$ denotes the right action of $\mathtt{s}igma$ on $i\mathtt{i}n \mathtt{s}et{1,d}$. 
\smallskip Now suppose $M=N\oplus N'$ for some $\Bbbk$-submodules $N,N' \subset M$. Then notice that there is a corresponding decomposition \[ M^{\otimes d}\, =\, (N')^{\otimes d} \oplus \langle N\otimes M^{\otimes d-1}\rangle, \] which is a direct sum of $\Si_d$-submodules. Taking $\Si_d$-invariants on both sides results in the decomposition \begin{equation}\label{invariant} \Gamma^d M = \Gamma^d(N') \oplus \langle N \otimes M^{\otimes d-1}\rangle^{\Si_d} \end{equation} into $\Bbbk$-submodules. The decomposition \eqref{invariant} then makes it possible to describe the kernel of the quotient map \[\Gamma^d(\pi): \Gamma^d M \twoheadrightarrow \Gamma^d(M/N)\] induced by projection $\pi:M\twoheadrightarrow M/N$. More generally, we note the following. \begin{lemma}\label{lem:quotient} Let $A$ be a $\Bbbk$-algebra. Suppose $N\subset M$ is an inclusion of $A$-modules such that $M=N\oplus N'$ for some $\Bbbk$-submodule $N'\subset M$. Then there is an exact sequence \[0\, \to\, \langle N \otimes M^{\otimes d-1} \rangle^{\Si_d}\, \longrightarrow \, \Gamma^d M\, \xrightarrow{\Gamma^d(\pi)}\, \Gamma^d (M/N)\, \to\, 0\] of $\Gamma^d A$-module homomorphisms. \end{lemma} \begin{proof} It follows from (\ref{invariant}) that the required exact sequence of $\Gamma^d A$-modules is obtained by restriction from the exact sequence \[ 0\, \to\, \langle N\otimes M^{\otimes d-1} \rangle \, \longrightarrow \, M^{\otimes d}\, \xrightarrow{\pi^{\otimes d}}\, (M/N)^{\otimes d}\, \to\, 0 \] of $A^{\otimes d}$-module homomorphisms. \end{proof} We introduce some additional notation. Suppose $N_1, \dots, N_r\subset M$ is a finite collection of $\Bbbk$-submodules of some $M\in \P_\Bbbk$, and let $\mu \in \L_d(r)$.
Then we write \[ N_{\otimes \mu} := N_1^{\otimes \mu_1}\otimes \dots \otimes N_r^{\otimes \mu_r}\] to denote the corresponding $\Bbbk$-submodule of $M^{\otimes d}$ and use the notation \begin{equation}\label{invariant2} N_\mu := \langle N_{\otimes \mu}\rangle^{\Si_d} \subset \G^d M \end{equation} for the $\Bbbk$-submodule of $\Si_d$-invariants. \smallskip \section{Wreath Products and Generalized Schur-Weyl Duality} Let us briefly recall the generalized Schur-Weyl duality \cite{EK1} which establishes a relationship between a wreath product algebra $A\wr \Si_d$ and a corresponding $A$-Schur algebra via their respective actions on a common tensor space. \subsection{Wreath products} Fix a $\Bbbk$-algebra $A$. The {\em wreath product algebra} $A\wr \Si_d$ is the $\Bbbk$-module $A^{\otimes d} \otimes \Bbbk \Si_d$, with multiplication defined by \begin{equation}\label{wr} (x \otimes \rho ) \cdot (y \otimes \sigma) := x (y\rho^{-1}) \otimes \rho \sigma \end{equation} for all $x,y \in A^{\otimes d}$ and $\rho, \sigma \in \Si_d$. If $G$ is a finite group, then note for example that $(\Bbbk G) \wr \Si_d$ is isomorphic to the group algebra of the classical wreath product, $G\wr \Si_d := G^{d} \rtimes \Si_d$. \smallskip Assume for the rest of the section that $A$ is free as a $\Bbbk$-module. We then identify the tensor power $A^{\otimes d}$ and group algebra $\Bbbk\Si_d$ as subalgebras of $A\wr \Si_d$ by setting \[A^{\otimes d}=A^{\otimes d} \otimes 1_{\Si_d}, \quad \Bbbk\Si_d=1_{A^{\otimes d}} \otimes \Bbbk \Si_d\] respectively. \subsection{Generalized Schur-Weyl duality} Suppose $n, d\in \mathbb{N}$.
Write $\mathrm{V}_{\hspace{-1pt}n} := \Bbbk^n$ to denote the standard left $\mathrm{M}_n(\Bbbk)$-module, with basis elements \[ v_i:= (0, \dots, 1, \dots, 0)\] for $i\mathtt{i}n \mathtt{s}et{1,n}$, considered as column vectors. Then for simplicity, let us write \[{\mathrm{V}} := \mathrm{V}_{\hspace{-1pt}n}(A) = \Bbbk^n \otimes A\] to denote the corresponding left $\mathrm{M}_n(A)$-module. \mathtt{s}mallskip We may identify $\mathrm{V}$ and $A^n$ as right $A$-modules, and it follows that the {\em tensor space}, $\mathrm{V}^{\otimes d}$, is naturally a right $A^{\otimes d}$-module. A right action of $A\wr \Si_d$ on $\mathrm{V}^{\otimes d}$ is then defined by setting \mathtt{b}egin{equation}\label{space2} w(x\mathtt{c}dot\mathtt{s}igma) := (wx)\mathtt{s}igma, \quad \text{for}\, \ w\mathtt{i}n \mathrm{V}^{\otimes d}, \, x\mathtt{i}n A^{\otimes d}\text{, and}\ \mathtt{s}igma \mathtt{i}n \Si_d. \end{equation} More explicitly, suppose $w=w_1\otimes\dots \otimes w_d$ and $x=x_1\otimes \dots\otimes x_d$, for some $w_i\mathtt{i}n \mathrm{V}$ and $x_i\mathtt{i}n A$. Then notice that \mathtt{b}egin{equation*} (wx)\mathtt{s}igma = (w_{1\mathtt{s}igma}x_{1\mathtt{s}igma}) \otimes \dots \otimes (w_{d\mathtt{s}igma}x_{d\mathtt{s}igma}) =(w\mathtt{s}igma)(x\mathtt{s}igma) \end{equation*} for any $\mathtt{s}igma \mathtt{i}n \Si_d$. Hence, by \eqref{wr} we have \[w(\mathtt{s}igma \mathtt{c}dot x) = w((x\mathtt{s}igma^{-1})\mathtt{c}dot \mathtt{s}igma) = ( w(x\mathtt{s}igma^{-1}))\mathtt{s}igma = (w\mathtt{s}igma)x.\] It follows that \eqref{space2} is well-defined. \mathtt{b}egin{lemma}[{\mathtt{c}ite[Lemma 5.7]{EK1}}] The embedding $S^A(n,d) \hookrightarrow \mathrm{M}_n(A)^{\otimes d} \mathtt{c}ong \mathrm{End}_{A^{\otimes d}}(\mathrm{V}^{\otimes d})$ defines an algebra isomorphism \[S^A(n,d) \mathtt{c}ong \mathrm{End}_{A\wr\Si_d}(\mathrm{V}^{\otimes d})\] for all $n,d \mathtt{i}n \mathbb{N}$. 
\end{lemma} Given $n\geq d$, let $\omega \mathtt{i}n \Lambda_d(n)$ denote the weight $\omega = (1^d)= (1, \dots, 1, 0, \dots, 0)$. Then considering $\mathrm{V}$ again as a left $\mathrm{M}_n(A)$-module, notice that $\mathrm{V}^{\otimes d}$ is equal to the left $S^A(n,d)$-module $\Gamma^{\omega} \mathrm{V}$. \mathtt{s}mallskip For each weight $\mu\mathtt{i}n \Lambda_d(n)$, define a corresponding element \[v_{\otimes \mu} := v_1^{\otimes \mu_1} \otimes \dots \otimes v_n^{\otimes \mu_n}\] in the tensor space $\mathrm{V}^{\otimes d}$. \mathtt{s}mallskip The next result summarizes (5.15) and (5.17) of \mathtt{c}ite{EK1}. \mathtt{b}egin{proposition}[{\mathtt{c}ite{EK1}}]\label{prop:EK} Assume that $n\geq d$. \mathtt{b}egin{itemize} \mathtt{i}tem[(i)] There is a unique $(S^A(n,d), A\wr \Si_d)$-bimodule isomorphism $S^A(n,d)\xi_\omega \xrightarrow{\mathtt{s}im} \mathrm{V}^{\otimes d}$ which maps $\xi_\omega \mapsto v_{\otimes \omega}$. \mathtt{i}tem[(ii)] There is an algebra isomorphism, $A\wr \Si_d \xrightarrow{\mathtt{s}im} \xi_\omega S^A(n,d) \xi_\omega$, given by: \[(x_1\otimes \dots \otimes x_d) \otimes \mathtt{s}igma \mapsto \xi_{1,1\mathtt{s}igma}^{x_1} \mathtt{a}st \dots \mathtt{a}st \xi_{d,d\mathtt{s}igma}^{x_d}.\] \mathtt{i}tem[(iii)] $\mathrm{End}_{S^A(n,d)}(\mathrm{V}^{\otimes d})\, \mathtt{c}ong\, A\wr \Si_d$. \end{itemize} \end{proposition} \mathtt{s}ection{Cauchy Decompositions} \label{S:Cauchy} The Cauchy decomposition for symmetric algebras via Schur modules \mathtt{c}ite{ABW} is an analogue of Cauchy's formula for symmetric functions \mathtt{c}ite{Cauchy, Mac}. A corresponding decomposition for divided powers \mathtt{c}ite{HK, Krause1} is defined in terms of Weyl (or co-Schur) modules. In this section, we describe a generalized Cauchy decomposition (Theorem \mathtt{r}ef{thm:gen_Cauchy}) for divided powers of an $(A,B)$-bimodule with respect to a given filtration on the bimodule. 
\subsection{Weyl modules}\label{ss:Weyl} Weyl modules are defined in \cite[Definition II.1.4]{ABW} as the image of a single map from a tensor product of divided powers of a module into a tensor product of exterior powers. We use an equivalent definition from the proof of \cite[Theorem II.3.16]{ABW} which involves quotients of divided powers. \smallskip Throughout the section, we fix some $d\in \mathbb{N}$. Suppose $\lambda \in \L_d(\mathbb{N})$, and let $M\in \P_\Bbbk$. For each pair $(i,t)$ with $1\leq i < l(\lambda)$ and $1\leq t \leq \lambda_{i+1}$, let us write \begin{equation}\label{eq:pm} \lambda(i,t) =\, (\lambda_1,\, \dots,\, \lambda_{i-1} ,\, \lambda_i +t,\, \lambda_{i+1}-t,\, \lambda_{i+2},\, \dots, \lambda_m)\, \in\, \L_d(\mathbb{N}). \end{equation} Then write $\gamma_{\lambda(i,t)} : \Gamma^{\lambda(i,t)} M \to \Gamma^\lambda M$ to denote the standard homomorphism corresponding to the matrix \[ \gamma_{\lambda(i,t)} := \mathrm{diag}(\lambda_1, \lambda_2, \dots ) + t E_{i+1, i} - t E_{i+1, i+1}. \] Similarly, let $\gamma^{\tr}_{\lambda(i,t)} : \Gamma^\lambda M \to \Gamma^{\lambda(i,t)} M$ denote the map corresponding to the transpose of the above matrix. \begin{definition}[\cite{ABW}] Suppose $M\in \P_\Bbbk$ and $\lambda\in \Lambda_d^+(\mathbb{N})$. Let $\square_\lambda(M)$ denote the $\Bbbk$-submodule of $\Gamma^\lambda M$ defined by \[\square_\lambda(M) :=\, \sum_{i \geq 1} \sum_{t=1}^{\lambda_{i+1} } \mathrm{Im}(\gamma_{\lambda(i,t)} ) \subset \Gamma^\lambda M.\] The {\em Weyl module}, $W_\lambda(M)$, is defined as the quotient $\Bbbk$-module \[ W_\lambda(M) := \Gamma^\lambda M \big / \square_\lambda(M). \] \end{definition} Let $A$ be a $\Bbbk$-algebra and suppose now that $M \in A\hyphen \mathrm{mod}$.
Then $\square_\lambda(M)$ is a $\Gamma^d A$-submodule of $\Gamma^\lambda M$, since the standard homomorphisms are $\Gamma^d A$-module maps. It follows that $W_\lambda(M)$ is a $\Gamma^d A$-module. In particular, $W_\lambda(\Bbbk^n)$ is an $S(n,d)$-module. \subsection{The standard basis} Consider a fixed partition $\lambda =(\lambda_1, \lambda_2, \dots ) \in \Lambda^+_d(\mathbb{N})$. The {\em Young diagram} of $\lambda$ is the following subset of $\mathbb{N}\times \mathbb{N}$: \[\set{\lambda} := \{(i,j)\ |\ 1\leq i\leq l(\lambda),\ 1\leq j \leq \lambda_i\}.\] Suppose $\B$ is a finite totally ordered set. Let ${\Tab}_{\lambda}(\B)$ denote the set of all functions ${\tt T}: \set{\lambda} \to \B$, called {\em tableaux} ({\em of shape $\lambda$}). \smallskip A tableau ${\tt T}$ will be identified with the diagram obtained by placing each value $\mathrm{T}_{i,j}:= {\tt T}(i,j)$ in the $(i,j)$-th entry of $\set{\lambda}$. For example if ${\tt T}\in \Tab_{(3,2)}(\B)$, then we write \begin{equation}\label{eq:tab} {\tt T}\, = \begin{array}{ccc} \mathrm{T}_{1,1} & \mathrm{T}_{1,2} & \mathrm{T}_{1,3} \\[3pt] \mathrm{T}_{2,1} & \mathrm{T}_{2,2} & \end{array} \end{equation} We say that a tableau ${\tt T}$ is {\em row} ({\em column}) {\em standard} if each row (column) is a nondecreasing (increasing) function of $j$ (resp.~$i$), and ${\tt T}$ is {\em standard} if it is both row and column standard. \smallskip Let $\st_\lambda(\B) \subset \Tab_\lambda(\B)$ denote the subset of all standard tableaux. This subset is nonempty if and only if $l(\lambda) \leq \sharp\B$. In particular, suppose $l(\lambda) \leq \sharp\B$ and assume the elements of $\B$ are listed as in \eqref{setB}.
Then we write ${\tt T}^\lambda = {\tt T}^\lambda(\B)$ to denote the standard tableau in $\st_\lambda(\B)$ with entries $ \mathrm{T}^{\lambda}_{i,j} := b_i^\B$ for all $(i,j) \in \set{\lambda}$. For example, if $d=7$, $\lambda = (4,2,1)$ and $\B=\set{1,3}$, then \begin{equation}\label{eq:tab2} {\tt T}^\lambda = \begin{array}{cccc} 1 & 1 & 1 & 1 \\ 2 & 2 & & \\ 3 & & & \end{array} \end{equation} Fix a free $\Bbbk$-module $V$ with finite ordered basis $\{x_b\}_{b\in \B}$. If ${\tt T}\in \Tab_{\lambda}(\B)$, then for $q=l(\lambda)$ and $i\in \set{1,q}$ we write \[{\tt T}_i:= {\tt T}(i,-) \in \seq^{\lambda_i}(\B)\] to denote the $i$-th row of ${\tt T}$, and we set \[x_{\tt T} :=\, x_{ {\tt T}_{1}} \otimes \cdots \otimes x_{ {\tt T}_{q} } \ \in \Gamma^\lambda V.\] Notice that the set of $x_{\tt T}$ parameterized by all row standard ${\tt T}\in \Tab_\lambda(\B)$ forms a basis of $\Gamma^\lambda V$. \smallskip The following result describes a basis for Weyl modules. \begin{proposition}[\cite{ABW}, Theorem II.3.16] \label{prop:standard} Let $\lambda\in \Lambda^+(\mathbb{N})$ and suppose $V$ is a free $\Bbbk$-module with a finite ordered basis $\{x_b\}_{b\in \B}$. Then the Weyl module $W_\lambda(V)$ is also a free $\Bbbk$-module, with basis given by the set of images \[\{\bar{x}_{\tt T} := \pi(x_{\tt T})\, |\ {\tt T}\in \st_\lambda(\B)\}\] under the canonical projection $\pi: \Gamma^\lambda V \twoheadrightarrow \Gamma^\lambda V\big/\square_\lambda(V)$. \end{proposition} This result shows for example that the Weyl module $W_\lambda(V)$ is nonzero if and only if $l(\lambda)\leq\sharp \B$. Another consequence of the proposition is that $W_\lambda(M)$ is a projective $\Bbbk$-module for any $M\in \P_\Bbbk$ (cf.\,\cite[p.\,1013]{Krause1}).
\mathtt{s}ubsection{The Cauchy decomposition} Suppose $M,N\mathtt{i}n \P_\Bbbk$. The maps $\psi^d$ appearing in (\mathtt{r}ef{commute}) can be generalized as follows. If $\lambda\mathtt{i}n \L^+_d(\mathbb{N})$, let \[\psi^\lambda(M,N) :\, \Gamma^\lambda M \otimes \Gamma^\lambda N \, \to\, \Gamma^d (M\otimes N)\] denote the map defined via the composition \mathtt{b}egin{align*} \Gamma^\lambda M \otimes \Gamma^\lambda N \xrightarrow{\,\sim\,} ( \Gamma^{\lambda_1} M \otimes \Gamma^{\lambda_1} N) & \otimes \dots \otimes (\Gamma^{\lambda_m} M \otimes \Gamma^{\lambda_m} N)\\[.1cm] \xrightarrow{\, \psi \otimes \dots \otimes \psi } \Gamma^{\lambda_1} (M \otimes N) & \otimes \dots \otimes \Gamma^{\lambda_m} (M \otimes N) \xrightarrow{\nabla} \Gamma^d (M\otimes N), \end{align*} where the first map permutes tensor factors and the last map is multiplication in the bialgebra $\Gamma(M\otimes N)$. \mathtt{s}mallskip Let us write $\Gamma^\lambda: \P_\Bbbk \to \P_\Bbbk$ to denote the tensor product of functors \[ \Gamma^\lambda := \Gamma^{\lambda_1}\otimes \mathtt{c}dots \otimes \Gamma^{\lambda_m} \] defined in the same way as \eqref{tensor}. Then it follows from Lemma \mathtt{r}ef{psi} that the maps $\psi^\lambda(M,N)$ induce a natural transformation \mathtt{b}egin{equation} \psi^\lambda : \Gamma^\lambda \mathtt{b}oxtimes \Gamma^\lambda \to \Gamma^\lambda(- \otimes - ) \end{equation} of bifunctors $\P_\Bbbk\times \P_\Bbbk \to \P_\Bbbk$. \mathtt{s}mallskip The following lemma is a special case of \mathtt{c}ite[Proposition III.2.6]{HK} which describes the relationship between $\psi$-maps and standard homomorphisms. \mathtt{b}egin{lemma}[\mathtt{c}ite{HK}]\label{lem:HK} Suppose $\lambda\mathtt{i}n \L_d^+(\mathbb{N})$, and set $q=l(\lambda)$. 
Given a pair $U,V$ of free $\Bbbk$-modules of finite rank, the following diagram is commutative \begin{equation*} \begin{tikzcd}[row sep=large] \Gamma^{\lambda(i,t)}U \otimes \Gamma^{\lambda}V \ar[rr, "\mathrm{id}\otimes {\gamma}^\tr_{\lambda(i,t)}"] \ar[d, "\, \gamma_{\lambda(i,t)}\otimes\mathrm{id}", shift left=1.7ex ] & & \Gamma^{\lambda(i,t)}U \otimes \Gamma^{\lambda(i,t)}V \ar[d, "\, \psi^{\lambda(i,t)}" ] & & \Gamma^{\lambda}U \otimes \Gamma^{\lambda(i,t)}V \ar[ll, "{\gamma}^\tr_{\lambda(i,t)}\otimes \mathrm{id}" ' ] \ar[d, "\, \mathrm{id} \otimes \gamma_{\lambda(i,t)}" , shift right=1.7ex] \\ \hspace{0.5cm} \Gamma^{\lambda}U \otimes \Gamma^{\lambda}V \ar[rr, "\psi^\lambda", shorten >=1.1em ] & & \hspace{-0.4cm} \Gamma^d(U \otimes V) & & \Gamma^{\lambda}U \otimes \Gamma^{\lambda} V \hspace{0.5cm} \ar[ll,"\psi^\lambda" '] \end{tikzcd} \end{equation*} for any $i\in \set{1,q-1}$ and $t \in \set{1,\lambda_{i+1}}$. \end{lemma} Recalling the total order $\preceq$ on $\Lambda^+_d(\mathbb{N})$ from Definition \ref{lex1}, write $\lambda^+$ to denote the immediate successor of a partition $\lambda$ and set $(d)^+ := \infty$. The {\em Cauchy filtration} is then defined as the chain \begin{equation*} 0\, =\, \F_{\infty}\, \subset \F_{(d)}\, \subset\, \dots \subset\, \F_{(1,\dots, 1)}\, = \, \Gamma^d(M\otimes N) \end{equation*} where $\F_\lambda := \sum_{\mu \succeq \lambda} \mathrm{Im}(\psi^\mu)$. \smallskip The following result describes the factors of this filtration. \begin{theorem} [{\cite[Theorem III.2.7]{HK}}] \label{thm:HK} Let $U,V$ be free $\Bbbk$-modules of finite rank.
Then for each $\lambda \in \L^+_d(\mathbb{N})$, the map $\psi^\lambda$ induces an isomorphism \[\bar{\psi}^\lambda: W_\lambda(U) \otimes W_\lambda(V) \, \xrightarrow{\,\sim\,}\, \F_\lambda/ \F_{\lambda^+}\] which makes the following diagram commutative: \begin{equation*} \begin{tikzcd}[row sep=normal] \Gamma^{\lambda}U \otimes \Gamma^{\lambda}V \arrow[r, shorten <=3, shorten >=3, " \psi^\lambda"] \arrow[d, two heads] & \F_{\lambda} \arrow[d, two heads] \\ W_\lambda(U) \otimes W_\lambda(V) \arrow[r, xshift =5, shorten <=-3, shorten >=-3, "\, \bar{\psi}^\lambda" ] &\hspace{1em} \F_\lambda/ \F_{\lambda^+} \end{tikzcd} \end{equation*} Hence, the associated graded module of the Cauchy filtration is \[ \bigoplus_{\lambda\in \L^+_d(\mathbb{N})} W_\lambda(U) \otimes W_\lambda(V).\] \end{theorem} \begin{proof} We recall the proof from \cite{HK}. It follows by definition that $W_\lambda(U) \otimes W_\lambda(V)$ is the quotient of $\Gamma^\lambda U \otimes \Gamma^\lambda V$ by the submodule $\square_{\lambda}(U) \otimes \Gamma^\lambda V + \Gamma^\lambda U \otimes \square_{\lambda}(V) $. Hence, by Lemma \ref{lem:HK} we have \[\square_{\lambda}(U) \otimes \Gamma^\lambda V\, +\, \Gamma^\lambda U \otimes \square_{\lambda}(V) \ \subset\ \mathrm{Im}(\psi^{\lambda(i,t)})\ \subset\ \F_{\lambda^+},\] since $\lambda(i,t) > \lambda$. This proves the existence of the induced map $\bar{\psi}^\lambda$ satisfying the given commutative square. It is clear that $\bar{\psi}^\lambda$ is surjective. Comparing the ranks of $\Gamma^d(U\otimes V)$ and $\bigoplus_{\lambda\in \L^+_d(\mathbb{N})} W_\lambda(U) \otimes W_\lambda(V)$ shows that $\bar{\psi}^\lambda$ must be an isomorphism for each $\lambda$.
\end{proof} Given free $\Bbbk$-modules $U,V\in \P_\Bbbk$ with finite ordered bases $\{x_b\}_{b\in \B}$ and $\{y_c\}_{c\in \C}$, respectively, let $\F^{\,\prime}_{\hm{3}\lambda} \subset \Gamma^d(U\otimes V)$ denote the $\Bbbk$-submodule generated by \[\{ \psi^{\lambda}(x_S \otimes y_T)\ |\ S \in \st_\lambda(\B),\, T \in \st_\lambda(\C)\}\] where $\F^{\,\prime}_{\hm{3}\lambda}$ is nonzero only if $l(\lambda) \leq \min(\sharp \B , \sharp \C)$. \begin{corollary}\label{cor:HK} For each $\lambda \in \L_d^+(\mathbb{N})$, the $\Bbbk$-submodule $\F^{\,\prime}_{\hm{3}\lambda} \subset \Gamma^d(U\otimes V)$ is free, and there is a corresponding decomposition: \[\Gamma^d(U\otimes V) =\bigoplus_{\lambda} \F^{\, \prime}_{\hm{3}\lambda}, \quad \text{such that} \quad \F_{\lambda} = \bigoplus_{\mu\geq \lambda} \F^{\,\prime}_{\hspace{-.01cm}\mu} \quad \text{for all } \lambda \in \L^+_d(\mathbb{N}).\] \end{corollary} \begin{proof} Suppose $\lambda \in \L^+_d(\mathbb{N})$, and set $\T= \st_\lambda(\B)\times \st_\lambda(\C)$. By Proposition \ref{prop:standard}, $\{\bar{x}_S \otimes \bar{y}_T \, |\, (S,T) \in \T\}$ forms a basis of $W_\lambda(U) \otimes W_\lambda(V)$. So \[ \{ \bar{\psi}^\lambda(x_S \otimes y_T)\, |\, (S,T) \in \T\} \] gives a basis for $\F_\lambda/ \F_{\lambda^+}$ by Theorem \ref{thm:HK}. This shows that the subset \[\{ \psi^\lambda(x_S \otimes y_T) \, |\, (S,T) \in \T\}\ \subset\ \Gamma^d ( U \otimes V)\] is linearly independent. Thus $\F^{\,\prime}_{\hm{3}\lambda}$ is a free $\Bbbk$-submodule. It is also clear that $\F_\lambda = \F_{\lambda^+} \oplus \F^{\,\prime}_{\hm{3}\lambda}$, and the required decompositions follow by induction.
\end{proof} \subsection{Bimodule filtrations} In the remainder of this section, we fix a set $\{ J_1', \dots, J_r'\}$ of nonzero free $\Bbbk$-submodules, $J_i' \subset J$, such that setting \begin{equation}\label{eq:decomp} J_j := \bigoplus_{1\leq i \leq j} J_i'\quad \text{for}\ \ j \in \set{1, r} \end{equation} yields a chain \[ 0= J_0 \subset J_1 \subset \dots \subset J_r = J \] of $(A, B)$-bimodules. \smallskip Recalling the notation \eqref{invariant2}, we then have for each $\mu\in \L_d(r)$ the following $\Bbbk$-submodules of $\G^d J$: \[ J'_\mu = \langle J'_{\otimes \mu}\rangle^{\Si_d}, \qquad J_\mu = \langle J_{\otimes \mu}\rangle^{\Si_d}.\] Note first that $J_\mu$ is a $\Gamma^d(A\otimes B)$-submodule of $\Gamma^d J$, and hence a ($\G^d A, \G^d B$)-bimodule. It is also not difficult to check that there is a decomposition of $J^{\otimes d}$ into free $\Bbbk$-submodules \[ J^{\otimes d} = \bigoplus_{\mu \in \L_d(r)} \langle J'_{\otimes \mu}\rangle. \] By taking $\Si_d$-invariants on both sides, we thus obtain the following decomposition \begin{equation}\label{eq:decomp2} \Gamma^d J\ =\, \bigoplus_{\mu \in \L_d(r)} \langle J'_{\otimes\mu}\rangle \cap \Gamma^d J\ =\, \bigoplus_{\mu \in \L_d(r)} J'_\mu. \end{equation} Next recall that the {\em dominance order} on $\L_d(r)$ is the partial order defined by setting $\mu \trianglelefteq \nu$ if \[ {\sum_{i\leq j} \mu_i }\ \leq \ {\sum_{i\leq j} \nu_i } \ \text{ for } \ j\in \set{1, r}. \] Notice that $J_\mu \subset J_\nu$ if and only if $\mu \trianglerighteq \nu$.
We further have $J_\nu = J'_\nu \oplus \sum_{\mu \triangleright \nu} J_\mu$, and it follows by induction that \begin{equation}\label{eq:decomp_nu} J_{\nu} = \bigoplus_{\mu \trianglerighteq \nu} J'_{\mu} \end{equation} for all $\nu \in \L_d(r)$, which generalizes the decomposition \eqref{eq:decomp2} of $\Gamma^d J$. \smallskip Consider the map $\nabla: \Gamma^\mu J \to \Gamma^d J$ given by $r$-fold (outer) multiplication in $\Gamma( J)$, for some $\mu \in \L_d(r)$. Note that the restriction \[\nabla^\mu : \Gamma^{(\mu)}(J_1, \dots, J_r) \to \Gamma^d J \quad \left(\text{resp.~} '\nabla^\mu : \Gamma^{(\mu)}(J'_1, \dots, J'_r) \to \Gamma^d J\right)\] is a $(\Gamma^d A, \Gamma^d B)$-bimodule (resp.~$\Bbbk$-module) homomorphism. \begin{lemma}\label{lem:image} Suppose $\nu \in \L_d(r)$. Then \\[-.25cm] \begin{enumerate} \item $J'_\nu = \mathrm{Im}\, '\nabla^\nu$, \\[-.15cm] \item $'\nabla^{\nu} : \Gamma^{(\nu)}(J'_1,\dots, J'_r) \xrightarrow{\,\sim\,} J'_\nu$ is an isomorphism of $\Bbbk$-modules, \\[-.15cm] \item $J_\nu = \sum_{\mu\trianglerighteq\nu} \mathrm{Im}\, \nabla^\mu$, summing over $\mu\in \L_d(r)$. \end{enumerate} \end{lemma} \begin{proof} For each $\mu \in \L_d(r)$, write $M_\mu$, $M'_\mu$ to denote the images of $\Gamma^{(\mu)}(J_1, \dots, J_r)$ and $\Gamma^{(\mu)}(J'_1, \dots, J'_r)$, respectively, under the map $\nabla^\mu: \Gamma^\mu J \to \Gamma^d J$. It is then clear from the definitions that $M'_\mu \subset J'_\mu$ and similarly $M_\mu = \mathrm{Im}\,\nabla^{\mu} \subset J_\mu$, for all $\mu$.
It follows inductively from the isomorphism (\ref{eq:expon2}) that there is a decomposition \begin{equation*} \Gamma^d J\ =\ \Gamma^d(J'_1\oplus \dots \oplus J'_r) \ = \bigoplus_{\mu\in \L_d(r)} M'_\mu\ \, \subset \bigoplus_{\mu\in \L_d(r)} J'_\mu. \end{equation*} It thus follows from (\ref{eq:decomp2}) that $J'_\mu =M'_\mu \cong \Gamma^{(\mu)}(J'_1, \dots, J'_r)$ which shows (1) and (2). Since $J_{\mu} \subset J_{\nu}$ whenever $\mu \trianglerighteq \nu$, it follows from (\ref{eq:decomp_nu}) that \[J_{\nu}\, =\, \bigoplus_{\mu\trianglerighteq \nu} M'_\mu \, \subset \, \sum_{\mu\trianglerighteq \nu} M_\mu \, \subset \, \sum_{\mu\trianglerighteq \nu} J_\mu \, \subset \, J_{ \nu}\] showing (3). \end{proof} Recall the lexicographic ordering $\leq$ on $\L_d(r)$ from Definition \ref{lex1}, and notice that there is a chain of $(\Gamma^d A, \Gamma^d B)$-sub-bimodules \[ 0 \, \subset \, \Gamma^d (J_1) \, = \, J_{\geq (d,0,\dots, 0)} \, \subset \, \cdots \, \subset \, J_{\geq \nu} \, \subset \, \cdots \, \subset \, J_{\geq (0, \dots, 0, d)} \, = \, \Gamma^d J \] where $J_{\geq \nu} := \sum_{\mu\geq \nu} J_\mu$ for each $\nu \in \L_d(r)$. Since the lexicographic ordering refines the dominance order, it follows from (\ref{eq:decomp_nu}) that \begin{equation}\label{eq:sum_nu} J_{\geq\nu} = J_{> \nu} \oplus J'_\nu \end{equation} for all $\nu$. Thus \[J_{\geq \nu} = \sum_{\mu \geq \nu} \mathrm{Im}(\nabla^{\mu})\] by the preceding lemma. This allows us to describe the quotients $J_{\geq \nu}/ J_{>\nu}$ as follows. \begin{proposition}\label{prop:commute} Let $\nu \in \L_d(r)$.
Then $\nabla^\nu$ induces an isomorphism \[\bar{\nabla}^\nu: \Gamma^{(\nu)}(J_1/J_0, \dots, J_r/J_{r-1}) \cong J_{\,\geq\nu} / J_{\,>\nu}\] which yields a commutative square of $(\Gamma^d A, \Gamma^d B)$-bimodule homomorphisms \begin{equation*} \begin{tikzcd}[row sep=large] \Gamma^{(\nu)}(J_1, \dots, J_r) \arrow[r, " \nabla^\nu"] \arrow[d, two heads, "\pi_{\phantom{1}}^\nu"' ] & J_{\,\geq\nu} \arrow[d, two heads, "\pi" ] \\ \Gamma^{(\nu)}(J_1/J_{0}, \dots, J_r/J_{r-1}) \arrow[r, " \bar{\nabla}^\nu"] & J_{\,\geq\nu} / J_{\,>\nu} \end{tikzcd} \end{equation*} where $\pi^\nu$ denotes the tensor product of functorial maps $\Gamma^{\nu_j}(\pi_j)$ associated to the projections, $\pi_j: J_j \to J_j/J_{j-1}$, for $j=1, \dots, r$, and where $\pi$ is also projection. \end{proposition} \begin{proof} We first verify that $\ker \pi^\nu \subset J_{>\nu}$ in order to show the existence of the map $\bar{\nabla}^\nu$ satisfying the above diagram. If $1\leq j \leq r$, consider the $(\Gamma^d A , \Gamma^d B)$-sub-bimodule \[K_j :=\, \Gamma^{(\nu^1)}(J_1,\dots, J_r) \otimes \ker \Gamma^{\nu_j}(\pi_j) \otimes \Gamma^{(\nu^2)}(J_1,\dots, J_r)\] where $\nu^1 = (\nu_1, \dots, \nu_{j-1}, 0, \dots, 0)$ and $\nu^2 = (0, \dots, 0, \nu_{j+1}, \dots, \nu_r)$. Then $\ker \pi^\nu =\sum_{j=1}^r K_j$, and we must show that $K_j \subset J_{>\nu}$ for all $j$. \smallskip Now $K_j = 0$, if either $j=1$ or $\nu_j=0$. If $K_j\neq 0$ and $1\leq t \leq \nu_j$, let $\nu(j,t)\in \L_d(r)$ be defined as in (\ref{eq:pm}). Since $\nu(j,1) > \nu$, it suffices to show that $\nabla^\nu(K_j) \subset \mathrm{Im}\, \nabla^{\nu(j,1)}$ for all such $j$. The fact that $\nu$ and $\nu(j,1)$ are equal except for entries in the $j$-th and $(j-1)$-st positions allows us to simplify to the case $r=2$. So we may assume $\nu = (\nu_1, \nu_2)$.
Then for $j=2$, we have $\nu(2,1) = (\nu_1+1, \nu_2-1)$. In this case $K_2 = \ker(\pi^\nu)$, and it follows by Lemma \ref{lem:quotient} that \[ K_2 =\, \Gamma^{\nu_1} J_1 \otimes J_{(1,\nu_2-1)} \subset \, \Gamma^{(\nu)}(J_1, J_2).\] Notice by Lemma \ref{lem:image} that $J_{(1,\nu_2-1)}$ is equal to the image of the map \[\nabla^{(1,\nu_2-1)}: J_1 \otimes \Gamma^{\nu_2-1} (J_2) \to \Gamma^{\nu_2}(J_2).\] By associativity of multiplication in $\Gamma(J)$, we also have a commutative diagram \begin{equation*} \begin{tikzcd} [column sep=small, row sep=small] & \Gamma^{((\nu_1, 1, \nu_2-1))} (J_{1} , J_{1} , J_2) \arrow[ddl, "\mathrm{id}\otimes \nabla^{(1,\nu_2-1)}"', shorten >=-0.2em] \arrow[ddr, "\nabla \otimes \mathrm{id}"] \\ \\ \Gamma^{(\nu)}(J_1, J_2)\, \arrow[dr, " \nabla^\nu"' , shorten <=-0.5em] & & \ \, \Gamma^{(\nu(2,1))}(J_1, J_2) \arrow[dl, " \nabla^{\nu(2,1)}" , shorten >=-0.2em, shorten <=-0.9em ] \\ & J_{\geq\nu} & \end{tikzcd} \end{equation*} It follows that $\nabla^\nu(K_2) \subset \mathrm{Im}\, \nabla^{\nu(2,1)}$, which shows the existence of $\bar{\nabla}^\nu$. To complete the proof, note that the restriction $\pi^\nu |_{\Gamma^{(\nu)}(J'_1, \dots, J'_r)}$ is a $\Bbbk$-module isomorphism. The map $(\pi\circ \nabla^\nu) |_{\Gamma^{(\nu)}(J'_1, \dots, J'_r)}$ is also a $\Bbbk$-module isomorphism by Lemma \ref{lem:image}. It follows that $\bar{\nabla}^\nu$ is an isomorphism by commutativity. \end{proof} \subsection{Multitableaux} Suppose $\{\B_j\}_{j \in\set{1,r}}$ is a collection of finite totally ordered sets, and let $\boldsymbol \lambda \in \L^{+}_d(\mathbb{N})^r$ be an $r$-multipartition. Elements of the set \[ \Tab_{\boldsymbol \lambda}(\B_1, \dots, \B_r) := \Tab_{\lambda^{(1)}}(\B_1) \times \dots \times \Tab_{\lambda^{(r)}}(\B_r).
\] are called {\em multitableaux of shape $\boldsymbol \lambda$} (or $\boldsymbol \lambda$-{\em multitableaux}). \smallskip We say that a $\boldsymbol \lambda$-multitableau, $\mathbf T= ({\tt T}^{(1)}, \dots, {\tt T}^{(r)})$, is {\em standard} if each component ${\tt T}^{(j)}$ is a standard $\lambda^{(j)}$-tableau. The subset of standard ${\boldsymbol \lambda}$-multitableaux is denoted \[ \st_{\boldsymbol \lambda}(\B_\ast) = \st_{\boldsymbol \lambda}(\B_1, \dots, \B_r).\] If $(n_1, \dots, n_r) \in \mathbb{N}^r$ is the sequence of integers with $n_j := \sharp \B_j$ for all $j$, then it follows from \eqref{eq:tab} that $\st_{\boldsymbol \lambda}(\B_\ast)$ is non-empty if and only if $\boldsymbol \lambda$ belongs to the subset ${\L}^+_d(n_1, \dots, n_r) \subset \L^+_d(\mathbb{N})^r$. In this case, we write $\mathbf T^{\boldsymbol \lambda} = \mathbf T^{\boldsymbol \lambda}(\B_\ast)$ to denote the standard $\boldsymbol \lambda$-multitableau \begin{equation}\label{multi-tab} \mathbf T^{\boldsymbol \lambda} := ({\tt T}^{\lambda^{(1)}}, \dots,{\tt T}^{\lambda^{(r)}}). \end{equation} Suppose $\nu = (\nu_1, \dots, \nu_r) \in \L_d(r)$. There is a corresponding $r$-multipartition $(\nu) := ((\nu_1), (\nu_2), \dots, (\nu_r)) \in \L^{+}_d(\mathbb{N})^r$. For any $m\in \mathbb{N}$, let us write $(1^m) := (1,\dots, 1) \in \L^+_m(\mathbb{N})$, and set $(1^0) = 0$. Then we also have an element \[ (\nu)' := ((1^{\nu_1}), (1^{\nu_2}), \dots, (1^{\nu_r})) \in \L^{+}_\nu(\mathbb{N}). \] Recalling the total order $\preceq$ from Definition \ref{lex2}, notice that $(\nu)' \preceq \boldsymbol \lambda \preceq (\nu)$ for all $\boldsymbol \lambda \in \L_\nu^+(\mathbb{N})$.
We also write $\boldsymbol \lambda^+$ to denote the immediate successor of any $\boldsymbol \lambda \in \L^{+}_d(\mathbb{N})^r$ and set $((d))^+ = \infty$. \subsection{Generalized Weyl modules}\label{ss:gen_Weyl} Given $\boldsymbol \lambda \in \L^{+}_d(\mathbb{N})^r$ and projective modules $M_j \in \P_\Bbbk$ for $j\in \set{1,r}$, we will use the notation \[\Gamma^{\boldsymbol \lambda} (M_\ast) := \bigotimes_{j} \Gamma^{\lambda^{(j)}} M_j, \qquad W_{\boldsymbol \lambda} (M_\ast) := \bigotimes_{j} W_{\lambda^{(j)}} M_j\] in what follows. The outer tensor product $-\boxtimes-$\,, defined in Section \ref{ss:functor}, yields corresponding functors $\Gamma^{\boldsymbol \lambda}, W_{\boldsymbol \lambda}: \P_\Bbbk^{\times r} \to \P_\Bbbk$ defined by \[\Gamma^{\boldsymbol \lambda} := \Gamma^{\lambda^{(1)}} \boxtimes \cdots \boxtimes \Gamma^{\lambda^{(r)} } \quad \text{and} \quad W_{\boldsymbol \lambda} := W_{\lambda^{(1)}} \boxtimes \dots \boxtimes W_{\lambda^{(r)} }.\] Since Weyl modules are quotients of divided powers, it follows that there is a natural projection $\pi: \Gamma^{\boldsymbol \lambda} \twoheadrightarrow W_{\boldsymbol \lambda}$. \smallskip Suppose $V_1, \dots, V_r \in \P_\Bbbk$ are free $\Bbbk$-modules, and suppose $\{x^{(j)}_b\}_{b\in \B_j}$ is a finite ordered basis of $V_j$ for each $j\in \set{1,r}$.
Given a multitableau $\mathbf T \in \Tab_{\boldsymbol \lambda}(\B_1, \dots, \B_r)$, there is a corresponding element \[ x_{\mathbf T} := \bigotimes_{j} x^{(j)}_{{\tt T}^{(j)}} \in\, \Gamma^{\boldsymbol \lambda}(V_\ast)\] whose image in $W_{\boldsymbol \lambda}(V_\ast)$ is denoted $\bar{x}_{\bm T} := \pi(x_{\bm T})$. The next result follows easily from Proposition \ref{prop:standard}. \begin{lemma} Let $\boldsymbol \lambda \in \L^{+}_{d}(\mathbb{N})^r$ be an $r$-multipartition, and let $V_1, \dots, V_r$ be free $\Bbbk$-modules with bases as above. The set of images $\{\bar{x}_{\bm T}\, |\ \mathbf T\in \st_{\boldsymbol \lambda}(\B_1, \dots, \B_r) \}$ forms a basis of the free $\Bbbk$-module $W_{\boldsymbol\lambda}( V_\ast )$ parametrized by standard $\boldsymbol \lambda$-multitableaux. In particular, we have $W_{\boldsymbol \lambda}(V_\ast) = 0$ unless $\boldsymbol \lambda \in \L^+_d(\sharp \B_1, \dots, \sharp \B_r)$. \end{lemma} Suppose $\nu \in \L_d(r)$ and fix some projective modules $M_j, N_j \in \P_\Bbbk$ for $j\in \set{1,r}$.
Using notation similar to the above, we write \[ \Gamma^{(\nu)}(M_\ast\otimes N_\ast) := \bigotimes_{j} \Gamma^{\nu_j} (M_j\otimes N_j).\] Given $\boldsymbol \lambda \in \L^+_\nu(\mathbb{N})$, we then define a map \[\psi^{\boldsymbol \lambda}: \Gamma^{\boldsymbol \lambda}(M_\ast) \otimes \Gamma^{\boldsymbol \lambda}(N_\ast)\to \Gamma^{(\nu)}(M_\ast\otimes N_\ast)\] via the composition \begin{align} \big\{\bigotimes_j \Gamma^{\lambda^{(j)}} M_j \big\} &\otimes \big\{ \bigotimes_j \Gamma^{\lambda^{(j)}} N_j \big\} \label{eq:new_psi}\\ \cong\ & \bigotimes_j \big\{ \Gamma^{\lambda^{(j)}}(M_j) \otimes \Gamma^{\lambda^{(j)}}(N_j)\big\} \xrightarrow{\, \psi \otimes\dots \otimes \psi\, } \bigotimes_j \Gamma^{\nu_j}(M_j\otimes N_j). \nonumber \end{align} Note that if $M_j \in A\text{-mod}$ and $N_j \in B\text{-mod}$ for all $j$, then $\psi^{\boldsymbol \lambda}$ is a homomorphism of $(\Gamma^d A, \Gamma^d B)$-bimodules by Lemma \ref{psi}.1. \subsection{Generalized Cauchy filtrations of bimodules} Fix a chain $(J_j)_{j\in \set{0, r}}$ of $(A, B)$-bimodules. For each $j\in \set{1,r}$, suppose there exists an isomorphism \begin{equation}\label{eq:alpha_isom} \alpha_j: J_j/ J_{j-1} \equi U_j \otimes V_j \end{equation} of $(A, B)$-bimodules for some $U_j\in A\text{-mod}$ and $V_j\in B\text{-mod}$. Assume for all $j$ that $U_j$ and $V_j$ are free as $\Bbbk$-modules, with finite ordered bases $\{x^{(j)}_b\}_{b\in \B_j}$ and $\{y^{(j)}_c\}_{c\in \C_j}$, respectively. Assume further that $\{J'_j\}_{j\in [r]}$ is any collection of free $\Bbbk$-submodules of $J_r$ such that (\ref{eq:decomp}) holds.
\smallskip We first define a filtration of $\Gamma^{(\nu)}(U_{\ast}\otimes V_{\ast})$ for some fixed weight $\nu \in \L_d(r)$. For each $r$-multipartition $\boldsymbol \lambda \in \L^{+}_\nu(\mathbb{N})$, let us write \[\F_{\boldsymbol \lambda, (\nu)} := \sum_{\boldsymbol \lambda \preceq \boldsymbol \mu \preceq (\nu)} \F_{\mu^{(1)}}(U_1, V_1) \otimes \cdots \otimes \F_{\mu^{(r)}}(U_r, V_r)\] which is a sum of sub-bimodules of $\Gamma^{(\nu)}(U_{\ast}\otimes V_{\ast})$. It follows that there is a chain of sub-bimodules: \begin{equation}\label{eq:chain} 0 =: \, \F_{(\nu)^+,(\nu)} \subset\, \F_{(\nu), (\nu)} \subset \dots \subset\, \F_{(\nu)',(\nu)} =\, \Gamma^{(\nu)}(U_\ast \otimes V_\ast). \end{equation} Recalling \eqref{eq:new_psi}, notice that for each $\boldsymbol \lambda \in \L^{+}_\nu(\mathbb{N})$ we have \[\F_{\boldsymbol \lambda, (\nu)} = \sum_{\boldsymbol \lambda \preceq \boldsymbol \mu \preceq (\nu)} \mathrm{Im}(\psi^{\boldsymbol \mu}).\] Note also that $\F_{\boldsymbol \lambda, (\nu)}$ contains the $\Bbbk$-submodule \[ \F'_{\boldsymbol \lambda} := \bigotimes_j \F'_{\lambda^{(j)}}(U_j, V_j). \] It then follows by Corollary \ref{cor:HK} that $\F'_{\boldsymbol \lambda}$ is a free $\Bbbk$-submodule, with the set \begin{equation}\label{eq:basis} \{ \psi^{\boldsymbol \lambda}(x_{\bm S} \otimes y_{\bm T}) \, |\ \mathbf S \in \st_{\boldsymbol \lambda}(\B_1, \dots, \B_r),\, \mathbf T \in \st_{\boldsymbol \lambda} (\C_1, \dots, \C_r) \} \end{equation} as a basis. \begin{proposition}\label{prop:gen_HK} Suppose $\nu \in \L_d(r)$.
Then for each $\boldsymbol \lambda \in \L^+_\nu(\mathbb{N})$, the map \[\psi^{\boldsymbol \lambda}: \Gamma^{\boldsymbol \lambda}(U_\ast) \otimes \Gamma^{\boldsymbol \lambda}(V_\ast) \to \F_{\boldsymbol \lambda, (\nu)}\] induces an isomorphism \[ \bar{\psi}^{\boldsymbol \lambda}:\, \F_{\boldsymbol \lambda,(\nu)} / \F_{\boldsymbol \lambda^+,(\nu)} \, \xrightarrow{\,\sim\,}\, W_{\boldsymbol \lambda} (U_\ast) \otimes W_{\boldsymbol \lambda} (V_\ast) \] of bimodules. We also have decompositions \begin{equation}\label{eq:decomp_HK} \Gamma^{(\nu)}(U_\ast \otimes V_\ast) = \bigoplus_{\boldsymbol \lambda \in \L^+_\nu(\mathbb{N})} \F'_{\boldsymbol \lambda}, \qquad \F_{\boldsymbol \lambda, (\nu)} = \bigoplus_{\boldsymbol \lambda \preceq \boldsymbol \mu \preceq (\nu)} \F'_{\boldsymbol \mu} \end{equation} into free $\Bbbk$-submodules. \end{proposition} We now wish to lift the filtrations \eqref{eq:chain}, for varying $\nu$, to a single filtration of $\Gamma^d J$, with $J=J_r$ as above.
First note that there is an isomorphism \begin{equation*} \phi_\nu: J_{\geq \nu}/ J_{>\nu} \xrightarrow{\,\sim\,} \Gamma^{(\nu)}(U_\ast \otimes V_{\ast}) \end{equation*} satisfying the following commutative triangle of $(\Gamma^dA, \Gamma^d B)$-bimodule isomorphisms: \begin{equation}\label{eq:isom_phi} \begin{tikzcd}[row sep=large, column sep=small] \bigotimes \Gamma^{\nu_j}(J_j/J_{j-1}) \arrow[rr, " \Gamma^{(\nu)}(\alpha_\ast)"] \arrow[dr, " \bar{\nabla}^\nu"'] & & \bigotimes \Gamma^{\nu_j}\big(U_j\otimes V_j \big) \\ & J_{\,\geq\nu} / J_{\,>\nu} \arrow[ur, " \phi_\nu"'] \end{tikzcd} \end{equation} where $\Gamma^{(\nu)}(\alpha_\ast) = \bigotimes \Gamma^{\nu_j}(\alpha_j)$ is a tensor product of isomorphisms induced by the maps (\ref{eq:alpha_isom}) and $\bar{\nabla}^\nu$ is defined in Proposition \ref{prop:commute}. We then have a surjective map \[\hat{\phi}_\nu: J_{\geq \nu}\ \twoheadrightarrow\ \Gamma^{(\nu)}(U_\ast \otimes V_\ast)\] obtained by composing $\phi_\nu$ with the projection $\pi: J_{\geq \nu} \twoheadrightarrow J_{\geq \nu}/ J_{>\nu}$. \begin{definition} Suppose $\boldsymbol \lambda \in \L^+_d(\mathbb{N})^r$ and set $\nu = |\boldsymbol \lambda|$. Then define $\J_{\boldsymbol \lambda}$ to be the sub-bimodule of $J_{\geq \nu}$, corresponding to the inverse image of $\F_{\boldsymbol \lambda, (\nu)}$ under the map $\hat{\phi}_\nu$ considered above.
The {\em generalized Cauchy filtration} of $\Gamma^d J$ is then defined as the chain \begin{equation} \label{eq:gen_Cauchy} 0 = \J_{\infty} \subset \J_{((d))} \subset \dots \subset \J_{\boldsymbol \lambda^+} \subset \J_{\boldsymbol \lambda} \subset \dots \subset \J_{((1^d))} = \Gamma^d J \end{equation} of $(\Gamma^d A, \Gamma^d B)$-bimodules parametrized by multipartitions $\boldsymbol \lambda \in \L^+_d(\mathbb{N})^r$. \end{definition} We next define a decomposition of $\Gamma^d J$ via certain $\Bbbk$-submodules, $\J'_{\boldsymbol \lambda} \subset \J_{\boldsymbol \lambda}$. Recall from (\ref{eq:decomp}) that $J_j = J'_j \oplus J_{j-1}$, for all $j$. For each $j\in \set{1,r}$, let \[\alpha'_j: J'_j \xrightarrow{\sim} U_j\otimes V_j\] denote the isomorphism defined via the composition \[\begin{tikzcd}[column sep=small] J'_j \arrow[r,hook] & J_j \arrow[r, two heads] & J_j / J_{j-1} \arrow[rr, "\alpha_j"] & & U_j \otimes V_j .
\end{tikzcd}\] Similar to (\ref{eq:isom_phi}), there is a resulting $\Bbbk$-module isomorphism \[\phi'_\nu: J'_\nu\, \xrightarrow{\sim}\, \Gamma^{(\nu)}(U_\ast \otimes V_\ast) \] satisfying the following commutative triangle of isomorphisms: \begin{equation}\label{eq:isom_phi'} \begin{tikzcd}[row sep=large, column sep=small] \Gamma^{(\nu)}(J'_\ast) \arrow[rr, " \Gamma^{(\nu)}(\alpha'_\ast)"] \arrow[dr, " {}'\nabla^\nu"'] & & \Gamma^{(\nu)}(U_\ast \otimes V_\ast ) \\ & J'_{\nu} \arrow[ur, " \phi'_\nu"'] \end{tikzcd} \end{equation} where \[\Gamma^{(\nu)}(\alpha'_\ast) := \bigotimes \Gamma^{\nu_j}(\alpha'_j)\] and where ${}'\nabla^\nu$ is restriction of $r$-fold multiplication as in Lemma \ref{lem:image}.(i). We write \[\J'_{\boldsymbol \lambda}\, := \, (\phi'_\nu)^{-1}(\F'_{\boldsymbol \lambda})\] to denote the inverse image of $\F'_{\boldsymbol \lambda}$ under $\phi'_\nu$. \begin{lemma} \label{lem:gen_decomp} There exist decompositions into free $\Bbbk$-submodules \[\Gamma^d J = \bigoplus_{\boldsymbol \lambda \in \L^+_d(\mathbb{N})^r } \J'_{\boldsymbol \lambda}, \qquad \text{and} \qquad \J_{\boldsymbol \lambda} = \bigoplus_{\boldsymbol \lambda \preceq \boldsymbol \mu \prec \infty} \J'_{\boldsymbol \mu} \quad \text{for each } \boldsymbol \lambda. \] \end{lemma} \begin{proof} It follows by definition from (\ref{eq:isom_phi}) and (\ref{eq:isom_phi'}) that $\phi'_\nu$ can be obtained from $\hat{\phi}_\nu$ by restriction.
In particular, we have a commutative diagram: \begin{equation}\label{eq:gen_decomp} \begin{tikzcd}[column sep = large, ] J'_\nu \arrow[d, shorten <=1pt, shorten >=1, tail] \arrow[r, "\phi'_\nu"] &\Gamma^{(\nu)}(U_\ast \otimes V_\ast) \arrow[d, no head, shift right=7.2, shorten >=1] \arrow[d, no head, shift right=8, shorten >=1] \\ J_{\geq \nu} \arrow[r, two heads, "\hat{\phi}_\nu"] &\Gamma^{(\nu)}(U_\ast \otimes V_\ast) \end{tikzcd} \end{equation} Since $J_{\geq \nu} = J_{>\nu}\oplus J'_\nu$ by (\ref{eq:sum_nu}), we further have a decomposition \begin{equation}\label{eq:sum_phi} \hat{\phi}_\nu^{-1}(N) \, = \, J_{>\nu}\oplus (\phi'_\nu)^{-1}(N) \end{equation} for any $\Bbbk$-submodule $N \subset \Gamma^{(\nu)}(U_\ast \otimes V_{\ast})$. If we set $N= \F_{\boldsymbol \lambda, (\nu)}$ in the above, then it follows from (\ref{eq:decomp_HK}) that \begin{equation*} \J_{\boldsymbol \lambda} \, = \, J_{>\nu}\oplus \bigoplus_{\boldsymbol \lambda \preceq \boldsymbol\mu \preceq (\nu)} \J'_{\boldsymbol \mu} \end{equation*} for each $\boldsymbol \lambda \in \L^+_\nu(\mathbb{N})$. The decomposition of $\J_{\boldsymbol \lambda}$ now follows by induction since $J_{> \nu} = \J_{(\nu_+)}$, where $\nu_+$ denotes an immediate successor of $\nu$ in the lexicographic order on $\L_d(r)$. The decomposition for $\Gamma^d J = \J_{((1^d))}$ follows as a special case. \end{proof} Now suppose $\boldsymbol \lambda \in \L^+_d(\mathbb{N})^r$.
To each element of the basis (\ref{eq:basis}), we associate a corresponding element in $\J'_{\boldsymbol \lambda}$, defined by \begin{equation}\label{z_basis} z_{\bm S, \bm T} := \, \big({}'\nabla^\nu \circ \Gamma^{(\nu)}(\alpha'_\ast)^{-1} \circ \psi^{\boldsymbol \lambda}\big) (x_{\bm S} \otimes y_{\bm T}). \end{equation} Since the map appearing in \eqref{z_basis} is a composition of isomorphisms, it follows that the set \[ \{ z_{\bm S, \bm T} \mid \bm S \in \st_{\boldsymbol \lambda}(\B_\ast),\, \bm T \in \st_{\boldsymbol \lambda}(\C_\ast) \} \] forms a basis of $\J'_{\boldsymbol \lambda}$. \smallskip Let $(m_1,\dots, m_r) \in \mathbb{N}^r$ be the sequence defined by \[m_j := \min(\sharp \B_j, \sharp \C_j)\] for all $j$, and set \[\boldsymbol \L: = \L_d^+(m_1, \dots, m_r).\] \begin{remark}\label{rmk:multi_tab} Suppose $\boldsymbol \lambda \in \L^+_d(\mathbb{N})^r$. If $\boldsymbol \lambda$ belongs to $\boldsymbol \L\subset \L^+_d(\mathbb{N})^r$, then $\st_{\boldsymbol \lambda}(\B_\ast)$ and $\st_{\boldsymbol \lambda}(\C_\ast)$ are both non-empty since they contain the elements $\mathbf T^{\boldsymbol \lambda} = \mathbf T^{\boldsymbol \lambda}(\B_\ast)$ and $\mathbf T^{\boldsymbol \lambda} = \mathbf T^{\boldsymbol \lambda}(\C_\ast)$ defined in \eqref{multi-tab}, respectively. We thus have $\J'_{\boldsymbol \lambda} \neq 0$ if and only if $\boldsymbol \lambda \in \boldsymbol \L$. \end{remark} Let $\boldsymbol \lambda \in \boldsymbol \L$.
Since $\J_{\boldsymbol \lambda} = \J'_{\boldsymbol \lambda} \oplus \J_{\boldsymbol \lambda^+}$ by Lemma \ref{lem:gen_decomp}, it follows that $\J_{\boldsymbol \lambda} / \J_{\boldsymbol \lambda^+}$ is a free $\Bbbk$-module with basis \[ \left\{\bar{z}_{\bm S, \bm T} \mid \bm S \in \st_{\boldsymbol \lambda}(\B_\ast),\, \bm T \in \st_{\boldsymbol \lambda}(\C_\ast) \right\} \] where $\bar{x} := x + \J_{\boldsymbol \lambda^+}$ denotes the image of $x\in \J_{\boldsymbol \lambda}$ in the quotient. \begin{definition} Given $\boldsymbol \lambda \in \boldsymbol \L$, define a pair of $\Bbbk$-submodules \[\U_{\boldsymbol \lambda},\ \V_{\boldsymbol \lambda} \, \subset\ \J_{\boldsymbol \lambda} / \J_{\boldsymbol \lambda^+}\] generated by the subsets \[ \left\{\bar{z}_{\bm S, \bm T^{\boldsymbol \lambda}} \mid \bm S \in \st_{\boldsymbol \lambda}(\B_\ast) \right\} \quad \text{and} \quad \left\{\bar{z}_{\bm T^{\boldsymbol \lambda}, \bm T} \mid \bm T \in \st_{\boldsymbol \lambda}(\C_\ast) \right\}, \] respectively. It is then clear that $\U_{\boldsymbol \lambda}$ is a $\Gamma^d A$-submodule of the $(\Gamma^d A, \Gamma^d B)$-bimodule $\J_{\boldsymbol \lambda} / \J_{\boldsymbol \lambda^+}$, and $\V_{\boldsymbol \lambda}$ is a $\Gamma^d B$-submodule. \end{definition} The following analogue of Theorem \ref{thm:HK} is the main result in this section. \begin{theorem}[Generalized Cauchy Decomposition] \label{thm:gen_Cauchy} Suppose $\boldsymbol \lambda \in \boldsymbol \L$.
Then the map of $\Bbbk$-modules defined by \begin{align*} \alpha_{\boldsymbol \lambda}: \, \J_{\boldsymbol \lambda} / \J_{\boldsymbol \lambda^+}& \, \to\, \U_{\boldsymbol \lambda} \otimes \V_{\boldsymbol \lambda}: \ \ \bar{z}_{\bm S, \bm T}\ \mapsto\ \bar{z}_{\bm S, \bm T^{\boldsymbol \lambda}} \otimes \bar{z}_{ \bm T^{\boldsymbol \lambda}, \bm T}, \end{align*} for all $(\bm S, \bm T) \in \st_{\boldsymbol \lambda}(\B_\ast) \times \st_{\boldsymbol \lambda}(\C_\ast)$, is an isomorphism of $(\Gamma^d A, \Gamma^d B)$-bimodules. The associated graded module of the generalized Cauchy filtration is thus given by \[\bigoplus_{\boldsymbol \lambda\in \boldsymbol \L} \U_{\boldsymbol \lambda} \otimes \V_{\boldsymbol \lambda}.\] \end{theorem} \begin{proof} Write $\phi_{\boldsymbol \lambda}: \J_{\boldsymbol \lambda} \to \F_{\boldsymbol \lambda, (\nu)}$ to denote the map obtained from $\hat{\phi}_\nu$ by restriction. There is an induced bimodule isomorphism \[ \bar{\phi}_{\boldsymbol \lambda} : \J_{\boldsymbol \lambda} / \J_{\boldsymbol \lambda^+} \xrightarrow{\,\sim\,} \F_{\boldsymbol \lambda, (\nu)} /\F_{\boldsymbol \lambda^+, (\nu)}\] which follows from the definitions by using the decompositions $\J_{\boldsymbol \lambda} = \J'_{\boldsymbol \lambda} \oplus \J_{\boldsymbol \lambda^+}$ and $\F_{\boldsymbol \lambda, (\nu)} = \F'_{\boldsymbol \lambda} \oplus \F_{\boldsymbol \lambda^+, (\nu)}$. 
Hence by Proposition \ref{prop:gen_HK}, there is an isomorphism $\varphi_{\boldsymbol \lambda}$ making the upper right triangle commute in the following diagram \begin{equation}\label{eq:diagram} \begin{tikzcd}[column sep= large, row sep=large] \J_{\boldsymbol \lambda}/ \J_{\boldsymbol \lambda^+} \arrow[d, "\alpha_{\boldsymbol \lambda}"' , shift right =.1em] \arrow[r, "{\bar{\phi}}_{\boldsymbol \lambda}"] & \F_{\boldsymbol \lambda, (\nu)} / \F_{\boldsymbol \lambda^+, (\nu)} \\ \U_{\boldsymbol \lambda} \otimes \V_{\boldsymbol \lambda} & W_{\boldsymbol \lambda}(U_\ast) \otimes W_{\boldsymbol \lambda}(V_\ast). \arrow[u, "\bar{\psi}^{\boldsymbol \lambda}"', shift left = 0.1em] \arrow[ul, "\varphi_{\boldsymbol \lambda}"'] \arrow[l, "\ \varphi'_{\boldsymbol \lambda} \otimes \varphi''_{\boldsymbol \lambda}"] \end{tikzcd} \end{equation} In the bottom arrow, the map $\varphi'_{\boldsymbol \lambda}$ (resp.~$\varphi''_{\boldsymbol \lambda}$) denotes the homomorphism obtained by composing $\varphi_{\boldsymbol \lambda}$ with the embedding \[ W_{\boldsymbol \lambda} (U_\ast) \xrightarrow{\,\sim\,} W_{\boldsymbol \lambda} (U_\ast) \otimes \bar{y}_{\bm T^{\boldsymbol \lambda}} \quad (\text{resp.}\ W_{\boldsymbol \lambda}(V_\ast) \xrightarrow{\,\sim\,} \bar{x}_{\bm T^{\boldsymbol \lambda}} \otimes W_{\boldsymbol \lambda}(V_\ast)).\] In order to complete the proof, it suffices to show that the lower triangle in (\ref{eq:diagram}) is a commutative triangle of isomorphisms. 
For this, we compute: \begin{align*} \varphi_{\boldsymbol \lambda}(\bar{x}_{\bm S} \otimes \bar{y}_{\bm T}) \, = &\ (\bar{\phi}_{\boldsymbol \lambda}^{-1} \circ \bar{\psi}^{\boldsymbol \lambda}) (\bar{x}_{\bm S} \otimes \bar{y}_{\bm T}) \\[.1cm] = &\ \bar{\phi}_{\boldsymbol \lambda}^{-1} (\, \overline{ \psi^{\boldsymbol \lambda} ({x}_{\bm S} \otimes {y}_{\bm T})} \, ) & \text{by Prop.~\ref{prop:gen_HK} } \\[.1cm] = &\ \overline{ (\phi'_{\boldsymbol \lambda})^{-1} \circ \psi^{\boldsymbol \lambda} ({x}_{\bm S} \otimes {y}_{\bm T}) } & \text{by (\ref{eq:gen_decomp})\, \text{and}\, (\ref{eq:sum_phi})} \\[.1cm] = &\ \bar{z}_{\bm S, \bm T}. \end{align*} It follows that $\varphi'_{\boldsymbol \lambda} \otimes \varphi''_{\boldsymbol \lambda}$ is an isomorphism since \[\varphi'_{\boldsymbol \lambda} \otimes \varphi''_{\boldsymbol \lambda}\, (\bar{x}_{\bm S} \otimes \bar{y}_{\bm T}) \, =\, \bar{z}_{\bm S, \bm T^{\boldsymbol \lambda}} \otimes \bar{z}_{\bm T^{\boldsymbol \lambda}, \bm T} \] for all $(\bm S, \bm T) \in \st_{\boldsymbol \lambda}(\B_\ast)\times \st_{\boldsymbol \lambda}(\C_\ast)$. Since it is now clear that the lower triangle is commutative, the proof is complete. \end{proof} It follows from the proof of the theorem that $\U_{\boldsymbol \lambda}$ and $\V_{\boldsymbol \lambda}$ are each isomorphic to a respective (generalized) Weyl module. 
In the case $B= A^\op$, we call $\U_{\boldsymbol \lambda}$ (resp.~$\V_{\boldsymbol \lambda}$) a left (resp.~right) Weyl submodule of the $\Gamma^d A$-bimodule $\J_{\boldsymbol \lambda}/\J_{\boldsymbol \lambda^+}$. \section{Cellular Algebras} Assume throughout this section that $\Bbbk$ is a noetherian integral domain. We first recall the definition of cellular algebras from \cite{GL}, along with the reformulation given in \cite{KX}. We then use the generalized Cauchy decomposition to describe a cellular structure on generalized Schur algebras $S^A(n,d)$. \subsection{Definition of cellular algebras} \begin{definition}[Graham-Lehrer] An associative $\Bbbk$--algebra $A$ is called a {\em cellular algebra} with cell datum $(I, M, C, \tau)$ if the following conditions are satisfied: \begin{itemize} \item[(C1)] $(I, \trianglerighteq)$ is a finite partially ordered set. Associated to each $\lambda \in I$ is a finite set $M(\lambda)$. The algebra $A$ has a $\Bbbk$-basis $C_{S,T}^\lambda$, where $(S,T)$ runs through all elements of $M(\lambda)\times M(\lambda)$ for all $\lambda \in I$. \smallskip \item[(C2)] The map $\tau$ is an anti-involution of $A$ such that $\tau(C^{\lambda}_{S,T}) = C^\lambda_{T,S}$. \smallskip \item[(C3)] For each $\lambda \in I$ and $S,T \in M(\lambda)$ and each $a\in A$, the product $a C^\lambda_{S,T}$ can be written as $(\sum_{U\in M(\lambda)} r_a(U,S) C^{\lambda}_{U,T}) + r'$, where $r'$ is a linear combination of basis elements with upper index $\mu$ strictly larger than $\lambda$, and where the coefficients $r_a(U,S)\in \Bbbk$ do not depend on $T$. \end{itemize} \end{definition} Let $A$ be a cellular algebra with cell datum $(I, M, C,\tau)$. 
Given $\lambda \in I$, it is clear that the set $J(\lambda)$ spanned by the $C_{S, T}^{\mu}$ with $\mu \trianglerighteq \lambda$ is a $\tau$--invariant two sided ideal of $A$ (see \cite{GL}). Let $J(\triangleright \lambda)$ denote the sum of ideals $J(\mu)$ with $\mu \triangleright \lambda$. \smallskip For $\lambda \in I$, the {\em standard module} $\Delta(\lambda)$ is defined as follows: as a $\Bbbk$-module, $\Delta(\lambda)$ is free with basis indexed by $M(\lambda)$, say $\{C_{S}^\lambda\ |\ S \in M(\lambda)\}$; for each $a \in A$, the action of $a$ on $\Delta(\lambda)$ is defined by $ aC_{S}^\lambda=\sum_{ U} r_{ a}(U, S) C_{U}^\lambda$ where the elements $r_{a}(U,S) \in \Bbbk$ are the coefficients in (C3). Any left $A$-module isomorphic to $\Delta(\lambda)$ for some $\lambda$ will also be called a standard module. Note that for any $T \in M(\lambda)$, the assignment $C_S^\lambda \mapsto C^\lambda_{S,T} + J(\triangleright \lambda)$ defines an injective $A$--module homomorphism from $\Delta(\lambda)$ to $J(\lambda)/J(\triangleright\lambda)$. \subsection{Basis-free definition of cellular algebras}\label{ss:KX} In \cite{KX}, K\"onig and Xi provide an equivalent definition of cellular algebras which does not require specifying a particular basis. This definition can be formulated as follows. \begin{definition}[K\"onig-Xi] \label{def:KX} Suppose $A$ is a $\Bbbk$-algebra with an anti-involution $\tau$. 
Then a two-sided ideal $J$ in $A$ is called a {\em cell ideal} if, and only if, $J = \tau(J)$ and there exists a left ideal $\Delta \subset J$ such that $\Delta$ is finitely generated and free over $\Bbbk$ and such that there is an isomorphism of $A$-bimodules $\alpha: J \xrightarrow{\sim} \Delta \otimes \tau(\Delta)$ making the following diagram commutative: \begin{equation*} \begin{tikzcd} J \arrow[r, "\alpha"] \arrow[d, "\tau\,"' ] & \Delta \otimes \tau(\Delta) \arrow[d, "\,x\otimes y\, \mapsto\, \tau(y) \otimes \tau(x)"] \\ J \arrow[r, "\alpha"] & \Delta \otimes \tau(\Delta) \end{tikzcd} \end{equation*} We say that a decomposition $A=J'_1\oplus \dots \oplus J'_r$ (for some $r$) into $\Bbbk$-submodules with $\tau(J'_j) = J'_j$ for each $j=1, \dots, r$ is a {\em cellular decomposition} of $A$ if setting $J_j:= \bigoplus_{1\leq i\leq j} J'_i$ gives a chain of ($\tau$-invariant) two-sided ideals \[ 0=J_0 \subset J_1 \subset J_2 \subset \dots \subset J_r = A \] such that the quotient $J_j/J_{j-1}$ is a cell ideal (with respect to the anti-involution induced by $\tau$ on the quotient) of $A/J_{j-1}$. \end{definition} The above chain of ideals in $A$ is called a {\em cell chain}. For each ideal $J_j$ in a cell chain, we write \begin{equation}\label{eq:Delta_alpha} \Delta_j \subset J_j/J_{j-1},\qquad \alpha_j: J_j/J_{j-1} \equi \Delta_j \otimes \tau(\Delta_j) \end{equation} to denote the corresponding left ideal and $A$-bimodule isomorphism. 
Since $J_j = J'_j \oplus J_{j-1}$ for all $j$, we have a $\Bbbk$-module isomorphism $\alpha'_j: J'_j \cong \Delta_j\otimes \tau(\Delta_j)$ defined as the composition \begin{equation*}\label{eq:alpha} \begin{tikzcd}[cramped, column sep=small] \alpha'_j : J'_j \arrow[r, hook] & J_j \arrow[r, two heads] & J_j/J_{j-1} \arrow[r, " \alpha_j " ] &[3mm] \Delta_j\otimes \tau(\Delta_j). \end{tikzcd} \end{equation*} It then follows by definition that we have a commutative diagram \begin{equation}\label{eq:alpha2} \begin{tikzcd} J'_j \arrow[r, "\alpha'_j"] \arrow[d, "i\,"' ] & \Delta_j \otimes_\Bbbk \tau(\Delta_j) \arrow[d, "\,x \otimes y\, \mapsto\, \tau(y) \otimes \tau(x)"] \\ J'_j \arrow[r, "\alpha'_j"] & \Delta_j \otimes_\Bbbk \tau(\Delta_j) \end{tikzcd} \end{equation} of $\Bbbk$-module isomorphisms. \begin{lemma}[K\"onig-Xi, \cite{KX}]\label{lem:KX} Let $A$ be an associative $\Bbbk$-algebra with an anti-involution $\tau$. Then $A$ is a cellular algebra in the sense of \cite{GL} if and only if $A$ has a cellular decomposition. \end{lemma} \begin{proof} We summarize the proof from \cite{KX}. Let $A$ be a cellular algebra with cell datum $(I, M, C, \tau)$. First, suppose $\lambda \in I$ is maximal. Then $J= J(\lambda)$ is a two-sided ideal by (C3) and $J=\tau(J)$ by (C2). Fix any element $T_\lambda \in M(\lambda)$. Define $\Delta$ as the $\Bbbk$--span of $C^\lambda_{S,T_\lambda}$ where $S$ varies. Defining $\alpha$ by sending $C_{S,T_\lambda}^\lambda \otimes \tau(C_{T,T_\lambda}^\lambda)$ to $C_{S,T}^\lambda$ gives the required isomorphism. Thus $J(\lambda)$ is a cell ideal. Next, choose any enumeration $\lambda_1, \dots, \lambda_r$ of the elements of $I$ such that $i<j$ whenever $\lambda_j \triangleright \lambda_i$. 
Set $J'_j\subset A$ (for each $j$) equal to the $\Bbbk$--span of all $C^{\lambda_j}_{S,T}$ (for varying $S,T$). We have $\tau(J'_j) = J'_j$ by (C2). Since $J(\lambda_j) = J'_j \bigoplus J(\triangleright \lambda_j)$ for all $j$, it follows that $A= \bigoplus_j J'_j$ is a cellular decomposition. For the converse, consider the index set $I = \{1, \dots, r\}$ with the reversed ordering $1\triangleright \dots \triangleright r$. Choose a $\Bbbk$-basis $\{x^{(j)}_b\}_{b\in \B_j}$ of $\Delta_j$, for each $j\in I$. Setting $C^j_{b,c}\in J'_j$ to be the inverse image of $x^{(j)}_{b} \otimes \tau(x^{(j)}_{c})$ (for $b,c\in \B_j$) under $\alpha'_j$ (for $j\in I$) gives a $\Bbbk$-basis for $A$ of the form (C1). Since $\Delta_j$ is a left $A$-module, (C3) is satisfied. Finally, (C2) follows from the required commutative diagram and the $\tau$-invariance of $J'_j$. It follows that $\{C^j_{b,c}\}$ is a cellular basis. \end{proof} From now on, we say that an algebra $A$ with anti-involution $\tau$ is {\em cellular} if either of the equivalent statements in Lemma \ref{lem:KX} is satisfied. The proof of the lemma shows that each ideal $\Delta_j$ (for $j=1, \dots, r$) for a cellular algebra $A$ is a standard module. \subsection{Matrix algebras}\label{ss:Matrix} Consider the matrix ring, $\mathrm{M}_n(\Bbbk)$, with matrix transpose, $\tr$, as anti-involution. Let us write, $c: \mathrm{V}_{\hspace{-1pt}n} \otimes \mathrm{V}_{\hspace{-1pt}n}^\tr \xrightarrow{\,\sim\,} \mathrm{M}_n(\Bbbk)$, to denote the isomorphism mapping $v_i \otimes v_j^\tr \mapsto E_{ij}$ for all $i,j \in \set{1,n}$. \smallskip Now suppose $A$ is an algebra with anti-involution $\tau$, and let $J$ be a cell ideal with defining isomorphism $\alpha: J \xrightarrow{\,\sim\,} \Delta \otimes \tau(\Delta)$. 
Then \[\mathrm{M}_n(J) := \mathrm{M}_n(\Bbbk)\otimes J\] is a cell ideal of the matrix ring $\mathrm{M}_n(A)$ with respect to the anti-involution $\tr \otimes \tau$. The corresponding isomorphism is the map \[c^{-1}(\alpha): \mathrm{M}_n(J) \xrightarrow{\,\sim\,} \mathrm{V}_{\hspace{-1pt}n}(\Delta) \otimes \mathrm{V}_{\hspace{-1pt}n}^\tr(\tau(\Delta))\] defined by the composition \[\mathrm{M}_n(\Bbbk) \otimes J \xrightarrow{\ c^{-1}\hp{1}\otimes\hp{3} \alpha \ } \left( \mathrm{V}_{\hspace{-1pt}n}\otimes \mathrm{V}_{\hspace{-1pt}n}^\tr \right) \otimes \left( \Delta \otimes \tau(\Delta) \right) \xrightarrow{\ \sim \ } \mathrm{V}_{\hspace{-1pt}n}\otimes \Delta \otimes \mathrm{V}_{\hspace{-1pt}n}^\tr \otimes \tau(\Delta).\] More generally, we have the following. \begin{lemma}\label{lem:Mat} Suppose $A$ is a cellular algebra with anti-involution $\tau$ and cell chain $(J_j)_{j\in \set{1,r}}$. Then the matrix ring $\mathrm{M}_n(A)$ is cellular with anti-involution $\tr \otimes \tau$ and cell chain $(\mathrm{M}_n(J_j))_{j\in \set{1,r}}$, where $\mathrm{M}_n(J_j) := \mathrm{M}_n(\Bbbk)\otimes J_j$ for all $j$. \end{lemma} \begin{proof} It follows from the preceding paragraph that the ideals, $\mathrm{M}_n(J_j)$, form a cell chain, since $\mathrm{M}_n(J_j) / \mathrm{M}_n(J_{j-1}) \simeq \mathrm{M}_n(\Bbbk) \otimes (J_j / J_{j-1})$ as $\mathrm{M}_n(A)$-bimodules. It is also clear that $\mathrm{M}_n(A)$ has a cellular decomposition \[\mathrm{M}_n(A) = \bigoplus \mathrm{M}_n(J'_j)\] where $A= \bigoplus J'_j$ denotes a corresponding cellular decomposition of $A$. \end{proof} \subsection{Cellularity of generalized Schur algebras} We now describe a cellular structure for generalized Schur algebras $S^A(n,d)$. In this case, the generalized Cauchy filtration forms a cell chain, with the Weyl submodules from Theorem \ref{thm:gen_Cauchy} as standard modules. 
\begin{theorem}\label{thm:cellular} Suppose $A$ is a cellular algebra with anti-involution $\tau$. Then the generalized Schur algebra $S^A(n,d)$ is a cellular algebra, with respect to the anti-involution ${\boldsymbol \tau}:= (\tr \otimes \tau)^{\otimes d}$, for all $n,d \in \mathbb{N}$. \end{theorem} \begin{proof} If $A$ is cellular then so is $\mathrm{M}_n(A)$, by Lemma \ref{lem:Mat}. Since $S^A(n,d) = \G^d \mathrm{M}_n(A)$, it suffices to show that $\Gamma^d A$ is cellular, with respect to the anti-involution ${\boldsymbol \tau} = \tau^{\otimes d}$. Suppose that $A=J'_1 \oplus \dots \oplus J_r'$ is a cellular decomposition of $A$, with corresponding cell chain \[ 0 = J_0 \subset J_1\subset \dots \subset J_r =A.\] For each $j\in\set{1,r}$, suppose $\{x^{(j)}_b\}_{b\in\B_j}$ and $\{y^{(j)}_b\}_{ b\in \B_j}$ are $\Bbbk$-bases of $\Delta_j$ and $\tau(\Delta_j)$, respectively, such that $y^{(j)}_b:= \tau(x^{(j)}_b)$ for all $j$, and let $\Delta_j$ and $\alpha_j$ be as in (\ref{eq:Delta_alpha}). 
Considering $\boldsymbol \Lambda = \L^+(\sharp \B_1, \dots, \sharp \B_r)$ as a totally ordered subset of $\L^+_d(\mathbb{N})^r$ by restricting the order $\preceq$ in Definition \ref{lex2}, it follows from Lemma \ref{lem:gen_decomp} and Remark \ref{rmk:multi_tab} that we have decompositions \begin{equation}\label{eq:cell_decomp} \Gamma^d J = \bigoplus_{\boldsymbol \lambda \in \boldsymbol \Lambda} \J'_{\boldsymbol \lambda}, \quad \text{and} \quad \J_{\boldsymbol \lambda} = \bigoplus_{\boldsymbol \mu\succeq \boldsymbol \lambda} \J'_{\boldsymbol \mu} \quad \text{for each } \boldsymbol \lambda \in \boldsymbol \Lambda, \end{equation} since $\J'_{\boldsymbol \lambda} = 0$ if $\boldsymbol \lambda \notin \boldsymbol \Lambda$. Notice that ${\boldsymbol \tau} = \tau^{\otimes d}$ coincides with the map $\Gamma^d(\tau): \Gamma^d A\to \Gamma^d A$ induced by the functor $\Gamma^d$. To complete the proof, we need to show that the left-hand side of (\ref{eq:cell_decomp}) gives a cellular decomposition of $\Gamma^d A$ with respect to this anti-involution. Let $\Delta_{\boldsymbol \lambda}$ be the left Weyl submodule $\U_{\boldsymbol \lambda} \subset \J_{\boldsymbol \lambda}/ \J_{\boldsymbol \lambda^+}$ of Theorem \ref{thm:gen_Cauchy}. Then it remains to check the following hold for each $\boldsymbol \lambda \in \boldsymbol \Lambda$: \begin{enumerate}[(i)] \item ${\boldsymbol \tau}(\J'_{\boldsymbol \lambda}) = \J'_{\boldsymbol \lambda}$, \item ${\boldsymbol \tau}(\Delta_{\boldsymbol \lambda}) = \V_{\boldsymbol \lambda }$, \item $\J_{\boldsymbol \lambda} / \J_{\boldsymbol \lambda^+}$ is a cell ideal. 
\smallskip \end{enumerate} Assuming (i) and (ii) hold for each $\boldsymbol \lambda$, (iii) will follow from the commutativity of the diagram \begin{equation*} \begin{tikzcd} \J_{\boldsymbol \lambda} / \J_{\boldsymbol \lambda^+} \arrow[r, "\alpha_{\boldsymbol \lambda}"] \arrow[d, "\tau\,"' ] & \Delta_{\boldsymbol \lambda} \otimes \tau(\Delta_{\boldsymbol \lambda}) \arrow[d, "\,x\otimes y\, \mapsto\, \tau(y)\otimes \tau(x)"] \\ \J_{\boldsymbol \lambda} / \J_{\boldsymbol \lambda^+} \arrow[r, "\alpha_{\boldsymbol \lambda}"] & \Delta_{\boldsymbol \lambda} \otimes \tau(\Delta_{\boldsymbol \lambda}) \end{tikzcd} \end{equation*} where $\alpha_{\boldsymbol \lambda}$ is the $\Gamma^d A$-bimodule isomorphism from Theorem \ref{thm:gen_Cauchy}. Now fix $\boldsymbol \lambda \in \boldsymbol \Lambda$, and set $\nu = |\boldsymbol \lambda|$. Then $\J'_{\boldsymbol \lambda}$, $\Delta_{\boldsymbol \lambda}$, and $\J_{\boldsymbol \lambda}/ \J_{\boldsymbol \lambda^+}$ have $\Bbbk$-bases given by the sets \[\left\{ z_{\bm S, \bm T } \mid \bm S, \bm T \in \st(\B_\ast) \right\}, \qquad \left\{\bar{z}_{\bm S, \bm T_{\boldsymbol \lambda}} \mid \bm S \in \st(\B_\ast) \right\}, \qquad \left\{\bar{z}_{\bm S, \bm T} \mid \bm S, \bm T \in \st(\B_\ast) \right\}\] respectively, where $z_{\bm S,\bm T}\in \J'_{\boldsymbol \lambda}$ is defined in \eqref{z_basis}. 
It follows that each of the conditions (i)-(iii) will be satisfied provided that $\tau(z_{\bm S, \bm T}) = z_{\bm T, \bm S}$ for all $\bm S, \bm T\in \st(\B_\ast)$. We claim that the following diagram is commutative: \[\begin{tikzcd}[row sep = huge, column sep = 4.3em] \Gamma^{\boldsymbol \lambda}(\Delta_\ast) \otimes \Gamma^{\boldsymbol \lambda}(\tau(\Delta_\ast)) \arrow[r, "\psi^{\boldsymbol \lambda}" ] \arrow[d, "\, \tw\hs{1pt} \circ {\left( \Gamma^{\boldsymbol \lambda}(\tau)\, \otimes\, \Gamma^{\boldsymbol \lambda}(\tau)\right)} " , shift right=0.1em ] &[-1em] \Gamma^{(\nu)}(\Delta_\ast {\otimes} \tau(\Delta_\ast)) \arrow[d, "\, \Gamma^{(\nu)}\left(\tw\hs{1pt}\circ{\left( \tau\, \otimes\, \tau \right ) } \right) " , shift left=0.75em ] & \Gamma^{(\nu)}(J'_\ast) \arrow[l, "\ \Gamma^{(\nu)}(\alpha'_\ast) "' ] \arrow[r, " \nabla^\nu" ] \arrow[d, "\, \Gamma^{(\nu)}(\tau) " ] &[-1em] J'_\nu \arrow[d, "\, \tau " ] \\ \Gamma^{\boldsymbol \lambda}(\Delta_\ast) \otimes \Gamma^{\boldsymbol \lambda}(\tau(\Delta_\ast)) \arrow[r, "\psi^{\boldsymbol \lambda}" ] & \Gamma^{(\nu)}(\Delta_\ast { \otimes} \tau(\Delta_\ast)) & \Gamma^{(\nu)}(J'_\ast) \arrow[l, "\ \Gamma^{(\nu)}(\alpha'_\ast) "' ] \arrow[r, " \nabla^\nu" ] & J'_\nu, \end{tikzcd} \] with the first (middle) vertical map(s) induced by the action of $\Gamma^{\boldsymbol \lambda}$ (resp.~$\Gamma^{(\nu)}$) considered as a functor $\P_\Bbbk^{\times r} \to \P_\Bbbk$. The commutativity of the left-hand square can be checked using the definition of $\psi^{\boldsymbol \lambda}$ together with Lemma \ref{psi}. 
The commutativity of the middle square follows from the functoriality of $\Gamma^{(\nu)}$ and diagram (\ref{eq:alpha2}). Finally, the commutativity of the right-hand square follows by Lemma \ref{lem:natural}. We thus have $\tau(z_{\bm S, \bm T})= z_{\bm T, \bm S}$ for all $\bm S, \bm T\in \st(\B_\ast)$, and the proof is complete. \end{proof} Let us write $\boldsymbol \Lambda^{\mathrm{op}}$ to denote the set $\boldsymbol \Lambda$ with opposite total ordering. Then it follows from the above proofs of Lemma \ref{lem:KX} and Theorem \ref{thm:cellular} that the set \[\big\{ z_{\bm S, \bm T} \mid {\boldsymbol \lambda} \in \boldsymbol \Lambda^{\mathrm{op}},\ {\bm S}, {\bm T} \in \st_{\boldsymbol \lambda}(\B_1,\dots, \B_r) \big\}\] is a cellular basis for $\G^d A$. A corresponding cellular basis for $S^A(n,d)$ can be obtained in a similar way, by replacing $A$ by $\mathrm{M}_n(A)$. \smallskip In the next example, we describe an explicit cellular basis for a special case of a generalized Schur algebra of the form $S^Z(n,d)$, where $Z$ is a zig-zag algebra. We essentially follow the definition in \cite{KM3}, using slightly different notation. Note also that we only consider $Z$ as an ordinary non-graded algebra, rather than a $\mathbb{Z}/2$-graded superalgebra as in \cite{KM3}. \begin{example}[Zig-zag algebra]\label{ex:zig} We consider the zig-zag algebra associated to the quiver below. 
\[\mathscr Q : \qquad \begin{tikzpicture}[ baseline=-2pt, black,line width=1pt, scale=0.4, every node/.append style={font=\fontsize{8}{8}\selectfont} ] \coordinate (0) at (0,0); \coordinate (1) at (4,0); \coordinate (2) at (8,0); \draw [thin, black,->,shorten <= 0.1cm, shorten >= 0.1cm] (0) to[distance=1.5cm,out=100, in=100] (1); \draw [thin,black,->,shorten <= 0.25cm, shorten >= 0.1cm] (1) to[distance=1.5cm,out=-100, in=-80] (0); \draw [thin, black,->,shorten <= 0.25cm, shorten >= 0.1cm] (1) to[distance=1.5cm,out=80, in=100] (2); \draw [thin,black,->,shorten <= 0.1cm, shorten >= 0.1cm] (2) to[distance=1.5cm,out=-100, in=-80] (1); \draw(0,0) node{$\bullet$}; \draw(4,0) node{$\bullet$}; \draw(8,0) node{$\bullet$}; \draw(0,0) node[left]{$0$}; \draw(4,0) node[right]{$1$}; \draw(8,0) node[right]{$2$}; \draw(2,1.2) node[above]{$a_{10}$}; \draw(6,1.2) node[above]{$a_{21}$}; \draw(2,-1.2) node[below]{$a_{01}$}; \draw(6,-1.2) node[below]{$a_{12}$}; \end{tikzpicture}\] Recall from \cite[Section 7.9]{KM3} that the {\em extended zig-zag algebra}, $\tilde{Z}$, is defined in this case as the quotient of the path algebra $\Bbbk \mathscr Q$ modulo the following relations: \begin{enumerate} \item All paths of length three or greater are zero. \item All paths of length two that are not cycles are zero. \item All length-two cycles based at the same vertex are equivalent. \item $ a_{21} a_{12}=0$. \end{enumerate} The length zero paths are denoted $e_0, e_1, e_2$ and correspond to standard idempotents, with $e_i a_{ij} e_j = a_{ij}$ for all admissible $i,j$. Let $e:= e_0 + e_1 \in \tilde{Z}$. Then the corresponding {\em zig-zag algebra} is $Z:=e \tilde{Z} e \subset \tilde{Z}$. Moreover, $Z$ is a cellular algebra, with anti-involution defined by $\tau(e_i)=e_i$ and $\tau(a_{ij}) = a_{ji}$ for all $i,j$. Let us describe a corresponding cellular decomposition. 
First let \[ x_1:=a_{12},\ \ x_2:=e_1,\ \ x_3:=a_{01},\ \ x_4:=e_0, \] and set $y_i:= \tau(x_i)$, for $i\in \set{1,4}$. Then we have corresponding sets \[ X(1):=\{x_1\}, \quad X(2):= \{x_2,x_3\}, \quad X(3):= \{x_4\}, \] and \[ Y(1):=\{y_1\}, \quad Y(2):= \{y_2,y_3\}, \quad Y(3):= \{y_4\}, \] parametrized by the totally ordered sets $\B_1:= \{1\}$, $\B_2:= \{2<3\}$, and $\B_3:= \{4\}$, respectively. We may then define a cellular decomposition \[Z= J_1' \oplus J_2' \oplus J_3', \] where $J_j' := \text{span}\{ xy \mid x\in X(j), y\in Y(j) \}$, for $j\in \set{1,3}$. Now let $\boldsymbol \L^{\mathrm{op}}$ denote the set $\boldsymbol \L = \L_3^+(1,2,1)$ with the opposite total ordering. One may then check using formula \eqref{z_basis} and the proof of Lemma \ref{lem:KX} that $S^Z(1,2) = \Gamma^2 Z$ has the cellular basis described in the table below, where $\boldsymbol \lambda$ runs through all multipartitions in the set $\boldsymbol \L^{\mathrm{op}}$, and where $\mathbf S$, $\mathbf T$ denote standard multitableaux of shape $\boldsymbol \lambda$, respectively. 
\smallskip \begin{center} \hspace*{-1cm} \begin{tabular}{|c| c c| c | } \hline \rule{0pt}{1.0\normalbaselineskip} $\boldsymbol{\lambda}$ & $\mathbf{S}$ & $\mathbf{T}$ & $z_{\mathbf{S,T}}$ \\[.1cm] \hline \rule{0pt}{1.25\normalbaselineskip} $(\o,\o,(2))$ & $( \o, \o, \scriptsize\young(44))$ & $( \o, \o, \scriptsize\young(44))$ & \ $e_0^{\otimes 2}$ \\[.3cm] \hline \rule{0pt}{1.25\normalbaselineskip} $(\o,(1),(1))$ & $(\o, \scriptsize\young(2),\, \young(4))$ & $(\o, \scriptsize\young(2),\, \young(4))$ & $e_0 \ast e_1$ \\[.25cm] & $''$ & $(\o, \scriptsize\young(3),\, \young(4))$ & $e_0 \ast a_{10}$ \\[.25cm] & $(\o, \scriptsize\young(3),\, \young(4))$ & $(\o, \scriptsize\young(2),\, \young(4))$ & $e_0 \ast a_{01}$ \\[.25cm] & $''$ & $(\o, \scriptsize\young(3),\, \young(4))$ & $e_0 \ast ( a_{01}a_{10})$ \\[.3cm] \hline \rule{0pt}{1.25\normalbaselineskip} $(\o,(1,1),\o)$ & $(\o, \scriptsize\young(2,3), \o)$ & $(\o, \scriptsize\young(2,3), \o)$ & $e_1 \ast ( a_{01}a_{10})$ \\[.3cm] \hline \rule{0pt}{1.25\normalbaselineskip} $(\o,(2),\o)$ & $(\o, \scriptsize\young(22), \o)$ & $(\o, \scriptsize\young(22), \o)$ & \ $e_1^{\otimes 2}$ \\[.25cm] & $''$ & $(\o, \scriptsize\young(23), \o)$ & $ e_1 \ast a_{10}$ \\[.25cm] & $''$ & $(\o, \scriptsize\young(33), \o)$ & $a_{10}^{\otimes 2}$ \\[.25cm] & $(\o, \scriptsize\young(23), \o)$ & $(\o, \scriptsize\young(22), \o)$ & $ e_1 \ast a_{01}$ \\[.25cm] & $''$ & $(\o, \scriptsize\young(23), \o)$ & $ e_1 \ast ( a_{01}a_{10})+ a_{10} \ast a_{01}$ \\[.25cm] & $''$ & $(\o, \scriptsize\young(33), \o)$ & $a_{10} \ast a_{01}a_{10}$ \\[.25cm] & $(\o, \scriptsize\young(33), \o)$ & $(\o, \scriptsize\young(22), \o)$ & $a_{01}^{\otimes 2}$ 
\\[.25cm] & $''$ & $(\o, \scriptsize\young(23), \o)$ & $a_{01} \ast (a_{01}a_{10})$ \\[.25cm] & $''$ & $(\o, \scriptsize\young(33), \o)$ & $( a_{01} a_{10})^{\otimes 2}$ \\[.25cm] \hline \rule{0pt}{1.25\normalbaselineskip} \rule{0pt}{1.25\normalbaselineskip} $((1),\o,(1))$ & $({\scriptsize\young(1)}, \o, {\scriptsize\young(4)})$ & $({\scriptsize\young(1)}, \o, {\scriptsize\young(4)})$ & $(a_{12} a_{21})\ast e_0$ \\[.3cm] \hline \rule{0pt}{1.25\normalbaselineskip} $((1),(1),\o)$ & $(\scriptsize\young(1), \young(2), \o)$ & $(\scriptsize\young(1), \young(2), \o)$ & $e_1 \ast (a_{12} a_{21})$ \\[.25cm] & $''$ & $(\scriptsize\young(1), \young(3), \o)$ & $a_{10} \ast (a_{12} a_{21})$ \\[.25cm] & $(\scriptsize\young(1), \young(3), \o)$ & $(\scriptsize\young(1), \young(2), \o)$ & $a_{01} \ast (a_{12} a_{21})$ \\[.25cm] & $''$ & $(\scriptsize\young(1), \young(3), \o)$ & $(a_{01} a_{10}) \ast (a_{12} a_{21})$ \\[.3cm] \hline \rule{0pt}{1.25\normalbaselineskip} $((2),\o,\o)$ & $({\scriptsize\young(11)}, \o, \o)$ & $({\scriptsize\young(11)}, \o, \o)$ & $(a_{12} a_{21})^{\otimes 2}$ \\[.35cm] \hline \end{tabular} \hspace*{-1cm} \end{center} The symbol, $\o$, is used above to denote an empty partition or tableau, respectively, and the symbol $''$ denotes a repeated item from the above entry. \end{example} \subsection{Cellularity of wreath products $A\wr \Si_d$} Let us first recall a result of \cite{KX} concerning idempotents fixed by an anti-involution. \begin{lemma}[\cite{KX}]\label{lem:KX2} Let $A$ be a cellular algebra with anti-involution $\tau$. If $e\in A$ is an idempotent fixed by $\tau$, then the algebra $eAe$ is cellular with respect to the restriction of $\tau$. 
\end{lemma} We then have the following consequence of Theorem \ref{thm:cellular}, which is obtained via generalized Schur-Weyl duality. \begin{corollary}\label{wreath} Suppose $d\in \mathbb{N}$. If $A$ is a cellular algebra, then $A\wr \Si_d$ is also cellular. \end{corollary} \begin{proof} Fix some $n \geq d$. Write $S^A = S^A(n,d)$, and let $e \in S^A$ denote the idempotent $e:=\xi_\omega$. It then follows by Proposition \ref{prop:EK}.(ii) that there is an algebra isomorphism $A\wr \Si_d \cong e \hp{3} S^A \hp{1} e$. Since \[\boldsymbol \tau(e) = (E_{1,1})^\tr \ast \cdots \ast (E_{d,d})^\tr = e,\] the cellularity of $A\wr \Si_d$ follows from Theorem \ref{thm:cellular} and Lemma \ref{lem:KX2}. \end{proof} Since the above result holds for an arbitrary cellular algebra $A$, we thus obtain an alternate proof of the main results of \cite{GG} and \cite{RGr} mentioned in the introduction. \begin{thebibliography}{99} \bibitem{ABW} K. Akin, D.A. Buchsbaum and J. Weyman, {\em Schur functors and Schur complexes}, Adv.~in Math. {\bf 44} (1982), no. 3, 207--278. \bibitem{Cauchy} A.-L. Cauchy, M\'emoire sur les fonctions altern\'ees et sur les sommes altern\'ees, Exercices d'analyse et de phys. math., ii (1841), 151--159; or \OE uvres compl\`etes, 2\`eme s\'erie xii, Gauthier-Villars, Paris, 1916, 173--182. \bibitem{CPS} E. Cline, B. Parshall and L. Scott, {\em Finite-dimensional algebras and highest weight categories}, J.~Reine Angew.~Math. {\bf 391} (1988), 85--99. \bibitem{EK1} A. Evseev and A. Kleshchev, {\em Turner doubles and generalized Schur algebras}, Adv.~Math. {\bf 317} (2017), 665--717. \bibitem{EK2} \bysame, {\em Blocks of symmetric groups, semicuspidal KLR algebras and zigzag Schur-Weyl duality}, Ann.~of Math. (2) {\bf 188} (2018), no. 2, 453--512. 
\bibitem{FS} E.M. Friedlander and A. Suslin, {\em Cohomology of finite group schemes over a field}, Invent.~Math. {\bf 127} (1997), no. 2, 209--270. \bibitem{GG} T. Geetha and F.M. Goodman, {\em Cellularity of wreath product algebras and $A$-Brauer algebras}, J.~Algebra {\bf 389} (2013), 151--190. \bibitem{GL} J.J. Graham and G.I. Lehrer, {\em Cellular algebras}, Invent.~Math. {\bf 123} (1996), 1--34. \bibitem{Green} J.A. Green, Polynomial Representations of $GL_n$. (Lecture Notes in Math. 830), Springer-Verlag, New York 1980. \bibitem{Green2} \bysame, {\em Combinatorics and the Schur algebra}, J. Pure Appl.~Algebra {\bf 88} (1993), 89--106. \bibitem{RGr} R. Green, {\em Cellular Structure of Wreath Product Algebras}, J. Pure Appl. Algebra 224 (2020), no. 2, 819--835. \bibitem{HK} M. Hashimoto and K. Kurano, {\em Resolutions of determinantal ideals:~$n$-minors of $(n+2)$-square matrices}, Adv.~Math. {\bf 94} (1992), no. 1, 1--66. \bibitem{KM1} A. Kleshchev and R. Muth, {\em Based quasi-hereditary algebras}, arXiv:1810.02844. \bibitem{KM2} \bysame, {\em Generalized Schur algebras}, arXiv:1810.02846. \bibitem{KM3} \bysame, {\em Schurifying quasi-hereditary algebras}, arXiv:1810.02849. \bibitem{KX} S.~K{\"o}nig, and C.C.~Xi, {\em On the structure of cellular algebras}, Algebras and modules, II (Geiranger, 1996), 365--386, CMS Conf. Proc., 24, Amer. Math. Soc., Providence, RI, 1998. \bibitem{Krause1} H.~Krause, {\em Highest weight categories and strict polynomial functors. With an appendix by Cosima Aquilino}, EMS Ser.~Congr.~Rep., Representation theory, current trends and perspectives, 331--373, Eur.~Math.~Soc., Z\"urich, 2017. \bibitem{Mac} I.G. Macdonald, Symmetric functions and Hall polynomials, second edition, Oxford Math. Mon., (1995). \end{thebibliography} \end{document}
\begin{document} \title[]{Dynamical behavior of the entanglement, purity and energy between atomic qubits in motion under the influence of thermal environment} \author{L Tan$^{1,2}$, Y Q Zhang$^{1}$, Z H Zhu$^{1}$ and L W Liu$^{1}$} \address{$^{1}$Institute of Theoretical Physics, Lanzhou University, Lanzhou 730000, China \\ $^{2}$Key Laboratory for Magnetism and Magnetic materials of the Ministry of Education, Lanzhou University, Lanzhou 730000, China} \ead{Email: [email protected]}
% The source was corrupted by a macro-name substitution that produced the control
% words below throughout the body; map them back to the intended standard symbols.
\def\langlengle{\langle}
\def\ranglengle{\rangle}
\def\omegaega{\omega}
\def\Omegaega{\Omega}
\def\langlembda{\lambda}
\def\daggergger{\dagger}
\begin{abstract} The entanglement, purity and energy of two isolated two-level atoms which are initially prepared in a Bell state and each interacts with a thermal cavity field are investigated by considering the atomic motion and the field-mode structure. We obtain the analytical solutions for the atomic qubits by using the algebraic dynamical approach, and the influences of the field-mode structure parameter, the strength of the thermal field and the detuning on the entanglement, purity and energy are discussed. We also investigate the state evolution of the atomic qubits based on the entanglement-purity-energy diagrams. Our results show that the disentanglement process of the atomic qubits is accompanied by excitations transferring from the atomic subsystem to the cavity field modes and by the atomic state converting from a pure state to mixed states. 
\end{abstract} \section{Introduction} Entanglement is one of the most remarkable features of quantum mechanics and has many practical applications in quantum information processing~\cite{Nielsen00}. However, realistic quantum systems are inevitably influenced by the surrounding environment, which always leads to decoherence of the quantum states. Particularly, the thermal field is frequently discussed in this problem. A thermal field, which is emitted by a source in thermal equilibrium at temperature $T$, is a highly chaotic field with minimal information about its mean value of the energy. However, such a chaotic field can entangle qubits that are prepared initially in a separable state~\cite{Kim02}, lead to entangled states in the interaction of a single qubit in a pure state with a thermal field regardless of the temperature of the field and reduce the system to a mixed state when the field variables are traced over~\cite{Bose01}. The influence of the thermal field strength on the atom-atom entanglement~\cite{Yan09} and atom-field entanglement~\cite{Yan08} have also been investigated. Besides, Zheng~\cite{Zheng02} proposed a scheme for realizing two-qubit quantum phase gates with atoms in a thermal cavity. Jin~\cite{Jin05} suggested a scheme of teleporting a two-atom entangled state with a thermal cavity and the success probability can reach $1.0$. In this paper, we consider two isolated two-level atoms each interacting with a single-mode thermal cavity field. The effects of the atomic motion and the field-mode structure are considered at the same time. The atomic motion and the field-mode structure not only lead to nonlinear transient effects in the atomic population~\cite{Schlicher89,Joshi90}, which are similar to self-induced transparency and adiabatic effects, but also give rise to the periodic evolution of the entropy squeezing~\cite{Liao04}, the field entropy, the atomic inversion~\cite{Fang98} and the entanglement~\cite{Yan09,Yan08}. 
Other effects by regulating the field-mode structure parameters have also been observed, e.g. decreasing the squeezing in the two-photon JC model~\cite{Bartzis92}, operating the entanglement and realizing the quantum gate operation~\cite{Joshi10}. The recent cavity quantum electrodynamics experiments which use an atomic beam passing along the axis of a rectangular or cylindrical cavity provide the feasibility of discussing the different field mode structures~\cite{Meschede85,Rempe87}. Then, in the present work, we are interested in the effects of the atomic motion and the field-mode structure on the time evolution of the atomic state in a thermal cavity field environment. We suppose the two atoms are initially prepared in one of the Bell states, which is a maximally entangled pure state. The previous studies of maximally entangled states usually use entanglement, purity and energy to characterize the set of two-qubit states~\cite{Cavalcanti06,Ziman05,McHugh06,Yu07}. As the atom-field interaction process is accompanied by the exchange of excitations between atoms and fields, the energy in the system has a direct influence on the entanglement and mixedness properties. On the other hand, the initial pure state of the atomic qubits must be influenced by interacting with a thermal cavity environment, so the mixedness is an important characteristic in the process of entanglement evolution. As a result, we will investigate the time evolution of the entanglement, purity and energy by manipulating the field-mode structure parameters, the strength of the thermal field and the detunings between atoms and thermal cavity fields. The relationships among entanglement, purity and energy will also be presented with an entanglement-purity-energy (EPE) diagram, which can offer a nice visualization of the allowed states of the atomic qubits. 
The paper is organized as follows: In Sec.$2$, we first describe the model under consideration and then derive the exact expression for the atomic reduced density matrix using algebraic dynamical approach~\cite{Yu95,Jie97,Xu99,Cen00}. The quantities used to quantify the entanglement, purity and energy of atomic qubits are also defined in this section. Sec.$3$ is devoted to investigate the time evolution of entanglement, purity and energy for the atomic qubits. In Sec.$4$, we discuss the time evolution of the atomic qubits with a EPE diagram. Finally, we present our conclusion in Sec.$5$. \section{Model} We consider two identical moving two-level atoms (A and B) and two spatially separated cavities (a and b) with non-decaying single mode fields by using very high quality factor cavities~\cite{Meschede85,Weidinger99}. Atoms $A$ and $B$ fly through cavities $a$ and $b$ with a constant velocity, respectively. We suppose the two subsystems $Aa$ and $Bb$ are identical with same value of atom-field coupling strength, frequencies and field-mode structure. The Hamiltonian for the considered system in the rotating-wave approximation can be written as ($\hbar=1$) \begin{eqnarray} H&=&H_{1}+H_{2} ,\nonumber\\ H_{1}&=&\omegaega_{c}a^{\daggergger}a+\omegaega_{0}S_{z}^{A}+gf(z)(a^{\daggergger}S_{-}^{A}+aS_{+}^{A}),\nonumber\\ H_{2}&=&\omegaega_{c}b^{\daggergger}b+\omegaega_{0}S_{z}^{B}+gf(z)(b^{\daggergger}S_{-}^{B}+bS_{+}^{B}) \end{eqnarray} where $H_{1}$ and $H_{2}$ are the Hamiltonians for subsystems Aa and Bb, respectively. $a^{\daggergger}$ and $a$ ($b^{\daggergger}$ and $b$ ) are the creation and annihilation operators of the cavity field a (b). $S_{+}^{i}$, $S_{-}^{i}$ and $S_{z}^{i}$ represent the atomic raising, lowering and inversion operators of the atom $i(i=A,B)$. $\omegaega_{c}$ and $\omegaega_{0}$ are the frequencies for the field $a$ and the atom $A$ (or the field $b$ and the atom $B$), respectively. 
$g$ is the atom-field coupling strength, and $f(z)$ is the shape function of the cavity field mode. When the interaction energy of atom-field coupling is much larger than the transverse kinetic energy spread of the atom, we can neglect the transverse velocity spread and restrict our investigation to atomic motion along the cavity axis ($z$ axis). Then the atomic motion is incorporated into $f(z)$ as follows \begin{eqnarray} f(z)\rightarrow f(\upsilon t), \end{eqnarray} where $\upsilon$ is the atomic motion velocity. In this regard the cavity field-mode $TEM_{mnp}$ is defined like $f(\upsilon t)=\sin(p\pi\upsilon t/L)$, where $p$ represents the number of half wavelengths of the field-mode inside a cavity with length $L$. If the atom passes through the cavity so fast that the atomic motion can be considered as a constant. For a proper choice of the atomic motion velocity $\upsilon=gL/\pi$, then $\int_{0}^{t}f(\upsilon t^{'})dt^{'}=[1-\cos(pgt)]/pg$. In the following, we propose the algebraic dynamical approach to derive the time evolution operator and the density operator based on the Hamiltonian (1). The key idea of the algebraic dynamical approach is introducing a canonical transformation operator that transforms the Hamiltonian into a liner function in terms of a set of Lie algebraic generators. According to algebraic dynamics, linear systems are integrable and solvable, then the time evolution operator and the density operator can be obtained easily. In the case of symmetric atom-field interaction, the two subsystems are completely equivalent. For simplicity, we will work with the subsystem $Aa$. A straightforward analysis of the Hamiltonian (1) shows that the total excitation number for subsystem $Aa$ is \begin{eqnarray} N_{1}=a^{\daggergger}a+S_{z}^{A}+\frac{1}{2}, \end{eqnarray} which is a conserved quantity for the subsystem Aa and commutes with the Hamiltonian $H_{1}$. 
Based on the algebraic dynamical approach, introducing SU(2) algebra generators $\{J_{0}$, $J_{+}$, $J_{-}\}$, with $J_{0}=S_{z}^{A}$, $J_{+}=N_{1}^{-1/2}aS_{+}^{A}$, $J_{-}=N_{1}^{-1/2}a^{\daggergger}S_{-}^{A}$, which are nonlinear expressions and obey the following commutation relations \begin{eqnarray} [J_{0},J_{+}]=J_{+},\quad [J_{0},J_{-}]=-J_{-}, \quad [J_{+},J_{-}]=2J_{0}. \end{eqnarray} In terms of the SU(2) algebra generators and the canonical transformation operator $U_{g}=\exp(\theta J_{+}-\theta J_{-})$, we can obtained the time evolution operator of the subsystem Aa \begin{eqnarray} U_{1}(t)&=&e^{-iH_{1}dt}=U_{g}e^{-i(U_{g}^{-1}H_{1}U_{g})t }U_{g}^{-1}\nonumber\\ &=& e^{-iE_{1}t}[\cos\frac{\langlembda t}{2}-2i J_{0}\sin\frac{\langlembda t}{2}\cos 2\theta + i(J_{+}+J_{-})\sin\frac{\langlembda t}{2}\sin2\theta]. \end{eqnarray} where $E_{1}=\omegaega_{c}(N_{1}-\frac{1}{2})$, $\theta=-\arctan[(\sqrt{\Delta^{2}/4+g'^{2}N_{1}}-\Delta/2)/g'N_{1}^{1/2}]$, $\langlembda=\sqrt{\Delta^{2}+4g'^{2}N_{1}}$, $g'=g\alpha/t$, $\alpha=\int_{0}^{t}f(\upsilon t^{'})dt^{'}=[1-\cos(pgt)]/pg$ and $\Delta=\omegaega_{0}-\omegaega_{c}$ is the detuning between the atom $A$ and the cavity $a$. What should be noticed here is that using canonical transformation operator to diagonalize the nonlinear Hamiltonian (1) doesn't change its intrinsic qualities. Likewise, we can get the evolution operator $U_{2}$ of the subsystem Bb, which has the similar form as $U_{1}$. \begin{eqnarray} U_{2}(t)=e^{-iE_{2}t}[\cos\frac{\eta t}{2}-2i L_{0}\sin\frac{\eta t}{2}\cos 2\phi + i(L_{+}+L_{-})\sin\frac{\eta t}{2}\sin2\phi]. \end{eqnarray} where $E_{2}=\omegaega_{c}(N_{2}-\frac{1}{2})$, $\phi=-\arctan[(\sqrt{\Delta^{2}/4+g'^{2}N_{2}}-\Delta/2)/g'N_{2}^{1/2}]$, and $\eta=\sqrt{\Delta^{2}+4g'^{2}N_{2}}$. $N_{2}=b^{\daggergger}b+S_{z}^{B}+\frac{1}{2}$ is the total excitation number for subsystem $Bb$. 
$\{L_{0}$, $L_{+}$, $L_{-}\}$ are the SU(2) algebra generators with $L_{0}=S_{z}^{B}$, $L_{+}=N_{2}^{-1/2}bS_{+}^{B}$, $L_{-}=N_{2}^{-1/2}b^{\daggergger}S_{-}^{B}$. Throughout this paper we suppose the two atoms $AB$ to be initially prepared in one of the Bell states, $|\Psi\ranglengle=\frac{1}{\sqrt{2}}(|eg\ranglengle+|ge\ranglengle)$, and the two thermal cavity fields $ab$ are in single-mode thermal field states $\rho_{a}(0)=\sum_{n=0}^{\infty}P_{n}|n\ranglengle\langlengle n|$, $\rho_{b}(0)=\sum_{m=0}^{\infty}P_{m}|m\ranglengle\langlengle m|$. As a result, the initial density operators for the two atoms and the two thermal cavity fields can be written as \begin{eqnarray} \rho_{AB}(0)&=&|\Psi\ranglengle\langlengle\Psi|=\frac{1}{2}(|eg\ranglengle\langlengle eg|+|eg\ranglengle\langlengle ge|+|ge\ranglengle\langlengle eg|+|ge\ranglengle\langlengle ge|),\nonumber\\ \rho_{f}(0)&=&\rho_{a}(0)\otimes\rho_{b}(0)=\sum_{n=0}^{\infty}\sum_{m=0}^{\infty}P_{n}P_{m}|nm\ranglengle\langlengle nm|, \end{eqnarray} where $P_{n}=\frac{k^{n}}{(k+1)^{n+1}}$, $P_{m}=\frac{l^{m}}{(l+1)^{m+1}}$. $k=1/[\exp(\omegaega_{c}/T_{a})-1]$ and $l=1/[\exp(\omegaega_{c}/T_{b})-1]$, $k$ and $l$ are the mean photon numbers of the thermal cavity field mode $a$ and the thermal cavity field mode $b$, corresponding to the temperatures $T_{a}$ and $T_{b}$, respectively. Then, the initial density operator for the total system can be derived as \begin{eqnarray} \rho_{AB-f}(0)&=& \rho_{AB}(0)\otimes\rho_{f}(0)\nonumber\\ &=&\frac{1}{2}\sum_{n}\sum_{m}P_{n}P_{m}(|engm\ranglengle\langlengle engm|+|engm\ranglengle\langlengle gnem|\nonumber\\ &+&|gnem\ranglengle\langlengle engm|+|gnem\ranglengle\langlengle gnem|), \end{eqnarray} where the $|engm\ranglengle$ indicates that atom $A$ is in the excited state and atom $B$ is in the ground state, field mode $a$ and field mode $b$ are in the states $ |n\ranglengle$ and $|m\ranglengle$, respectively. 
The initial state (8) under the action of the operator $U_{1}(t)\otimes U_{2}(t)$ evolves to \begin{eqnarray} \rho_{AB-f}(t)=U_{1}(t)U_{2}(t)\rho_{AB-f}(0)U_{2}^{\daggergger}(t)U_{1}^{\daggergger}(t). \end{eqnarray} Then, from Eq.(9), we can get the reduced density matrix $\rho_{AB}(t)$ of the subsystem $AB$ by tracing over the thermal cavity field variables. In terms of the atomic basis states $|gg\ranglengle$, $|ge\ranglengle$, $|eg\ranglengle$, and $|ee\ranglengle$, the reduced density operator $\rho_{AB}(t)$ can be expressed as \begin{equation} \rho_{AB}(t)=Tr_{f}[\rho_{AB-f}(t)]=\left( \begin{array}{cccc} x_{1} & 0 & 0 & 0 \\ 0 & x_{2} & x_{3} & 0 \\ 0 &x_{4} & x_{5} & 0 \\ 0 & 0 & 0 & x_{6} \end{array} \right) \end{equation} where $x_{1}+x_{2}+x_{5}+x_{6}=1$, \begin{eqnarray} x_{1}&=&\frac{1}{2}\sum_{n}\sum_{m}\{P_{n-1}P_{m} [\sin^{2}(\frac{\langlembda_{n}t}{2})\sin^{2}(2\theta_{n})][\cos^{2}(\frac{\eta_{m}t}{2})+\sin^{2}(\frac{\eta_{m}t}{2})\cos^{2}(2\phi_{m})]\nonumber\\ &+&P_{n}P_{m-1}[\cos^{2}(\frac{\langlembda_{n}t}{2})+\sin^{2}(\frac{\langlembda_{n}t}{2})\cos^{2}(2\theta_{n})] [\sin^{2}(\frac{\eta_{m}t}{2})\sin^{2}(2\phi_{m})]\},\nonumber\\ x_{2}&=&\frac{1}{2}\sum_{n}\sum_{m}\{P_{n}P_{m} [\cos^{2}(\frac{\langlembda_{n}t}{2})+\sin^{2}(\frac{\langlembda_{n}t}{2})\cos^{2}(2\theta_{n})]\nonumber\\ &\times&[\cos^{2}(\frac{\eta_{m+1}t}{2})+\sin^{2}(\frac{\eta_{m+1}t}{2})\cos^{2}(2\phi_{m+1})]\nonumber\\ &+&P_{n-1}P_{m+1} [\sin^{2}(\frac{\langlembda_{n}t}{2})\sin^{2}(2\theta_{n})][\sin^{2}(\frac{\eta_{m+1}t}{2})\sin^{2}(2\phi_{m+1})]\},\nonumber\\ x_{3}&=&\frac{1}{2}\sum_{n}\sum_{m}\{P_{n}P_{m} [\cos(\frac{\langlembda_{n}t}{2})+i\sin(\frac{\langlembda_{n}t}{2})\cos2\theta_{n})]\nonumber\\ &\times&[\cos(\frac{\langlembda_{n+1}t}{2})+i\sin(\frac{\langlembda_{n+1}t}{2})\cos(2\theta_{n+1})]\nonumber\\ &\times&[\cos(\frac{\eta_{m+1}t}{2})-i\sin(\frac{\eta_{m+1}t}{2}\cos2\phi_{m+1})][\cos(\frac{\eta_{m}t}{2})-i\sin(\frac{\eta_{m}t}{2})\cos(2\phi_{m})]\}, \nonumber\\ 
x_{4}&=&x_{3}^{\ast},\nonumber\\ x_{5}&=& \frac{1}{2}\sum_{n}\sum_{m}\{P_{n+1}P_{m-1} [\sin^{2}(\frac{\langlembda_{n+1}t}{2})\sin^{2}(2\theta_{n+1})][\sin^{2}(\frac{\eta_{m}t}{2})\sin^{2}(2\phi_{m})]\nonumber\\ &+&P_{n}P_{m} [\cos^{2}(\frac{\langlembda_{n+1}t}{2})+\sin^{2}(\frac{\langlembda_{n+1}t}{2})\cos^{2}(2\theta_{n+1})]\nonumber\\ &\times&[\cos^{2}(\frac{\eta_{m}t}{2})+\sin^{2}(\frac{\eta_{m}t}{2})\cos^{2}(2\phi_{m})]\},\nonumber\\ x_{6}&=&\frac{1}{2}\sum_{n}\sum_{m}\{P_{n}P_{m+1} [\cos^{2}(\frac{\langlembda_{n+1}t}{2})+\sin^{2}(\frac{\langlembda_{n+1}t}{2})\cos^{2}(2\theta_{n+1})]\nonumber\\ &\times&[\sin^{2}(\frac{\eta_{m+1}t}{2})\sin^{2}(2\phi_{m+1})]\nonumber\\ &+&P_{n+1}P_{m} [\sin^{2}(\frac{\langlembda_{n+1}t}{2})\sin^{2}(2\theta_{n+1})][\cos^{2}(\frac{\eta_{m+1}t}{2})+\sin^{2}(\frac{\eta_{m+1}t}{2})\cos^{2}(2\phi_{m+1})]\},\nonumber\\ \end{eqnarray} and $\langlembda_{n}=\sqrt{\Delta^{2}+4g'^{2}n}$, $\eta_{m}=\sqrt{\Delta^{2}+4g'^{2}m}$, $\theta_{n}=-\arctan[(\sqrt{\Delta^{2}/4+g'^{2}n}-\Delta/2)/g'n^{1/2}]$, $\phi_{m}=-\arctan[(\sqrt{\Delta^{2}/4+g'^{2}m}-\Delta/2)/g'm^{1/2}]$. Based on the analytical solution of $\rho_{AB}(t)$, we can conveniently make some approximations in analyzing the numerical results in the following section. Besides, we can define the quantities to quantify the entanglement, purity and energy of the atomic qubits and give their expressions in terms of the matrix elements of $\rho_{AB}(t)$. We adopt Wootters' concurrence as a measure of entanglement in this discussion~\cite{Wootters98}, which is denoted as $C_{AB}=Max\{0,\sqrt{\langlembda_{1}}-\sqrt{\langlembda_{2}}-\sqrt{\langlembda_{3}}-\sqrt{\langlembda_{4}}\}$, and $\langlembda_{i}$ are the eigenvalues of the matrix $(\rho_{AB}\tilde{\rho}_{AB})$ in non-increasing order. Following Eq.(10), the expression of $C_{AB}$ turns out to be \begin{eqnarray} C_{AB}=2Max\{0,|x_{3}|-\sqrt{x_{1}\times x_{6}}\}. 
\end{eqnarray} When the value of $C_{AB}$ is positive, the atomic system is entangled. $C_{AB}=1$ corresponds to the maximal entanglement state, while $C_{AB}=0$ indicates the atom A and the atom B are separable. The energy $U_{AB}$ of subsystem $AB$ is defined here as the expectation value of the Hamiltonian $H_{AB}=\omegaega_{0}S_{z}^{A}+\omegaega_{0}S_{z}^{B}$. We set $\omegaega_{0}=1$, then the energy $U_{AB}$ can be obtained based on the expression of Eq.(10) \begin{eqnarray} U_{AB}=Tr\{\rho_{AB-f}(t)H_{AB}\}=x_{6}-x_{1} \end{eqnarray} For two two-level atoms, $U_{AB}$ ranges from -1 for $\rho_{AB}=|gg\ranglengle\langlengle gg|$ to 1 for $\rho_{AB}=|ee\ranglengle\langlengle ee|$. To quantify the mixedness of the state $\rho_{AB}(t)$, we use the purity \begin{eqnarray} P_{AB}=Tr\{\rho_{AB}^{2}(t)\}=x_{1}^{2}+ x_{2}^{2}+ x_{5}^{2}+ x_{6}^{2}+2x_{3}x_{4} \end{eqnarray} For the atomic qubits, $P_{AB}$ ranges from 1/d for completely mixed state to 1 for pure state for d-dimensional systems, which is closely to the linear entropy measure of mixedness. The aim of this paper is to address the question how the atomic motion and the field-mode structure influence the state $\rho_{AB}(t)$ of the atomic qubits in the cases of thermal environment. We know that the two atoms are initially in the maximal entanglement state, and the atomic state will evolves with time followed by the variation of the entanglement, purity and the transfer of the energy. Their time evolution will be discussed in the next section. \section{Entanglement, purity and energy versus time} There are three controllable parameters in the analytical expression of $\rho_{AB}(t)$: the field-mode structure parameter, the mean photon number in each cavity and the detuning. In this section, we will discuss their effects of the three parameters on the time evolution of the entanglement, purity and energy of the atomic subsystem. 
In Fig.1 and Fig.2 we plot the time evolution of $C_{AB}$, $P_{AB}$ and $U_{AB}$ affected by different values of the field-mode structure parameters and the mean photon number in the situation of exact resonance. Form Eq.(11), we can easily find that for resonant atom-field coupling, $\Delta$=0, $\langlembda_{n}=2g'\sqrt{n}$, $\eta_{m}=2g'\sqrt{m}$, $\sin2\theta_{n}=\sin2\phi_{m}=-1$, $\cos2\theta_{n}=\cos2\phi_{m}=0$. The elements of the matrix $\rho_{AB}$ expressed in Eq.(11) convert to \begin{eqnarray} x_{1}&=&\frac{1}{2}\sum_{n}\sum_{m}[P_{n-1}P_{m} \sin^{2}(g't\sqrt{n})\cos^{2}(g't\sqrt{m})\nonumber\\ &+&P_{n}P_{m-1} \cos^{2}(g't\sqrt{n})\sin^{2}(g't\sqrt{m})],\nonumber\\ x_{2}&=&\frac{1}{2}\sum_{n}\sum_{m}[P_{n}P_{m} \cos^{2}(g't\sqrt{n})\cos^{2}(g't\sqrt{m+1})\nonumber\\ &+&P_{n-1}P_{m+1} \sin^{2}(g't\sqrt{n})\sin^{2}(g't\sqrt{m+1})],\nonumber\\ x_{3}&=&\frac{1}{2}\sum_{n}\sum_{m}[P_{n}P_{m}\cos(g't\sqrt{n})\cos(g't\sqrt{n+1}) \cos(g't\sqrt{m})\cos(g't\sqrt{m+1})],\nonumber\\ x_{4}&=&x_{3},\nonumber\\ x_{5}&=& \frac{1}{2}\sum_{n}\sum_{m}[P_{n+1}P_{m-1} \sin^{2}(g't\sqrt{n+1})\sin^{2}(g't\sqrt{m})\nonumber\\ &+&P_{n}P_{m}\cos^{2}(g't\sqrt{n+1}) \cos^{2}(g't\sqrt{m})],\nonumber\\ x_{6}&=&\frac{1}{2}\sum_{n}\sum_{m}[P_{n}P_{m+1} \cos^{2}(g't\sqrt{n+1})\sin^{2}(g't\sqrt{m+1})\nonumber\\ &+&P_{n+1}P_{m} \sin^{2}(g't\sqrt{n+1})\cos^{2}(g't\sqrt{m+1})]. \end{eqnarray} \begin{figure} \caption{\langlebel{apop} \end{figure} \subsection{the effects of field-mode structure parameters} The atomic initial state is a Bell state, which is a pure state with $P_{AB}=1$ as well as a maximal entanglement state with $C_{AB}=1$. However, the initial energy for the atomic subsystem is zero, that is $U_{AB}=0$. Fig.1 illustrates the cases when the atom is in motion at the velocity $\upsilon=gL/\pi$ for parameters $p=1$ and $p=4$, respectively. 
It has been studied that when the atomic motion is considered, the time behaviors of field entropy, atomic inversion~\cite{Fang98}, and entropy squeezing~\cite{Liao04} are periodical, and their evolution periods are shorten with the increase of parameter $p$. Similar behaviors occur in this work. From Fig.1 we can find that the evolution periods is decreased with the increase of parameter $p$. This is because the time factor is the scaled time $gt$ when the atomic motion is neglected, and is $g't$ when the atomic motion is taken into account. $g't=[1-\cos(pgt)]/p$ is a periodical function on the scaled time $gt$ with period $2\pi/p$. In addition, the amplitudes for $C_{AB}$, $P_{AB}$ and $U_{AB}$ are reduced while their maximum values are still unchanged. That is, compared with $p=1$, the ``sudden death of entanglement" disappears, the maximum mixedness of the atomic state reduces and the energy exchange between atoms and field modes decreases with the increase of the field-mode structure parameter $p$. \subsection{the effects of mean photon number in each cavity} What we talked about in Fig.1 is just limited to the situation of very weak thermal field with mean photon number $k=l=0.1$ in each cavities. Then we are interested in how the atomic qubits evolves as the mean photon number in each cavity increases. It has been demonstrated that thermal cavity field can lead to entangled state of quantum qubits interacting with it~\cite{Kim02,Bose01}, while strong thermal cavity field can inhibit the atom-atom entanglement~\cite{Yan09} and atom-field entanglement~\cite{Yan08}. Here, we pay our attention to the influence of mean photon number on the non-local atom-atom entanglement, in the case of exact resonance, as is shown in Fig.2(a). In addition, in Fig.2(b) and Fig.2(c), further study is employed to the time evolution of purity and energy, which can help us to get more information about entanglement evolution. 
From Fig.2 we can find that with the increase of mean photon number in each cavity, both the amplitudes and the maximum values for $C_{AB}$, $P_{AB}$ and $U_{AB}$ decrease. That is, compared with the case of weak thermal cavity field, the atomic qubits, which couples to strong thermal cavity fields, can not evolve to a maximal entanglement state with $C_{AB}=1$ as well as a pure state with $P_{AB}=1$. Meanwhile, the time interval of the ``sudden death of entanglement" lengthens, the maximum mixedness increases, while the energy transfer between atomic qubits and field modes in each interacting period is more and more less. \begin{figure} \caption{\langlebel{xpulses} \end{figure} \subsection{the effects of detunings between atom and field} The effects of detuning on the entanglement, purity and energy between atom A and atom B are depicted in Fig.3. In Fig.3(a), $C_{AB}$ oscillates at first and as time evolves it will maintain the maximal entanglement state when the influence of the atomic motion is considered and $\Delta\neq0$. Moreover, the larger the value of detuning, the faster the $C_{AB}$ reaches the stable maximal value 1. When $\Delta\gg g$, the oscillating almost vanishes and the two atoms nearly entangle maximally all the time as time evolves. Similar behavior to the time evolution of $P_{AB}$ is depicted in Fig.3(b). In Fig.3(c) we can find that with the increase of $\Delta$, the amplitude of $U_{AB}$ is reduced gradually, and large values of $\Delta$ make it more easy to get to zero. In a word, with the increase of detuning, the atomic subsystem is almost "frozen" in the initial state. This can be explained as follows: on the one hand, based on the expression of $\rho_{AB}$, for weak thermal cavity fields $k=l=0.1$ and large detuning $\Delta\gg g$, $\langlembda_{n}=\eta_{m}\approx\Delta$, $\sin2\theta_{n}=\sin2\phi_{m}\approx0$, $\cos2\theta_{n}=\cos2\phi_{m}\approx1$. 
As a result, $x_{1}=x_{6}\approx0$, $x_{2}=x_{3}=x_{4}=x_{5}\approx\frac{1}{2}\sum_{n}\sum_{m}P_{n}P_{m}$. Then, $C_{AB}=2Max\{0,|x_{3}|-\sqrt{x_{1}\times x_{6}}\}\approx\sum_{n}\sum_{m}P_{n}P_{m}=1$, $P_{AB}=Tr\{\rho_{AB}^{2}(t)\}=x_{1}^{2}+ x_{2}^{2}+ x_{5}^{2}+ x_{6}^{2}+2x_{3}x_{4}\approx\sum_{n}\sum_{m}P_{n}P_{m}=1$, $U_{AB}=Tr\{\rho_{AB-f}(t)H_{AB}\}=x_{6}-x_{1}\approx0$; on the other hand, the interacting process between atoms and fields is accompanied by the transfer of the excitation between the localized atom and cavity mode, which depends on the atom-field coupling and is distinctly influenced by the value of detuning. Larger detuning can inhibit the atom-field coupling and restrain this transfer process greatly, therefore the initially maximal entanglement pure state can be "frozen" in the atomic subsystem. \begin{figure} \caption{\langlebel{switchfunc} \end{figure} \section{Entanglement-purity-energy diagram} In this section, we devote to investigate the relationships among entanglement, purity and energy for atomic subsystem, which reflects much of the nontrivial information about the particular atomic state in the atom-field interacting process. Here, we limit our study to the weak thermal field with $k=l=0.1$ and the field-mode structure parameter $p=1$ in resonant situation. Under these conditions, $\cos2\theta_{n}=\cos2\phi_{m}=0$, $\sin2\theta_{n}=\sin2\phi_{m}=-1$. We plot entanglement-purity-energy diagram in Fig.4(a) and show its projections on entanglement-energy and entanglement-purity planes in Fig.4(b) and Fig.4(c), respectively. At initial time, $C_{AB}=1$, $P_{AB}=1$ and $U_{AB}=0$, the atomic qubits in the maximal entangled state. From Fig.4(b) we can find that the disentanglement process accompanies by excitations transferring from atomic subsystem to cavity field modes and atomic state from a pure state convert to mixed states. 
The minimal energy for atomic subsystem is about -0.7 when the two atoms are separable, and the maximum value is zero when the two atoms are in the maximal entanglement state. This suggests the atomic state can not evolve to $\rho_{AB}=|gg\ranglengle\langlengle gg|$ ($U_{AB}=-1$) or $\rho_{AB}=|ee\ranglengle\langlengle ee|$ ($U_{AB}=1$) in the atom-field interaction process, which can also be confirmed in Fig.4(c). When $C_{AB}=0$, $P_{AB}\neq1$, the atomic qubits is in mixed state when they are separable. While $C_{AB}=1$ corresponding to $P_{AB}=1$, this indicates that the atomic qubits can just realize the maximal entanglement pure state but the maximal entanglement mixed state can not be obtained. \begin{figure} \caption{\langlebel{switchfunc} \end{figure} \section{Conclusion} In this paper, we employed three parameters---entanglement, purity and energy to describe the information about two distant atoms which are initially prepared in Bell state. Our results showed that considering the atomic motion and the field-mode structure can lead to the periodic evolution of entanglement, purity and energy. With the increase of field-mode structure parameter $p$, both their evolution periods and their amplitudes are decreased while their maximum values are unchanged. However, strong thermal field can reduce the peak values of entanglement, purity and energy of the atomic qubits and make the atomic state initially in a pure state to mixed states. Meanwhile, in such a chaotic field, energy transfer between atoms and fields is more and more less with the increase of thermal field strength. In addition, large detuning is in favour of reducing their oscillation time and "frozing" the initial maximal entanglement state in the atomic subsystem. We also analyzed the possible state that the atomic qubits may evolves into. 
From the entanglement-purity-energy diagram we found that the disentanglement process for the atomic subsystem is accompanied both by the excitation transferring from the atomic subsystem to the cavity field modes and by the state converting from a pure state to mixed states. Our numerical results showed that, when the atomic state is in the maximal entanglement state, it is in a pure state at the same time; when the two atoms are separable, the atomic state is in a mixed state. However, in the atom-field interacting process, the state for atomic qubits cannot evolve to the maximally entangled mixed state. \section*{Acknowledgment} This research is supported by the National Natural Science Foundation of China under Grant No. 10704031, the National Science Foundation for Fostering Talents in Basic Research of the National Natural Science Foundation of China Under Grant No. J0630313, the fundamental Research Fund for Physical and Mathematical of Lanzhou University Under Grant No. Lzu05001, and the Natural Science Foundation of Gansu Under Grant No. 3ZS061-A25-035. \section*{References} \end{document}
\begin{document} \title{Asymmetric R\'enyi Problem} \author {\spreadout{M. DRMOTA}{$^1$} \thanks{Research partially supported by the Austrian Science Foundation FWF Grant~No. F50-02.} and \spreadout{A. MAGNER}{$^2$} \thanks{Research supported by NSF Center for Science of Information (CSoI) Grant CCF-0939370.} and \spreadout{W. SZPANKOWSKI}{$^3$} \thanks{Research partially supported by the NSF Center for Science of Information (CSoI) Grant CCF-0939370, and in addition by the NSF Grants CCF-1524312, and NIH Grant 1U01CA198941-01.} \\ \affilskip {$^1$} Institute for Discrete Mathematics and Geometry, TU Wien, Vienna, Austria, \\ \affilskip A-1040 Wien, Wiedner Hauptstr. 8--10 \\ {$^2$} Coordinated Science Lab, UIUC, Champaign, \\ \affilskip IL 61820, USA\\ {$^3$} Department of Computer Science, Purdue University, \\ \affilskip IN 47907, USA \\ [email protected], [email protected], and [email protected]} \label{firstpage} \maketitle \begin{abstract} In 1960 R\'enyi in his Michigan State University lectures asked for the number of random queries necessary to recover a hidden bijective labeling of $n$ distinct objects. In each query one selects a random subset of labels and asks, which objects have these labels? We consider here an asymmetric version of the problem in which in every query an object is chosen with probability $p > 1/2$ and we ignore ``inconclusive'' queries. We study the number of queries needed to recover the labeling in its entirety ($H_n$), before at least one element is recovered ($F_n$), and to recover a randomly chosen element $(D_n)$. This problem exhibits several remarkable behaviors: $D_n$ converges in probability but not almost surely; $H_n$ and $F_n$ exhibit phase transitions with respect to $p$ in the second term. We prove that for $p>1/2$ with high probability (whp) we need $ H_n=\log_{1/p} n +\frac 12 \log_{p/(1-p)}\log n +o(\log \log n) $ queries to recover the entire bijection. 
This should be compared to its symmetric ($p=1/2$) counterpart established by Pittel and Rubin, who proved that in this case one requires $ H_n=\log_{2} n +\sqrt{2 \log_{2} n} +o(\sqrt{\log n}) $ queries. As a bonus, our analysis implies novel results for random PATRICIA tries, as the problem is probabilistically equivalent to that of the height, fillup level, and typical depth of a PATRICIA trie built from $n$ independent binary sequences generated by a biased($p$) memoryless source. \end{abstract} \section{Introduction} In his lectures in the summer of 1960 at Michigan State University, Alfred R\'enyi discussed several problems related to random sets \cite{renyi}. Among them there was a problem regarding recovering a labeling of a set $X$ of $n$ distinct objects by asking random subset questions of the form ``which objects correspond to the labels in the (random) set $B$?'' For a given method of randomly selecting queries, R\'enyi's original problem asks for the typical behavior of the number of queries necessary to recover the hidden labeling. Formally, the unknown labeling of the set $X$ is a bijection $\phi$ from $X$ to a set $A$ of labels (necessarily with equal cardinality $n$), and a query takes the form of a subset $B \subseteq A$. The response to a query $B$ is $\phi^{-1}(B) \subseteq X$. Our contribution in this paper is a precise analysis of several parameters of R\'enyi's problem for a particular natural probabilistic model on the query sequence. In order to formulate this model precisely, it is convenient to first state a view of the process that elucidates its tree-like structure. In particular, a sequence of queries corresponds to a refinement of partitions of the set of objects, where two objects are in different partition elements if they have been distinguished by some sequence of queries. 
More precisely, the refinement works as follows: before any questions are asked, we have a trivial partition $ {\frak{P}}_0 = X$ consisting of a single class (all objects). Inductively, if $ {\frak{P}}_{j-1}$ corresponds to the partition induced by the first $j-1$ queries, then $ {\frak{P}}_j$ is constructed from $ {\frak{P}}_{j-1}$ by splitting each element of $ {\frak{P}}_{j-1}$ into at most two disjoint subsets: those objects that are contained in the preimage of the $j$th query set $B_j$ and those that are not. The hidden labeling is recovered precisely when the partition of $X$ consists only of singleton elements. An instance of this process may be viewed as a rooted binary tree (which we call the \emph{partition refinement tree}) in which the $j$th level, for $j \geq 0$, corresponds to the partition resulting from $j$ queries; a node in a given level corresponds to an element of the partition associated with that level. A right child corresponds to a subset of a parent partition element that is included in the subsequent query, and a left child corresponds to a subset that is not included. See Example~\ref{QuerySequenceExample} for an illustration. \begin{example}[Demonstration of partition refinement] \label{QuerySequenceExample} Consider an instance of the problem where $X = [5] = \{1, ..., 5\}$, with labels $(d, e, a, c, b)$ respectively (so $A = \{a, b, c, d, e\}$). 
Consider the following sequence of queries: \tikzstyle{level 1}=[level distance=1.0cm, sibling distance=2.5cm] \tikzstyle{level 2}=[level distance=1.0cm, sibling distance=1.5cm] \tikzstyle{bag} = [rectangle, minimum width=3pt,inner sep=0pt] \tikzstyle{end} = [circle, minimum width=3pt,fill, inner sep=0pt] \begin{minipage}{0.5\textwidth} \begin{enumerate} \item $B_1 = \{b, d\} \mapsto \{1, 5\}$ \item $B_2 = \{a, b, d\} \mapsto \{1, 3, 5\}$, \item $B_3 = \{a, c, d\} \mapsto \{1, 3, 4\}$, \end{enumerate} \end{minipage} \begin{minipage}{0.5\textwidth} \begin{tikzpicture}[grow=down] \node[bag]{\{1, 2, 3, 4, 5\}} child{ node[bag]{\{2,3,4\}} child{ node[bag]{\{2,4\}} child{ node[rectangle,draw]{2} } child{ node[rectangle,draw]{4} } } child{ node[rectangle,draw]{3} } } child{ node[bag]{\{1,5\}} child{ node[bag,right]{\{1, 5\}} child{ node[rectangle,draw]{5} edge from parent node[above]{} } child{ node[rectangle,draw]{1} edge from parent node[above]{} } } }; \end{tikzpicture} \label{QuerySequenceExampleDiagram} \end{minipage} Each level $j\geq 0$ of the tree depicts the partition $ {\frak{P}}_j$, where a right child node corresponds to the subset of objects in the parent set which are contained in the response to the $j$th query. Singletons are only explicitly depicted in the first level in which they appear. We can determine the labels of all objects using the tree and the sequence of queries: for example, to determine the label of the object $3$, we traverse the tree until we reach the leaf corresponding to $3$. This indicates that the label corresponding to $3$ is in the singleton set \[ \neg B_1 \cap B_2 = \{a,c,e\} \cap \{a, b, d\} = \{a\}. \] Note that leaves of the tree always correspond to singleton sets. \end{example} In this work we consider a version of the problem in which, in every query, each label is included independently with probability $p > 1/2$ (the \emph{asymmetric case}) and we \emph{ignore inconclusive queries}. 
In particular, if a candidate query fails to nontrivially split some element of the previous partition, we modify the query by deciding again independently whether or not to include each label of that partition element with probability $p$. We perform this modification until the resulting query splits every element of the previous partition nontrivially. See Example~\ref{QuerySequenceIgnoreExample}. \begin{example}[Ignoring inconclusive queries] \label{QuerySequenceIgnoreExample} Continuing Example~\ref{QuerySequenceExample}, the query $B_2$ fails to split the partition element $\{1, 5\}$, so it is an example of an inconclusive query and would be modified in our model to, say, $B_2' = \phi(\{1,3\})$. The resulting refinement of partitions is depicted as a tree here. Note that the tree now does not contain non-branching paths and that $B_2$ is ignored in the final query sequence. \begin{minipage}{0.5\textwidth} \begin{enumerate} \item $B_1 = \{b, d\} \mapsto \{1,5\}$ \item $B_2' = \{a,d\} \mapsto \{1,3\}$ \item $B_3 = \{a, c, d\} \mapsto \{1, 3, 4\}$. 
\end{enumerate} \end{minipage} \begin{minipage}{0.5\textwidth} \tikzstyle{level 1}=[level distance=1.0cm, sibling distance=2.5cm] \tikzstyle{level 2}=[level distance=1.0cm, sibling distance=1.5cm] \tikzstyle{bag} = [rectangle, minimum width=3pt,inner sep=0pt] \tikzstyle{end} = [circle, minimum width=3pt,fill, inner sep=0pt] \begin{tikzpicture}[grow=down] \node[bag]{\{1, 2, 3, 4, 5\}} child{ node[bag]{\{2,3,4\}} child{ node[bag]{\{2,4\}} child{ node[rectangle,draw]{2} } child{ node[rectangle,draw]{4} } } child{ node[rectangle,draw]{3} } } child{ node[bag]{\{1,5\}} child{ node[rectangle,draw]{5} edge from parent node[above]{} } child{ node[rectangle,draw]{1} edge from parent node[above]{} } }; \end{tikzpicture} \end{minipage} \end{example} We study three parameters of this random process: $H_n$, the number of such queries needed to recover the entire labeling; $F_n$, the number needed before at least one element is recovered; and $D_n$, the number needed to recover an element selected uniformly at random. Our objective is to present precise probabilistic estimates of these parameters. The symmetric version (i.e., $p=1/2$) of the problem (with a variation) was discussed by Pittel and Rubin in \cite{pittelrubin1990}, where they analyzed the typical value of $H_n$. In their model, a query is constructed by deciding whether or not to include each label from $A$ independently with probability $p=1/2$. To make the problem more interesting, they added a constraint similar to ours: namely, a query is, as in our model, admissible if and only if it splits every nontrivial element of the current partition. In contrast with our model, however, Pittel and Rubin completely discard inconclusive queries (rather than modifying their inconclusive subsets as we do). Despite this difference, the model considered in \cite{pittelrubin1990} is probabilistically equivalent to ours for the symmetric case. 
Our primary contribution is the analysis of the problem in the asymmetric case ($p > 1/2$), but our methods of proof allow us to recover the results of Pittel and Rubin. The question asked by R\'enyi brings some surprises. For the symmetric model ($p=1/2$) Pittel and Rubin \cite{pittelrubin1990} were able to prove that the number of necessary queries is with high probability (whp) (see Theorem~\ref{HeightTheorem}) \begin{eqnarray} \label{e1} H_n = \log_{2} n +\sqrt{2\log_{2} n} +o(\sqrt{\log n}). \end{eqnarray} In this paper, we develop a different method that could be used to re-establish this result {and} prove that for $p > 1/2$ the number of queries grows whp as \begin{eqnarray} \label{e2} H_n = \log_{1/p} n +\frac{1}{2}\log_{p/q} \log n +o(\log \log n), \end{eqnarray} where $q:=1-p$. Note a phase transition in the second term. Moreover, this result is perhaps interesting in the sense that, for $p > 1/2$, $H_n$ exhibits the second-order behavior that Pittel and Rubin stated that they fully expected but did not find in the $p=1/2$ case \cite{pittelrubin1990}. We show that another phase transition, also in the second term, occurs in the asymptotics for $F_n$ (see Theorem~\ref{FillupTheorem}): \begin{eqnarray} \label{e3} F_n = \left\{ \begin{array}{ll} \log_{1/q} n - \log_{1/q}\log\log n + o(\log\log\log n) & p > q \\ \log_{2} n - \log_2\log n + o(\log\log n) & p = q = 1/2. \end{array} \right. \end{eqnarray} We also state in Theorem~\ref{DepthCorollary} some interesting probabilistic behaviors of $D_n$. We have $D_n/\log n \to 1/h(p)$ (in probability) where $h(p) := -p\log p - q\log q$, but we do not have almost sure convergence. 
We establish these results in a novel way by considering first the \emph{external profile} $B_{n,k}$, whose analysis was, until recently, an open problem of its own (the second and third authors gave a precise analysis of the external profile in an important range of parameters in \cite{magnerPhD2015,magnerspa2015}, but the present paper requires really nontrivial extensions). The external profile at level $k$ is the number of bijection elements revealed by the $k$th query (one may also define the \emph{internal} profile at level $k$ as the number of non-singleton elements of the partition immediately after the $k$th query). Its study is motivated by the fact that many other parameters, including all of those that we mention here, can be written in terms of it. Indeed, $ {\frak{P}}r[D_n=k]= {\mathbb{E}}[B_{n,k}]/n$, $H_n = \max\{k : ~ B_{n,k} > 0\}$, and $F_n = \min\{k : ~ B_{n,k} > 0\} - 1$. We now discuss our new results concerning the probabilistic behavior of the external profile. We establish in \cite{magnerspa2015,magnerPhD2015} precise asymptotic expressions for the expected value and variance of $B_{n,k}$ in the \emph{central range}, that is, with $k \sim \alpha\log n$, where, for any fixed $\epsilon > 0$, $\alpha \in (1/\log(1/q) + \epsilon, 1/\log(1/p) - \epsilon)$ (the left and right endpoints of this interval as $\epsilon \to 0$ are associated with $F_n$ and $H_n$, respectively). Specifically, it was shown that both the mean and the variance are of the same (explicit) polynomial order of growth (with respect to $n$). More precisely, expected value and variance grow for $k\sim\alpha \log n$ as $$ H(\rho(\alpha), \log_{p/q}(p^kn)) ~ \frac{n^{\beta(\alpha)}}{\sqrt{C \log n}} $$ where $\beta(\alpha)\leq 1$ and $\rho(\alpha)$ are complicated functions of $\alpha$, $C$ is an explicit constant, and $H(\rho, x)$ is a function that is periodic in $x$. 
The oscillations come from infinitely many regularly spaced saddle points that we observe when inverting the Mellin transform of the Poisson generating function of $ {\mathbb{E}}[B_{n,k}]$. Finally, in \cite{magnerspa2015} we prove a central limit theorem; that is, ${(B_{n,k} - {\mathbb{E}}[B_{n,k}])}/{\sqrt{ {\mathrm{Var}}[B_{n,k}]}} \to {\mathcal{N}}(0,1) $ where $ {\mathcal{N}}(0,1)$ represents the standard normal distribution. In order to establish the most interesting results claimed in the present paper for $H_n$ and $F_n$, the analysis sketched above does not suffice: we need to estimate the mean and the variance of the external profile \emph{beyond} the range $\alpha \in (1/\log(1/q) + \epsilon, 1/\log(1/p) - \epsilon)$; in particular, for $F_n$ and $H_n$ we need expansions at the left and right side (as $\epsilon \to 0$), respectively, of this range. Having described most of our main results, we mention an important equivalence pointed out by Pittel and Rubin \cite{pittelrubin1990}. They observed that their version of the R\'enyi process resembles the construction of a digital tree known as a PATRICIA trie\footnote{We recall that a trie is a binary digital tree, where data that are represented by binary strings are stored at leaves of the tree according to finite prefixes of the corresponding binary strings in a minimal way such that all appearing prefixes are different. A PATRICIA trie is a trie in which non-branching paths are \emph{compressed}; that is, there are no unary paths.} \cite{knuth1998acp,szpa2001Book}. In fact, the authors of \cite{pittelrubin1990} show that $H_n$ is probabilistically equivalent to the height (longest path) of a PATRICIA trie built from $n$ binary strings generated independently by a memoryless source with bias $p=1/2$ (that is, with a ``1'' generated with probability $p$; this is often called the \emph{Bernoulli model with bias $p$}); the equivalence is true more generally, for $p \geq 1/2$. 
It is easy to see that $F_n$ is equivalent to the fillup level (depth of the deepest full level), $D_n$ to the typical depth (depth of a randomly chosen leaf), and $B_{n,k}$ to the external profile of the tree (the number of leaves at level $k$; the internal profile at level $k$ is similarly defined as the number of non-leaf nodes at that level). We spell out this equivalence in the following simple claim. \begin{lemma}[Equivalence of the R\'enyi problem with those of PATRICIA tries] \label{EquivalenceLemma} Any parameter (in particular, $H_n, F_n, D_n$, and $B_{n,k}$) of the R\'enyi process with bias $p$ that is a function of the partition refinement tree is equal in distribution to the same function of a random PATRICIA trie generated by $n$ independent infinite binary strings from a memoryless source with bias $p \geq 1/2$. \end{lemma} \begin{proof} In a nutshell, we couple a random PATRICIA trie and the sequence of queries from the R\'enyi process by constructing both from the same sequence of binary strings from a memoryless source. We do this in such a way that the resulting PATRICIA trie and the partition refinement tree are isomorphic with probability $1$ (in fact, always isomorphic), so that parameters defined in terms of either tree structure are equal in distribution. More precisely, we start with $n$ independent infinite binary strings $S_1, \ldots, S_n$ generated according to a memoryless source with bias $p$, where each string corresponds, in a way to be made precise below, to a unique element of the set of labels (for simplicity, we assume that $A = [n]$, and $S_j$ is associated to the object $j$, for $j \in [n]$; intuitively, $S_j$ encodes the decision, for each query, of whether or not to include $j$). 
These induce a PATRICIA trie $T$, and our goal is to show that we can simulate a R\'enyi process using these strings, such that the corresponding tree $T_R$ is isomorphic to $T$ as a rooted plane-oriented tree (see Example~\ref{QuerySequenceIgnoreExample}). The basic idea is as follows: we maintain for each string $S_j$ an index $k_j$, initially set to $1$. Whenever the R\'enyi process demands that we make a decision about whether or not to include label $j$ in a query, we include it if and only if $S_{j,k_j} = 1$, and then increment $k_j$ by $1$. Clearly, this scheme induces the correct distribution on queries. Furthermore, the resulting partition refinement tree (ignoring inconclusive queries) is easily seen to be isomorphic to $T$. Since the trees are isomorphic, the parameters of interest are equal in each case. \end{proof} Thus, our results on these parameters for the R\'enyi problem directly lead to novel results on PATRICIA tries, and vice versa. In addition to their use as data structures, PATRICIA tries also arise as combinatorial structures which capture the behavior of various processes of interest in computer science and information theory (e.g., in leader election processes without trivial splits \cite{jansonszpa1996} and in the solution to R\'{e}nyi's problem which we study here \cite{pittelrubin1990, devroye1992}). Similarly, the version of the R\'enyi problem that allows inconclusive queries corresponds to results on tries built on $n$ binary strings from a memoryless source. We thus discuss them in the literature survey below. Now we briefly review relevant facts about PATRICIA tries and other digital trees when built over $n$ independent strings generated by a memoryless source. Profiles of tries in both the asymmetric and symmetric cases were studied extensively in \cite{park2008}. 
The expected profiles of digital search trees (another common digital tree with connections to Lempel-Ziv parsing) in both cases were analyzed in \cite{drmotaszpa2011}, and the variance for the asymmetric case was treated in \cite{kazemi2011}. Some aspects of trie and PATRICIA trie profiles (in particular, the concentration of their distributions) were studied using probabilistic methods in \cite{devroye2004, devroye2002}. The depth in PATRICIA for the symmetric model was analyzed in \cite{devroye1992,knuth1998acp} while for the asymmetric model in \cite{szpa1990}. The leading asymptotics for the PATRICIA height for the symmetric Bernoulli model was first analyzed by Pittel \cite{Pittel85} (see also \cite{szpa2001Book} for suffix trees). The two-term expression for the height of PATRICIA for the symmetric model was first presented in \cite{pittelrubin1990} as discussed above (see also \cite{devroye1992}). To our knowledge, precise asymptotics beyond the leading term for the height and fillup level have not been given in the asymmetric case for either tries or digital search trees. Finally, in \cite{magnerPhD2015,magnerspa2015}, the second two authors of the present paper presented a precise analysis of the external profile (including its mean, variance, and limiting distribution) in the asymmetric case, for the range in which the profile grows polynomially. The present work relies on this previous analysis, but the analyses for $H_n$ and $F_n$ involve a significant extension, since they rely on precise asymptotics for the external profile outside this central range. 
Regarding methodology, the basic framework (which we use here) for analysis of digital tree recurrences by applying the Poisson transform to derive a functional equation, converting this to an algebraic equation using the Mellin transform, and then inverting using the saddle point method/singularity analysis followed by depoissonization, was worked out in \cite{park2008} and followed in \cite{drmotaszpa2011}. While this basic chain is common, the challenges of applying it vary dramatically between the different digital trees, and this is the case here. As we discuss later (see (\ref{pg1}) and the surrounding text), this variation starts with the quite different forms of the Poisson functional equations, which lead to unique analytic challenges. The plan for the paper is as follows. In the next section we formulate more precisely our problem and present our main results regarding $B_{n,k}$, $H_n$, $F_n$, and $D_n$, along with sketches of the derivations. Complete proofs for $H_n$ (and a roadmap for the proof for $F_n$) are provided in Section~\ref{Proofs}. Section~\ref{sec:depo} provides some background on the depoissonization step. Finally, Section~\ref{secmiracle} details a surprising series identity which arises in the analysis of $H_n$, leading to significant complications. \section{Main Results} \label{MainResults} In this section, we formulate precisely R\'enyi's problem and present our main results. Our goal is to provide precise asymptotics for three natural parameters of the R\'enyi problem on $n$ objects with each label in a given query being included with probability $p \geq 1/2$: the number $F_n$ of queries needed before at least a single element of the bijection can be identified, the number $H_n$ needed to recover the bijection in its entirety, and the number $D_n$ needed to recover an element of the bijection chosen uniformly at random from the $n$ objects. 
If one wishes to determine the label for a particular object, these quantities correspond to the best, worst, and average case performance, respectively, of the random subset strategy proposed by R\'enyi. We recall that we can express $F_n$, $H_n$, and $D_n$ in terms of the {\it profile} $B_{n,k}$ (defined as the number of bijection elements revealed by the $k$th query) \begin{equation} F_n = \min\{k : ~ B_{n,k} > 0\} - 1, \ H_n = \max\{k : ~ B_{n,k} > 0\}, \ {\frak{P}}r[D_n = k] = \frac{ {\mathbb{E}}[B_{n,k}]}n. \label{eqrel2} \end{equation} Using the first and second moment methods, we can then obtain upper and lower bounds on $H_n$ and $F_n$ in terms of the moments of $B_{n,k}$: \begin{eqnarray} \label{eq-h} {\frak{P}}r[H_n>k] \leq \sum_{j>k} {\mathbb{E}}[B_{n,j}], \ \ \ \ \ {\frak{P}}r[H_n<k] \leq \frac{ {\mathrm{Var}}[B_{n,k}]}{ {\mathbb{E}}[B_{n,k}]^2}, \end{eqnarray} and \begin{eqnarray} \label{eq-f} {\frak{P}}r[F_n > k] \leq \frac{ {\mathrm{Var}}[B_{n,k}]}{ {\mathbb{E}}[B_{n,k}]^2}, \quad {\frak{P}}r[F_n < k] \leq {\mathbb{E}}[B_{n,k}]. \end{eqnarray} The analysis of the distribution of $D_n$ reduces simply to that of $ {\mathbb{E}}[B_{n,k}]$, as in (\ref{eqrel2}). Having reduced the analyses of $F_n, H_n$, and $D_n$ to that of the moments of $B_{n,k}$, we now explain our approach to the latter analysis, starting in Section~\ref{BasicFactsSection} with a review of the work done in \cite{magnerPhD2015}. We will then show in Section~\ref{MainResultsSection} how the present paper requires extensions far beyond \cite{magnerPhD2015,magnerspa2015} to give new results on the quantities of interest in the R\'enyi problem. \subsection{Basic facts for the analysis of $B_{n,k}$} \label{BasicFactsSection} Here we recall some facts, worked out in detail in \cite{magnerPhD2015}, which will form the starting point of the analysis in the present paper. 
In order to derive our main results, we need proper asymptotic information about $ {\mathbb{E}}[B_{n,k}]$ and $ {\mathrm{Var}}[B_{n,k}]$ at the boundaries of this region. We start by deriving a recurrence for the average profile, which we denote by $\mu_{n,k} := {\mathbb{E}}[B_{n,k}]$. It satisfies \begin{eqnarray} \label{muRecurrence} \mu_{n,k} = (p^n + q^n)\mu_{n,k} + \sum_{j=1}^{n-1} { n\choose j } p^j q^{n-j} (\mu_{j,k-1} + \mu_{n-j,k-1}) \end{eqnarray} for $n\geq 2$ and $k \geq 1$, with some initial/boundary conditions; most importantly, $\mu_{n,k} = 0$ for $k \geq n$ and any $n$. Moreover, $\mu_{n,k} \leq n$ for all $n$ and $k$ owing to the elimination of inconclusive queries. This recurrence arises from conditioning on the number $j$ of objects that are included in the first query. If $1 \leq j \leq n-1$ objects are included, then the conditional expectation is a sum of contributions from those objects that are included and those that aren't. If, on the other hand, all objects are included or all are excluded from the first potential query (which happens with probability $p^n + q^n$), then the partition element splitting constraint on the queries applies, the potential query is ignored as {\it inconclusive}, and the contribution is $\mu_{n,k}$. The tools that we use to solve this recurrence (for details see \cite{magnerPhD2015,magnerspa2015}) are similar to those of the analyses for digital trees \cite{szpa2001Book} such as tries and digital search trees (though the analytical details differ significantly). We first derive a functional equation for the Poisson transform $ {\frak{P}}o{G}_k(z) = \sum_{m \geq 0} \mu_{m,k}\frac{z^m}{m!}e^{-z}$ of $\mu_{n,k}$, which gives \[ {\frak{P}}o{G}_k(z) = {\frak{P}}o{G}_{k-1}(pz) + {\frak{P}}o{G}_{k-1}(qz) + e^{-pz}( {\frak{P}}o{G}_k - {\frak{P}}o{G}_{k-1})(qz) + e^{-qz}( {\frak{P}}o{G}_{k} - {\frak{P}}o{G}_{k-1})(pz). 
\] This we write as \begin{eqnarray} \label{pg1} {\frak{P}}o{G}_k(z) = {\frak{P}}o{G}_{k-1}(pz) + {\frak{P}}o{G}_{k-1}(qz) + {\frak{P}}o{W}_{k,G}(z), \end{eqnarray} and at this point the goal is to determine asymptotics for $ {\frak{P}}o{G}_k(z)$ as $z\to\infty$ in a cone around the positive real axis. When solving (\ref{pg1}), $ {\frak{P}}o{W}_{k,G}(z)$ significantly complicates the analysis because it has no closed-form Mellin transform (see below). Finally, depoissonization \cite{szpa2001Book} will allow us to directly transfer the asymptotic expansion for $ {\frak{P}}o{G}_k(z)$ back to one for $\mu_{n,k}$ since $\mu_{n,k}$ is well approximated by $ {\frak{P}}o{G}_k(n)$. To convert (\ref{pg1}) to an equation that is easier to handle, we use the \emph{Mellin transform} \cite{Flajolet95mellintransforms}, which, for a function $f: {\mathbb{R}} \to {\mathbb{R}}$ is given by \[ \Me{f}(s) = \int_{0}^\infty z^{s-1} f(z) \dee{z}. \] Using the Mellin transform identities and defining $T(s) = p^{-s} + q^{-s}$, we end up with an expression for the Mellin transform $\Me{G_k}(s)$ of $ {\frak{P}}o{G}_k(z)$ of the form \[ \Me{G_k}(s) = \Gamma(s+1)A_k(s)( p^{-s} + q^{-s})^k= \Gamma(s+1)A_k(s) T(s)^k, \] where $A_k(s)$ is an infinite series arising from the contributions coming from the function $ {\frak{P}}o{W}_{k,G}(z)$, and the fundamental strip of $ {\frak{P}}o{G}_k(z)$ contains $(-k-1, \infty)$. It involves unknown $\mu_{m,j} - \mu_{m,j-1}$ for various $m$ and $j$ (see \cite{magnerPhD2015,magnerknesslszpa2014}), that is: \begin{eqnarray} \label{A_kFormula} A_k(s) = \sum_{j=0}^k T(s)^{-j} \sum_{m \geq j} T(-m)(\mu_{m,j} - \mu_{m,j-1})\frac{\Gamma(m+s)}{\Gamma(s+1)\Gamma(m+1)}. \end{eqnarray} Locating and characterizing the singularities of $\Me{G_k}(s)$ then becomes important. 
In \cite{magnerspa2015} it is shown that for any $k$, $A_k(s)$ is entire, with zeros at $s \in {\mathbb{Z}} \cap [-k, -1]$, so that $\Me{G_k}(s)$ is meromorphic, with possible simple poles at the negative integers less than $-k$. The fundamental strip of $ {\frak{P}}o{G}_k(z)$ then contains $(-k-1, \infty)$. We then must asymptotically invert the Mellin transform to recover $ {\frak{P}}o{G}_k(z)$. The Mellin inversion formula for $\Me{G_k}(s)$ is given by \begin{eqnarray} \label{MellinInversionFormula} {\frak{P}}o{G}_k(z) = \frac{1}{2\pi i} \int_{\rho - i\infty}^{\rho + i\infty} z^{-s}\Me{G_k}(s)\dee{s} = \frac{1}{2\pi i} \int_{\rho - i\infty}^{\rho + i\infty} z^{-s}\Gamma(s+1)A_k(s)T(s)^k\dee{s}, \end{eqnarray} where $\rho$ is any real number inside the fundamental strip associated with $ {\frak{P}}o{G}_k(z)$. \subsection{Main results via extension of the analysis of $B_{n,k}$} \label{MainResultsSection} Having explained the relevant functional equations and the integral representation (\ref{MellinInversionFormula}) for $ {\frak{P}}o{G}_k(z)$, we now move on to describe the main results of this paper. For Theorem~\ref{HeightTheorem} and \ref{FillupTheorem} we start with a sketch of the derivation whereas the proof of Theorem~\ref{DepthCorollary} is given immediately. The complete proof of Theorem~\ref{HeightTheorem} and a roadmap for Theorem~\ref{FillupTheorem}, both for the case $p>q$, is given in Section~\ref{Proofs}. \subsubsection{Result on $H_n$} \label{HeightSketch} Our first aim is to derive two-term expansions for the typical values of $H_n$ and $F_n$. To do this for, e.g., $H_n$, we define, for $p \geq q$, \[ k_* = \log_{1/p} n + \psi_*(n), \] where $\psi_*(n) = o(\log n)$ is a function to be determined. 
We also define \begin{eqnarray} \label{PsiLDefinition} \psi_L(n) = (1-\epsilon)\psi_*(n) && k_L = \log_{1/p} n + \psi_L(n) \\ \psi_U(n) = (1+\epsilon)\psi_*(n) && k_U = \log_{1/p} n + \psi_U(n), \label{PsiUDefinition} \end{eqnarray} for arbitrarily small $\epsilon > 0$. We require that $\psi_*(n)$ be such that \begin{eqnarray} \label{PhaseTransitionExpr} {\mathbb{E}}[B_{n,k_L}] \to \infty, && {\mathbb{E}}[B_{n,k_U}] \to 0, \end{eqnarray} and a proper upper bound for $ {\mathrm{Var}}[B_{n,k_L}]$ (see Lemma~\ref{VarianceLemma}). However, in order to make the following pre-analysis more transparent we will not dwell on the variance. To determine a candidate for $\psi_*(n)$, we start with the inverse Mellin integral representation for $ {\frak{P}}o{G}_{k_*}(n)$: \begin{eqnarray} {\frak{P}}o{G}_{k_*}(n) = \frac{1}{2\pi i} \int_{\rho - i\infty}^{\rho + i\infty} J_{k_*}(n, s)\dee{s}, \label{InverseMellinIntegral} \end{eqnarray} where we define \begin{eqnarray} \nonumber J_k(n, s) &=& n^{-s}T(s)^k\Gamma(s+1)A_k(s) \\ &=& \sum_{j=0}^k n^{-s}T(s)^{k-j}\sum_{m \geq j} T(-m)(\mu_{m,j} - \mu_{m,j-1 })\frac{\Gamma(m+s)}{\Gamma(m+1)}. \label{eq-j} \end{eqnarray} Note that by depoissonization (see Section~\ref{sec:depo} and \cite{jacquetszpa1997}) we have \[ \mu_{n,k_*} = {\frak{P}}o{G}_{k_*}(n) - \frac n2 {\frak{P}}o{G}''_{k_*}(n) + O(n^{-1+\epsilon}). \] Indeed, because of the exponential decay of $A_k(s)\Gamma(s+1)$ along vertical lines, the entire integral is at most of the same order as the integrand on the real axis (we justify this more carefully in Section~\ref{HProof}). Furthermore, since the second derivative has an additional factor $s(s+1)n^{-2}$ in the integrand we will get a similar bound for $\frac n2 {\frak{P}}o{G}''_{k_*}(n)$ which is just $\rho^2/n$ times the corresponding bound for $ {\frak{P}}o{G}_{k_*}(n)$ and, thus, negligible in comparison to $ {\frak{P}}o{G}_{k_*}(n)$. 
In this proof roadmap we focus on estimating the integrand $J_{k_*}(n, \rho)$, $\rho \in {\mathbb{R}}$, as precisely as possible. Using Lemma~\ref{KnesslLemma}, we find (see (\ref{eqfirstesti}) in Section~\ref{HProof}) that the $j$th term in the representation (\ref{eq-j}) of $J_{k_*}(n,\rho)$ is of order \begin{eqnarray}\label{eqjthterm} n^{-\rho} T(\rho)^{k_*-j} p^{j^2/2 + O(j\log j)}, \end{eqnarray} where $\rho < 0$ and $T(\rho) = p^{-\rho} + q^{-\rho}$. Hence, by setting $j_0 = -\log_{1/p} T(\rho)$ we have \begin{eqnarray}\label{eqJk*bound} J_{k_*}(n, \rho) = O\left( n^{-\rho} T(\rho)^{k_*} p^{-j_0^2/2 + O(j_0\log j_0)} \right). \end{eqnarray} Next we have to choose $\rho \in {\mathbb{R}}_-$ that minimizes this upper bound. Here we distinguish between the symmetric case $p = q = 1/2$ and the case $p> q$. In the symmetric case we have $T(\rho) = 2^{\rho +1}$ and $j_0 = -\rho-1$ and, thus, \[ J_{k_*}(n, \rho) = O\left( n^{-\rho} 2^{(\rho+1)(\log_2 n + \psi_*(n))+\rho^2/2 + O(|\rho|\log |\rho|)} \right). \] Consequently by disregarding the error term $O(|\rho|\log |\rho|)$ the optimal choice of $\rho$ is $\rho = -\psi_*(n)$ which gives the upper bound \[ J_{k_*}(n, \rho) = O\left( 2^{\log_2 n - \psi_*(n)^2/2 + O(|\psi_*(n)|\log |\psi_*(n)|)} \right). \] Hence, the threshold for this upper bound is $\psi_*(n) = \sqrt{2\log_2 n}$. In particular it also follows that \[ J_{k_U}(n, \rho) = O\left( 2^{-(2\epsilon+\epsilon^2)\log_2 n + O(\sqrt{\log n} \log\log n )} \right), \] where $k_U = \log_{1/p} n + (1+\epsilon)\sqrt{2\log_2 n}$. We also note that we get the same bound if $\rho = -\psi_*(n) + O(1)$. In the case $p> q$ we have to be slightly more careful. Nevertheless we can start with the upper bound (\ref{eqJk*bound}) and obtain \[ J_{k_*}(n, \rho) = O\left( p^{ ( \rho- \log_{1/p} T(\rho))\log_{1/p}n - \psi_*(n) \log_{1/p} T(\rho) - (\log_{1/p} T(\rho))^2/2 + O(j_0\log j_0) } \right). 
\] From the representation $T(\rho) = p^{-\rho}( 1+ (p/q)^\rho)$ we obtain \[ \log_{1/p} T(\rho) = \rho + \frac{ (p/q)^\rho }{\log(1/p)} + O\left( (p/q)^{2\rho} \right). \] It is clear that we have to choose $\rho < 0$ that tends to $-\infty$ if $n\to\infty$. Hence, $\log_{1/p} T(\rho) = \rho + o(1)$ and consequently a proper choice for $\rho$ is the solution of the equation \begin{eqnarray*} \frac{\partial}{\partial \rho} \left( - \frac{ (p/q)^\rho }{\log(1/p)} \log_{1/p}n - \psi_*(n) \rho - \frac{\rho^2}2 \right) = \frac{ (p/q)^\rho \log(p/q)}{\log(1/p)} \log_{1/p}n- \psi_*(n) -\rho = 0. \end{eqnarray*} Actually this gives $\rho < - \psi_*(n)$ and, thus, \[ \rho = - \log_{p/q}\log n + O(\log\log\log n). \] With this choice the upper bound for $J_{k_*}(n, \rho)$ can be written as \begin{eqnarray} J_{k_*}(n, \rho) = O\left( p^{ (\psi_*(n)+\rho)/\log(p/q) - \psi_*(n) \rho - \frac{\rho^2}2 + O(j_0\log j_0) } \right) = O\left( p^{ - \psi_*(n) \rho - \frac{\rho^2}2 + O(j_0\log j_0) } \right). \label{MyEqn} \end{eqnarray} This implies that the threshold for this upper bound is given by \[ \psi_*(n) = -\frac \rho 2 = \frac 12 \log_{p/q} \log n + O(\log\log\log n). \] In particular, if we replace $\psi_*(n)$ by $\psi_U(n) = \frac 12(1+\epsilon) \log_{p/q} \log n$ we obtain \begin{eqnarray} \label{HeightMaxContribution0} J_{k_U}(n, \rho) = O\left( p^{ \epsilon(\log_{p/q} \log n)^2/2 + O(\log\log n \log\log \log n) } \right) \end{eqnarray} and for $\psi_L(n) = \left(1 -\epsilon\right) \frac 12 \log_{p/q} \log n$, \begin{eqnarray} \label{HeightMaxContribution} J_{k_L}(n, \rho) = O\left( p^{ -\epsilon(\log_{p/q} \log n)^2/2 + O(\log\log n \log\log\log n) } \right). \end{eqnarray} The above pre-analysis suggests asymptotic estimates for $ {\frak{P}}o{G}_k(n)$ and, thus, by depoissonization estimates for $\mu_{n,k}$, which imply a two-term expansion for $H_n$. The complete proof of this result is given in Section~\ref{HProof}. In summary, we formulate below our first main result. 
\begin{theorem}[Asymptotics for $H_n$] \label{HeightTheorem} With high probability, \begin{eqnarray*} H_n = \left\{ \begin{array}{ll} \log_{1/p} n + \frac 12\log_{p/q}\log n + o(\log\log n) & p > q \\ \log_{2} n + \sqrt{2\log_2 n} + o(\sqrt{\log n}) & p = q \end{array} \right. \end{eqnarray*} for large $n$. \end{theorem} \subsubsection{Result on $F_n$} We take a similar approach for the derivation of $F_n$, with some differences. We set \begin{eqnarray*} k_* = \log_{1/q} n + \phi_*(n) \end{eqnarray*} with \begin{eqnarray*} \phi_L(n) = (1+\epsilon)\phi_*(n), && \phi_U(n) = (1-\epsilon)\phi_*(n), \end{eqnarray*} and $k_L$ and $k_U$, respectively, defined with $\phi_L$ (respectively, $\phi_U$) in place of $\phi_*$. The derivation of an estimate for the $j$th term of $J_{k_*}(n, \rho)$, $\rho\in {\mathbb{R}}$, is similar to that in Section~\ref{HeightSketch}, except now the asymptotics of $\Gamma(\rho+1)$ play a role (this is reflected in the proof, where $\Gamma(\rho+1)$ determines the location of the saddle points of the integrand). We find that the $j$th term is at most $q^{\lambda_j(n, \rho)}$, where \begin{eqnarray} \lambda_j(n, \rho) = \rho(j - \phi_*(n)) + (j - \phi_*(n) - \log_{1/q} n)\log_{1/q}(1 + (q/p)^{\rho}) - \rho\log_{1/q} \rho + O(\rho). \label{OptimalNu} \end{eqnarray} Optimizing over $j$ gives $j = 0$. The behavior with respect to $\rho$ depends on whether or not $p=q$, because $\log_{1/q}(1 + (q/p)^\rho) = 1$ when $p=q$ and is dependent on $\rho$ otherwise. Taking this into account and minimizing over all $\rho$ gives an optimal value of \begin{eqnarray*} \rho = \left\{\begin{array}{ll} 2^{-\phi_*(n) - 1/\log 2} & p=q=1/2, \\ \log_{p/q}\log n & p > 1/2. \end{array} \right. \end{eqnarray*} Note that this corresponds to the real part of the saddle points in the proof. 
Plugging this into (\ref{OptimalNu}), setting the resulting expression equal to $0$, and solving for $\phi_*(n)$ gives \begin{eqnarray*} \phi_*(n) = \left\{\begin{array}{ll} -\log_2\log n + O(1) & p=q=1/2 \\ -\log_{1/q}\log\log n & p > 1/2. \end{array} \right. \end{eqnarray*} This heuristic derivation suggests that the following theorem holds. More details are given in Section~\ref{FProof}. \begin{theorem}[Asymptotics for $F_n$] \label{FillupTheorem} With high probability, \begin{eqnarray*} F_n = \left\{\begin{array}{ll} \log_{1/q} n - \log_{1/q}\log\log n + o(\log\log\log n) & p > q \\ \log_{2} n - \log_2\log n + o(\log\log n) & p = q \end{array} \right. \end{eqnarray*} for large $n$. \end{theorem} \subsection{Result on $D_n$} We move to our results concerning $D_n$. To state them, we first need to observe that there is a natural way to define the sequence $\{D_n\}_{n\geq 0}$ on a single probability space, so that we may ask whether or not $D_n$, properly normalized, converges almost surely, and to what limiting value. This common space is defined by appealing to the correspondence between the sequence of R\'enyi problem queries and the growth of a random PATRICIA trie. For each $n \geq 0$, we define a tree $T_n$ which is a PATRICIA trie constructed on $n$ strings (equivalently, a terminating sequence of R\'enyi queries recovering a bijection between two sets of $n$ elements): $T_0$ is an empty tree, and $T_{n+1}$ is constructed from $T_n$ by generating an independent string of i.i.d. $ {\mathfrak{B}}ernoulli(p)$ random variables and inserting this string into $T_n$. Then, for each $n$, $D_n$ is the depth of a leaf chosen uniformly at random (and independent of everything else) from the leaves of $T_{n}$. With this construction in mind, we have the following result about the convergence of $D_n$. Its proof combines known facts about the profile with the new ones proved here, as well as a proof technique that was used before in, e.g., \cite{Pittel85}. 
\begin{theorem}[Asymptotics of $D_n$] \label{DepthCorollary} For $p > 1/2$, the normalized depth $D_n/\log n$ converges in probability to $1/h(p)$ where $h(p) := -p\log p - q\log q$ is the entropy of a $ {\mathfrak{B}}ernoulli(p)$ random variable, but not almost surely. In fact, \begin{eqnarray} \label{D_nNoAlmostSure} \liminf_{n\to\infty} D_n/\log n = 1/\log(1/q), && \limsup_{n\to\infty} D_n/\log n = 1/\log(1/p) \end{eqnarray} almost surely. \end{theorem} \begin{proof} The fact that $D_n/\log n$ converges in probability to $1/h(p)$ follows directly from the central limit theorem for $D_n$ given in \cite{szpa2001Book}. Next we show that (\ref{D_nNoAlmostSure}) holds. Clearly $F_n \le D_n \le H_n$. Now let us consider the following sequences of events: $A_n$ is the event that $D_n = F_n+1$, and $A'_n$ is the event that $D_n = H_n$. We note that all elements of the sequences are independent, and $ {\frak{P}}r[A_n] \ge 1/n$, $ {\frak{P}}r[A'_n] \geq 1/n$. This implies that $\sum_{n=1}^\infty {\frak{P}}r[A_n] = \sum_{n=1}^\infty {\frak{P}}r[A'_n] = \infty$, so that the Borel-Cantelli lemma tells us that both $A_n$ and $A'_n$ occur infinitely often almost surely. In the next step we show that, almost surely, $F_n/\log n \to 1/\log(1/q)$ and $H_n/\log n \to 1/\log(1/p)$. Then (\ref{D_nNoAlmostSure}) is proved. We cannot apply the Borel-Cantelli lemmas directly, because the relevant sums do not converge. Instead, we apply the following trick: we observe that both $(F_n)$ and $(H_n)$ are non-decreasing sequences. Next, we show that, on some appropriately chosen subsequence, both of these sequences, when divided by $\log n$, converge almost surely to their respective limits. Combining this with the observed monotonicity yields the claimed almost sure convergence, and, hence, the equalities in (\ref{D_nNoAlmostSure}). We illustrate this idea more precisely for $H_n$. 
By our analysis above, we know that \[ {\frak{P}}r[|H_n/\log n - 1/\log(1/p)| > \epsilon] = O(e^{-\Theta((\log\log n)^2)}). \] Then we fix $t$, and we define $n_{r,t} = 2^{t^2 2^{2r}}$. On this subsequence, by the probability bound just stated, we can apply the Borel-Cantelli lemma to conclude that $H_{n_{r,t}}/\log(n_{r,t}) \to 1/\log(1/p)$ almost surely. Moreover, for every $n$, we can choose $r$ such that $n_{r,t} \leq n \leq n_{r,t+1}$. Then \[ H_n/\log n \leq H_{n_{r,t+1}}/\log n_{r,t}, \] which implies \[ \limsup_{n\to\infty} \frac{H_n}{\log n} \leq \limsup_{r\to\infty} \frac{H_{n_{r,t+1}}}{\log n_{r,t+1}} \frac{\log n_{r,t+1}}{\log n_{r,t}} = \frac{1}{\log(1/p)} \cdot \frac{(t+1)^2}{t^2}. \] Taking $t \to \infty$, this becomes $1/\log(1/p)$, as desired. The argument for the $\liminf$ is similar, and this establishes the almost sure convergence of $H_n$. The derivation is entirely similar for $F_n$. \end{proof} \section{Proof of Theorems~\ref{HeightTheorem} and \ref{FillupTheorem}} \label{Proofs} We give a detailed proof of Theorem~\ref{HeightTheorem} and indicate the main lines of the proof of Theorem~\ref{FillupTheorem}. We also concentrate just on the case $p>q$. The proof of the symmetric case can be done by the same techniques (properly adapted) but it just reproves the result by Pittel and Rubin \cite{pittelrubin1990}. \subsection{Proof of Theorem~\ref{HeightTheorem}} \label{HProof} \subsubsection{A-Priori Bounds for $\mu_{n,k}$} For the analysis of the profile around the height level, we need precise information about $\mu_{n,k}$ with $n\to\infty$ when $k$ is close to $n$. This is captured in the following lemma, which first appeared in a similar form in \cite{magnerknesslszpa2014}. We consider $\mu_{n,k}$ where $k$ is close to $n$, so we set $k = n-\ell$ and represent it as \[ \mu_{n,k} = \mu_{n,n-\ell} = n!
C_*(p) p^{(n-\ell)(n-\ell+1)/2}q^{n-\ell} \xi_{\ell}(n), \] where \[ C_*(p) = \prod_{j=2}^\infty (1-p^j - q^j)^{-1} \cdot (1 + (q/p)^{j-2}), \] $\xi_1(1) = 1/C_*(p)$ and for $n > \ell \ge 1$ \begin{equation}\label{eqxirec} \xi_{\ell}(n)(1-p^n-q^n) = \sum_{J=1}^\ell \frac{\xi_{\ell+1-J}(n-J)}{J!}q^{-1}p^{\ell-n}(p^{n-J}q^{J} + p^{J}q^{n-J}). \end{equation} Note that $\xi_\ell(n) = 0$ for $n\le \ell$. The above formulas were first derived in \cite{magnerknesslszpa2014}. \begin{lemma}[Asymptotics for $\mu_{n,k}$, $k\to\infty$ and $n$ near $k$] \label{KnesslLemma} \label{KNESSLLEMMA} \label{mu_mjUpperBoundLemma} Let $p \geq q$. \par\noindent {\rm (i)} {\rm Precise estimate: } For every fixed $\ell \ge 1$ and $n\to\infty$ \begin{eqnarray*} \mu_{n,n-\ell} \sim n! C_*(p) p^{(n-\ell)^2/2 + (n-\ell)/2}q^{n-\ell} \xi_{\ell}, \end{eqnarray*} where the sequence $\xi_\ell$, $\ell\ge 1$ satisfies the recurrence \begin{equation}\label{eqexrec2} \xi_\ell = q^{-1}p^{\ell} \sum_{J=1}^\ell \frac{\xi_{\ell+1-J}}{J!} (q/p)^{J} \end{equation} with $\xi_1 = 1$. Furthermore we have (for some positive constant $C$) \begin{equation}\label{eqxiapprox} |\xi_{\ell+1-J}(n-J)-\xi_{\ell+1-J}| \le C(p^{n-\ell-1} + (q/p)^{n-\ell-1})/(\ell-J)!, \end{equation} \par\noindent {\rm (ii)} {\rm Upper bound: } We have $\xi_\ell(n) \le C_1/(\ell-1)!$ for some constant $C_1$ and, thus, for $1\le k< n$ (and some constant $C$) \begin{eqnarray} \mu_{n,k} \leq C\frac{n!}{(n-k-1)!}p^{(k^2 + k)/2 }q^{k}. \label{mu_mjUpperBound} \end{eqnarray} \end{lemma} \begin{proof} From the recurrence (\ref{eqxirec}) it follows easily that for each $\ell\ge 1$ the limit $ \xi_\ell = \lim_{n\to\infty} \xi_\ell(n)$ exists by (\ref{mu_mjUpperBound}), and in particular for $\ell =1$ we have $\xi_1 = 1$. Clearly these limits satisfy the recurrence (\ref{eqexrec2}).
Next we show by induction a uniform upper bound of the form $\xi_\ell(n) \le C_1/(\ell-1)!$. The induction step for $n>\ell > \ell_1$ runs as follows (where $C_1$ and $\ell_1$ are appropriately chosen such that the upper bound is true for $\ell \le \ell_1$ and that $2/(q \ell_1(1-p^{\ell_1}-q^{\ell_1})) \le 1$): \begin{eqnarray*} \xi_\ell(n) &\le& \frac{C_1}{1-p^n-q^n} \left( \sum_{J=1}^{\ell} \frac{p^{\ell-J}q^{J-1}}{J!(\ell-J)!} + \sum_{J=1}^{\ell} \frac{p^{\ell+J-n}q^{n-J-1}}{J!(\ell-J)!} \right) \\ &\le& \frac{C_1}{\ell!(1-p^n-q^n)} \left( \frac 1{q} \sum_{J=0}^{\ell} {\ell \choose J} p^{\ell-J}q^{J} + \frac{(q/p)^{n-\ell}}{q} \sum_{J=0}^{\ell} {\ell\choose J} p^{J}q^{\ell-J} \right) \\ &\le& \frac{C_1}{(\ell-1)!} \frac 1{\ell_1(1-p^{\ell_1}-q^{\ell_1})} \frac 2q \le \frac{C_1}{(\ell-1)!}. \end{eqnarray*} In a similar way we obtain the approximation estimate (\ref{eqxiapprox}). We give a full proof in Section~\ref{KnesslLemmaProof}. \end{proof} \subsubsection{Upper bound on $H_n$} Now we set \begin{eqnarray}\label{eqkUdef} k = k_U = \log_{1/p} n + \psi_U(n) = \log_{1/p} n + \frac 12\left(1 +\epsilon\right)\log_{p/q}\log n \end{eqnarray} just as in (\ref{PsiUDefinition}). We will first estimate the value of $J_k(n, s)$ (which is defined in (\ref{eq-j})) for $s = \rho' = - 2\psi(n) + O(1)\in {\mathbb{Z}}^{-} - 1/2$ (i.e., the set $\{ -3/2, -5/2, \dots \}$), as hinted at in Section~\ref{MainResults}. \begin{lemma}\label{LeJest} Suppose that $p>q$, that $\epsilon> 0$, that $k_U$ is given by (\ref{eqkUdef}), and that $\rho' = \lfloor \rho \rfloor + \frac 12$, where $\rho = - \log_{p/q} \log n + O(\log\log \log n)$ is the solution of the equation \[ \frac{ (p/q)^\rho \log(p/q)}{\log(1/p)} \log_{1/p}n+ \psi_U(n) +\rho = 0. \] Then we have for $k \ge k_U$ \begin{eqnarray} J_{k}(n, \rho') = O\left( T(\rho')^{k-k_U} p^{ \epsilon (\log_{p/q} \log n)^2/2 + O(\log\log n \cdot \log\log \log n) } \right).
\label{anm1} \end{eqnarray} \end{lemma} \begin{proof} First we observe that the assumption $\rho' \in {\mathbb{Z}}^{-} - 1/2$ with $|\rho'| \to \infty$ assures that for all $m\ge 0$ we have $\left| \Gamma(m+\rho')/\Gamma(m+1) \right| \leq 1$. Next by (\ref{mu_mjUpperBound}) of Lemma~\ref{mu_mjUpperBoundLemma} we have $\mu_{m,j} = O\left( m^{j+1} p^{j^2/2 + O(j)}\right)$ which implies that \[ \sum_{m\ge j} T(-m) \mu_{m,j} = O\left( p^{j^2/2 + O(j\log j)} \right). \] Hence, the $j$th term in the representation (\ref{eq-j}) of $J_k(n, \rho')$ can be estimated by \begin{eqnarray} && \left| n^{-\rho'}T(\rho')^{k-j} \sum_{m \geq j} T(-m)(\mu_{m,j} - \mu_{m,j-1}) \frac{\Gamma(m+\rho')}{\Gamma(m+1)}\right| \label{eqfirstesti}\\ &&\le n^{-\rho'}T(\rho')^{k-j} \sum_{m \geq j} T(-m)(\mu_{m,j} +\mu_{m,j-1}) = O\left(n^{-\rho'}T(\rho')^{k-j} p^{j^2/2 + O(j\log j)} \right). \nonumber \end{eqnarray} Thus, we have shown (\ref{eqJk*bound}) which implies (\ref{anm1}) for $k = k_U$ (see (\ref{HeightMaxContribution0})). However, it is easy to extend it to larger $k$ (since equation (\ref{MyEqn}) holds for generic $k_* = k$ and the given choice of $\rho$). Actually we get uniformly for $k\ge k_U$ \[ J_k(n, \rho') = O\left( T(\rho')^{k-k_U} p^{ \epsilon (\log_{p/q} \log n)^2/2 + O(\log\log n \log\log \log n) } \right) \] for large $n$. \end{proof} Our next goal is to evaluate the integral (\ref{InverseMellinIntegral}) and to obtain a bound for $\mu_{n,k}$. \begin{lemma}\label{LeJest2} Suppose that $p>q$, that $\epsilon> 0$, and that $k_U$ and $\rho'$ are given as in Lemma~\ref{LeJest}. Then we have (for some $\delta> 0$) \begin{equation} \mu_{n,k} = O\left( T(\rho')^{k-k_U} p^{ \epsilon (\log_{p/q} \log n)^2/2 + O((\log\log n)^{1-\delta}) } \right) + O(n^{-1+\epsilon}) \label{mu_nkUBound} \end{equation} uniformly for $k \ge k_U$. 
\end{lemma} \begin{proof} Letting $\mathcal{C}$ denote the vertical line $\Re(s) = \rho'$, we evaluate the integral (\ref{InverseMellinIntegral}) by splitting it into an inner region $\mathcal{C}^I$ and outer tails $\mathcal{C}^O$: \[ \mathcal{C}^I = \{\rho' + it :~ |t| \leq e^{(\log\log n)^{2-\delta}}\}, \quad \mathcal{C}^O = \{\rho' + it :~ |t| > e^{(\log\log n)^{2-\delta}}\}, \] where $0<\delta < 1$ is some fixed real number. The inner region we evaluate by showing that it is of the same order as the integrand on the real axis, and the outer tails are shown to be negligible by the exponential decay of the $\Gamma$ function. It is easily checked that $|n^{-s}T(s)^{k-j}\Gamma(m+s)| \leq n^{-\rho'}T(\rho')^{k-j} |\Gamma(m+\rho')|$ when $\Re(s) = \rho'$ (and any value for $\Im(s)$). Thus, \begin{eqnarray*} |J_k(n, s)| \leq T(\rho')^{k-k_U} \sum_{j=0}^k n^{-\rho'}T(\rho')^{k_U-j}\sum_{m\geq j} T(-m)|\mu_{m,j} - \mu_{m,j-1}| \frac{|\Gamma(m+\rho')|}{\Gamma(m+1)}, \end{eqnarray*} which can be upper bounded as in the proof of Lemma~\ref{LeJest}. Multiplying by the length of the contour, we find \begin{eqnarray*} \left|\int_{\mathcal{C}^I} J_k(n, s)\dee{s}\right| = O\left( T(\rho')^{k-k_U} p^{ \epsilon (\log_{p/q} \log n)^2/2 + O((\log\log n)^{2-\delta}) } \right). \end{eqnarray*} We use the following standard bound on the $\Gamma$ function: for $s = \rho' + it$, provided that $|\mathrm{Arg}(s)|$ is less than and bounded away from $\pi$ and $|s|$ is sufficiently large, we have \[ |\Gamma(s)| \leq C|t|^{\rho' - 1/2} e^{-\pi|t|/2}.
\] This is applicable on $\mathcal{C}^O$, and we again use the fact that $|T(s)| \leq T(\rho')$ and $|\mu_{m,j} - \mu_{m,j-1}| \leq m$, which yields an upper bound of the form \begin{eqnarray*} \left| \sum_{m \geq j} T(-m)(\mu_{m,j} - \mu_{m,j-1}) \frac{\Gamma(m+s)}{\Gamma(m+1)} \right| & =& O\left( \sum_{m \geq j} T(-m) m \frac{|t|^{m+\rho' - 1/2}e^{-\pi|t|/2}}{\Gamma(m+1)} \right) \\ &=& O\left( p|t|^{\rho' + 1/2}e^{-\pi|t|/2} e^{p|t|} \right), \end{eqnarray*} where we have used the inequality \begin{eqnarray*} |t|^{\rho' - 1/2}e^{-\pi|t|/2} \sum_{m \geq j} \frac{m(p|t|)^{m}}{m!} \leq p|t|^{\rho' + 1/2}e^{-\pi|t|/2} e^{p|t|} = e^{-\Theta(|t|)}, \end{eqnarray*} uniformly in $j$, by our choice of $|t|$. Furthermore, since $T(\rho')< 1$ we have \[ \sum_{j=0}^k n^{-\rho'}T(\rho')^{k-j} = O(n^{-\rho'}) = e^{O(\log n \log\log n)}. \] Hence, integrating this on $\mathcal{C}^O$ gives \begin{eqnarray*} \left| \int_{\mathcal{C}^O} J_k(n, s) \dee{s} \right| &=& O\left( T(\rho')^{k-k_U} e^{O(\log n \log \log n)} e^{-\Theta( e^{(\log\log n)^{2-\delta}})} \right)\\ & =& O\left( T(\rho')^{k-k_U} e^{-\Theta( e^{(\log\log n)^{2-\delta}})} \right). \end{eqnarray*} Adding these together gives \begin{eqnarray*} {\frak{P}}o{G}_{k}(n) &\leq &\left| \int_{\mathcal{C}^I} J_{k}(n, s)\dee{s} + \int_{\mathcal{C}^O} J_{k}(n, s)\dee{s} \right| \\ & = & O\left( T(\rho')^{k-k_U} p^{ \epsilon (\log_{p/q} \log n)^2/2 + O((\log\log n)^{2-\delta}) } \right). \end{eqnarray*} Similarly we get a bound for $ {\frak{P}}o{G''}_{k}(n)$: \begin{eqnarray*} {\frak{P}}o{G''}_{k}(n) = O\left( {\rho'}^2 T(\rho')^{k-k_U} p^{ \epsilon (\log_{p/q} \log n)^2/2 + O((\log\log n)^{2-\delta}) } \right). \end{eqnarray*} Hence by depoissonization (see (\ref{eqDep3.3}) from Section~\ref{sec:depo}) we get \[ \mu_{n,k} = O\left( T(\rho')^{k-k_U} p^{ \epsilon (\log_{p/q} \log n)^2/2 + O((\log\log n)^{2-\delta}) } \right) + O(n^{-1+\epsilon}) \] as needed.
\end{proof} Our original goal was to bound the tail $ {\frak{P}}r[H_n > k_U]$ by the following sum which we split into two parts: \[ {\frak{P}}r[H_n > k_U] \le \sum_{k \geq k_U} \mu_{n,k} = \sum_{k = k_U}^{\ceil{(\log n)^2}} \mu_{n,k} + \sum_{k=\ceil{(\log n)^2} + 1}^n \mu_{n,k}. \] The initial part can be bounded using (\ref{mu_nkUBound}), and the final part we handle using (\ref{mu_mjUpperBound}) in Lemma~\ref{mu_mjUpperBoundLemma}. Indeed, since $T(\rho') < 1$ the first sum can be bounded by \[ \sum_{k=k_U}^{\ceil{(\log n)^2}}\mu_{n,k} \leq e^{-\Theta(\epsilon(\log\log n)^2)}. \] The second sum is at most \[ \sum_{k=\ceil{(\log n)^2}+1}^{n} \mu_{n,k} \leq ne^{-\Theta((\log n)^4)} = e^{-\Theta((\log n)^4)}. \] Adding these upper bounds together shows that $ {\frak{P}}r[H_n > k_U] = e^{-\Theta(\epsilon(\log\log n)^2)} \to 0, $ as desired. \subsubsection{Upper bound on the variance of the profile} We consider now the case \begin{equation}\label{eqkLdef} k = k_L = \log_{1/p} n + \psi_L(n) = \log_{1/p} n + \psi(n), \qquad \psi(n)=\frac 12 \left(1 -\epsilon \right) \log_{p/q}\log n \end{equation} and start with an upper bound for the variance of the profile $ {\mathrm{Var}}[B_{n,k}]$. \begin{lemma}\label{VarianceLemma} Suppose that $p>q$, that $\epsilon> 0$, and that $k_L$ is given by (\ref{eqkLdef}). Then we have \begin{eqnarray} {\mathrm{Var}}[B_{n,k}] &=& O\left( p^{-\epsilon (\log_{p/q} \log n)^2/2 + O((\log\log n)^{2-\delta}) } \right). \label{VarB_nkUBound} \end{eqnarray} \end{lemma} \begin{proof} The proof technique here is the same as for the proof of the upper bound on $\mu_{n,k}$.
Our goal is to upper bound the expression \[ {\frak{P}}o{V}_k(n) = \sum_{m\ge 0} \mathbb{E}[B_{m,k}^2] \frac{n^m}{m!}e^{-n} - \tilde G_k(n)^2 = \frac{1}{2\pi i} \int_{\rho' - i\infty}^{\rho' + i\infty} J^{(V)}_k(n, s)\dee{s}, \] where \begin{eqnarray*} J^{(V)}_k(n, s) = n^{-s}T(s)^k\Gamma(s+1)B_k(s), \end{eqnarray*} and \begin{eqnarray*} B_k(s) = 1 - (s+1) 2^{-(s+2)} + \sum_{j=1}^k T(s)^{-j} \frac{\Me{W_{j,V}}(s)}{\Gamma(s+1)}, \end{eqnarray*} with \cite{magnerPhD2015} \begin{eqnarray*} \Me{W_{j,V}}(s) &=& \sum_{m \geq j} \frac{\Gamma(m+s)}{m!} \left[ \right. \left.T(-m) ( c_{m,j} - c_{m,j-1} + \mu_{m,j} - \mu_{m,j-1}) \right.\\ &&\left. + T(s)2^{-(s+m)} \sum_{\ell=0}^m \mu_{\ell,j-1}\mu_{m-\ell,j-1} \right. \\ &&\left.+ 2\sum_{\ell=0}^m \mu_{\ell,j-1} \mu_{m-\ell,j-1} p^\ell q^{m-\ell} - 2^{-(m+s)} \sum_{\ell=0}^m \mu_{\ell,j}\mu_{m-\ell,j} \right] . \end{eqnarray*} As above we need a bound on the moments of $B_{m,j}$ for $m$ sufficiently close to $j$: for $\mu_{m,j} = {\mathbb{E}}[B_{m,j}]$, this is (\ref{mu_mjUpperBound}) in Lemma~\ref{mu_mjUpperBoundLemma}. It turns out that $c_{m,j} = {\mathbb{E}}[B_{m,j}(B_{m,j}-1)]$ satisfies a similar recurrence as $\mu_{m,j}$ (see \cite{magnerspa2015}) and also a similar inequality: for $j\to\infty$ and $m> j$, \begin{eqnarray*} c_{m,j} \leq \frac{m!}{(m-j-1)!} p^{j^2/2 + O(j)}. \end{eqnarray*} The proof is by induction and follows along the same lines as that of the upper bound in Lemma~\ref{mu_mjUpperBoundLemma}. Using this, we can upper bound the inverse Mellin integral as in the upper bound for $\tilde G_k(n)$. In particular it follows that \[ \tilde V_{k_L}(n) = O\left( p^{ -\epsilon(\log_{p/q} \log n)^2/2 + O((\log\log n)^{2-\delta}) } \right) \] and similarly we have \[ \tilde V_{k_L}''(n) = O\left( {\rho'}^2 n^{-2} p^{ -\epsilon(\log_{p/q} \log n)^2/2 + O((\log\log n)^{2-\delta}) } \right), \] where $\rho'=-\log_{p/q}\log n + O(\log\log\log n)$.
With the help of depoissonization, see (\ref{eqVarest}), we thus obtain (\ref{VarB_nkUBound}). \end{proof} \subsubsection{ Lower bound on $H_n$ } \label{H_nLowerBoundSection} The most difficult part of the proof of Theorem~\ref{HeightTheorem} is to prove a lower bound for the expected profile. \begin{lemma}\label{LemunkLbound} Suppose that $p>q$, that $\epsilon> 0$, and that $k_L$ is given by (\ref{eqkLdef}). Then we have \begin{eqnarray} \mu_{n,k_L} = \Omega\left( p^{ -\epsilon(\log_{p/q} \log n)^2/2 + O(\log\log n \log\log \log n) } \right). \label{eqEB_nkLBound} \end{eqnarray} \end{lemma} By combining Lemma~\ref{VarianceLemma} and Lemma~\ref{LemunkLbound} it immediately follows that \[ {\frak{P}}r[H_n < k_L] \le \frac{ {\mathrm{Var}}[B_{n,k_L}]}{ \mu_{n,k_L}^2} \to 0 \] which proves the lower bound on $H_n$. The plan to prove Lemma~\ref{LemunkLbound} is as follows: we evaluate the inverse Mellin integral exactly by a residue computation. This results in a nested summation, which we simplify using the binomial theorem and the series of the exponential function. From this representation we will then detect several terms that contribute to the leading term in the asymptotic expansion. \begin{lemma}\label{Lelower-1} Suppose that $\rho < 0$ but not an integer. Then we have \begin{eqnarray} {\frak{P}}o{G}_k(n) = \sum_{j=0}^k \sum_{m \ge j} \kappa_{m,j}\left( \mu_{m,j}-\mu_{m,j-1}\right), \label{G_kExplicitFormHLowerBound} \end{eqnarray} where \begin{eqnarray}\label{eqkapparep} \kappa_{m,j} = \frac{T(-m)n^m}{m!} \sum_{\ell= (-\ceil{m+\rho} + 1) {~\lor~} 0}^{\infty} \frac{(-n)^{\ell}}{\ell!} T(-m-\ell)^{k-j} \end{eqnarray} and $x {~\lor~} y$ denotes the maximum of $x$ and $y$. 
\end{lemma} \begin{proof} By shifting the line of integration and collecting residues we have \[ \frac 1{2\pi i} \int_{\rho-i\infty}^{\rho + i\infty} n^{-s}T(s)^{k-j} \Gamma(m+s)\, ds = \sum_{\ell \ge \max\{0, - m - \rho\} } \frac{n^{m + \ell} (-1)^\ell }{\ell!} T(-\ell- m)^{k-j}, \] where the remaining integral after shifting by any finite amount becomes $0$ in the limit as a result of the superexponential decay of the $\Gamma$ function on the points $1/2 - j$ for positive integer $j$. Hence the lemma follows. \end{proof} We now choose $\rho$ as $\rho = -j^*-1$ and set $j_0 = \lfloor j^* + \frac 12 \rfloor$, where $j^*$ is the root of the equation \begin{eqnarray}\label{eqj0def} (q/p)^{j^*}(k_L-j^*) = \frac {\log(1/p)}{\log(p/q)}(j^* - \psi_L(n)), \end{eqnarray} where $\psi_L(n) = \frac 12 \left( 1 - \epsilon\right) \log_{p/q}\log n$. In particular, let us define \begin{eqnarray} \overline r_0 := (q/p)^{j_0} (k_L - j_0), && \overline r_1 := \frac{\log(1/p)}{\log(p/q)}(j_0 - \psi_L(n)). \end{eqnarray} Then it follows that \begin{eqnarray}\label{eqj0def-app1} \sqrt{q/p} \overline r_1 \le \overline r_0 \le \sqrt{p/q} \overline r_1. \end{eqnarray} If $j> j_0$ and $m\ge j$ then we certainly have $(-\ceil{m+\rho} + 1) {~\lor~} 0 = 0$, whereas for $j=j_0$ we have $(-\ceil{j_0+\rho} + 1) {~\lor~} 0 = 1$. Asymptotically we have $j^* = \log_{p/q}\log n -\log_{p/q}\log\log n + O(1)$. Hence we also have $j_0 = \log_{p/q}\log n -\log_{p/q}\log\log n + O(1)$ and $\rho = - \log_{p/q}\log n +\log_{p/q}\log\log n + O(1)$. In what follows we will encounter several different asymptotic behaviors.
In particular we will show that \begin{eqnarray} \tilde G_k(n) &=& D(p) C_*(p) p^{j_0(j_0+1)/2} q^{j_0-1} n^{j_0} p^{j_0(k-j_0)} e^{\overline r_0} {\frak{P}}hi\left( \frac{\overline r_1 - \overline r_0}{\sqrt{\overline r_0}} \right) \label{eqasymrel}\\ &+& C_*(p) p^{j_0(j_0+1)/2} q^{j_0-1} n^{j_0} p^{j_0(k-j_0)} \frac{{\overline r_0}^{\overline r_1}}{\Gamma(\overline r_1+1)} \left( C(p, \overline r_0/\overline r_1, \langle \overline r_1 \rangle) + o(1) \right) \nonumber \end{eqnarray} where $\langle x \rangle = x - \lfloor x \rfloor$ denotes the fractional part of a real number $x$, and \begin{eqnarray} \label{eqDefAp} D(p) = \sum_{L,M \ge 0} \xi_{L+1}\frac{(-1)^{M}}{M!} p^{((L+M)^2 + L-M)/2} q^{-L-M} \end{eqnarray} and $C(p,u,v)$ is a certain function in $p,u,v$ that is strictly positive (see below). Here and elsewhere, $ {\frak{P}}hi$ denotes the distribution function of the normal distribution. We have ${{\overline r_0}^{\overline r_1}}/{\Gamma(\overline r_1+1)} = O(e^{\overline r_0}/\sqrt{\overline r_0} )$. Thus, the first term seems to be the asymptotically leading one. However, it turns out that $D(p) \equiv 0$ (as we will prove in Section~\ref{secmiracle}) so it follows that \begin{eqnarray} \label{eqpositive} \tilde G_k(n) \ge C(p) p^{j_0(j_0+1)/2} q^{j_0-1} n^{j_0} p^{j_0(k-j_0)} \frac{{\overline r_0}^{\overline r_1}}{\Gamma(\overline r_1+1)} \end{eqnarray} for some constant $C(p) > 0$. Note also that this lower bound implies (\ref{eqEB_nkLBound}) since by definition $\sqrt{q/p}\, \overline r_0 \le \overline r_1 \le \sqrt{p/q}\, \overline r_0$ so that $e^{-\overline r_0} {{\overline r_0}^{\overline r_1}}/{\Gamma(\overline r_1+1)} = e^{-O(\log\log n)}$. The calculations for proving (\ref{eqasymrel}) are quite involved, in particular the proof of positivity of $C(p,u,v)$. So we will only present a part of the calculations and refer for a full proof to the appendix.
In what follows we will have some error terms that are smaller by a factor $p^{j_0}$ or $(q/p)^{j_0}$ compared to the asymptotic leading term. However, it is easy to check that for $\frac 12 < p < 1$ we have $p^{j_0} = o(E)$ and $(q/p)^{j_0}= o(E)$, where $E := e^{-\overline r_0} {{\overline r_0}^{\overline r_1}}/{\Gamma(\overline r_1+1)}$ so that they will not influence the asymptotic leading term. For $j\le j_0$ and $j\le m\le j_0$ we have \begin{eqnarray*} \kappa_{m,j} = \frac{T(-m)n^m}{m!} \sum_{r=0}^{k-j} {k-j \choose r} p^{m(k-j-r)} q^{mr} \left( e^{-np^{k-j-r}q^r} - \sum_{\ell\le j_0-m} \frac{(-n)^\ell}{\ell!} (p^{k-j-r}q^{r})^\ell \right) \end{eqnarray*} and otherwise \begin{eqnarray*} \kappa_{m,j} = \frac{T(-m)n^m}{m!} \sum_{r=0}^{k-j_0} {k-j \choose r} p^{m(k-j-r)} q^{mr} e^{-np^{k-j-r}q^r}. \end{eqnarray*} In view of the above discussion we can thus replace the term $T(-m)$ (in $\kappa_{m,j}$) by $p^m$; the resulting sum will be denoted by $\overline \kappa_{m,j}$. We can also replace $\mu_{m,j} - \mu_{m,j-1}$ by \[ \overline \nu_{m,j} := - C_*(p) m! p^{j(j-1)/2} q^{j-1} \xi_{m-j+1} . \] By a careful look we thus obtain \begin{equation} {\frak{P}}o{G}_k(n) = \sum_{j=0}^k \sum_{m \ge j} \overline\kappa_{m,j} \overline \nu_{m,j} + O\left( n^{j_0} T(-j_0)^{k-j_0}p^{j_0(j_0+1)/2}q^{j_0} \left( p^{j_0} + (q/p)^{j_0} \right) \right). \label{G_kExplicitFormHLowerBound-2} \end{equation} In order to analyze the sum representation (\ref{G_kExplicitFormHLowerBound-2}) we split it into several parts: \[ T_1 := \sum_{j> j_0} \sum_{m\ge j} \overline\kappa_{m,j} \overline \nu_{m,j}, \quad T_2 := \sum_{j\le j_0} \sum_{m > j_0} \overline \kappa_{m,j} \overline \nu_{m,j}, \quad T_3 := \sum_{j\le j_0} \sum_{m=j}^{j_0} \overline \kappa_{m,j} \overline \nu_{m,j}. \] Note that the exponential function $e^{-np^{k-j-r}q^r} = e^{-(q/p)^{r-r_1(j)}}$ behaves completely differently for $r\le r_1(j)$ and for $r> r_1(j)$ where $r_1(j) = (j- \psi(n))\frac{\log(1/p)}{\log(p/q)}$.
Hence it is convenient to split $T_3$ into three parts $T_{30} + T_{31}+T_{32}$, where the $T_{30}$ and $T_{31}$ correspond to the terms with $r\le r_1(j)$ and $T_{32}$ for those with $r> r_1(j)$. $T_{30}$ involves the exponential function $e^{-np^{k-j-r}q^r}$ whereas $T_{31}$ takes care of the polynomial sum $\sum_{\ell\le j_0-m} \frac{(-n)^\ell}{\ell!} (p^{k-j-r}q^{r})^\ell$. For notational convenience we set \begin{equation}\label{eqF0-0} F_0 := p^{j_0(j_0+1)/2} q^{j_0-1} n^{j_0} p^{j_0(k-j_0)} \frac{ \overline r_0^{\overline r_1}} {\Gamma(\overline r_1 +1)}. \end{equation} We recall that \[ T_1 = - C_*(p) \sum_{j> j_0} \sum_{m\ge j} p^{j(j-1)/2} q^{j-1} \xi_{m-j+1} p^m n^m \sum_{r=0}^{k-j} {k-j \choose r} p^{m(k-j-r)} q^{mr} e^{-n p^{k-j-r} q^r}. \] We now use the substitutions $j=j_0 + J$ and $m = j+ L = j_0 + J + L$, where $J > 0$ and $L\ge 0$. Furthermore, by using the approximation ${k-j \choose r} \sim (k-j)^r/r! \sim (k-j_0)^r/r!$ we obtain \begin{eqnarray*} T_1 &\sim & - C_*(p) p^{j_0(j_0+1)/2} q^{j_0-1} n^{j_0} p^{j_0(k-j_0)} \sum_{J > 0} \sum_{L\ge 0} p^{J (J+1)/2} q^{J} \xi_{L+1} p^L \\ &&\qquad \times \sum_{r} \frac{ {\overline r_0}^r } {r!} (q/p)^{(L+J)(r-r_1(j))} e^{-(q/p)^{r-r_1(j)}} \\ &\sim& - C_*(p)F_0 \cdot\sum_{J > 0} p^{J (J+1)/2} q^{J} \left( \frac {\overline r_0}{\overline r_1} \right)^{J \frac{\log(1/p)}{\log(p/q)}} \sum_{L\ge 0} \xi_{L+1} p^L \\ &&\qquad \times \sum_r (q/p)^{(L+J)(r-r_1(j))} \left( \frac {\overline r_0}{\overline r_1} \right)^{r-r_1(j)} e^{-(q/p)^{r-r_1(j)}}, \end{eqnarray*} where $F_0$ is given in (\ref{eqF0-0}).
Thus, if we define (with the implicit notation $q = 1-p$) \begin{eqnarray} C_1(p,u,v) &=& \sum_{J > 0} p^{J (J+1)/2} q^{J} u^{J \frac{\log(1/p)}{\log(p/q)}} \sum_{L\ge 0} \xi_{L+1} p^L \label{eqC1puv-0} \\ &&\qquad \times \sum_{R\in \mathbb{Z}} \left( (q/p)^{(L+J)} u \right)^{R - v -J\frac{\log(1/p)}{\log(p/q)}} e^{-(q/p)^{R - v -J\frac{\log(1/p)}{\log(p/q)}}} \nonumber \end{eqnarray} we obtain \[ T_1 \sim - C_*(p)\,F_0\, C_1\left( p, \frac{\overline r_0}{\overline r_1}, \langle \overline r_1 \rangle \right). \] Note that we have substituted $r-r_1(j)$ by \begin{eqnarray*} r - r_1(j) &=& (r-\lfloor \overline r_1 \rfloor) - \langle \overline r_1 \rangle + (\overline r_1 - r_1(j)) \\ &=& R - v - J\frac{\log(1/p)}{\log(p/q)}. \end{eqnarray*} Similarly we obtain $T_2 \sim - C_*(p)\,F_0\, C_2\left( p, \frac{\overline r_0}{\overline r_1}, \langle \overline r_1 \rangle \right)$, where \begin{eqnarray} C_2(p,u,v) &=& \sum_{J \le 0} p^{J (J+1)/2} q^{J} u^{J \frac{\log(1/p)}{\log(p/q)}} \sum_{L> -J} \xi_{L+1} p^L \label{eqC2puv-0} \\ &&\qquad \times \sum_{R\in \mathbb{Z}} \left( (q/p)^{(L+J)} u \right)^{R - v -J\frac{\log(1/p)}{\log(p/q)}} e^{-(q/p)^{R - v -J\frac{\log(1/p)}{\log(p/q)}}}, \nonumber \end{eqnarray} and $T_{30} \sim - C_*(p)\,F_0\, C_{30}\left( p, \frac{\overline r_0}{\overline r_1}, \langle \overline r_1 \rangle \right),$ where \begin{eqnarray} C_{30}(p,u,v) &=& \sum_{J \le 0} p^{J (J+1)/2} q^{J} u^{J \frac{\log(1/p)}{\log(p/q)}} \sum_{L=0}^{-J} \xi_{L+1} p^L \label{eqC30puv-0} \\ &&\qquad \times \sum_{R\in \mathbb{Z}, R - v -J\frac{\log(1/p)}{\log(p/q)}\le 0 } \left( (q/p)^{(L+J)} u \right)^{R - v -J\frac{\log(1/p)}{\log(p/q)}} e^{-(q/p)^{R - v -J\frac{\log(1/p)}{\log(p/q)}}}, \nonumber \end{eqnarray} and $T_{32} \sim - C_*(p)\,F_0\, C_{32}\left( p, \frac{\overline r_0}{\overline r_1}, \langle \overline r_1 \rangle \right)$, where \begin{eqnarray} C_{32}(p,u,v) &=& \sum_{J \le 0} p^{J (J+1)/2} q^{J} u^{J \frac{\log(1/p)}{\log(p/q)}} \sum_{L=0}^{-J} \xi_{L+1} 
p^L \nonumber \\ &&\qquad \times \sum_{R\in \mathbb{Z}, R - v -J\frac{\log(1/p)}{\log(p/q)} > 0 } \left( (q/p)^{(L+J)} u \right)^{R - v -J\frac{\log(1/p)}{\log(p/q)}} \label{eqC32puv-0} \\ && \qquad \qquad \times \left( e^{-(q/p)^{R - v -J\frac{\log(1/p)}{\log(p/q)}}} - \sum_{\ell=0}^{-J-L} \frac{(-1)^\ell}{\ell!} (q/p)^{(R - v -J\frac{\log(1/p)}{\log(p/q)})\ell} \right). \nonumber \end{eqnarray} Finally we deal with $T_{31}$. First of all we regroup the summation by setting $m=j_0-M$, $j=j_0-M-L$, and $\ell = M-K$ which gives \begin{eqnarray*} T_{31} &=& C_*(p) p^{j_0(j_0+1)/2} q^{j_0-1} n^{j_0} p^{j_0(k-j_0)} \sum_{K\ge 0} \left( \frac qp \right)^{K\overline r_1} \\ &&\times \sum_{L\ge 0, \, M\ge K} \xi_{L+1}\frac{(-1)^{M-K}}{(M-K)!} p^{((L+M)^2 + L-M)/2-K(L+M)} q^{-L-M} \\ && \times \quad \sum_{r\le r_1(j_0-M-L)} {k-j_0+M+L \choose r } \left( \frac qp \right)^{(j_0-K)r}. \end{eqnarray*} We single out the case $K=0$ (and consider only the sum over $K,M,r$) which we write as \begin{eqnarray*} D(p) C_*(p) \sum_{r\le \overline r_1} {k-j_0+L+M \choose r } \left( \frac qp \right)^{j_0r} + S_0, \end{eqnarray*} where $D(p)$ is given by (\ref{eqDefAp}) and \begin{eqnarray*} S_0&:=& - C_*(p)\sum_{L,M \ge 0} \xi_{L+1}\frac{(-1)^{M}}{M!} p^{((L+M)^2 + L-M)/2} q^{-L-M} \\ && \qquad \times \sum_{r_1(j_0-M-L)< r \le \overline r_1} {k-j_0+L+M \choose r } \left( \frac qp \right)^{j_0r}. \end{eqnarray*} Note that \[ \sum_{r\le \overline r_1} {k-j_0+L+M \choose r } \left( \frac qp \right)^{j_0r} = e^{\overline r_0} {\frak{P}}hi\left( \frac{\overline r_1 - \overline r_0}{\sqrt{\overline r_0}} \right) \left( 1 + O\left( \frac{\log\log n}{\log n} (L+M) \right)\right), \] where $ {\frak{P}}hi$ denotes the distribution function of the normal distribution. 
Thus, if we set \begin{eqnarray*} S_K&=& C_*(p)p^{j_0(j_0+1)/2} q^{j_0-1} n^{j_0} p^{j_0(k-j_0)} \left( \frac qp \right)^{K\overline r_1} \\ &&\qquad \times \sum_{L\ge 0, \, M\ge K} \xi_{L+1}\frac{(-1)^{M-K}}{(M-K)!} p^{((L+M)^2 + L-M)/2-K(L+M)} q^{-L-M} \\ &&\qquad \qquad \times \sum_{r\le r_1(j_0-M-L)} {k-j_0+M+L \choose r } \left( \frac qp \right)^{(j_0-K)r}, \end{eqnarray*} then we have \[ T_{31} = D(p) C_*(p) e^{\overline r_0} \Phi\left( \frac{\overline r_1 - \overline r_0}{\sqrt{\overline r_0}} \right) (1+o(1)) -S_0 + \sum_{K\ge 1} S_K. \] In the same way as above we obtain $S_0 \sim - C_*(p)\,F_0\, C_{31,0}\left( p, \frac{\overline r_0}{\overline r_1}, \langle \overline r_1 \rangle \right)$, where \begin{eqnarray} C_{31,0}(p,u,v) &=& \sum_{L,M \ge 0} \xi_{L+1}\frac{(-1)^{M}}{M!} p^{((L+M)^2 + L-M)/2} q^{-L-M} \label{eqC310puv-0} \\ &&\qquad \times \sum_{-(M+L) \frac{\log(1/p)}{\log(p/q)} +v \le R \le 0 } u^{R-v}. \nonumber \end{eqnarray} It is also convenient to rewrite this as a sum over $J = -M-L\le 0$ and $0\le L \le -J$: \begin{eqnarray} C_{31,0}(p,u,v) &=& \sum_{J\le 0}\sum_{L=0}^{-J} \xi_{L+1}\frac{(-1)^{-J-L}}{(-J-L)!} p^{ J(J+1)/2 + L} q^{J} \label{eqC310puv-2-0} \\ &&\qquad \times \sum_{ J \frac{\log(1/p)}{\log(p/q)} +v \le R \le 0 } u^{R-v}. \nonumber \end{eqnarray} For $K\ge 1$ the terms $S_K$ can be approximated by $S_K \sim C_*(p)\,F_0\, C_{31,K}\left( p, \frac{\overline r_0}{\overline r_1}, \langle \overline r_1 \rangle \right)$, where \begin{eqnarray} C_{31,K}(p,u,v) &=& \sum_{J\le -K} \sum_{L=0}^{-J-K} \xi_{L+1}\frac{(-1)^{-J-L-K}}{(-J-L-K)!} p^{J(J+1)/2 + L +JK} q^{J} \nonumber \\ &&\qquad \times \sum_{R\le v+J \frac{\log(1/p)}{\log(p/q)} } \left( u \left(\frac qp\right)^{-K} \right)^{R-v}.
\label{eqC31Kpuv-2-0} \end{eqnarray} Summing up, if we set \[ C(p,u,v) = - C_1(p,u,v)-C_2(p,u,v)-C_{30}(p,u,v)-C_{32}(p,u,v)-C_{31,0}(p,u,v)+ \sum_{K\ge 1} C_{31,K}(p,u,v) \] and by observing that $D(p) = 0$ (see Section~\ref{secmiracle}) we have: \begin{lemma} With the notation from above we have \[ \tilde G_k(n) = C_*(p) p^{j_0(j_0+1)/2} q^{j_0-1} n^{j_0} p^{j_0(k-j_0)} \frac{{\overline r_0}^{\overline r_1}}{\Gamma(\overline r_1+1)} \left( C(p, \overline r_0/\overline r_1, \langle \overline r_1 \rangle) + o(1) \right). \] \end{lemma} It remains to show that $C(p,u,v)$ is strictly positive for $\frac 12 < p < 1$, $\sqrt{q/p} \le u \le \sqrt{p/q}$, $0\le v < 1$. Since the representation of $C(p,u,v)$ is quite involved we will use the following strategy. We do an asymptotic analysis for $p\to \frac 12$ and $p\to 1$ and fill out the remaining interval $0.51 \le p \le 0.97$ by a numerical analysis (together with upper bounds for the derivatives). Due to space limitations we present here only a short version of the (very involved) considerations. A full version can be found in the appendix. We start with the behavior for $p\to \frac 12$. \begin{lemma}\label{Lepto1/2} Set $p/q = e^{\eta}$ and $\tilde u = \frac 1\eta \log u$. Then for $\eta \to 0+$ (which is equivalent to $p\to \frac 12$) we have uniformly for $\tilde u \in [-\frac 12, \frac 12]$ and $v\in [0,1)$ \begin{equation}\label{eqLepto1/2} C(p,u,v) \sim \frac 1\eta h(\tilde u), \end{equation} where $h(\tilde u)$ is a continuous and positive function. In particular we have $C(p,u,v) > 0$ for $\frac 12 < p \le 0.51$. \end{lemma} \begin{proof} We single out the function $C_1(p,u,v)$ and start with the sum over $R$.
The first observation is that for $\eta \to 0$ we can replace the sum by an integral, that is, we have for fixed integers $L,J$, as $\eta\to 0$, \begin{eqnarray*} && \sum_{R\in \mathbb{Z}} \left( (q/p)^{(L+J)} u \right)^{R - v -J\frac{\log(1/p)}{\log(p/q)}} e^{-(q/p)^{R - v -J\frac{\log(1/p)}{\log(p/q)}}} \\ &&\sim \int_{-\infty}^\infty \left( (q/p)^{(L+J)} u \right)^{t} e^{-(q/p)^{t}} \, dt = \frac 1\eta \int_{-\infty}^\infty e^{-\left(M - \tilde u \right) t} e^{-e^{-t}}\, dt. \end{eqnarray*} This also implies that the leading asymptotic term does not depend on $v$. Further note that $\tilde M = M - \frac 1\eta \log u = L+J - \tilde u \ge \frac 12$ so that the integral converges and by using the substitution $w = e^{-t}$ we obtain \[ \int_{-\infty}^\infty e^{-\tilde M t} e^{-e^{-t}}\, dt = \int_0^\infty w^{\tilde M -1} e^{-w}\, dw = \Gamma(\tilde M). \] This finally shows that, as $p\to \frac 12$ (or equivalently as $\eta = \log(p/q) \to 0$), \begin{equation}\label{eqC1asymp} C_1(p,u,v) \sim \frac 1\eta \sum_{J > 0} 2^{-J (J+1)/2 - J + J\tilde u} \sum_{L\ge 0} \xi_{L+1}(1/2)\, 2^{-L}\, \Gamma\left( J + L - \tilde u\right). \end{equation} Similarly we can handle the other terms and obtain the asymptotic representation (\ref{eqLepto1/2}). Since the function $h(\tilde u)$ is explicit (as a series expansion) and continuously differentiable in $\tilde u$ we can use a simple numerical analysis (together with upper bounds for the derivative $h'(\tilde u)$) in order to show that $h(\tilde u) > 0$ for $\tilde u \in [-\frac 12, \frac 12]$. Finally by taking care also on error terms (which were neglected in the above analysis) it also follows that $C(p,u,v) > 0$ for $\frac 12 < p \le 0.51$. \end{proof} The situation for $p\to 1$ is more delicate in the analysis, however, positivity follows then immediately. \begin{lemma}\label{Lepto1} Set $\overline c(v) = \max\{v-v^2/2,(1-v^2)/2\}$. 
Then we have, as $p\to1$ uniformly for $\sqrt{q/p} \le u \le \sqrt{p/q}$, $0\le v < 1$ \begin{equation}\label{eqLepto1} C(p,u,v) \ge \exp\left( \overline c(v) \frac{\log^2 (1-p)}{\log 1/p}(1 + o(1)) \right). \end{equation} In particular we have $C(p,u,v) > 0$ for $0.97\le p < 1$. \end{lemma} \begin{proof} We just consider the most interesting case, namely the sum $\sum_{K\ge 1} C_{31,K}(p,u,v)$ and assume for a moment that $v> 0$. We set \[ I_0:= \left[ - v\left( \frac{\log q}{\log p}-1\right) , 0 \right) \cap \mathbb{Z} \] and for $M\ge 1$ \[ I_M:= \left[ - (v+M)\left(\frac{\log q}{\log p}-1\right) , - (v+M-1)\left(\frac{\log q}{\log p}-1\right) \right) \cap \mathbb{Z} \] If $J\in I_M$ we have, as $p\to 1$, \[ \sum_{R\le v+ J \frac{\log(1/p)}{\log(p/q)}} \left( u (q/p)^{-K} \right)^{R-v} \sim \left( u (q/p)^{-K} \right)^{-M-v}. \] Since \[ \sum_{L=0}^{-J-K} \xi_{L+1} p^L \frac{(-1)^{-J-K-L}}{(-J-K-L)!} = [z^{-J-K}] \prod_{j\ge 0} \frac{e^{qp^jz} -1}{qp^j z} e^{-z} = [z^{-J-K}] e^{z/2 + O(qz^2) - z} \] we get \begin{eqnarray*} C_{31,K,M}&:=& \sum_{J\in I_M,\, J \le -K} p^{J(J+1)/2 +JK}q^J \sum_{R\le v+ J \frac{\log(1/p)}{\log(p/q)} } \left( u \left(\frac qp\right)^{-K} \right)^{R-v} \\ &&\qquad \times\sum_{L=0}^{-J-K} \xi_{L+1}p^{L} \frac{(-1)^{-J-L-K}}{(-J-L-K)!} \\ &\sim & \sum_{J\in I_M,\, J \le -K} p^{J(J+1)/2 +JK}q^J \left( u \left(\frac qp\right)^{-K} \right)^{-M-v} [z^{-J-K}] e^{z/2 + O(qz^2) - z} \end{eqnarray*} and consequently if we sum over $K\ge 1$ \begin{eqnarray*} \sum_{K\ge 1} C_{31,K,M} &\sim & u^{-M-v} \sum_{J\in I_M} p^{J(J+1)/2}q^J \sum_{K=1}^{-J} p^{JK}(q/p)^{K(M+v)} [z^{-J-K}] e^{z/2 + O(qz^2) - z} \\ & = & u^{-M-v} \sum_{J\in I_M} p^{J(J+1)/2}q^J \sum_{K=1}^{-J} p^{JK}(q/p)^{M(1+v)} [z^{-J-K}] e^{z/2 + O(qz^2) - z}. 
\end{eqnarray*} We observe that (for $J\in I_M$) \begin{eqnarray*} &&\sum_{K=1}^{-J} p^{JK}(q/p)^{K(M+v)} [z^{-J-K}] e^{z/2 + O(qz^2) - z} = [z^{-J}] \frac{p^{J}(q/p)^{M+v}z}{1- p^{J}(q/p)^{M+v}z } e^{z/2 + O(qz^2) - z} \\ &&\qquad \sim p^{-J^2}(q/p)^{-J(M+v)} e^{z_M/2 + O(q z_M^2) - z_M}, \end{eqnarray*} where $z_M = p^{-J}(q/p)^{-M-v}$. Note that $z_M$ varies between $1$ and $1/q$ if $J\in I_M$. However, it will turn out that the asymptotic leading terms will come from $J$ close to $- (v+M)\frac{\log q}{\log p}$ which means that $z_M$ is asymptotically $1$ and, thus, the last exponential term is asymptotically $e^{-1/2}$. The reason is that the term \[ p^{J(J+1)/2}q^J p^{-J^2}(q/p)^{-J(M+v)} = p^{-J^2/2} q^{J(1-M-v)} p^{J(\frac 12 +M+v)} \] has its absolute minimum for $J$ close to $- (v+M-1)\frac{\log q}{\log p}$ and for $J\in I_M$ it gets maximal for $J$ close to $- (v+M)\frac{\log q}{\log p}$, in particular if \[ J = J_{v,M} := - \left\lfloor (M+v) \left( \frac{\log q}{\log p} -1 \right) \right\rfloor. \] Thus, we obtain \begin{eqnarray*} \sum_{K\ge 1} C_{31,K,M} &\sim & e^{-\frac 12} u^{-M-v} p^{-J_{v,M}^2/2} q^{J_{v,M}(1-M-v)} p^{J_{v,M}(\frac 12 +M+v)} \\ &=& e^{ \frac{\log^2 q}q \left( M+v- \frac 12 (M+v)^2 \right) + O(\log^2 q) }. \end{eqnarray*} Since $(M+v) - \frac 12 (M+v)^2 \le 0$ for $M\ge 2$ (and $0\le v < 1$) it is clear that only the first two terms corresponding to $M=0$ and $M=1$ are relevant. Hence, we obtain \[ \sum_{K\ge 1} C_{31,K} \sim e^{ \frac{\log^2 (1-p)}{\log(1/p)} \left( v- \frac 12 v^2 \right) + O(\log^2 (1-p)) } + e^{ \frac{\log^2 (1-p)}{\log(1/p)} \frac 12 \left( 1- v^2 \right) + O(\log^2 (1-p)) }. \] Actually this kind of representation also holds for $v= 0$. The other terms can be handled in a similar way. Actually $C_1,C_2, C_{32}, C_{31,0}$ are of smaller order, whereas $C_{30}$ has (almost) a comparable order of magnitude. Finally, by taking error terms into account it follows that $C(p,u,v)$ is positive for $0.97 \le p < 1$.
\end{proof} Thus, it remains to consider $C(p,u,v)$ for $0.51 \le p \le 0.97$. As mentioned above we do here a numerical analysis. For example, for the following sample values we obtain: \begin{center} \begin{tabular}{llll} \hline $p$ & $u$ & $v$ & $C(p, u, v)$ \\ \hline 0.51 & 1.00 & 0.20 & 17.6603002053593 \\ 0.51 & 1.00 & 0.40 & 17.6630153331822 \\ 0.51 & 1.00 & 0.60 & 17.6610407898646 \\ 0.51 & 1.00 & 0.80 & 17.6856832509155 \\ 0.60 & 0.90 & 0.60 & 1.49524800151569 \\ 0.60 & 1.00 & 0.20 & 1.08391296918222 \\ 0.60 & 1.00 & 0.60 & 1.08391297098683 \\ 0.60 & 1.00 & 0.80 & 1.08391297046200 \\ 0.60 & 1.10 & 0.20 & 0.834656789094941 \\ 0.60 & 1.20 & 0.60 & 0.673917281982084 \\ 0.70 & 1.00 & 0.60 & 0.232497954955319 \\ 0.80 & 1.00 & 0.60 & 0.0287161523336721 \\ 0.85 & 1.00 & 0.60 & 0.00237172764900606 \\ 0.93 & 1.00 & 0.60 & 1.87317294616045 $\times 10^{15}$ \\ 0.97 & 0.50 & 0.60 & 9.17733198126610 $\times 10^{72}$ \\ 0.97 & 1.00 & 0.60 & 6.05478107453485 $\times 10^{72}$ \\ 0.97 & 5.00 & 0.60 & 2.30524156812013 $\times 10^{72}$ \\ \hline \end{tabular} \end{center} A more detailed analysis can be found in the appendix. \subsection{Proof of Theorem~\ref{FillupTheorem}} \label{FProof} The analysis of $F_n$ runs along the same lines as for $H_n$. As already mentioned we will give only a roadmap of the proof since it is actually much easier than that of $H_n$. \subsubsection{Lower bound on $F_n$} The lower bound on $F_n$ can be proven in two different ways. We can either use the inverse Mellin transform integral for $\tilde G_k(n)$ with \[ k = k_L = \log_{1/q} n - (1+\epsilon)\log_{1/q}\log\log n \] evaluated at $\rho = \log_{p/q}\log n$. This leads to ${\rm Pr}[F_n < k] \le \mu_{n,k} \to 0$. Alternatively we can use the correspondence between the R\'enyi process and the random PATRICIA trie construction, along with the relationship between PATRICIA tries and standard tries.
Because of the path compression step in the construction of a PATRICIA trie from a trie, the fillup level for a PATRICIA trie is always greater than or equal to the fillup level for the associated trie. Furthermore, it is known (see \cite{park2008}) that the fillup level in random tries for $p > 1/2$ is, with high probability, \begin{eqnarray*} \log_{1/q} n - \log_{1/q}\log\log n + o(\log\log\log n). \end{eqnarray*} Thus, with high probability, this is also a lower bound for the $F_n$ that we study. \subsubsection{Upper bound on $F_n$} The upper bound proof for $F_n$ follows along similar lines to the lower bound for $H_n$. We set \[ k = k_U = \log_{1/q} n - (1-\epsilon)\log_{1/q}\log\log n, \] and our goal is to show that $ {\mathrm{Var}}[B_{n,k}] = o( {\mathbb{E}}[B_{n,k}]^2)$. First we get an upper bound for $ {\mathrm{Var}}[B_{n,k}]$ in the same way as in the case of $H_n$ (via inverse Mellin transform and Depoissonization) of the form \[ {\mathrm{Var}}[B_{n,k}] = O\left(q^{-\epsilon \log_{p/q}\log n \cdot \log_{1/q} \log\log n (1 + o(1))}\right). \] In order to obtain a corresponding lower bound for $\mu_{n,k} = {\mathbb{E}}[B_{n,k}]$ we use again the explicit representation \begin{equation} \label{PoGkFillupExpression} \tilde{G}_k(n) = \sum_{j = 0}^k \sum_{m \geq j} \kappa_{m,j} (\mu_{m,j} - \mu_{m,j-1}), \end{equation} where \begin{eqnarray} \kappa_{m,j} &=& \frac{T(-m)n^m}{m!} \sum_{\ell=0}^\infty \frac{(-n)^{\ell}}{\ell!} T(-m-\ell)^{k-j} \nonumber \\ &=& \frac{T(-m)}{m!} \sum_{r=0}^{k-j} { {k-j}\choose r} (np^r q^{k-j-r})^m \exp(-np^r q^{k-j-r}). \label{eqkapparep2} \end{eqnarray} We note that, because $\rho > 0$, there are no contributions from poles, so that the $\ell$-sum begins with $0$, in contrast to (\ref{eqkapparep}) which leads to the simplified form (\ref{eqkapparep2}). Our derivation suggests that the main contribution to (\ref{PoGkFillupExpression}) comes from the terms $j = O(1)$ and $m = \rho \cdot p/q + O(1)$.
In this range, the difference $\mu_{m,j} - \mu_{m,j-1}$ is estimable by the following lemma from \cite{magnerknesslszpa2014} (see part (i) of Theorem 2.2 of that paper). \begin{lemma}[Precise asymptotics for $\mu_{m,j}$ when $j = O(1)$ and $m\to\infty$] \label{MuJO1Lemma} For $p > q$, $m \to \infty$, and $j = O(1)$, we have \[ \mu_{m,j} \sim mq^j (1 - q^j)^{m-1}. \] \end{lemma} Note, in particular, that $\mu_{m,j} - \mu_{m,j-1}$ is strictly positive in this range. Applying this lemma, some algebra is required to show that the contribution of the $(m,j)$th term, with $m = \rho \cdot p/q + O(1)$ and $j = O(1)$, is \begin{equation} \label{DominantTermFillup} q^{-\epsilon \log_{p/q}\log n \cdot \log_{1/q}\log\log n (1 + o(1))}. \end{equation} To complete the necessary lower bound on the entire sum (\ref{PoGkFillupExpression}), we consider also the following sums: \begin{eqnarray} \sum_{j=0}^{j'}\sum_{m = j}^{m'} \kappa_{m,j} (\mu_{m,j} - \mu_{m,j-1}) \quad\mbox{and}\quad \sum_{j > j'}\sum_{m \geq j} \kappa_{m,j} (\mu_{m,j} - \mu_{m,j-1}), \end{eqnarray} where $j'$ and $m'$ are sufficiently large fixed positive numbers. We note that the terms that are not covered by any of these sums may be disregarded, since by Lemma~\ref{MuJO1Lemma} they are non-negative. It may be shown that both sums are smaller than the dominant term (\ref{DominantTermFillup}) by a factor of $e^{-\Theta(\rho)}$, both by upper bounding terms in absolute value and using the trivial bound $|\mu_{m,j} - \mu_{m,j-1}| \leq 2m$. We thus arrive at \begin{equation}\label{eqmunkfilluplevel} \mu_{n,k} \ge q^{-\epsilon \log_{p/q}\log n \cdot\log_{1/q}\log\log n (1 + o(1))}. \end{equation} Since this tends to $\infty$ with $n$, combining this with the upper bound for the variance yields the desired upper bound on ${\rm Pr}[F_n > k]$, which establishes the upper bound on $F_n$.
\section{Depoissonization}\label{sec:depo} \subsection{Analytic Depoissonization} The Poisson transform $\tilde G(z)$ of a sequence $g_n$ is defined by $\tilde G(z) = \sum_{n\ge 0} g_n \frac{z^n}{n!} e^{-z}$. If the sequence $g_n$ is {\it smooth enough} then we usually have $g_n \sim \tilde G(n)$ (as $n\to\infty$) which we call {\it Depoissonization}. In \cite{jacquetszpa1997} a theory for {\it Analytic Depoissonization} is developed. For example, the basic theorem (Theorem 1) says that if \begin{eqnarray}\label{eqDep1} |\tilde G(z)| \le B |z|^\beta \end{eqnarray} for $|z|> R$ and $|\arg(z)| \le \theta$ (for some $B> 0$, $R>0$, and $0< \theta < \pi/2$) and \begin{eqnarray}\label{eqDep2} |\tilde G(z)e^z| \le A e^{\alpha|z|} \end{eqnarray} for $|z|> R$ and $\theta <|\arg(z)| \le \pi$ (for some $A> 0$ and $\alpha < 1$) then \begin{eqnarray}\label{eqDep3} g_n = \tilde G(n) + O(n^{\beta-1}). \end{eqnarray} Actually this expansion can be made more precise by taking into account derivatives of $\tilde G(z)$. For example, we have \begin{eqnarray}\label{eqDep3.2} g_n = \tilde G(n) - \frac n2 \tilde G''(n)+ O(n^{\beta-2}). \end{eqnarray} In \cite[Lemmas 1 and 18]{magnerspa2015} it is shown that $\tilde G_k(z) = \sum_{n\ge 0} \mu_{n,k} \frac{z^n}{n!} e^{-z}$ satisfies (\ref{eqDep1}) with $\beta = 1+ \epsilon$ for any $\epsilon> 0$ and (\ref{eqDep2}) for some $\alpha < 1$ uniformly for all $k\ge 0$. Thus, it follows uniformly for all $k\ge 0$ \begin{eqnarray}\label{eqDep3.3} \mu_{n,k} = \tilde G_k(n) - \frac n2 \tilde G_k''(n)+ O(n^{\epsilon-1}). \end{eqnarray} The estimate (\ref {eqDep3}) is not sufficient for our purposes (it only works if $\mu_{n,k}$ grows at least polynomially as in the {\it central range}). For the boundary region, where $k \sim \log_{1/p} n$ or $k \sim \log_{1/q} n$ we have to use (\ref{eqDep3.3}) which means that we have to deal with derivatives of $\tilde G_k(z)$, too. 
\subsection{Poisson Variance} Next we discuss how the variance of a random variable can be handled with the help of the Poisson transform. First we assume that $\tilde G(z)$ is the Poisson transform of the expected values $\mu_n = {\mathbb{E}}[X_n]$ of a sequence of random variables. Furthermore we set \[ \tilde V(z) = \sum_{n\ge 0} {\mathbb{E}}[X_n^2] \frac{z^n}{n!} e^{-z} - \tilde G(z)^2 \] which we call the Poisson variance. This is not the Poisson transform of the variance. However, since we usually have $ {\mathbb{E}}[X_n^2] \sim \tilde V(n) + \tilde G(n)^2$ and $ {\mathbb{E}}[X_n] \sim \tilde G(n)$ it is expected that $ {\mathrm{Var}}[X_n] \sim \tilde V(n)$. Actually this can be made precise with the help of (\ref{eqDep3.2}). Suppose that $\tilde G(z)$ and $\tilde V(z)$ satisfy the property (\ref{eqDep1}) and that $\tilde G(z)$ and $\tilde V(z) + \tilde G(z)^2$ satisfy the property (\ref{eqDep2}). Then it follows that \[ {\mathbb{E}}[X_n] = \tilde G(n) - \frac n2 \tilde G''(n)+ O(n^{\beta-2}) \] and \[ {\mathbb{E}}[X_n^2] = \tilde V(n) + \tilde G(n)^2 - \frac n2 \tilde V''(n) - n (\tilde G'(n))^2 - n \tilde G(n)\tilde G''(n) + O(n^{\beta-2}) \] from which it follows that \begin{eqnarray} {\mathrm{Var}}[X_n] &=& \tilde V(n) - \frac n2 \tilde V''(n) - n (\tilde G'(n))^2 + \frac 14 n^2 (\tilde G''(n))^2 \nonumber \\ && +\, O(n^{2\beta -4}) + O( n^{\beta-2} \tilde G(n)) + O( n^{\beta} \tilde G''(n)). \label{eqVarest} \end{eqnarray} In particular in our case we know that the Poisson transform $\tilde G_k(z)$ (of the sequence $\mu_{n,k} = {\mathbb{E}}[B_{n,k}]$) and the corresponding Poisson variance $\tilde V_k(z)$ satisfy the assumptions for $\beta =1 + \epsilon$ (for every fixed $\epsilon> 0$), see \cite{magnerspa2015}. Thus we also obtain (\ref{eqVarest}) in the present context. \section{An Unexpected Identity}\label{secmiracle} In this final section we prove that $D(p) = 0$ which seems to be a new (and unexpected) identity.\footnote{The following simple proof is due to Gleb Pogudin (Univ.
Linz).} \begin{lemma}\label{LeDp=0} Suppose that $|p|<1$ and $q = 1-p$ and set \begin{eqnarray} \label{eqDefAp-0} D(p) = \sum_{L,M \ge 0} \xi_{L+1}\frac{(-1)^{M}}{M!} p^{((L+M)^2 + L-M)/2} q^{-L-M}, \end{eqnarray} where $\xi_{\ell} = \xi_\ell(p)$ is recursively defined by $\xi_1 = 1$ and \begin{equation}\label{eqexrec2-0} \xi_\ell = q^{-1}p^{\ell} \sum_{J=1}^\ell \frac{\xi_{\ell+1-J}}{J!} (q/p)^{J}. \end{equation} Then \begin{equation}\label{eqLeDp=0} D(p) = 0. \end{equation} \end{lemma} \begin{proof} By setting $L+M = n$ we can rewrite $D(p)$ as \[ D(p) = \sum_{n\ge 0} p^{ n\choose 2} \sum_{L=0}^{n} \xi_{L+1} (p/q)^L \frac{(-1)^{(n-L)}}{(n-L)!} q^{-(n-L)}. \] Since the recurrence (\ref{eqexrec2-0}) for $\xi_\ell$ can be rewritten as \[ X(z) = \sum_{L\ge 0} \xi_{L+1} z^L = \prod_{j\ge 0} \frac{e^{qp^j z} -1 }{qp^j z} \] we thus obtain \[ D(p) = \sum_{n\ge 0} p^{ n\choose 2} [z^n] X((p/q)z) e^{-z/q} = \sum_{n\ge 0} p^{ n\choose 2} [z^n] \prod_{j\ge 0} \frac{e^{(p-1)p^j z} - e^{-p^j z }}{p^{j+1} z}. \] Hence, if we set $f(z) = \frac 1{pz} \left( e^{(p-1)z} - e^{-z} \right)$, $F(z) = f(z) f(pz) f(p^2z)\cdots$, and $F_n = [z^n] F(z)$ then $D(p) = 0$ is equivalent to $\sum_{n\ge 0} F_n p^{n\choose 2} = 0$. We next set $g(z) = e^{-z}$, $h(z) = (e^z-1)/z$, and $q(z) = (1-e^{-z})/z$. Then we have $f(z) = g(z)h(pz)$ and $q(z) = g(z)h(z)$ which implies the representation \[ F(z) = \prod_{j\ge 0} g(p^j z) h(p^{j+1} z) = g(z) \prod_{j\ge 1} g(p^j z) h(p^{j} z) = g(z) \prod_{j\ge 1} q(p^j z). \] Hence, if we set $Q(z) = q(z) q(pz) q(p^2z)\cdots$, and $Q_n = [z^n] Q(z)$ then we also have \[ F(z) = g(z) Q(pz) = (1-zq(z)) Q(pz) = Q(pz) - z Q(z) = \sum_{n\ge 0} Q_n(p^n z^n - z^{n+1}). \] So, finally, if we use the substitution $z^n \mapsto p^{n\choose 2}$ and the property ${ n+1 \choose 2} = {n\choose 2} + n$, we immediately see that every summand vanishes. This proves $D(p) = 0$. \end{proof} \begin{thebibliography}{9} \bibitem{AbramowitzStegun} Milton Abramowitz and Irene~A.
Stegun. \newblock {\em Handbook of Mathematical Functions with Formulas, Graphs, and Mathematical Tables}, volume~55 of {\em National Bureau of Standards Applied Mathematics Series}. \newblock Superintendent of Documents, U.S. Government Printing Office, Washington, D.C., 1964. \bibitem{devroye1992} Luc Devroye. \newblock A note on the probabilistic analysis of {PATRICIA} trees. \newblock {\em Random Structures and Algorithms}, 3(2):203--214, March 1992. \bibitem{devroye2002} Luc Devroye. \newblock Laws of large numbers and tail inequalities for random tries and {PATRICIA} trees. \newblock {\em Journal of Computational and Applied Mathematics}, 142:27--37, 2002. \bibitem{devroye2004} Luc Devroye. \newblock Universal asymptotics for random tries and {PATRICIA} trees. \newblock {\em Algorithmica}, 42(1):11--29, 2005. \bibitem{confversion} Michael Drmota, Abram Magner, and Wojciech Szpankowski. \newblock Asymmetric R\'{e}nyi Problem and PATRICIA Tries. \newblock Proceedings of the 27th International Conference on Probabilistic, Combinatorial and Asymptotic Methods for the Analysis of Algorithms, Krak\'ow, Poland, 4--8 July 2016. \bibitem{amm17} M. Drmota, C. Krattenthaler and G. Pogudin, Problem for the American Mathematical Monthly, manuscript 2017. \bibitem{drmotaszpa2011} Michael Drmota and Wojciech Szpankowski. \newblock The expected profile of digital search trees. \newblock {\em Journal of Combinatorial Theory, Series A}, 118(7):1939--1965, October 2011. \bibitem{Flajolet95mellintransforms} Philippe Flajolet, Xavier Gourdon, and Philippe Dumas. \newblock Mellin transforms and asymptotics: Harmonic sums. \newblock {\em Theoretical Computer Science}, 144:3--58, 1995. \bibitem{Flajoletsedgewick2009} Philippe Flajolet and Robert Sedgewick. \newblock {\em Analytic Combinatorics}. \newblock Cambridge University Press, Cambridge, UK, 2009. \bibitem{jacquetszpa1997} Philippe Jacquet and Wojciech Szpankowski. \newblock Analytical depoissonization and its applications. 
\newblock {\em Theoretical Computer Science}, 201(1-2):1--62, July 1998. \bibitem{jansonszpa1996} Svante Janson and Wojciech Szpankowski. \newblock Analysis of an asymmetric leader election algorithm. \newblock {\em Electronic Journal of Combinatorics}, 4:1--16, 1996. \bibitem{kazemi2011} Ramin Kazemi and Mohammad Vahidi-Asl. \newblock The variance of the profile in digital search trees. \newblock {\em Discrete Mathematics and Theoretical Computer Science}, 13(3):21--38, 2011. \bibitem{knuth1998acp} Donald~E. Knuth. \newblock {\em The Art of Computer Programming, Volume 3: (2nd ed.) Sorting and Searching}. \newblock Addison Wesley Longman Publishing Co., Inc., Redwood City, CA, USA, 1998. \bibitem{magnerPhD2015} Abram Magner. \newblock {\em Profiles of {PATRICIA} Tries}. \newblock PhD thesis, Purdue University, December 2015. \bibitem{magnerspa2015} Abram Magner and Wojciech Szpankowski. \newblock {\em Profiles of {PATRICIA} Tries}. {\it Algorithmica}, 76(4), 1-67, 2016. \bibitem{magnerknesslszpa2014} Abram Magner, Charles Knessl, and Wojciech Szpankowski. \newblock Expected external profile of {PATRICIA} tries. \newblock {\em Proceedings of the Eleventh Workshop on Analytic Algorithmics and Combinatorics}, pages 16--24, 2014. \bibitem{park2008} Gahyun Park, Hsien-Kuei Hwang, Pierre Nicod\`{e}me, and Wojciech Szpankowski. \newblock Profiles of tries. \newblock {\em SIAM Journal on Computing}, 38(5):1821--1880, 2009. \bibitem{Pittel85} Boris Pittel. \newblock Asymptotic growth of a class of random trees. \newblock {\em Annals of Probability}, 18:414--427, 1985. \bibitem{pittelrubin1990} Boris Pittel and Herman Rubin. \newblock How many random questions are needed to identify $n$ distinct objects? \newblock {\em Journal of Combinatorial Theory, Series A}, 55(2):292--312, 1990. \bibitem{renyi} Alfred R\'enyi. \newblock On random subsets of a finite set. \newblock {\em Mathematica}, 3:355--362, 1961. \bibitem{szpa1990} Wojciech Szpankowski. 
\newblock {PATRICIA} tries again revisited. \newblock {\em Journal of the ACM}, 37(4):691--711, October 1990. \bibitem{szpa2001Book} Wojciech Szpankowski. \newblock {\em Average Case Analysis of Algorithms on Sequences}. \newblock John Wiley \& Sons, Inc., New York, NY, USA, 2001. \end{thebibliography} \appendix \def\C{\mathcal{C}} \def\Arg{\mathrm{Arg}} \section{Extension of Lemma~\ref{KnesslLemma}} \label{KnesslLemmaProof} We prove here an extended version of Lemma~\ref{KnesslLemma} and provide a more detailed analysis of the quantities $\xi_{\ell}(n)$ and $\xi_\ell$. \begin{lemma} \label{KnesslLemma1} \label{KNESSLLEMMA1} \label{mu_mjUpperBoundLemma1} Let $p \geq q$. For $n\to\infty$ and $1\le k < n$ with $\log^2 (n-k) = o(k)$, $$ \mu_{n,k} = (n-k)^{3/2 + \frac{\log q}{\log p}} \frac{n!}{(n-k)!}p^{k^2/2 + k/2}q^{k} \cdot \exp\left( -\frac{\log^2(n-k)}{2\log(1/p)} \right)\Theta(1). $$ \end{lemma} We first work out the case $p> q$. The case $p=q = \frac 12$ is slightly easier and will be discussed below. We recall the definition of the quantity $ S_\ell(n) = \mu_{n,n-\ell}. $ With this notation the quantities $\xi_\ell(n)$ are defined by \begin{equation} S_\ell(n) = n! C_*(p) p^{(n-\ell)^2/2 + (n-\ell)/2}q^{n-\ell} \xi_{\ell}(n), \label{S_ellDefinition} \end{equation} where $$ C_*(p) = \prod_{j=2}^\infty (1-p^j - q^j)^{-1} \cdot (1 + (q/p)^{j-2}). $$ By definition and simple computation we have $\xi_{1}(n) \sim 1$ and $\xi_{2}(n) \sim 1/2$. Our task now is to determine the asymptotic behavior of $\xi_{\ell}(n)$ as $\ell\to\infty$. From the recurrence (\ref{muRecurrence}) for $\mu_{n,k}$, we can derive a corresponding one for $\xi_\ell(n)$: \begin{equation} \xi_{\ell}(n)(1-p^n-q^n) = \sum_{J=1}^\ell \frac{\xi_{\ell+1-J}(n-J)}{J!}q^{-1}p^{\ell-n}(p^{n-J}q^{J} + p^{J}q^{n-J}). \label{eqxielln-rec} \end{equation} Note that this recurrence uniquely defines $\xi_\ell(n)$ given $\xi_1(1) = 1/C_*(p)$.
First the recurrence provides $\xi_1(n)$ for all $n\ge 2$. Then we get $\xi_2(3)$ (recall that $\xi_2(1) = \xi_2(2) = 0$) and recursively $\xi_2(n)$ for $n\ge 4$; etc. From this recurrence it follows immediately that for each $\ell\ge 1$ the limit $\xi_\ell = \lim_{n\to\infty} \xi_\ell(n)$ exists. Clearly these limits satisfy the recurrence $$ \xi_\ell = q^{-1}p^{\ell} \sum_{J=1}^\ell \frac{\xi_{\ell+1-J}}{J!} (q/p)^{J} $$ which we will analyze separately in the sequel. \subsection{A-priori bounds for $\xi_\ell(n)$} We will first prove an a-priori bound for $\xi_\ell(n)$ and also an error bound for the difference $\xi_\ell(n) - \xi_\ell$: \begin{equation} \xi_\ell(n) = O\left( \frac 1{\ell !} \right), \qquad \xi_\ell(n) = \xi_\ell + O\left( \frac{p^{n-\ell} + (q/p)^{n-\ell}}{(\ell-1)!} \right), \label{eqxielln-est} \end{equation} where the implied constants depend on $p$. Both inequalities can be shown by induction by applying it recursively to (\ref{eqxielln-rec}). Note first that (\ref{eqxielln-rec}) is only relevant for $n> \ell$ since $\xi_\ell(n) = 0$ for $n\le \ell$. We choose $\ell_0 \ge 1$ in a way that \[ \frac{p^{\ell_0}/q(e^{q/p}-1) + (q/p)^{\ell_0}/q(e^{p/q}-1)}{1-p^{\ell_0}-q^{\ell_0}} \le 1 \] and let $C_0$ be an upper bound for $\xi_\ell(n)$ for $\ell < \ell_0$ and all $n \ge 1$. Then it follows by induction that $\xi_\ell(n) \le C_0$ for all $\ell \ge 1$ and $n\ge 1$. We just have to observe that for $n> \ell \ge \ell_0$ \begin{eqnarray*} \xi_\ell(n) &\le& \frac{C_0}{1-p^n-q^n} \left( \sum_{J=1}^{\ell} \frac{p^{\ell-J}q^{J-1}}{J!} + \sum_{J=1}^{\ell} \frac{p^{\ell+J-n}q^{n-J-1}}{J!} \right) \\ &\le & \frac{C_0}{1-p^n-q^n} \left( \frac{p^\ell}q (e^{q/p}-1) + \frac 1q \left( \frac qp \right)^n (e^{p/q}-1) \right) \\ &\le & \frac{C_0}{1-p^{\ell_0}-q^{\ell_0}} \left( \frac{p^{\ell_0}}q (e^{q/p}-1) + \frac 1q\left( \frac qp \right)^{\ell_0} (e^{p/q}-1) \right) \\ &\le& C_0. \end{eqnarray*} Next we prove the first inequality of (\ref{eqxielln-est}).
Here we fix $\ell_1$ in a way that \[ \frac 1{(\ell_1+1)(1-p^{\ell_1}-q^{\ell_1})} \left( \frac 1{pq} + \frac 1{q^2} \right) \le 1 \] and set $C_1 = C_0 \ell_1!$. Then we automatically have $\xi_\ell(n) \le C_1/\ell!$ for $\ell \le \ell_1$ and $n\ge 1$. Furthermore we obtain by induction for $n>\ell > \ell_1$ \begin{eqnarray*} \xi_\ell(n) &\le& \frac{C_1}{1-p^n-q^n} \left( \sum_{J=1}^{\ell} \frac{p^{\ell-J}q^{J-1}}{J!(\ell+1-J)!} + \sum_{J=1}^{\ell} \frac{p^{\ell+J-n}q^{n-J-1}}{J!(\ell+1-J)!} \right) \\ &\le & \frac{C_1}{(\ell+1)!(1-p^n-q^n)} \\ &\times &\left( \frac 1{pq} \sum_{J=0}^{\ell+1} {\ell+1 \choose J} p^{\ell+1-J}q^{J} + \frac{(q/p)^{n-\ell}}{q^2} \sum_{J=0}^{\ell+1} {\ell+1 \choose J} p^{J}q^{\ell+1-J} \right) \\ &\le & \frac{C_1}{\ell!} \frac 1{(\ell_1+1)(1-p^{\ell_1}-q^{\ell_1})} \left( \frac 1{pq} + \frac{1}{q^2} \right) \\ &\le & \frac{C_1}{\ell!}. \end{eqnarray*} Finally we deal with the second inequality of (\ref{eqxielln-est}). Since \[ \xi_1(n) = \prod_{j> n} \frac{1-p^j-q^j}{1 + (q/p)^{j-2}} \] it is certainly true for $\ell =1$. Now it is an easy exercise to verify it for $\ell=2$, $\ell = 3$ etc. by adapting possibly the implicit constant for each $\ell$. For sufficiently large $\ell \ge \ell_2$ we can do a common inductive step because of the following calculations: \begin{eqnarray*} &&|\xi_\ell(n) - \xi_\ell| \\ &\le & \left| \frac 1{1-p^n-q^n}\sum_{J=1}^\ell \frac{\xi_{\ell+1-J}(n-J)}{J!}q^{-1} p^{\ell-n}(p^{n-J}q^{J} + p^{J}q^{n-J}) - q^{-1}p^{\ell} \sum_{J=1}^\ell \frac{\xi_{\ell+1-J}}{J!} (q/p)^{J} \right| \\ &\le& \left| \frac 1{1-p^n-q^n}\sum_{J=1}^\ell \frac{|\xi_{\ell+1-J}(n-J)- \xi_{\ell+1-J}|}{J!}q^{-1}p^{\ell-n}(p^{n-J}q^{J} + p^{J}q^{n-J}) \right| \\ &+& \left| \frac 1{1-p^n-q^n}\sum_{J=1}^\ell \frac{\xi_{\ell+1-J}}{J!}p^{\ell-J}q^{J-1} - \sum_{J=1}^\ell \frac{\xi_{\ell+1-J}}{J!} p^{\ell-J}q^{J-1} \right| \\ &+& \left| \frac 1{1-p^n-q^n}\sum_{J=1}^\ell \frac{\xi_{\ell+1-J}}{J!}p^{J+\ell-n}q^{n-J-1} \right| \\ &=& D_1 + D_2 + D_3. 
\end{eqnarray*} By assumption we have \[ |\xi_{\ell+1-J}(n-J)-\xi_{\ell+1-J}| \le C(p^{n-\ell-1} + (q/p)^{n-\ell-1})/(\ell-J)!, \] where we can assume without loss of generality that $C\ge 1$. So we can estimate $D_1$ by \begin{eqnarray*} D_1 &\le& \frac C{1-p^n-q^n}\sum_{J=1}^\ell \frac{p^{n-\ell-1} + (q/p)^{n-\ell-1}}{J!(\ell-J)!}q^{-1}p^{\ell-n}(p^{n-J}q^{J} + p^{J}q^{n-J}) \\ &\le & \frac C{\ell!(1-p^n-q^n)} \left( \frac{p^{n-\ell}}{pq} + \frac{p(q/p)^{n-\ell}}{q^2} \right) \left( 1 + (q/p)^{n-\ell} \right) \\ &\le& \frac{C p^{n-\ell}}{(\ell-1)!} \frac 2{pq(1-p^n-q^n)\ell} + \frac{C (q/p)^{n-\ell}}{(\ell-1)!} \frac {2p}{q^2(1-p^n-q^n)\ell}. \end{eqnarray*} Furthermore by using the inequality $\xi_\ell \le C_1/\ell!$ and the assumption $C\ge 1$ we obtain $$ D_2 = \frac{p^n+q^n}{1-p^n-q^n} \xi_\ell \le \frac{C p^{n-\ell}}{(\ell-1)!} \frac {2C_1}{(1-p^n-q^n)\ell} $$ and \begin{eqnarray*} D_3 &\le& \frac {C_1}{1-p^n-q^n} \sum_{J=1}^\ell \frac{p^{-n} q^{n-1} }{J!(\ell-J)!} p^J q^{\ell-J} \\ &\le& \frac{C_1 (q/p)^n}{\ell!\, q(1-p^n-q^n)} \\ &\le & \frac{C (q/p)^{n-\ell}}{(\ell-1)!} \frac {C_1}{q(1-p^n-q^n)\ell}. \end{eqnarray*} Hence, if $\ell \ge \ell_2$, where $\ell_2$ satisfies \[ \frac 1{\ell_2(1-p^{\ell_2}-q^{\ell_2})} \left( \frac 2{pq} + 2C_1 \right) \le 1 \] and \[ \frac 1{\ell_2(1-p^{\ell_2}-q^{\ell_2})} \left( \frac {2p}{q^2} + \frac{C_1}q \right) \le 1 , \] we obtain the second inequality of (\ref{eqxielln-est}) for all $\ell\ge 1$ and $n\ge 1$. \subsection{Asymptotics for $\xi_\ell$} In order to obtain asymptotics for $\xi_\ell$ we use the Poisson transform of $\ell! \xi_{\ell}$ (\emph{not} of $\xi_{\ell}$), which we denote by $\tilde{\xi}(z)$. The functional equation is $$ \tilde{\xi}(z) = \tilde{\xi}(pz) \frac{1-e^{-qz}}{pqz}.
$$ This may be iterated and produces the explicit formula \begin{equation} {\frak{P}}o{\xi}(z) = z\prod_{j=0}^\infty \left( \frac{1 - e^{-qp^jz}}{qp^jz} \right), \label{PoXiFormula} \end{equation} which also shows that $ {\frak{P}}o{\xi}(z)$ is entire. Cauchy's integral formula then gives $$ \ell!\cdot \xi_{\ell} = \frac{1}{2\pi i} \oint_{\mathcal{C}} \frac{e^{z}}{z^\ell} \prod_{j=0}^\infty \left( \frac{1 - e^{-qp^jz}}{qp^jz} \right)\dee{z}, $$ for a simple, closed contour $\mathcal{C}$ encircling the origin. Actually we use the circle contour $|z| = \ell$. It follows then in precisely the same way as in \cite{jacquetszpa1997} that for $\ell \to \infty$, \begin{equation} \xi_{\ell} \sim \frac{ {\frak{P}}o{\xi}(\ell)}{\ell!} = \frac{1}{(\ell-1)!} \exp\left[ \sum_{j=0}^\infty \log\left( \frac{1-e^{-qp^j\ell}}{qp^j\ell} \right) \right]. \label{sum1} \end{equation} Our next task is to justify this step, which can be seen as a depoissonization step. Let us first study the asymptotic behavior of $ {\frak{P}}o{\xi}(\ell)$. We reindex the sum by setting $j = \floor{\log_{1/p} \ell} + J$, so that, for a fixed $J$, $p^j\ell = O(1)$ as $\ell\to\infty$. Then $j = \log_{1/p}\ell + J - \fracpart{\log_{1/p}(\ell)}$, where $\fracpart{x}$ denotes the fractional part of $x$. Defining $\alpha_{\ell} = \fracpart{\log_{1/p}\ell}$, we get $$ {\frak{P}}o{\xi}(\ell)= \ell \exp\left[ \sum_{J=-\floor{\log_{1/p}\ell}}^{\infty} \log\left( \frac{1-e^{-qp^Jp^{-\alpha_\ell}}}{qp^Jp^{-\alpha_\ell}} \right).
\right] $$ The sum then becomes \begin{eqnarray} &&\sum_{J=-\floor{\log_{1/p}\ell}}^{\infty} \log\left( \frac{1-e^{-qp^Jp^{-\alpha_\ell}}}{qp^Jp^{-\alpha_\ell}} \right) = \nonumber \\ &&\sum_{J=0}^\infty \log \left( \frac{1-e^{-qp^Jp^{-\alpha_\ell}}}{qp^Jp^{-\alpha_\ell}} \right) + \sum_{J=1}^\floor{\log_{1/p} \ell} \left[ \log(1-e^{-qp^{-J}p^{-\alpha_\ell}}) - \log q + \alpha_\ell \log p + J\log p \right] \nonumber \\ &\sim& \floor{\log_{1/p} \ell}[\alpha_\ell \log p - \log q] + \floor{\log_{1/p} \ell} (\floor{\log_{1/p}\ell} + 1) \frac{1}{2} \log p \label{nonSummation} \\ &+& \sum_{J=1}^\infty \log(1-e^{-qp^{-J}p^{-\alpha_\ell}}) + \sum_{J=0}^\infty \log \left( \frac{1-e^{-qp^Jp^{-\alpha_\ell}}}{qp^Jp^{-\alpha_\ell}} \right). \nonumber \end{eqnarray} The expression (\ref{nonSummation}) can be rewritten as \begin{eqnarray*} &&(\log_{1/p} \ell - \alpha_\ell)(\alpha_\ell \log p - \log q) + \frac{1}{2}\log p(\log_{1/p} \ell - \alpha_\ell) (\log_{1/p} \ell + 1 - \alpha_\ell) \\ &=& \frac{1}{2}(\log_{1/p} \ell)^2 \log p + (\log_{1/p} \ell) (-\log q + \frac{1}{2}\log p) - \alpha^2_\ell \log p \\ &+& \frac{1}{2}(\log p)\alpha_\ell(\alpha_\ell - 1) + \alpha_\ell \log q, \end{eqnarray*} so that, finally, \begin{eqnarray*} {\frak{P}}o{\xi}(\ell)& =& \ell \prod_{J=0}^\infty \left( \frac{1 - e^{-qp^Jp^{-\alpha_\ell}}}{qp^Jp^{-\alpha_\ell}} \right) \prod_{J=1}^\infty (1-e^{-qp^{-J}p^{-\alpha_\ell}}) \\ & &\qquad \times \exp\left[ -\frac{\log^2 \ell}{2\log(1/p)} \right] \ell^{1/2+\log q / \log p} \exp\left[-\frac{\alpha_\ell(\alpha_\ell + 1)}{2}\log p \right] e^{-\alpha_\ell \log q} \\ &=& \ell^{3/2+\log q / \log p} \exp\left[ -\frac{\log^2 \ell}{2\log(1/p)} \right] \Theta(1). \end{eqnarray*} It remains to check proper growth conditions on $ {\frak{P}}o{\xi}(z)$ which can be directly used to justify the depoissonization step. Actually we show that we have similiar properties as (\ref{eqDep1}) and (\ref{eqDep2}). 
First we show that \begin{equation}\label{eqDep1-new} {\frak{P}}o{\xi}(z) = O\left( |z|^{3/2+\log q / \log p} \exp\left[ -\frac{\log^2 |z|}{2\log(1/p)} \right] \right) \end{equation} uniformly for $ {\mathbb{R}}e(z) \ge \eta$, where $\eta> 0$ is fixed. We set $j' = \lfloor \log |z|/\log(1/p) \rfloor$. Since $(1- e^{-w})/w = 1 + O(w)$ it directly follows that \[ \prod_{j=j'+1}^{\infty} \left( \frac{1 - e^{-qp^jz}}{qp^jz} \right) = O(1). \] Furthermore, if $ {\mathbb{R}}e(z) \ge \eta$ it follows that $|1 - e^{-qp^jz}| \le 1 + e^{-qp^j \eta}$ which implies that $\prod_{j=0}^{j'} \left( {1 - e^{-qp^jz}} \right) = O(1)$. Hence we obtain (\ref{eqDep1-new}): \[ {\frak{P}}o{\xi}(z) = O\left( |z|\, q^{-j'-1} p^{-j'(j'+1)/2} |z|^{-j'}\right) = O\left( |z|^{3/2+\log q / \log p} \exp\left[ -\frac{\log^2 |z|}{2\log(1/p)} \right] \right). \] It also follows (by considering Cauchy's formula as in \cite{jacquetszpa1997}) that all derivatives have similar estimates: \begin{equation}\label{eqDep1-new-der} {\frak{P}}o{\xi}^{(k)}(z) = O\left( k! |z|^{3/2+\log q / \log p-k} \exp\left[ -\frac{\log^2 |z|}{2\log(1/p)} \right] \right) \end{equation} uniformly for $ {\mathbb{R}}e(z) \ge \eta$. It remains to prove a condition of the form (\ref{eqDep2}): \begin{equation}\label{eqDep2-new} |e^{z} {\frak{P}}o{\xi}(z)| \leq Ce^{\alpha|z|}, \end{equation} for some positive constants $C$ and $\alpha$ with $\alpha < 1$, for $\theta\le |\mathrm{Arg}(z)| \le \pi$. We will choose $\theta$ such that $\cos(\theta) < 1/2$, which we require in order to prove the desired bound for $z$ outside the cone but with $ {\mathbb{R}}e(z) > 0$. This can be proved following the steps of \cite{magnerspa2015}, and we leave the details to the reader. \begin{comment} Then the product in (\ref{PoXiFormula}) is bounded as follows: we first split it exactly as above, with the same choice of $j'$. The bounding of the final product is then exactly as before, yielding a factor of $O(1)$.
The initial product may be estimated as follows: \begin{eqnarray*} \left|\prod_{j=0}^{j'} \left( \frac{1 - e^{-qp^j z}}{qp^jz} \right)\right| &=& O(1) \left| \prod_{j=0}^{j'} (1 - e^{-qp^j z}) \right| \\ &\leq& O(1) \prod_{j=0}^{j'} (e^{qp^j | {\mathbb{R}}e(z)|} + 1) \\ &\leq& O(1)2^{j'} \prod_{j=0}^{j'} e^{qp^j | {\mathbb{R}}e(z)|} \\ &<& O(1)2^{j'} \exp\left( q| {\mathbb{R}}e(z)| \sum_{j=0}^\infty p^j \right) \\ &=& O(1)2^{j'} e^{| {\mathbb{R}}e(z)|}. \end{eqnarray*} Here, the first equality follows by bounding the denominator, taking into account our choice of $j'$. The first inequality is by the triangle inequality and upper bounding $- {\mathbb{R}}e(z) \leq | {\mathbb{R}}e(z)|$. The second inequality is from noting that $1 \leq e^{qp^j | {\mathbb{R}}e(z)|}$, then taking the resulting factor of $2^{j'+1}$ out of the product. The third inequality is by extending the sum in the exponent to infinity, and the final equality is using the fact that $\sum_{j=0}^\infty p^j = 1/(1-p)= 1/q$. This upper bound gives us a bound of $$ |e^z|| {\frak{P}}o{\xi}(z)| = e^{ {\mathbb{R}}e(z)} | {\frak{P}}o{\xi}(z)| \leq O(1)|z| 2^{j'} e^{ {\mathbb{R}}e(z) + | {\mathbb{R}}e(z)|}. $$ Now, if $ {\mathbb{R}}e(z) < 0$, then the exponent becomes exactly $0$. If, on the other hand, $ {\mathbb{R}}e(z) > 0$, then we note that $$ {\mathbb{R}}e(z) + | {\mathbb{R}}e(z)| = 2 {\mathbb{R}}e(z) = 2\cos(\mathrm{Arg}(z))|z| \leq 2\cos(\theta)|z| < \alpha'|z|, $$ for some $\alpha' < 1$. Here, the final inequality is by our choice of $\theta$ so that $\cos(\theta) < 1/2$. By our choice of $j'$, $2^{j'}$ is of at most polynomial growth with respect to $|z|$, so that we may choose $\alpha \in (\alpha', 1)$ for which $$ |e^{z} {\frak{P}}o{\xi}(z)| \leq O(1)e^{\alpha|z|}, $$ as desired. \end{comment} Finally by using (\ref{eqDep1-new}) and (\ref{eqDep2-new}) together with the method used in \cite{jacquetszpa1997} we obtain that \begin{eqnarray*} \ell! 
\xi_\ell &=& {\frak{P}}o{\xi}(\ell) + O\left( \ell^{1/2+\log q / \log p} \exp\left[ -\frac{\log^2 \ell}{2\log(1/p)} \right] \right) \\ &= & {\frak{P}}o{\xi}(\ell) \left( 1 + O(\ell^{-1}) \right). \end{eqnarray*} This completes the depoissonization proof for $ {\frak{P}}o{\xi}(z)$. Summing up we obtain \begin{eqnarray*} \xi_\ell(n) &= &\frac{ {\frak{P}}o{\xi}(\ell)}{\ell!} \left( 1 + O(\ell^{-1}) \right)+ O\left( \frac{p^{n-\ell} + (q/p)^{n-\ell}}{(\ell-1)!} \right) \\ &=& \frac{ {\frak{P}}o{\xi}(\ell)}{\ell!} \Theta(1) \end{eqnarray*} if $\log^2 \ell = o(n-\ell)$. Hence, plugging this into (\ref{S_ellDefinition}) and setting $k=n-\ell$, we arrive at \begin{eqnarray*} \mu_{n,k} = \frac{n!}{(n-k)!} p^{k^2/2 + k/2}q^k (n-k)^{3/2+\frac{\log q}{\log p}} \exp\left( -\frac{\log^2(n-k)}{2\log(1/p)} \right) \Theta(1), \end{eqnarray*} if $\log^2(n-k) = o(k)$. \subsection{The Symmetric case $p = q = \frac 12$} If $p = q = \frac 12$ we can proceed almost in the same way as above. Most importantly the recurrence for $\xi_\ell(n)$ simplifies to \[ \xi_\ell(n)(1 - 2^{1-n}) = \sum_{J=1}^\ell \frac{\xi_{\ell+1-J}}{J!} 2^{2-\ell}. \] Similarly to the above it now follows that $\xi_\ell(n) = \xi_\ell + O(2^{-(n-\ell)}/(\ell-1)!)$, where $\xi_\ell = \lim_{n\to\infty} \xi_\ell(n)$. Furthermore the Poisson generating function $ {\frak{P}}o{\xi}(z)$ of $\ell! \xi_\ell$ is now given by \[ {\frak{P}}o{\xi}(z) = z^2 \prod_{j=1}^\infty \frac{1-e^{-z/2^j}}{z/2^j}. \] By using the same techniques as above it, thus, follows that \[ \xi_\ell = \frac{\ell^{5/2}}{\ell !} \exp\left( -\frac{\log^2\ell}{2\log 2} \right) \Theta(1) \] and consequently \begin{eqnarray*} \mu_{n,k} = \frac{n!}{(n-k)!} 2^{-k^2/2 - 3k/2} (n-k)^{5/2} \exp\left( -\frac{\log^2(n-k)}{2\log 2} \right) \Theta(1), \end{eqnarray*} if $\log^2(n-k) = o(k)$. This is consistent with the case $p > q$. \subsection{Uniform bounds for $p\to 1$} We finally provide upper bounds for $\xi_\ell$ that are uniform as $p\to 1$.
(Actually we show more or less that (\ref{sum1}) gives also an upper bound.) For convenience we work with $\xi_{\ell+1}$ instead of $\xi_\ell$ and recall that the generating function is given by \[ X(z) = \sum_{\ell \ge 0} \xi_\ell z^\ell = \prod_{j\ge 0} \frac{e^{qp^jz} -1}{qp^j z}. \] First suppose that $|z| \le 1/(1-p) = 1/q$. Then we have $|qp^j z|\le 1$ for all $j\ge 0$ and by using the approximation $e^x = 1 + x + x^2/2 + O(x^3)$ we directly obtain the uniform representation \begin{equation}\label{eqxiappr0} X(z) = \sum_{\ell \ge 0} \xi_\ell z^\ell = e^{z/2 + O(qz^2)}. \qquad (|qz| \le 1) \end{equation} This representation can be also used to obtain asymptotics for $\xi_{\ell+1} = \xi_{\ell+1}(p)$ if $\ell \le 1/(1-p) = 1/q$. We just apply a usual saddle point asymptotics on the circle $|z| = \ell$ on the Cauchy integral \[ \xi_{\ell +1} = \frac 1{2\pi i} \int_{|z|= \ell} X(z) z^{-\ell-1}\, dz \] to obtain \begin{equation}\label{eqxiappr} \xi_{\ell+1}(p) = \frac 1{2^{\ell(1+O(q\ell))} \ell!} \qquad (\ell \le 1/q). \end{equation} In particular if $\ell$ is fixed then (\ref{eqxiappr}) implies that $\xi_{\ell+1}(1) = 1/(2^\ell \ell!)$. If $|zq|\ge 1$ then the situation is different. Here we restrict to the case of positive real $z$, since this is sufficient to obtain upper bounds. We define $j_0$ by \[ j_0 = \left\lceil \frac{\log(q z)}{\log(1/p)} \right\rceil \] and distinguish between the case $j\ge j_0$ and $j< j_0$. In the first case we have $qp^j z \le 1$ so that \[ \prod_{j\ge j_0} \frac{e^{qp^jz} -1}{qp^j z} = e^{p^{j_0} z/2 + O(p^{2j_0} q z^2) } = e^{O(1/q)}. \] We note that the implicit constant can be chosen to be arbitrarily close to $\frac 12 + \frac 16 = \frac 23$.
For the remaining product we have \[ \prod_{j < j_0} \frac{e^{qp^jz} -1}{qp^j z} \le \prod_{j < j_0} \frac{e^{qp^jz}}{qp^j z} \le \frac{ e^{z(1-p^{j_0})}}{(qz)^{j_0} p^{j_0(j_0-1)/2} } \le e^{z - \frac 1q - \frac{\log^2(qz)}{2\log(1/p)}} \] Thus we obtain \begin{equation}\label{eqxiappr2} X(z) \le e^{z - \frac{\log^2(qz)}{2\log(1/p)}}. \qquad (qz > 1). \end{equation} From this estimate we obtain, by the way, a uniform estimate for \begin{equation}\label{eqxiappr3} \xi_{\ell+1} \le \frac {X(\ell)}{\ell^\ell} \le \frac{C}{\ell!} \end{equation} uniformly for $\frac 12 \le p \le 1$. \section{Precise Analysis of the Lower Bound} We present here a detailed and precise analysis for the lower bound of $\tilde G_k(n)$, where $k = \log_{1/p} \log n + {\frak{P}}si_L(n)$ with $ {\frak{P}}si_L(n) = \frac 12 (1- \epsilon) \log_{p/q} \log n$. We recall that $j_0 = \lfloor j^* +\frac 12\rfloor$ is defined as the closest integer to $j^*$, where $j^*$ is the solution of the equation \[ (q/p)^{j^*} (k - j^*) = \frac{\log (1/p)}{\log(p/q)} (j^* - {\frak{P}}si_L(n)). \] Furthermore we set \[ \overline r_0 = (q/p)^{j_0} (k - j_0) \quad \mbox{and} \quad \overline r_1 = \frac{\log (1/p)}{\log(p/q)} (j_0 - {\frak{P}}si_L(n)). \] and \[ r_1(j) = \frac{\log (1/p)}{\log(p/q)} (j - {\frak{P}}si_L(n)) = \overline r_1 + \frac{\log (1/p)}{\log(p/q)}(j-j_0). \] Note that $\sqrt{q/p} \le \overline r_0 / \overline r_1 \le \sqrt{p/q}$ and that \[ n p^{k-j} (q/p)^{r_1(j)} = p^{ {\frak{P}}si_L(n) - j} (q/p)^{r_1(j)} = 1. \] We will also make use of the following abbreviation \begin{equation}\label{eqF0} F_0 := p^{j_0(j_0+1)/2} q^{j_0-1} n^{j_0} p^{j_0(k-j_0)} \frac{ \overline r_0^{\overline r_1}} {\Gamma(\overline r_1 +1)}.
\end{equation} Next we recall that we represent $\tilde G_k(n)$ (see (\ref{G_kExplicitFormHLowerBound-2})) by \[ {\frak{P}}o{G}_k(n) = \sum_{j=0}^k \sum_{m \ge j} \overline\kappa_{m,j} \overline \nu_{m,j} + O\left( n^{j_0} T(-j_0)^{k-j_0}p^{j_0(j_0+1)/2}q^{j_0} \left( p^{j_0} + (q/p)^{j_0} \right) \right), \] where \[ \overline \nu_{m,j} := - C_*(p) m! p^{j(j-1)/2} q^{j-1} \xi_{m-j+1} . \] and \begin{eqnarray*} \overline \kappa_{m,j} = \frac{p^m n^m}{m!} \sum_{r=0}^{k-j} {k-j \choose r} p^{m(k-j-r)} q^{mr} \left( e^{-np^{k-j-r}q^r} - \sum_{\ell\le j_0-m} \frac{(-n)^\ell}{\ell!} (p^{k-j-r}q^{r})^\ell \right) \end{eqnarray*} for $j\le j_0$ and $j\le m\le j_0$ and otherwise \begin{eqnarray*} \overline \kappa_{m,j} = \frac{p^m n^m}{m!} \sum_{r=0}^{k-j_0} {k-j \choose r} p^{m(k-j-r)} q^{mr)} e^{-np^{k-j-r}q^r}. \end{eqnarray*} We also split the above into several parts: \[ T_1 := \sum_{j> j_0} \sum_{m\ge j} \overline\kappa_{m,j} \overline \nu_{m,j}, \quad T_2 := \sum_{j\le j_0} \sum_{m > j_0} \overline \kappa_{m,j} \overline \nu_{m,j}, \quad T_3 := \sum_{j\le j_0} \sum_{m=j}^{j_0} \overline \kappa_{m,j} \overline \nu_{m,j}. \] Moreover we note that the exponential function $e^{-np^{k-j-r}q^r} = e^{-(q/p)^{r-r_1(j)}}$ behaves completely differently for $r\le r_1(j)$ and for $r> r_1(j)$ where $r_1(j) = (j- \psi_(n))\frac{\log(1/p)}{\log(p/q)}$. Hence it is convenient to split $T_3$ into three parts $T_{30} + T_{31}+T_{32}$, where the $T_{30}$ and $T_{31}$ correspond to the terms with $r\le r_1(j)$ and $T_{32}$ for those with $r> r_1(j)$. $T_{30}$ involves the exponential function $e^{-np^{k-j-r}q^r}$ whereas $T_{31}$ takes care of the polynomial sum $\sum_{\ell\le j_0-m} \frac{(-n)^\ell}{\ell!} (p^{k-j-r}q^{r})^\ell$. What remains is a more detailed analysis of the sums $T_1,T_2,T_{30}, T_{31}, T_{32}$. 
\subsection{Representation of the terms $T_1,T_2,T_{30}, T_{31}, T_{32}$} \subsubsection{The term $T_1$.} We recall that \begin{eqnarray*} T_1 &=& \sum_{j> j_0} \sum_{m\ge j} \overline\kappa_{m,j} \overline \nu_{m,j} \\ &=& - C_*(p) \sum_{j> j_0} \sum_{m\ge j} p^{j(j-1)/2} q^{j-1} \xi_{m-j+1} p^m n^m \sum_{r=0}^{k-j} {k-j \choose r} p^{m(k-j-r)} q^{mr} e^{-n p^{k-j-r} q^r}. \end{eqnarray*} We now use the substitutions $j=j_0 + J$ and $m = j+ L = j_0 + J + L$, where $J > 0$ and $L\ge 0$. Furthermore by using the approximation ${k-j \choose r} \sim (k-j)^r/r! \sim (k-j_0)^r/r!$ we obtain \begin{eqnarray*} T_1 &\sim & - C_*(p) p^{j_0(j_0+1)/2} q^{j_0-1} n^{j_0} p^{j_0(k-j_0)} \sum_{J > 0} \sum_{L\ge 0} p^{J (J+1)/2} q^{J} \xi_{L+1} p^L \\ &&\qquad \times \sum_{r} \frac{ {\overline r_0}^r } {r!} (q/p)^{(L+J)(r-r_1(j))} e^{-(q/p)^{r-r_1(j)}} \\ &\sim& - C_*(p)F_0 \cdot\sum_{J > 0} p^{J (J+1)/2} q^{J} \left( \frac {\overline r_0}{\overline r_1} \right)^{J \frac{\log(1/p)}{\log(p/q)}} \sum_{L\ge 0} \xi_{L+1} p^L \\ &&\qquad \times \sum_r (q/p)^{(L+J)(r-r_1(j))} \left( \frac {\overline r_0}{\overline r_1} \right)^{r-r_1(j)} e^{-(q/p)^{r-r_1(j)}}, \end{eqnarray*} where $F_0$ is given in (\ref{eqF0}). Thus, if we define (with the implicit notation $q = 1-p$) \begin{eqnarray} C_1(p,u,v) &=& \sum_{J > 0} p^{J (J+1)/2} q^{J} u^{J \frac{\log(1/p)}{\log(p/q)}} \sum_{L\ge 0} \xi_{L+1} p^L \label{eqC1puv} \\ &&\qquad \times \sum_{R\in \mathbb{Z}} \left( (q/p)^{(L+J)} u \right)^{R - v -J\frac{\log(1/p)}{\log(p/q)}} e^{-(q/p)^{R - v -J\frac{\log(1/p)}{\log(p/q)}}} \nonumber \end{eqnarray} we obtain \[ T_1 \sim - C_*(p)\,F_0\, C_1\left( p, \frac{\overline r_0}{\overline r_1}, \langle \overline r_1 \rangle \right), \] where $\langle x \rangle = x - \lfloor x \rfloor$ denotes the fractional part of a real number $x$.
Note that we have substituted $r-r_1(j)$ by \begin{eqnarray*} r - r_1(j) &=& (r-\lfloor \overline r_1 \rfloor) - \langle \overline r_1 \rangle + (\overline r_1 - r_1(j)) \\ &=& R - v - J\frac{\log(1/p)}{\log(p/q)}. \end{eqnarray*} \subsubsection{The term $T_2$.} The term $T_2$ can be handled almost in the same way as $T_1$. By using the representation \begin{eqnarray*} T_2 &=& \sum_{j\le j_0} \sum_{m > j_0} \overline\kappa_{m,j} \overline \nu_{m,j} \\ &=& - C_*(p) \sum_{j\le j_0} \sum_{m > j_0} p^{j(j-1)/2} q^{j-1} \xi_{m-j+1} p^m n^m \sum_{r=0}^{k-j} {k-j \choose r} p^{m(k-j-r)} q^{mr} e^{-n p^{k-j-r} q^r} \end{eqnarray*} and the same substitutions as above, $j = j_0+J$, $m = j+ L = j_0 + J + L$, where $J \le 0$ and $L\ge -J$, we obtain \[ T_2 \sim - C_*(p)\,F_0\, C_2\left( p, \frac{\overline r_0}{\overline r_1}, \langle \overline r_1 \rangle \right), \] where \begin{eqnarray} C_2(p,u,v) &=& \sum_{J \le 0} p^{J (J+1)/2} q^{J} u^{J \frac{\log(1/p)}{\log(p/q)}} \sum_{L> -J} \xi_{L+1} p^L \label{eqC2puv} \\ &&\qquad \times \sum_{R\in \mathbb{Z}} \left( (q/p)^{(L+J)} u \right)^{R - v -J\frac{\log(1/p)}{\log(p/q)}} e^{-(q/p)^{R - v -J\frac{\log(1/p)}{\log(p/q)}}}. \nonumber \end{eqnarray} \subsubsection{The term $T_{30}$.} Next we consider $T_{30}$, which is given by \begin{eqnarray*} T_{30} &=& - C_*(p) \sum_{j\le j_0} \sum_{m= j}^{j_0} p^{j(j-1)/2} q^{j-1} \xi_{m-j+1} p^m n^m \sum_{r\le r_1(j)}^{k-j} {k-j \choose r} p^{m(k-j-r)} q^{mr} e^{-n p^{k-j-r} q^r}.
\end{eqnarray*} Here we obtain (again with the same substitutions as above) \[ T_{30} \sim - C_*(p)\,F_0\, C_{30}\left( p, \frac{\overline r_0}{\overline r_1}, \langle \overline r_1 \rangle \right), \] where \begin{eqnarray} C_{30}(p,u,v) &=& \sum_{J \le 0} p^{J (J+1)/2} q^{J} u^{J \frac{\log(1/p)}{\log(p/q)}} \sum_{L=0}^{-J} \xi_{L+1} p^L \label{eqC30puv} \\ &&\qquad \times \sum_{R\in \mathbb{Z}, R - v -J\frac{\log(1/p)}{\log(p/q)}\le 0 } \left( (q/p)^{(L+J)} u \right)^{R - v -J\frac{\log(1/p)}{\log(p/q)}} e^{-(q/p)^{R - v -J\frac{\log(1/p)}{\log(p/q)}}}. \nonumber \end{eqnarray} \subsubsection{The term $T_{32}$.} The term $T_{32}$ is given by \begin{eqnarray*} T_{32} &=& - C_*(p) \sum_{j\le j_0} \sum_{m= j}^{j_0} p^{j(j-1)/2} q^{j-1} \xi_{m-j+1} p^m n^m \sum_{r > r_1(j)}^{k-j} {k-j \choose r} p^{m(k-j-r)} q^{mr} \\ && \qquad \qquad \times \left( e^{-n p^{k-j-r} q^r} - \sum_{\ell\le j_0-m} \frac{(-n p^{k-j-r} q^r)^\ell}{\ell !} \right). \end{eqnarray*} Here we get \[ T_{32} \sim - C_*(p)\,F_0\, C_{32}\left( p, \frac{\overline r_0}{\overline r_1}, \langle \overline r_1 \rangle \right), \] where \begin{eqnarray} C_{32}(p,u,v) &=& \sum_{J \le 0} p^{J (J+1)/2} q^{J} u^{J \frac{\log(1/p)}{\log(p/q)}} \sum_{L=0}^{-J} \xi_{L+1} p^L \label{eqC32puv} \\ &&\qquad \times \sum_{R\in \mathbb{Z}, R - v -J\frac{\log(1/p)}{\log(p/q)} > 0 } \left( (q/p)^{(L+J)} u \right)^{R - v -J\frac{\log(1/p)}{\log(p/q)}} \\ && \qquad \qquad \times \left( e^{-(q/p)^{R - v -J\frac{\log(1/p)}{\log(p/q)}}} - \sum_{\ell=0}^{-J-L} \frac{(-1)^\ell}{\ell!} (q/p)^{(R - v -J\frac{\log(1/p)}{\log(p/q)})\ell} \right) \nonumber \end{eqnarray} \subsubsection{The term $T_{31}$.} The term $T_{31}$ is the most interesting one. It can be written as \[ T_{31} = \sum_{K\ge 0} S_K.
\] where we have to distinguish between the term $S_0$ and the terms $S_K$ for $K\ge 1$. The term $S_0$ is given by \begin{eqnarray*} S_0&=& - C_*(p)p^{j_0(j_0+1)/2} q^{j_0-1} n^{j_0} p^{j_0(k-j_0)} \sum_{L,M \ge 0} \xi_{L+1}\frac{(-1)^{M}}{M!} p^{((L+M)^2 + L-M)/2} q^{-L-M} \\ && \qquad \times \sum_{r_1(j_0-M-L)< r \le \overline r_1} {k-j_0+L+M \choose r } \left( \frac qp \right)^{j_0r}. \end{eqnarray*} Since \[ \sum_{r_1(j_0-M-L)< r \le \overline r_1} {k-j_0+L+M \choose r } \left( \frac qp \right)^{j_0r} \sim \frac{ \overline r_0^{\overline r_1}} {\Gamma(\overline r_1 +1)} \sum_{-(M+L) \frac{\log(1/p)}{\log(p/q)} \le r-\overline r_1 \le 0 } \left( \frac{\overline r_0}{\overline r_1} \right)^{r-\overline r_1} \] we, thus, obtain the representation \[ S_0 \sim - C_*(p)\,F_0\, C_{31,0}\left( p, \frac{\overline r_0}{\overline r_1}, \langle \overline r_1 \rangle \right), \] where \begin{eqnarray} C_{31,0}(p,u,v) &=& \sum_{L,M \ge 0} \xi_{L+1}\frac{(-1)^{M}}{M!} p^{((L+M)^2 + L-M)/2} q^{-L-M} \label{eqC310puv} \\ &&\qquad \times \sum_{-(M+L) \frac{\log(1/p)}{\log(p/q)} +v \le R \le 0 } u^{R-v}. \nonumber \end{eqnarray} It is also convenient to rewrite this as a sum over $J = -M-L\le 0$ and $0\le L \le -J$: \begin{eqnarray} C_{31,0}(p,u,v) &=& \sum_{J\le 0}\sum_{L=0}^{-J} \xi_{L+1}\frac{(-1)^{-J-L}}{(-J-L)!} p^{ J(J+1)/2 + L} q^{J} \label{eqC310puv-2} \\ &&\qquad \times \sum_{ J \frac{\log(1/p)}{\log(p/q)} +v \le R \le 0 } u^{R-v}. \nonumber \end{eqnarray} For $K\ge 1$ the terms $S_K$ are given by \begin{eqnarray*} S_K&=& C_*(p)p^{j_0(j_0+1)/2} q^{j_0-1} n^{j_0} p^{j_0(k-j_0)} \left( \frac qp \right)^{K\overline r_1} \\ &&\qquad \times \sum_{L\ge 0, \, M\ge K} \xi_{L+1}\frac{(-1)^{M-K}}{(M-K)!} p^{((L+M)^2 + L-M)/2-K(L+M)} q^{-L-M} \\ &&\qquad \qquad \times \sum_{r\le r_1(j_0-M-L)} {k-j_0+M+L \choose r } \left( \frac qp \right)^{(j_0-K)r}.
\end{eqnarray*} The sum over $r$ together with the factor $\left( q/p \right)^{K\overline r_1}$ can be approximated by \begin{eqnarray*} && \left( \frac qp \right)^{K\overline r_1} \sum_{r\le r_1(j_0-M-L)} {k-j_0+M+L \choose r } \left( \frac qp \right)^{(j_0-K)r} \\ &&\sim \frac{ \overline r_0^{\overline r_1}} {\Gamma(\overline r_1 +1)} \sum_{r\le \overline r_1 - (M+L) \frac{\log(1/p)}{\log(p/q)} } \left( \frac{\overline r_0}{\overline r_1} \right)^{r-\overline r_1} \left( \frac qp \right)^{-K(r-\overline r_1)}. \end{eqnarray*} Thus, we have for $K\ge 1$ \[ S_K \sim C_*(p)\,F_0\, C_{31,K}\left( p, \frac{\overline r_0}{\overline r_1}, \langle \overline r_1 \rangle \right), \] where \begin{eqnarray} C_{31,K}(p,u,v) &=& \sum_{L\ge 0, \, M\ge K} \xi_{L+1}\frac{(-1)^{M-K}}{(M-K)!} p^{((L+M)^2 + L-M)/2-K(L+M)} q^{-L-M} \nonumber \\ &&\qquad \times \sum_{R\le v - (M+L) \frac{\log(1/p)}{\log(p/q)} } \left( u \left(\frac qp\right)^{-K} \right)^{R-v}. \label{eqC31Kpuv} \end{eqnarray} As above we can rewrite this as a sum over $J\le -K$ and $0\le L \le -J-K$: \begin{eqnarray} C_{31,K}(p,u,v) &=& \sum_{J\le -K} \sum_{L=0}^{-J-K} \xi_{L+1}\frac{(-1)^{-J-L-K}}{(-J-L-K)!} p^{J(J+1)/2 + L +JK} q^{J} \nonumber \\ &&\qquad \times \sum_{R\le v+J \frac{\log(1/p)}{\log(p/q)} } \left( u \left(\frac qp\right)^{-K} \right)^{R-v}. \label{eqC31Kpuv-2} \end{eqnarray} \subsection{Asymptotic analysis for $p\to \frac 12$.} If $p\to \frac 12 $ then $q/p \to 1$ and we can write \[ \frac qp = e^{-\eta}, \] where $\eta \to 0$. Note that $p> \frac 12$ implies that $\eta > 0$. Furthermore, we use the abbreviation $u = \overline r_0/\overline r_1$ and $v = \{ \overline r_1\}$. Note that $ e^{-\eta/2}= \sqrt{q/p}\le u \le \sqrt{p/q} = e^{\eta/2}$ or equivalently $-\frac 12 \le \frac 1\eta \log u \le \frac 12$. We will therefore also use the abbreviation $\tilde u = \frac 1\eta \log u$. Finally we mention that $\xi_\ell = \xi_\ell(p)$ depends smoothly on $p \in [\frac 12, 1]$.
Furthermore we have a uniform upper bound $|\xi_\ell(p)|\le C/(\ell-1)!$. \subsubsection{The term $T_1$.} Since the factors $C_*(p)$ and $F_0$ are present in all terms it suffices to study the sum $C_1(p,u,v)$ (defined in (\ref{eqC1puv})) in order to study the asymptotic behavior of $T_1$. We start with the sum over $R$. The first observation is that for $\eta \to 0$ we can replace the sum by an integral, that is, we have for fixed integers $L,J$, as $\eta\to 0$, \begin{eqnarray*} && \sum_{R\in \mathbb{Z}} \left( (q/p)^{(L+J)} u \right)^{R - v -J\frac{\log(1/p)}{\log(p/q)}} e^{-(q/p)^{R - v -J\frac{\log(1/p)}{\log(p/q)}}} \\ &&\sim \int_{-\infty}^\infty \left( (q/p)^{(L+J)} u \right)^{t} e^{-(q/p)^{t}} \, dt \\ && = \frac 1\eta \int_{-\infty}^\infty e^{-\left(L+J - \tilde u \right) t} e^{-e^{-t}}\, dt. \end{eqnarray*} This also implies that the leading asymptotic term does not depend on $v = \{ \overline r_1\}$. Further note that $\tilde M = L+J - \frac 1\eta \log u = L+J - \tilde u \ge \frac 12$ so that the integral converges and by using the substitution $w = e^{-t}$ we obtain \[ \int_{-\infty}^\infty e^{-\tilde M t} e^{-e^{-t}}\, dt = \int_0^\infty w^{\tilde M -1} e^{-w}\, dw = \Gamma(\tilde M). \] This finally shows that, as $p\to \frac 12$ (or equivalently as $\eta = \log(p/q) \to 0$), \begin{equation}\label{eqC1asymp} C_1(p,u,v) \sim \frac 1\eta \sum_{J > 0} 2^{-J (J+1)/2 - J + J\tilde u} \sum_{L\ge 0} \xi_{L+1}(1/2)\, 2^{-L}\, \Gamma\left( J + L - \tilde u\right). \end{equation} \subsubsection{The term $T_2$.} Here we analyze the term $C_2(p,u,v)$ (defined in (\ref{eqC2puv})) which looks very similar to $C_1(p,u,v)$, and in fact we can do almost the same considerations. First of all we again have that $J+L - \frac 1\eta \log u > 0$. Thus, the appearing sums and integrals are convergent.
This finally shows that, as $p\to \frac 12$ (or equivalently as $\eta = \log(p/q) \to 0$), \begin{equation}\label{eqC2asymp} C_2(p,u,v) \sim \frac 1\eta \sum_{J \le 0} 2^{-J (J+1)/2 - J + J\tilde u} \sum_{L > -J} \xi_{L+1}(1/2)\, 2^{-L}\, \Gamma\left( J + L - \tilde u\right). \end{equation} \subsubsection{The term $T_{30}$.} The representation of $C_{30}(p,u,v)$ (given in (\ref{eqC30puv})) is quite similar to $C_1(p,u,v)$ or $C_2(p,u,v)$. The main difference is that $L+J \le 0$ and that the sum over $R$ can now be approximated by the incomplete Gamma function: \begin{eqnarray*} && \sum_{R\in \mathbb{Z}, R - v -J\frac{\log(1/p)}{\log(p/q)}\le 0 } \left( (q/p)^{(L+J)} u \right)^{R - v -J\frac{\log(1/p)}{\log(p/q)}} e^{-(q/p)^{R - v -J\frac{\log(1/p)}{\log(p/q)}}} \\ && \qquad \qquad \sim \int_{-\infty}^0 \left( (q/p)^{(L+J)} u \right)^{t} e^{-(q/p)^{t}} \, dt \\ && \qquad \qquad = \frac 1\eta \int_{-\infty}^0 e^{-\left(L+J - \tilde u\right) t} e^{-e^{-t}}\, dt \\ && \qquad \qquad = \frac 1\eta \int_1^\infty w^{L+J - \tilde u} e^{-w}\, dw \\ && \qquad \qquad = \frac 1\eta \Gamma( L+J - \tilde u, 1), \end{eqnarray*} where \[ \Gamma(s,z) := \int_z^\infty w^{s-1} e^{-w}\, dw. \] This leads to \begin{equation}\label{eqC30asymp} C_{30}(p,u,v) \sim \frac 1\eta \sum_{J \le 0} 2^{-J (J+1)/2 - J + J\tilde u} \sum_{L =0}^{-J} \xi_{L+1}(1/2)\, 2^{-L}\, \Gamma\left( J + L - \tilde u, 1\right). \end{equation} \subsubsection{The term $T_{32}$.} We start again with the analysis of the $R$-sum. 
We note that $L+J\le 0$ so that we obtain \begin{eqnarray*} &&\sum_{R\in \mathbb{Z}, R - v -J\frac{\log(1/p)}{\log(p/q)} > 0 } \left( (q/p)^{(L+J)} u \right)^{R - v -J\frac{\log(1/p)}{\log(p/q)}} \\ && \qquad \qquad \times \left( e^{-(q/p)^{R - v -J\frac{\log(1/p)}{\log(p/q)}}} - \sum_{\ell=0}^{-J-L} \frac{(-1)^\ell}{\ell!} (q/p)^{(R - v -J\frac{\log(1/p)}{\log(p/q)})\ell} \right) \\ && = \sum_{R\in \mathbb{Z}, R - v -J\frac{\log(1/p)}{\log(p/q)} > 0 } \left( (q/p)^{(L+J-\tilde u)} \right)^{R - v -J\frac{\log(1/p)}{\log(p/q)}} \\ && \qquad \qquad \times \sum_{\ell\ge -J-L+1} \frac{(-1)^\ell}{\ell!} (q/p)^{(R - v -J\frac{\log(1/p)}{\log(p/q)})\ell} \\ &&\sim \sum_{\ell\ge -J-L+1} \frac{(-1)^\ell}{\ell!} \int_0^\infty e^{-\eta (\ell + J+L - \tilde u) t}\, dt \\ && = \sum_{\ell\ge -J-L+1} \frac{(-1)^\ell}{\ell!} \frac 1{\ell + J+L - \tilde u} \end{eqnarray*} This leads to \begin{equation}\label{eqC32asymp} C_{32}(p,u,v) \sim \frac 1\eta \sum_{J \le 0} 2^{-J (J+1)/2 - J + J\tilde u} \sum_{L =0}^{-J} \xi_{L+1}(1/2)\, 2^{-L} \sum_{\ell\ge -J-L+1} \frac{(-1)^\ell}{\ell!} \frac 1{\ell + J+L - \tilde u}. \end{equation} \subsubsection{The term $T_{31}$.} We consider first the behavior of $C_{31,0}(p,u,v)$ (that is given in (\ref{eqC310puv})). Since \[ \sum_{-(M+L) \frac{\log(1/p)}{\log(p/q)} +v \le R \le 0 } u^{R-v} \sim \frac{1 - 2^{-(M+L)\tilde u}}{\eta\, \tilde u} \] we directly obtain \begin{equation}\label{eqC310asymp} C_{31,0}(p,u,v) \sim \frac 1\eta \sum_{L,M \ge 0} \xi_{L+1}(1/2) \frac{(-1)^{M}}{M!} 2^{-(L+M)^2/2 +L/2 + 3M/2} \frac{1 - 2^{-(M+L)\tilde u}}{\tilde u}. \end{equation} As above it also convenient to rewrite the resulting sum into a sum in $J = -L-M \le 0$ and $0\le L \le -J$: \begin{equation}\label{eqC310asymp-2} C_{31,0}(p,u,v) \sim \frac 1\eta \sum_{J\le 0,\, 0\le L \le -J} \xi_{L+1}(1/2) \frac{(-1)^{-J-L}}{(-J-L)!} 2^{-J(J+1)/2 -J-L} \frac{1 - 2^{J\tilde u}}{\tilde u}. \end{equation} Next suppose that $K\ge 1$. 
The terms $C_{31,K}(p,u,v)$ are given in (\ref{eqC31Kpuv}). Since \[ \sum_{R\le v - (M+L) \frac{\log(1/p)}{\log(p/q)} } \left( u \left(\frac qp\right)^{-K} \right)^{R-v} \sim \frac{p^{K(M+L)} p^{(M+L)\tilde u}}{\eta(K+\tilde u)} \] it follows that \begin{eqnarray} && C_{31,K}(p,u,v) \label{eqC31Kasymp} \\ && \sim \frac 1{\eta(K+\tilde u)} \sum_{L\ge 0, \, M\ge K} \xi_{L+1}(1/2) \frac{(-1)^{M-K}}{(M-K)!} 2^{-(L+M)^2/2 + L/2+3M/2- (M+L)\tilde u } \nonumber \end{eqnarray} and consequently \begin{eqnarray} && \sum_{K\ge 1} C_{31,K}(p,u,v) \label{eqC31asymp} \\ && \sim \frac 1{\eta} \sum_{L\ge 0, \, M\ge 1} \xi_{L+1}(1/2) \, 2^{-(L+M)^2/2 + L/2 + 3M/2- (M+L)\tilde u } \sum_{K=1}^M \frac{(-1)^{M-K}}{(M-K)!(K+\tilde u)} \nonumber \end{eqnarray} Of course we can rewrite the resulting sum as a sum over $J,L$: \begin{eqnarray} && \sum_{K\ge 1} C_{31,K}(p,u,v) \label{eqC31asymp-2} \\ && \sim \frac 1{\eta}\sum_{J\le -1, 0\le L\le -J-1} \xi_{L+1}(1/2)\, 2^{-J(J+1)/2 -J -L +J\tilde u } \sum_{K=1}^{-J-L} \frac{(-1)^{-J-L-K}}{(-J-L-K)!(K+\tilde u)} \nonumber \end{eqnarray} \subsubsection{The full behavior for $p\to \frac 12$.} Summing up it follows that \begin{eqnarray*} \tilde G_k(n) &=& T_1 + T_2 + T_{30} + T_{32} + \sum_{K\ge 0} S_K \\ &\sim & C_*(p)\,F_0\, ( - C_1 - C_2 - C_{30} - C_{32} - C_{31,0} + \sum_{K\ge 1} C_{31,K} ), \end{eqnarray*} where, as $p\to \frac 12$ or equivalently as $\eta \to 0$, all the terms $C_1(p,u,v)$, $C_2(p,u,v)$, $C_{30}(p,u,v)$, $C_{32}(p,u,v)$, $C_{31,K}(p,u,v)$ ($K\ge 0$) behave as $1/\eta$ with some factor that only depends on $\tilde u$. This means that we have \[ - C_1 - C_2 - C_{30} - C_{32} - C_{31,0} + \sum_{K\ge 1} C_{31,K} \sim \frac 1\eta h_1(\tilde u) \] for some explicit function $h_1(\tilde u)$ that collects the contributions from the asymptotic relations (\ref{eqC1asymp}), (\ref{eqC2asymp}), (\ref{eqC30asymp}), (\ref{eqC32asymp}), (\ref{eqC310asymp}), (\ref{eqC31asymp}).
It remains to check that $h_1(\tilde u)$ stays positive for all $\tilde u \in [-\frac 12, \frac 12]$. It is clear that the dependence on $\tilde u$ is smooth in all appearing terms and that the negative derivative with respect to $\tilde u$ can be uniformly upper bounded, as seen in the following table of derivatives. \begin{center} \begin{tabular}{|l|l|l|l|} \hline $\tilde{u}$ & $h_1(\tilde{u})$ & $h_1(\tilde{u}) / h'_1(\tilde{u})$ & $h'_1(\tilde{u})$ \\ \hline $-0.50$ & $1.37683018271327$ & $-0.722028017914153$ & $-1.90689301322511$ \\ $-0.40$ & $1.20276152989834$ & $-0.760013160991751$ & $-1.58255355516324$ \\ $-0.30$ & $1.05800806833013$ & $-0.802220048867141$ & $-1.31885019555944$ \\ $-0.20$ & $0.937149181875061$ & $-0.849393914518373$ & $-1.10331515902895$ \\ $-0.10$ & $0.835870082265573$ & $-0.902466357406207$ & $-0.926206362603876$ \\ $0.00$ & $0.358367943474688$ & $0.000915198138561305$ & $391.574161239056$ \\ $0.10$ & $0.678937477362699$ & $-1.03136470454952$ & $-0.658290393657834$ \\ $0.20$ & $0.618287879529247$ & $-1.11069248821028$ & $-0.556668822461859$ \\ $0.30$ & $0.566972485392761$ & $-1.20324708128446$ & $-0.471202045042585$ \\ $0.40$ & $0.523532363681955$ & $-1.31263743584976$ & $-0.398840037152404$ \\ $0.50$ & $0.486782979369433$ & $-1.44391680699806$ & $-0.337126749276828$ \\ \hline \end{tabular} \end{center} Hence it sufficient to check positivity in a sufficiently fine grid which can be easily performed. 
The following table gives some sample values: \begin{center} \begin{tabular}{|l|l|l|} \hline $p$ & $\tilde{u}$ & $h_1(\tilde{u})$ \\ \hline $0.50$ & $-0.50$ & $1.37683018271327$ \\ $0.50$ & $-0.45$ & $1.28574151187623$ \\ $0.50$ & $-0.40$ & $1.20276152989834$ \\ $0.50$ & $-0.35$ & $1.12708836544424$ \\ $0.50$ & $-0.30$ & $1.05800806833013$ \\ $0.50$ & $-0.25$ & $0.994884277261959$ \\ $0.50$ & $-0.20$ & $0.937149181875062$ \\ $0.50$ & $-0.15$ & $0.884295608451989$ \\ $0.50$ & $-0.10$ & $0.835870082265572$ \\ $0.50$ & $-0.05$ & $0.791466739676032$ \\ $0.50$ & $0.00$ & $0.580594753668194$ \\ $0.50$ & $0.05$ & $0.713309765274110$ \\ $0.50$ & $0.10$ & $0.678937477362699$ \\ $0.50$ & $0.15$ & $0.647342275661044$ \\ $0.50$ & $0.20$ & $0.618287879529247$ \\ $0.50$ & $0.25$ & $0.591561730562133$ \\ $0.50$ & $0.30$ & $0.566972485392761$ \\ $0.50$ & $0.35$ & $0.544347799045552$ \\ $0.50$ & $0.40$ & $0.523532363681955$ \\ $0.50$ & $0.45$ & $0.504386172111908$ \\ $0.50$ & $0.50$ & $0.486782979369433$ \\ \hline \end{tabular} \end{center} \parsecskip \subsection{Asymptotic Analysis for $p\to 1$.} Before we start with the analysis of the terms $T_1$, $T_2$ etc., we recall that \[ \xi_{L+1}(1) = \frac 1{2^L L!}. \] and that \[ \sum_{L\ge 0} \xi_{L+1} z^L = e^{z/2 + O(qz^2)}, \qquad |z|\le 1/q \] which implies that \[ \xi_{L+1}(p) = \frac 1{2^{L(1+O(qL))} L!}. \] On the other hand we have a uniform estimate of the form \[ \xi_{L+1} \le \frac C{L!} \] Next we observe that, as $p\to 1$, \[ \left( \frac pq \right)^{\frac{\log(1/p)}{\log(p/q)}} = 1 + O(q) \to 1. \] Hence, it also follows that \[ u^{\frac{\log(1/p)}{\log(p/q)}} = 1 + O(q) \to 1. \] The strategy for the analysis in the case $p\to 1$ is to show that the terms $C_1$, $C_2$, $C_{31,0}$, and $C_{32}$ are (at most) of order $e^{O(1/q)}$, whereas the $C_{30}$ and the sum $\sum_{K\ge 1} C_{31,K}$ are at least of order $e^{c/q \log^2 q}$ for some positive constant $c> 0$. These terms have to be evaluated with more care. 
\subsubsection{The term $T_1$.} We consider the sum $C_1(p,u,v)$ (defined in (\ref{eqC1puv})) and start with the term $J=1$. Then for every fixed $L\ge 0$ the sum over $R$ is dominated by (at most) two terms corresponding to $R = 0$ and $R=1$: \begin{eqnarray*} && \sum_{R\in \mathbb{Z}} \left( (q/p)^{(L+1)} u \right)^{R - v -J\frac{\log(1/p)}{\log(p/q)}} e^{-(q/p)^{R - v -J\frac{\log(1/p)}{\log(p/q)}}} \\ && \sim \left( (q/p)^{(L+1)} u \right)^{- v -\frac{\log(1/p)}{\log(p/q)}} e^{-(q/p)^{- v -\frac{\log(1/p)}{\log(p/q)}}} \\ && + \left( (q/p)^{(L+1)} u \right)^{1 - v -\frac{\log(1/p)}{\log(p/q)}} e^{-(q/p)^{1 - v -\frac{\log(1/p)}{\log(p/q)}}} \\ && \sim \left( (q/p)^{(L+1)}u \right)^{- v } e^{-(q/p)^{- v}} + \left( (q/p)^{(L+1)}u\right)^{1- v } e^{-(q/p)^{1- v}}. \end{eqnarray*} Since (for $\delta \in \{0,1\}$) \begin{eqnarray*} \sum_{L\ge 0} \xi_{L+1} p^L (q/p)^{(\delta-v)L} &= & e^{ \frac 12 p (q/p)^{\delta-v} + O( q (q/p)^{2(\delta -v)} ) }\\ &= & e^{\frac 12 q^{(\delta-v)} + O(q^{1+\delta-v} ) } \end{eqnarray*} we thus obtain that the term corresponding to $J=1$ in $C_1(p,u,v)$ is asymptotically given by \[ q (qu)^{-v} e^{-\frac 12 q^{-v}} + q (qu)^{1-v} e^{-\frac 12 q^{1-v}}. \] For $J> 1$ the computations are almost the same but the (initial) factor $q^J$ makes them negligible compared to this term. As above we represent $u$ as $u = (p/q)^{\tilde u} \sim q^{-\tilde u}$. Thus, we finally have, as $p\to 1$, \begin{equation}\label{eqC1asymp2} C_1(p,u,v) \sim q^{1 -v(1-\tilde u)} e^{-\frac 12 q^{-v}} + q^{1+ (1- v)(1-\tilde u)} e^{-\frac 12 q^{1-v}}. \end{equation} This term is trivially bounded. \subsubsection{The term $T_2$.} The main difference between the terms $C_1(p,u,v)$ and $C_2(p,u,v)$ (defined in (\ref{eqC1puv}) and (\ref{eqC2puv})) is that $C_2(p,u,v)$ sums over $J\le 0$ and the \emph{leading factor} is now $q^J$ that tends to infinity (for $J < 0$ as $p\to 1$). 
Next we consider the sum over $R$ and observe that we (uniformly) have \begin{eqnarray*} &&\sum_{R\in \mathbb{Z}} \left( (q/p)^{(L+J)} u \right)^{R - v -J\frac{\log(1/p)}{\log(p/q)}} e^{-(q/p)^{R - v -J\frac{\log(1/p)}{\log(p/q)}}} \\ &&\qquad = O\left( \frac{(L + J-1)!}{\log(1/q)} \right). \end{eqnarray*} Furthermore since $\xi_{L+1} \le C/ L!$ and \[ \sum_{L > -J} \frac{(L+J-1)!}{L!} = O \left( \frac 1{(-J+1)!} \right) \] it is sufficient to consider the sum \[ \frac 1{\log(1/p)} \sum_{J\le 0} p^{J(J+1)/2}q^J \frac 1{(-J+1)!} \] It is an easy exercise to show that the term $p^{J(J+1)/2}q^J /{(-J+1)!}$ is maximal for $J \sim A/\log p \sim - A/q$ for some constant $A> 0$ and that it is of order $e^{O(1/q)}$. Hence it follows that $C_2(p,u,v)$ is upper bounded by $e^{O(1/q)}$, too. \subsubsection{The term $T_{30}$.} We first suppose that $v> 0$ and we also consider (first) the interval \[ J\in \left[ - v\left( \frac{\log q}{\log p}-1\right) , 0 \right] \cap \mathbb{Z} \] for which the term corresponding to $R=0$ dominates the sum (note that $L+J\le 0$); \begin{eqnarray*} &&\sum_{R\in \mathbb{Z}, R - v -J\frac{\log(1/p)}{\log(p/q)}\le 0 } \left( (q/p)^{(L+J)} u \right)^{R - v -J\frac{\log(1/p)}{\log(p/q)}} e^{-(q/p)^{R - v -J\frac{\log(1/p)}{\log(p/q)}}} \\ && \sim \left( (q/p)^{(L+J)} u \right)^{- v -J\frac{\log(1/p)}{\log(p/q)}} e^{-(q/p)^{- v -J\frac{\log(1/p)}{\log(p/q)}}} \end{eqnarray*} It is also clear that this term gets largest when $J$ equals \[ J = J_{v,0} := - \left\lfloor v \left( \frac{\log q}{\log p} -1 \right) \right\rfloor. \] Interestingly the result will depend heavily on the rounding error \[ \delta = - v \left( \frac{\log q}{\log p} -1 \right) - J_{v,0}. \] In particular the sum over $R$ is then asymptotically given by \[ \left( (q/p)^{(L+J_{v,0})} u \right)^{- v -J_{v,0}\frac{\log(1/p)}{\log(p/q)}} e^{-(q/p)^{- v -J_{v,0}\frac{\log(1/p)}{\log(p/q)}}} \sim e^{-1} (q/p)^{-\delta(J_{v,0}+L)}. 
\] Thus, we have to study next the following sum \begin{eqnarray*} e^{-1} \sum_{L=0}^{-J_{v,0}} \xi_{L+1} p^L (q/p)^{-\delta(J_{v,0}+L)} &= & e^{-1} [z^{-J_{v,0}}] \frac{ e^{pz/2 + O(qz^2)} }{1 - (q/p)^\delta z} \\ &\sim & e^{-1} (q/p)^{-\delta J_{v,0}} e^{p(p/q)^\delta + O(q (p/q)^{2\delta})}. \end{eqnarray*} Finally, we observe that the (total) sum over $J \in \left[ - v\left( \frac{\log q}{\log p}-1\right) , 0 \right]$ is dominated by the term corresponding to $J_{v,0}$ and is given by \begin{eqnarray*} && e^{-1} (q/p)^{-\delta J_{v,0}} e^{p(p/q)^\delta + O(q (p/q)^{2\delta})} (q^{1/2} u)^{-v} p^{J_{v,0}^2/2} q^{J_{v,0}} \\ && \sim e^{-1} (q^{1/2} u)^{-v} q^{-v^2+v} e^{(p/q)^\delta (1+o(1))} e^{(v(1-\delta)-v^2/2) \frac{\log^2 q}{\log(1/p)}} q^{\delta(2-3v)} \end{eqnarray*} For example, if $\delta = 0$ then we obtain (asymptotically) \[ \sim e^{-1/2} (q^{1/2} u)^{-v} q^{-v^2+v} e^{(v-v^2/2) \frac{\log^2 q}{\log(1/p)}}. \] Next we consider \[ J\in \left[ - (v+1)\left( \frac{\log q}{\log p}-1\right) , - v\left( \frac{\log q}{\log p}-1\right) \right) \cap \mathbb{Z}. \] In this range the term $p^{J^2/2}q^J$ attains its maximum if $J$ is close to $J_0 := -\frac{\log q}{\log p}$ and it will turn out to be the essential range in this case. However, in order to be precise we set $J = \kappa J_0$ with $\kappa \in (v,v+1)$. 
In this case the sum over $R$ is dominated by the term related to $R = -1$: \[ \left( (q/p)^{L+\kappa J_0}u\right)^{-1-v+\kappa} e^{-(q/p)^{-1-v+\kappa}} (1+o(1)) \] Thus, we are led to analyze the sum \begin{eqnarray*} &&\sum_{L=0}^{-\kappa J_0} \xi_{L+1} p^L (q/p)^{(-1-v+\kappa)(L+\kappa J_0)} = \\ && [z^{-\kappa J_0}] X(z) \frac 1{1- (q/p)^{1+v-\kappa} z} = (q/p)^{(-1-v+\kappa)\kappa J_0} e^{q^{-1-v+\kappa}/2 + O(q^{1-2(1+v-\kappa)})} \end{eqnarray*} and consequently the term \begin{eqnarray*} && p^{\kappa J_0(\kappa J_0+1)/2}q^{\kappa J_0} u^{\kappa J_0 \frac{\log p}{\log q}} (q/p)^{(-1-v+\kappa)\kappa J_0} u^{-1-v+\kappa} e^{q^{-1-v+\kappa}/2} \\ && \sim e^{(\kappa - \kappa^2/2 - v) \frac{\log^2 q}{\log(1/p)} } q^{-\kappa(\frac 32 + v - \kappa)} u^{-1-v} e^{q^{-1-v+\kappa}/2} \end{eqnarray*} which gets maximal for $\kappa = 1$ and has a local behavior of the form \[ e^{(\frac 12 - v) \frac{\log^2 q}{\log(1/p)} } q^{-\frac 12 -v} u^{-1-v} e^{q^{-v}/2} e^{- \frac {\log(1/p)}2 (J - J_0)^2 }. \] Thus, summing over $J$ in this range we finally get \[ \frac 1{\sqrt{2\pi \log(1/p)}} e^{(\frac 12 - v) \frac{\log^2 q}{\log(1/p)} } q^{-\frac 12 -v} u^{-1-v} e^{q^{-v}/2} \] It is an easy exercise to show that the contributions coming from $J < (v+1)J_0$ are negligible compared to these terms. Hence, we obtain for $v> 0$ as $p\to 1$ \begin{eqnarray*} C_{30}(p,u,v) & \sim & e^{-1} (q^{1/2} u)^{-v} q^{-v^2+v} e^{(p/q)^\delta (1+o(1))} e^{(v(1-\delta)-v^2/2) \frac{\log^2 q}{\log(1/p)}} q^{\delta(2-3v)} \\ &~~+& \frac 1{\sqrt{2\pi}} e^{(\frac 12 - v) \frac{\log^2 q}{\log(1/p)} } q^{-1 -v} u^{-1-v} e^{q^{-v}/2}. \end{eqnarray*} Finally the case $v= 0$ (or if $v$ is close to zero) can be handled in the same way, however, only the second term survives and so we get \[ C_{30}(p,u,v) \le \frac {1+o(1)}{\sqrt{2\pi}} e^{(\frac 12 - v) \frac{\log^2 q}{\log(1/p)} } q^{-1 -v} u^{-1-v} e^{q^{-v}/2}. 
\] \subsubsection{The term $T_{32}$.} The analysis of $C_{32}(p,u,v)$ is relatively easy. If $R - v -J\frac{\log(1/p)}{\log(p/q)}>0$ then \[ (q/p)^{R - v -J\frac{\log(1/p)}{\log(p/q)}} \le 1 \] which implies that \[ e^{-(q/p)^{R - v -J\frac{\log(1/p)}{\log(p/q)}}} - \sum_{\ell=0}^{-J-L} \frac{(-1)^\ell}{\ell!} (q/p)^{(R - v -J\frac{\log(1/p)}{\log(p/q)})\ell} = O\left( \frac{(q/p)^{(R - v -J\frac{\log(1/p)}{\log(p/q)})(-J-L+1)} }{(-J-L+1)! } \right). \] We first suppose that $v> 0$ and concentrate on $J\in (vJ_0,0]$. Then the relevant term in the sum over $R$ is the one with $R = 1$. Thus, the sum over $R$ is dominated (up to a constant) by \[ \left( (q/p)^{(L+J)} u \right)^{1 - v -J\frac{\log(1/p)}{\log(p/q)}} \frac{(q/p)^{(1 - v -J\frac{\log(1/p)}{\log(p/q)})(-J-L+1)} }{(-J-L+1)! } = \frac{((q/p)u)^{1 - v -J\frac{\log(1/p)}{\log(p/q)}} }{(-J-L+1)! }. \] Next, by using the upper bound $\xi_{L+1} \le C/L!$ it follows that \[ \sum_{L=0}^{-J} \xi_{L+1}p^L \frac{1}{(-J-L+1)! } \le \frac {2^{-J}}{(-J+1)!}. \] Thus, we are led to consider the sum \[ \sum_{J\in (vJ_0,0]} p^{J(J+1)/2} q^J u^{J \frac{\log(1/p)}{\log(p/q)}} \frac {2^{-J}}{(-J+1)!} ((q/p)u)^{1 - v -J\frac{\log(1/p)}{\log(p/q)}}. \] As in the case of the analysis of $C_2(p,u,v)$ it follows that this sum is upper bounded by $e^{O(1/q)}$ since the most significant term appears for $J \sim A/\log p$ (for some constant $A> 0$). Finally it is an easy exercise to show that the sum over $J \le J_0 v$ is negligible. If $v= 0$ then a similar analysis applies and we (certainly) have an upper bound of the form $e^{O(1/q)}$. \subsubsection{The term $T_{31}$.} We set \[ I_0:= \left[ - v\left( \frac{\log q}{\log p}-1\right) , 0 \right) \cap \mathbb{Z} \] and for $M\ge 1$ \[ I_M:= \left[ - (v+M)\left(\frac{\log q}{\log p}-1\right) , - (v+M-1)\left(\frac{\log q}{\log p}-1\right) \right) \cap \mathbb{Z} \] First we consider the term $C_{31,0}(p,u,v)$ given in (\ref{eqC310puv-2}). 
If $v> 0$ and $J\in I_0$ then $v + J \frac{\log(1/p)}{\log(p/q)} > 0$ which implies that the sum over $R$ is empty. Thus, we can assume that $J < - v\left( \frac{\log q}{\log p}-1\right)$. By using the upper bound $\xi_{L+1} \le C/L!$ we obtain the upper bound \[ \left|\sum_{L=0}^{-J} \xi_{L+1}\frac{(-1)^{-J-L}}{(-J-L)!} \right| \le C \frac {2^{-J}}{(-J)!}. \] Recall that the term \[ p^{ J(J+1)/2} q^{J} \frac {2^{-J}}{(-J)!} \] is maximal for $J$ close to $J_{\max} = A/\log p$ (for some constant $A>0$) and the maximum value is of order $e^{O(1/q)}$. Hence, if $v > A/\log(1/q)$ then $J_{\max} \in I_0$ and consequently we can upper bound $C_{31,0}$ trivially by $e^{O(1/q)}$. Conversely, if $v \le A/\log(1/q)$ then $J_{\max}\in I_1$ and we also get an upper bound of the form $e^{O(1/q)}$. Note that the appearing $R$-sums are negligible compared to the leading term $e^{O(1/q)}$. Finally if $v= 0$, then we have $J_{\max} \in I_1$ and again we get an upper bound of the form $e^{O(1/q)}$. Next suppose that $K\ge 1$. Here we note that for $J\in I_M$ we have, as $p\to 1$, \[ \sum_{R\le v+ J \frac{\log(1/p)}{\log(p/q)}} \left( u (q/p)^{-K} \right)^{R-v} \sim \left( u (q/p)^{-K} \right)^{-M-v}. \] Actually we could also work with an error term. However, in order to make the following computations more transparent we concentrate on the leading term. 
Since \[ \sum_{L=0}^{-J-K} \xi_{L+1} p^L \frac{(-1)^{-J-K-L}}{(-J-K-L)!} = [z^{-J-K}] e^{z/2 + O(qz^2) - z} \] we get \begin{eqnarray*} C_{31,K,M}&:=& \sum_{J\in I_M,\, J \le -K} p^{J(J+1)/2 +JK}q^J \sum_{R\le v+ J \frac{\log(1/p)}{\log(p/q)} } \left( u \left(\frac qp\right)^{-K} \right)^{R-v} \\ &&\qquad \times\sum_{L=0}^{-J-K} \xi_{L+1}p^{L} \frac{(-1)^{-J-L-K}}{(-J-L-K)!} \\ &\sim & \sum_{J\in I_M,\, J \le -K} p^{J(J+1)/2 +JK}q^J \left( u \left(\frac qp\right)^{-K} \right)^{-M-v} [z^{-J-K}] e^{z/2 + O(qz^2) - z} \end{eqnarray*} and consequently if we sum over $K\ge 1$ \begin{eqnarray*} \sum_{K\ge 1} C_{31,K,M} &\sim & u^{-M-v} \sum_{J\in I_M} p^{J(J+1)/2}q^J \\ &&\qquad \times \sum_{K=1}^{-J} p^{JK}(q/p)^{K(M+v)} [z^{-J-K}] e^{z/2 + O(qz^2) - z} \\ & = & u^{-M-v} \sum_{J\in I_M} p^{J(J+1)/2}q^J \\ &&\qquad \times \sum_{K=1}^{-J} p^{JK}(q/p)^{K(M+v)} [z^{-J-K}] e^{z/2 + O(qz^2) - z}. \end{eqnarray*} We observe that (for $J\in I_M$) \begin{eqnarray*} &&\sum_{K=1}^{-J} p^{JK}(q/p)^{K(M+v)} [z^{-J-K}] e^{z/2 + O(qz^2) - z} = [z^{-J}] \frac{p^{J}(q/p)^{M+v}z}{1- p^{J}(q/p)^{M+v}z } e^{z/2 + O(qz^2) - z} \\ &&\qquad \sim p^{-J^2}(q/p)^{-J(M+v)} e^{z_M/2 + O(q z_M^2) - z_M}, \end{eqnarray*} where $z_M = p^{-J}(q/p)^{-M-v}$. Note that $z_M$ varies between $1$ and $1/q$ if $J\in I_M$. However, it will turn out that the asymptotic leading terms will come from $J$ close to $- (v+M)\frac{\log q}{\log p}$ which means that $z_M$ is asymptotically $1$ and, thus, the last exponential term is asymptotically $e^{-1/2}$. The reason is that the term \[ p^{J(J+1)/2}q^J p^{-J^2}(q/p)^{-J(M+v)} = p^{-J^2/2} q^{J(1-M-v)} p^{J(\frac 12 +M+v)} \] has its absolute minimum for $J$ close to $- (v+M-1)\frac{\log q}{\log p}$ and for $J\in I_M$ it gets maximal for $J$ close to $- (v+M)\frac{\log q}{\log p}$, in particular if \[ J = J_{v,M} := - \left\lfloor (M+v) \left( \frac{\log q}{\log p} -1 \right) \right\rfloor. 
\] Thus, we obtain \begin{eqnarray*} \sum_{K\ge 1} C_{31,K,M} &\sim & e^{-\frac 12} u^{-M-v} p^{-J_{v,M}^2/2} q^{J_{v,M}(1-M-v)} p^{J_{v,M}(\frac 12 +M+v)} \\ &=& e^{ \frac{\log^2 q}q \left( M+v- \frac 12 (M+v)^2 \right) + O(\log^2 q) }. \end{eqnarray*} Since $(M+v) - \frac 12 (M+v)^2 \le 0$ for $M\ge 2$ (and $0\le v < 1$) it is clear that only the first two terms corresponding to $M=0$ and $M=1$ are relevant. Hence, we obtain as a crude estimate \[ \sum_{K\ge 1} C_{31,K} \approx e^{ \frac{\log^2 q}q \left( v- \frac 12 v^2 \right) + O(\log^2 q) } + e^{ \frac{\log^2 q}q \frac 12 \left( 1- v^2 \right) + O(\log^2 q) }. \] or as a more precise one \[ \sum_{K\ge 1} C_{31,K} \sim e^{-\frac 12} (q^{1/2} u)^{-v} q^{-v^2} p^{-J_{v,0}^2/2} q^{J_{v,0}(1-v)} + e^{-\frac 12} q^{-(1+v)(\frac 32 +v) } u^{-1-v} p^{-J_{v,1}^2/2} q^{-J_{v,0}v} \] In what follows we will have to study the precise behavior of the first summand. We set (as in the analysis of $T_{30}$) \[ \delta = - v \left( \frac{\log q}{\log p} -1 \right) - J_{v,0}. \] Then we have \[ e^{-\frac 12} (q^{1/2} u)^{-v} q^{-v^2} p^{-J_{v,0}^2/2} q^{J_{v,0}(1-v)} \sim e^{-\frac 12} (q^{1/2} u)^{-v} q^{-v^2+v+\delta}e^{(v-v^2/2) \frac{\log^2 q}{\log(1/p)}}. \] If $v=0$ or close to $0$ then the second term dominates and we get a lower bound of the form \[ \sum_{K\ge 1} C_{31,K} \ge e^{-\frac 12} q^{-(1+v)(\frac 32 +v) } u^{-1-v} p^{-J_{v,1}^2/2} q^{-J_{v,0}v} \] \subsubsection{The full behavior for $p\to 1$.} The most significant terms are $T_{30}$ and $T_{31}$ (or $C_{30}$ and $\sum_{K\ge 1} C_{31,K}$). So we just have to concentrate on them. Both consist of two contributions, where the first one is dominating for larger $v$ and the second one for smaller $v$. Suppose first that $\frac 12 \le v < 1$. 
Then the corresponding terms in $C_{30}$ and $\sum_{K\ge 1} C_{31,K}$ are \[ e^{-1} (q^{1/2} u)^{-v} q^{-v^2+v} e^{(p/q)^\delta (1+o(1))} e^{(v(1-\delta)-v^2/2) \frac{\log^2 q}{\log(1/p)}} q^{\delta(2-3v)} \] and \[ e^{-\frac 12} (q^{1/2} u)^{-v} q^{-v^2+v+\delta}e^{(v-v^2/2) \frac{\log^2 q}{\log(1/p)}} \] where \[ \delta = - v \left( \frac{\log q}{\log p} -1 \right) - J_{v,0}. \] and \[ J_{v,0} = - \left\lfloor v \left( \frac{\log q}{\log p} -1 \right) \right\rfloor. \] In particular we have $0\le \delta < 1$. If $\delta > 0$ then it is clear that the term corresponding to $\sum_{K\ge 1} C_{31,K}$ dominates the other one. Thus, the contribution of $T_{31}$ is larger than that of $T_{30}$, which proves positivity in this case. If $\delta = 0$ then both terms coincide. However, by looking at second order terms (which we have not worked out here) it again follows that $T_{31}$ dominates. If $0\le v \le \frac 12$ then we have to look at the terms \[ \frac 1{\sqrt{2\pi}} e^{(\frac 12 - v) \frac{\log^2 q}{\log(1/p)} } q^{-1 -v} u^{-1-v} e^{q^{-v}/2} \] and \[ e^{-\frac 12} q^{-(1+v)(\frac 32 +v) } u^{-1-v} p^{-J_{v,1}^2/2} q^{-J_{v,0}v} \] If we replace $J_{v,0}$ by $- \left\lfloor v \left( \frac{\log q}{\log p}-1\right)\right\rfloor$ then $p^{-J_{v,1}^2/2} q^{-J_{v,0}v}$ rewrites to $e^{ (1-v^2) \log^2 q/(2 \log(1/p))}$ which is definitely larger than $e^{(\frac 12 - v) \frac{\log^2 q}{\log(1/p)} }$ if $v> 0$. If $v=0$ then the subexponential growth of the first term is of order $u^{-1} q^{-1}$ and that of the second term of order $u^{-1} q^{-3/2}$. Hence again the second term dominates and so we get positivity as $p\to 1$ in this case. \subsection{Proof of Positivity} In the previous sections we have shown that $\tilde G_k(n) \gg F_0$ if $p$ is sufficiently close to $1/2$ or sufficiently close to $1$. 
Actually we can do the above analysis even more precisely by giving error terms which rigorously proves the lower bound $\tilde G_k(n) \gg F_0$ for $0.5 < p \le 0.51$ and for $0.97 \le p < 1$. Thus it remains to consider the interval $0.51\le p \le 0.97$. In this interval we know that \[ \tilde G_k(n) \sim C_*(p)\,F_0\, ( - C_1 - C_2 - C_{30} - C_{32} - C_{31,0} + \sum_{K\ge 1} C_{31,K} ), \] where the terms $C_1(p,u,v)$, $C_2(p,u,v)$, $C_{30}(p,u,v)$, $C_{32}(p,u,v)$, $C_{31,K}(p,u,v)$ ($K\ge 0$), in which $u = \overline r_0 / \overline r_1 \in [\sqrt{q/p},\sqrt{p/q}]$ and $v = \langle \overline r_1 \rangle$, are explicitly given in (\ref{eqC1puv}), (\ref{eqC2puv}), (\ref{eqC30puv}), (\ref{eqC32puv}), (\ref{eqC310puv}), (\ref{eqC31Kpuv}). In order to make numerical calculations we first replace the infinite sums of \[ - C_1 - C_2 - C_{30} - C_{32} - C_{31,0} + \sum_{K\ge 1} C_{31,K} \] by finite sums, that is we just have to consider $|J|\le J_0$, $L\le L_0$, $K\le K_0$, and $R \le R_0$ where \[ J_0 = 35 , \quad L_0 = 70, \quad K_0 = 80, \quad R_0 = 95. \] The error that we make can be uniformly bounded by $10^{-5}$. All the terms that appear in the remaining finite sum are continuous in the parameters $p$, $u$, $v$; however there is a discontinuity in the terms that appear if $R- v- J \frac{\log(1/p)}{\log(p/q)} = 0$. There are only finitely many such cases for $R$. Hence, in order to show positivity of $- C_1 - C_2 - C_{30} - C_{32} - C_{31,0} + \sum_{K\ge 1} C_{31,K} =: C(p, u, v)$ it is sufficient to check it on a sufficiently fine three-dimensional grid, combined with a proper analysis of the partial derivatives and a consideration of the discontinuities at $R- v- J \frac{\log(1/p)}{\log(p/q)} = 0$. Below we give a table of partial derivatives with respect to $p, u, v$. 
\begin{center} \begin{tabular}{|l|l|l|l|l|l|l|} \hline $p$ & $u$ & $v$ & $\frac{C(p,u,v)}{\| \nabla C(p,u,v) \|_1 }$ & $\frac{\partial C}{\partial p}$ & $\frac{\partial C}{\partial u}$ & $\frac{\partial C}{\partial v}$ \\ \hline $0.60$ & $0.8573214$ & $0.200$ & $0.0397029$ & $-37.3216755$ & $-6.87205829924586$ & $-1.70174985214544 \times 10^{-9}$ \\ $0.60$ & $0.8573214$ & $0.400$ & $0.0397066$ & $-37.3175544$ & $-6.87205829422766$ & $8.17124146124115 \times 10^{-9}$ \\ $0.60$ & $0.8573214$ & $0.600$ & $0.0397105$ & $-37.3133045$ & $-6.87205830278614$ & $6.75015598972095 \times 10^{-9}$ \\ $0.60$ & $0.9573214$ & $0.200$ & $0.0477100$ & $-21.9600978$ & $-3.88671232717819$ & $1.68487446217114 \times 10^{-9}$ \\ $0.60$ & $0.9573214$ & $0.400$ & $0.0477178$ & $-21.9558812$ & $-3.88671232690196$ & $6.52766729558607 \times 10^{-9}$ \\ $0.60$ & $0.9573214$ & $0.600$ & $0.0477256$ & $-21.9516277$ & $-3.88671233292026$ & $2.35500507983488 \times 10^{-9}$ \\ $0.60$ & $1.0573214$ & $0.200$ & $0.0556009$ & $-14.2980109$ & $-2.37660414946284$ & $3.01625391330163 \times 10^{-9}$ \\ $0.60$ & $1.0573214$ & $0.400$ & $0.0556152$ & $-14.2937099$ & $-2.37660415074092$ & $4.62252458532930 \times 10^{-9}$ \\ $0.60$ & $1.0573214$ & $0.600$ & $0.0556294$ & $-14.2894568$ & $-2.37660415496421$ & $-1.60316204755873 \times 10^{-10}$ \\ $0.60$ & $1.1573214$ & $0.200$ & $0.0626269$ & $-10.1863196$ & $-1.54288334226127$ & $3.33222338610994 \times 10^{-9}$ \\ $0.60$ & $1.1573214$ & $0.400$ & $0.0626503$ & $-10.1819434$ & $-1.54288334394570$ & $2.95541369155217 \times 10^{-9}$ \\ $0.60$ & $1.1573214$ & $0.600$ & $0.0626730$ & $-10.1776934$ & $-1.54288334671548$ & $-1.49635859258979 \times 10^{-9}$ \\ $0.70$ & $0.7419408$ & $0.200$ & $0.0466821$ & $-7.02015816$ & $-0.941410951563526$ & $0.00277949304106073$ \\ $0.70$ & $0.7419408$ & $0.400$ & $0.0468213$ & $-7.00927750$ & $-0.941036859551048$ & $0.00326076664425301$ \\ $0.70$ & $0.7419408$ & $0.600$ & $0.0469950$ & $-6.99412985$ & 
$-0.941080960885188$ & $0.00352113957369227$ \\ $0.70$ & $0.8419408$ & $0.200$ & $0.0492989$ & $-5.33811883$ & $-0.631417261109490$ & $0.00300199019243053$ \\ $0.70$ & $0.8419408$ & $0.400$ & $0.0495040$ & $-5.32611794$ & $-0.631168855463216$ & $0.00332417515469530$ \\ $0.70$ & $0.8419408$ & $0.600$ & $0.0497253$ & $-5.31304507$ & $-0.631258543609903$ & $0.00339509555136175$ \\ $0.70$ & $0.9419408$ & $0.200$ & $0.0514611$ & $-4.23520180$ & $-0.447473694530132$ & $0.00317392708737430$ \\ $0.70$ & $0.9419408$ & $0.400$ & $0.0517361$ & $-4.22295039$ & $-0.447305509108986$ & $0.00334798714618501$ \\ $0.70$ & $0.9419408$ & $0.600$ & $0.0520044$ & $-4.21164153$ & $-0.447402921736284$ & $0.00328784937675408$ \\ $0.70$ & $1.0419408$ & $0.200$ & $0.0532287$ & $-3.47206308$ & $-0.330624920881206$ & $0.00330789550107013$ \\ $0.70$ & $1.0419408$ & $0.400$ & $0.0535756$ & $-3.45998730$ & $-0.330507053556417$ & $0.00335245633964476$ \\ $0.70$ & $1.0419408$ & $0.600$ & $0.0538907$ & $-3.45007283$ & $-0.330597031821256$ & $0.00320084691018963$ \\ $0.70$ & $1.1419408$ & $0.200$ & $0.0546466$ & $-2.92151577$ & $-0.252543040933695$ & $0.00341430828054712$ \\ $0.70$ & $1.1419408$ & $0.400$ & $0.0550660$ & $-2.90980583$ & $-0.252456509284293$ & $0.00334836078108580$ \\ $0.70$ & $1.1419408$ & $0.600$ & $0.0554287$ & $-2.90095183$ & $-0.252534083156064$ & $0.00313143305508135$ \\ $0.70$ & $1.2419408$ & $0.200$ & $0.0557535$ & $-2.51064207$ & $-0.198272264594124$ & $0.00350094009471391$ \\ $0.70$ & $1.2419408$ & $0.400$ & $0.0562453$ & $-2.49936355$ & $-0.198205173826516$ & $0.00334130490875495$ \\ $0.70$ & $1.2419408$ & $0.600$ & $0.0566567$ & $-2.49129520$ & $-0.198269800136153$ & $0.00307611303140831$ \\ $0.70$ & $1.3419408$ & $0.200$ & $0.0565844$ & $-2.19510393$ & $-0.159366055272336$ & $0.00357337397538515$ \\ $0.70$ & $1.3419408$ & $0.400$ & $0.0571484$ & $-2.18425793$ & $-0.159310991691086$ & $0.00333409085406799$ \\ $0.70$ & $1.3419408$ & $0.600$ & $0.0576098$ & $-2.17675830$ & 
$-0.159363902270115$ & $0.00303165893500434$ \\ $0.70$ & $1.4419408$ & $0.200$ & $0.0571731$ & $-1.94654812$ & $-0.130808089307877$ & $0.00363555773552626$ \\ $0.70$ & $1.4419408$ & $0.400$ & $0.0578086$ & $-1.93610396$ & $-0.130760449668088$ & $0.00332801239988356$ \\ $0.70$ & $1.4419408$ & $0.600$ & $0.0583219$ & $-1.92900552$ & $-0.130803443699534$ & $0.00299540897730211$ \\ \hline \end{tabular} \end{center} \begin{center} \begin{tabular}{|l|l|l|l|l|l|l|} \hline $p$ & $u$ & $v$ & $\frac{C(p,u,v)}{\| \nabla C(p,u,v) \|_1 }$ & $\frac{\partial C}{\partial p}$ & $\frac{\partial C}{\partial u}$ & $\frac{\partial C}{\partial v}$ \\ \hline $0.80$ & $0.65$ & $0.200$ & $0.0248967$ & $-1.46956589776437$ & $-0.119459648828979$ & $0.0600241100414678$ \\ $0.80$ & $0.65$ & $0.400$ & $0.0290076$ & $-1.49636288594479$ & $-0.140905496493815$ & $-0.00268991132656993$ \\ $0.80$ & $0.65$ & $0.600$ & $0.0231229$ & $-1.58402037698124$ & $-0.134370873496437$ & $-0.0531169107773621$ \\ $0.80$ & $0.75$ & $0.200$ & $0.0237794$ & $-1.18806547561690$ & $-0.0793871424775716$ & $0.0485832481444959$ \\ $0.80$ & $0.75$ & $0.400$ & $0.0276804$ & $-1.20264660480984$ & $-0.0935985673038431$ & $-0.00629235525195782$ \\ $0.80$ & $0.75$ & $0.600$ & $0.0215052$ & $-1.26861837550507$ & $-0.0871968711919635$ & $-0.0437779185560316$ \\ $0.80$ & $0.85$ & $0.200$ & $0.0225818$ & $-0.992277554530574$ & $-0.0563859753697216$ & $0.0402965617070095$ \\ $0.80$ & $0.85$ & $0.400$ & $0.0262502$ & $-0.998636356683846$ & $-0.0661789200080420$ & $-0.00814619885147749$ \\ $0.80$ & $0.85$ & $0.600$ & $0.0198854$ & $-1.05095720444126$ & $-0.0603108685055531$ & $-0.0365389341325795$ \\ $0.80$ & $0.95$ & $0.200$ & $0.0213276$ & $-0.848335491866692$ & $-0.0420003183307927$ & $0.0341005124937510$ \\ $0.80$ & $0.95$ & $0.400$ & $0.0247639$ & $-0.849069631954080$ & $-0.0489517334756329$ & $-0.00902477816566716$ \\ $0.80$ & $0.95$ & $0.600$ & $0.0182823$ & $-0.892403384526119$ & $-0.0437039697658292$ & $-0.0308048695885077$ 
\\ $0.80$ & $1.05$ & $0.200$ & $0.0200260$ & $-0.738163948966530$ & $-0.0324361927255268$ & $0.0293485365574497$ \\ $0.80$ & $1.05$ & $0.400$ & $0.0232414$ & $-0.735010372125089$ & $-0.0374844296402443$ & $-0.00933903507416289$ \\ $0.80$ & $1.05$ & $0.600$ & $0.0166997$ & $-0.772209946390490$ & $-0.0328328708576464$ & $-0.0261777524244167$ \\ $0.80$ & $1.15$ & $0.200$ & $0.0186804$ & $-0.651184626946133$ & $-0.0257793005573603$ & $0.0256278809160904$ \\ $0.80$ & $1.15$ & $0.400$ & $0.0216908$ & $-0.645332988952418$ & $-0.0295103956773346$ & $-0.00931950613392019$ \\ $0.80$ & $1.15$ & $0.600$ & $0.0151354$ & $-0.678217958622440$ & $-0.0253954332265494$ & $-0.0223840630155792$ \\ $0.80$ & $1.25$ & $0.200$ & $0.0172908$ & $-0.580782272038505$ & $-0.0209780958471129$ & $0.0226643319933828$ \\ $0.80$ & $1.25$ & $0.400$ & $0.0201143$ & $-0.573064137057600$ & $-0.0237727883387606$ & $-0.00910101858409007$ \\ $0.80$ & $1.25$ & $0.600$ & $0.0135846$ & $-0.602835170141702$ & $-0.0201279084848238$ & $-0.0192308538302655$ \\ $0.80$ & $1.35$ & $0.200$ & $0.0158557$ & $-0.522603802636468$ & $-0.0174163026258611$ & $0.0202695748470205$ \\ $0.80$ & $1.35$ & $0.400$ & $0.0185105$ & $-0.513610382057550$ & $-0.0195302017260701$ & $-0.00876498700819184$ \\ $0.80$ & $1.35$ & $0.600$ & $0.0120405$ & $-0.541082442332197$ & $-0.0162926045703671$ & $-0.0165787548596086$ \\ $0.80$ & $1.45$ & $0.200$ & $0.0143716$ & $-0.473664745896940$ & $-0.0147138076727060$ & $0.0183106129867383$ \\ $0.80$ & $1.45$ & $0.400$ & $0.0168756$ & $-0.463822268983449$ & $-0.0163233325025658$ & $-0.00836177299845531$ \\ $0.80$ & $1.45$ & $0.600$ & $0.0104955$ & $-0.489561844233322$ & $-0.0134373556761602$ & $-0.0143249441002524$ \\ $0.80$ & $1.55$ & $0.200$ & $0.0128332$ & $-0.431847222870374$ & $-0.0126263003039639$ & $0.0166912413419595$ \\ $0.80$ & $1.55$ & $0.400$ & $0.0152035$ & $-0.421466433564888$ & $-0.0138561256477487$ & $-0.00792306972385859$ \\ $0.80$ & $1.55$ & $0.600$ & $0.0089402$ & 
$-0.445875381153371$ & $-0.0112737912587590$ & $-0.0123921213806000$ \\ $0.80$ & $1.65$ & $0.200$ & $0.0112335$ & $-0.395602281798801$ & $-0.0109913985113508$ & $0.0153404116502998$ \\ $0.80$ & $1.65$ & $0.400$ & $0.0134857$ & $-0.384911693686263$ & $-0.0119314480429011$ & $-0.00746903628368045$ \\ $0.80$ & $1.65$ & $0.600$ & $0.0073638$ & $-0.408280137456529$ & $-0.00961191538806361$ & $-0.0107211906978932$ \\ $0.80$ & $1.75$ & $0.200$ & $0.0095627$ & $-0.363765195132260$ & $-0.00969821012120065$ & $0.0142046951836505$ \\ $0.80$ & $1.75$ & $0.400$ & $0.0117111$ & $-0.352933892131091$ & $-0.0104145054677929$ & $-0.00701254332113876$ \\ $0.80$ & $1.75$ & $0.600$ & $0.0057536$ & $-0.375474526805419$ & $-0.00832317642363023$ & $-0.00926629099495813$ \\ $0.80$ & $1.85$ & $0.200$ & $0.0078081$ & $-0.335436119925703$ & $-0.00866939342358819$ & $0.0132432673112248$ \\ $0.80$ & $1.85$ & $0.400$ & $0.0098652$ & $-0.324589674384868$ & $-0.00921116843244363$ & $-0.00656177159186200$ \\ $0.80$ & $1.85$ & $0.600$ & $0.0040938$ & $-0.346460554688122$ & $-0.00731875624637723$ & $-0.00799135079532221$ \\ $0.80$ & $1.95$ & $0.200$ & $0.0059525$ & $-0.309899919159307$ & $-0.00785021919114115$ & $0.0124244891566150$ \\ $0.80$ & $1.95$ & $0.400$ & $0.0079291$ & $-0.299131685380871$ & $-0.00825469764720310$ & $-0.00612184077297684$ \\ $0.80$ & $1.95$ & $0.600$ & $0.0023648$ & $-0.320451686228296$ & $-0.00653637328440482$ & $-0.00686765290325297$ \\ \hline \end{tabular} \end{center} Suppose that on a three-dimensional grid of values we find that the minimum value for $C(p, u, v)$ is some $\delta > 0$, and that the sum of the partial derivatives is bounded above in absolute value by some $D$. Then, by the multivariate form of Taylor's theorem, it is sufficient for the maximum distance between adjacent grid points to be at most $|\delta / D|$ in order for us to conclude that $ C(p, u, v)$ is positive over its entire domain. 
In particular, we observe numerically that $|\delta / D|$, when $\delta/D$ is negative, is bounded below by approximately $0.001$. The following table gives some sample values: \begin{center} \begin{tabular}{|l|l|l|l|} \hline $p$ & $u$ & $v$ & $C(p, u, v)$ \\ \hline $0.51$ & $0.980196058819607$ & $0.400$ & $29.9108624383664$ \\ $0.53$ & $0.941696582148512$ & $0.400$ & $10.0526228338624$ \\ $0.53$ & $0.991696582148512$ & $0.400$ & $5.88642519475933$ \\ $0.53$ & $1.04169658214851$ & $0.400$ & $3.93987124883441$ \\ $0.57$ & $0.868553950490285$ & $0.400$ & $3.52426761973916$ \\ $0.57$ & $0.918553950490285$ & $0.400$ & $2.69816719720437$ \\ $0.57$ & $0.968553950490285$ & $0.400$ & $2.13795667104722$ \\ $0.57$ & $1.01855395049029$ & $0.400$ & $1.74575802150213$ \\ $0.57$ & $1.06855395049029$ & $0.400$ & $1.46363184340248$ \\ $0.57$ & $1.11855395049029$ & $0.400$ & $1.25604047995371$ \\ $0.61$ & $0.799590058902111$ & $0.400$ & $1.77277314367910$ \\ $0.61$ & $0.849590058902111$ & $0.400$ & $1.46518133627806$ \\ $0.61$ & $0.899590058902111$ & $0.400$ & $1.23455159510288$ \\ $0.61$ & $0.949590058902111$ & $0.400$ & $1.05777607664529$ \\ $0.61$ & $0.999590058902111$ & $0.400$ & $0.919664990997429$ \\ $0.61$ & $1.04959005890211$ & $0.400$ & $0.809941704967424$ \\ $0.61$ & $1.09959005890211$ & $0.400$ & $0.721477392051825$ \\ $0.61$ & $1.14959005890211$ & $0.400$ & $0.649215937996586$ \\ $0.61$ & $1.19959005890211$ & $0.400$ & $0.589499096328785$ \\ $0.61$ & $1.24959005890211$ & $0.400$ & $0.539631267061691$ \\ $0.65$ & $0.733799385705343$ & $0.400$ & $0.982651282114738$ \\ $0.65$ & $0.783799385705343$ & $0.400$ & $0.839124203522317$ \\ $0.65$ & $0.833799385705343$ & $0.400$ & $0.727100276432193$ \\ $0.65$ & $0.883799385705343$ & $0.400$ & $0.637940561184182$ \\ $0.65$ & $0.933799385705343$ & $0.400$ & $0.565793172609293$ \\ $0.65$ & $0.983799385705343$ & $0.400$ & $0.506567343973066$ \\ $0.65$ & $1.03379938570534$ & $0.400$ & $0.457330788676269$ \\ $0.65$ & $1.08379938570534$ 
& $0.400$ & $0.415936629134123$ \\ $0.65$ & $1.13379938570534$ & $0.400$ & $0.380783719541390$ \\ $0.65$ & $1.18379938570534$ & $0.400$ & $0.350658093707974$ \\ $0.65$ & $1.23379938570534$ & $0.400$ & $0.324625450046327$ \\ $0.65$ & $1.28379938570534$ & $0.400$ & $0.301956613678465$ \\ $0.65$ & $1.33379938570534$ & $0.400$ & $0.282074753241104$ \\ \hline \end{tabular} \end{center} \begin{center} \begin{tabular}{|l|l|l|l|} \hline $p$ & $u$ & $v$ & $C(p, u, v)$ \\ \hline $0.69$ & $0.670280062599836$ & $0.400$ & $0.550461162927249$ \\ $0.69$ & $0.720280062599836$ & $0.400$ & $0.476560662585850$ \\ $0.69$ & $0.770280062599836$ & $0.400$ & $0.418085884501238$ \\ $0.69$ & $0.820280062599836$ & $0.400$ & $0.370869235479415$ \\ $0.69$ & $0.870280062599836$ & $0.400$ & $0.332104766553185$ \\ $0.69$ & $0.920280062599836$ & $0.400$ & $0.299828522402899$ \\ $0.69$ & $0.970280062599836$ & $0.400$ & $0.272625293598979$ \\ $0.69$ & $1.02028006259984$ & $0.400$ & $0.249451145554923$ \\ $0.69$ & $1.07028006259984$ & $0.400$ & $0.229520731342754$ \\ $0.69$ & $1.12028006259984$ & $0.400$ & $0.212233117795916$ \\ $0.69$ & $1.17028006259984$ & $0.400$ & $0.197121539103930$ \\ $0.69$ & $1.22028006259984$ & $0.400$ & $0.183818532413344$ \\ $0.69$ & $1.27028006259984$ & $0.400$ & $0.172031239042804$ \\ $0.69$ & $1.32028006259984$ & $0.400$ & $0.161523580019521$ \\ $0.69$ & $1.37028006259984$ & $0.400$ & $0.152103170273795$ \\ $0.69$ & $1.42028006259984$ & $0.400$ & $0.143611550335309$ \\ $0.69$ & $1.47028006259984$ & $0.400$ & $0.135916766083619$ \\ $0.73$ & $0.608163640559537$ & $0.400$ & $0.293753647044937$ \\ $0.73$ & $0.658163640559537$ & $0.400$ & $0.254521707499460$ \\ $0.73$ & $0.708163640559537$ & $0.400$ & $0.223617820853438$ \\ $0.73$ & $0.758163640559537$ & $0.400$ & $0.198689182699194$ \\ $0.73$ & $0.808163640559537$ & $0.400$ & $0.178200553380755$ \\ $0.73$ & $0.858163640559537$ & $0.400$ & $0.161099962106000$ \\ $0.73$ & $0.908163640559537$ & $0.400$ & $0.146640237980822$ \\ 
$0.73$ & $0.958163640559537$ & $0.400$ & $0.134275597770952$ \\ $0.73$ & $1.00816364055954$ & $0.400$ & $0.123598129339214$ \\ $0.73$ & $1.05816364055954$ & $0.400$ & $0.114297028223827$ \\ $0.73$ & $1.10816364055954$ & $0.400$ & $0.106131513996562$ \\ $0.73$ & $1.15816364055954$ & $0.400$ & $0.0989123176383000$ \\ $0.73$ & $1.20816364055954$ & $0.400$ & $0.0924887213278005$ \\ $0.73$ & $1.25816364055954$ & $0.400$ & $0.0867392964169298$ \\ $0.73$ & $1.30816364055954$ & $0.400$ & $0.0815651633709091$ \\ $0.73$ & $1.35816364055954$ & $0.400$ & $0.0768850068928071$ \\ $0.73$ & $1.40816364055954$ & $0.400$ & $0.0726313343834271$ \\ $0.73$ & $1.45816364055954$ & $0.400$ & $0.0687476288039068$ \\ $0.73$ & $1.50816364055954$ & $0.400$ & $0.0651861534672111$ \\ $0.73$ & $1.55816364055954$ & $0.400$ & $0.0619062371781993$ \\ $0.73$ & $1.60816364055954$ & $0.400$ & $0.0588729160730193$ \\ \hline \end{tabular} \end{center} \begin{center} \begin{tabular}{|l|l|l|l|} \hline $p$ & $u$ & $v$ & $C(p, u, v)$ \\ \hline $0.77$ & $0.546535725000021$ & $0.400$ & $0.145533658462583$ \\ $0.77$ & $0.596535725000021$ & $0.400$ & $0.124327954267287$ \\ $0.77$ & $0.646535725000021$ & $0.400$ & $0.107945853943789$ \\ $0.77$ & $0.696535725000021$ & $0.400$ & $0.0949108647641168$ \\ $0.77$ & $0.746535725000021$ & $0.400$ & $0.0843029431585052$ \\ $0.77$ & $0.796535725000021$ & $0.400$ & $0.0755136117642792$ \\ $0.77$ & $0.846535725000021$ & $0.400$ & $0.0681223833348810$ \\ $0.77$ & $0.896535725000021$ & $0.400$ & $0.0618286193771169$ \\ $0.77$ & $0.946535725000021$ & $0.400$ & $0.0564114016933814$ \\ $0.77$ & $0.996535725000021$ & $0.400$ & $0.0517046696768304$ \\ $0.77$ & $1.04653572500002$ & $0.400$ & $0.0475811789582821$ \\ $0.77$ & $1.09653572500002$ & $0.400$ & $0.0439418020113962$ \\ $0.77$ & $1.14653572500002$ & $0.400$ & $0.0407081902919728$ \\ $0.77$ & $1.19653572500002$ & $0.400$ & $0.0378176207163818$ \\ $0.77$ & $1.24653572500002$ & $0.400$ & $0.0352193008652932$ \\ $0.77$ & 
$1.29653572500002$ & $0.400$ & $0.0328716716127033$ \\ $0.77$ & $1.34653572500002$ & $0.400$ & $0.0307404059773191$ \\ $0.77$ & $1.39653572500002$ & $0.400$ & $0.0287969028851336$ \\ $0.77$ & $1.44653572500002$ & $0.400$ & $0.0270171384844815$ \\ $0.77$ & $1.49653572500002$ & $0.400$ & $0.0253807795447578$ \\ $0.77$ & $1.54653572500002$ & $0.400$ & $0.0238704914680881$ \\ $0.77$ & $1.59653572500002$ & $0.400$ & $0.0224713924978488$ \\ $0.77$ & $1.64653572500002$ & $0.400$ & $0.0211706188873642$ \\ $0.77$ & $1.69653572500002$ & $0.400$ & $0.0199569750386104$ \\ $0.77$ & $1.74653572500002$ & $0.400$ & $0.0188206491934295$ \\ $0.77$ & $1.79653572500002$ & $0.400$ & $0.0177529799785283$ \\ $0.81$ & $0.484322104837853$ & $0.400$ & $0.0760870936626361$ \\ $0.81$ & $0.534322104837853$ & $0.400$ & $0.0675499639584132$ \\ $0.81$ & $0.584322104837853$ & $0.400$ & $0.0605278017367255$ \\ $0.81$ & $0.634322104837853$ & $0.400$ & $0.0546912979618810$ \\ $0.81$ & $0.684322104837853$ & $0.400$ & $0.0497865208840267$ \\ $0.81$ & $0.734322104837853$ & $0.400$ & $0.0456211260755595$ \\ $0.81$ & $0.784322104837853$ & $0.400$ & $0.0420491692950691$ \\ $0.81$ & $0.834322104837853$ & $0.400$ & $0.0389589632508773$ \\ $0.81$ & $0.884322104837853$ & $0.400$ & $0.0362641022489925$ \\ $0.81$ & $0.934322104837853$ & $0.400$ & $0.0338969726150253$ \\ $0.81$ & $0.984322104837853$ & $0.400$ & $0.0318040708586409$ \\ $0.81$ & $1.03432210483785$ & $0.400$ & $0.0299426063306214$ \\ $0.81$ & $1.08432210483785$ & $0.400$ & $0.0282780131573759$ \\ $0.81$ & $1.13432210483785$ & $0.400$ & $0.0267821087966240$ \\ $0.81$ & $1.18432210483785$ & $0.400$ & $0.0254317162361133$ \\ $0.81$ & $1.23432210483785$ & $0.400$ & $0.0242076219697651$ \\ $0.81$ & $1.28432210483785$ & $0.400$ & $0.0230937796962394$ \\ $0.81$ & $1.33432210483785$ & $0.400$ & $0.0220766957561978$ \\ $0.81$ & $1.38432210483785$ & $0.400$ & $0.0211449503674146$ \\ $0.81$ & $1.43432210483785$ & $0.400$ & $0.0202888213490553$ \\ $0.81$ & 
$1.48432210483785$ & $0.400$ & $0.0194999859288600$ \\ $0.81$ & $1.53432210483785$ & $0.400$ & $0.0187712825802748$ \\ \hline \end{tabular} \end{center} \begin{center} \begin{tabular}{|l|l|l|l|} \hline $p$ & $u$ & $v$ & $C(p, u, v)$ \\ \hline $0.81$ & $1.58432210483785$ & $0.400$ & $0.0180965194068392$ \\ $0.81$ & $1.63432210483785$ & $0.400$ & $0.0174703189090337$ \\ $0.81$ & $1.68432210483785$ & $0.400$ & $0.0168879914158993$ \\ $0.81$ & $1.73432210483785$ & $0.400$ & $0.0163454312572640$ \\ $0.81$ & $1.78432210483785$ & $0.400$ & $0.0158390311067080$ \\ $0.81$ & $1.83432210483785$ & $0.400$ & $0.0153656109424389$ \\ $0.81$ & $1.88432210483785$ & $0.400$ & $0.0149223588338145$ \\ $0.81$ & $1.93432210483785$ & $0.400$ & $0.0145067813642754$ \\ $0.81$ & $1.98432210483785$ & $0.400$ & $0.0141166619432482$ \\ $0.81$ & $2.03432210483785$ & $0.400$ & $0.0137500256186627$ \\ $0.85$ & $0.420084025208403$ & $0.400$ & $0.0186691810013144$ \\ $0.85$ & $0.470084025208403$ & $0.400$ & $0.0162646973694791$ \\ $0.85$ & $0.520084025208403$ & $0.400$ & $0.0143518941913499$ \\ $0.85$ & $0.570084025208403$ & $0.400$ & $0.0128045473320526$ \\ $0.85$ & $0.620084025208403$ & $0.400$ & $0.0115330738808552$ \\ $0.85$ & $0.670084025208403$ & $0.400$ & $0.0104734953492880$ \\ $0.85$ & $0.720084025208403$ & $0.400$ & $0.00957942237801035$ \\ $0.85$ & $0.770084025208403$ & $0.400$ & $0.00881663551626843$ \\ $0.85$ & $0.820084025208403$ & $0.400$ & $0.00815946033617365$ \\ $0.85$ & $0.870084025208403$ & $0.400$ & $0.00758832384963171$ \\ $0.85$ & $0.920084025208403$ & $0.400$ & $0.00708808343006240$ \\ $0.85$ & $0.970084025208403$ & $0.400$ & $0.00664686534582870$ \\ $0.85$ & $1.02008402520840$ & $0.400$ & $0.00625524408678757$ \\ $0.85$ & $1.07008402520840$ & $0.400$ & $0.00590565337915905$ \\ $0.85$ & $1.12008402520840$ & $0.400$ & $0.00559195686946623$ \\ $0.85$ & $1.17008402520840$ & $0.400$ & $0.00530913114198484$ \\ $0.85$ & $1.22008402520840$ & $0.400$ & $0.00505302824785758$ \\ 
$0.85$ & $1.27008402520840$ & $0.400$ & $0.00482019595983729$ \\ $0.85$ & $1.32008402520840$ & $0.400$ & $0.00460774000566744$ \\ $0.85$ & $1.37008402520840$ & $0.400$ & $0.00441321739799605$ \\ $0.85$ & $1.42008402520840$ & $0.400$ & $0.00423455315103638$ \\ $0.85$ & $1.47008402520840$ & $0.400$ & $0.00406997450318158$ \\ $0.85$ & $1.52008402520840$ & $0.400$ & $0.00391795871382783$ \\ $0.85$ & $1.57008402520840$ & $0.400$ & $0.00377719110929320$ \\ $0.85$ & $1.62008402520840$ & $0.400$ & $0.00364653129327053$ \\ $0.85$ & $1.67008402520840$ & $0.400$ & $0.00352498564643611$ \\ $0.85$ & $1.72008402520840$ & $0.400$ & $0.00341168480917986$ \\ $0.85$ & $1.77008402520840$ & $0.400$ & $0.00330586520067300$ \\ $0.85$ & $1.82008402520840$ & $0.400$ & $0.00320685363931261$ \\ $0.85$ & $1.87008402520840$ & $0.400$ & $0.00311405461434333$ \\ $0.85$ & $1.92008402520840$ & $0.400$ & $0.00302693959019962$ \\ $0.85$ & $1.97008402520840$ & $0.400$ & $0.00294503804752821$ \\ $0.85$ & $2.02008402520840$ & $0.400$ & $0.00286792988845264$ \\ $0.85$ & $2.07008402520840$ & $0.400$ & $0.00279523901235734$ \\ $0.85$ & $2.12008402520840$ & $0.400$ & $0.00272662785891953$ \\ $0.85$ & $2.17008402520840$ & $0.400$ & $0.00266179269510758$ \\ $0.85$ & $2.22008402520840$ & $0.400$ & $0.00260045962977529$ \\ $0.85$ & $2.27008402520840$ & $0.400$ & $0.00254238115849148$ \\ $0.85$ & $2.32008402520840$ & $0.400$ & $0.00248733315356731$ \\ $0.85$ & $2.37008402520840$ & $0.400$ & $0.00243511230246440$ \\ \hline \end{tabular} \end{center} \begin{center} \begin{tabular}{|l|l|l|l|} \hline $p$ & $u$ & $v$ & $C(p, u, v)$ \\ \hline $0.89$ & $0.351561524655326$ & $0.400$ & $0.00145292282104492$ \\ $0.89$ & $0.401561524655326$ & $0.400$ & $0.00118577480316162$ \\ $0.89$ & $0.451561524655326$ & $0.400$ & $0.000993013381958008$ \\ $0.89$ & $0.501561524655326$ & $0.400$ & $0.000848412513732910$ \\ $0.89$ & $0.551561524655326$ & $0.400$ & $0.000735878944396973$ \\ $0.89$ & $0.601561524655326$ & $0.400$ & 
$0.000646829605102539$ \\ $0.89$ & $0.651561524655326$ & $0.400$ & $0.000574707984924316$ \\ $0.89$ & $0.701561524655326$ & $0.400$ & $0.000515460968017578$ \\ $0.89$ & $0.751561524655326$ & $0.400$ & $0.000466108322143555$ \\ $0.89$ & $0.801561524655326$ & $0.400$ & $0.000424087047576904$ \\ $0.89$ & $0.851561524655326$ & $0.400$ & $0.000387966632843018$ \\ $0.89$ & $0.901561524655326$ & $0.400$ & $0.000357031822204590$ \\ $0.89$ & $0.951561524655326$ & $0.400$ & $0.000330328941345215$ \\ $0.89$ & $1.00156152465533$ & $0.400$ & $0.000306487083435059$ \\ $0.89$ & $1.05156152465533$ & $0.400$ & $0.000285655260086060$ \\ $0.89$ & $1.10156152465533$ & $0.400$ & $0.000267118215560913$ \\ $0.89$ & $1.15156152465533$ & $0.400$ & $0.000250488519668579$ \\ $0.89$ & $1.20156152465533$ & $0.400$ & $0.000235617160797119$ \\ $0.89$ & $1.25156152465533$ & $0.400$ & $0.000222235918045044$ \\ $0.89$ & $1.30156152465533$ & $0.400$ & $0.000210016965866089$ \\ $0.89$ & $1.35156152465533$ & $0.400$ & $0.000198870897293091$ \\ $0.89$ & $1.40156152465533$ & $0.400$ & $0.000188708305358887$ \\ $0.89$ & $1.45156152465533$ & $0.400$ & $0.000179469585418701$ \\ $0.89$ & $1.50156152465533$ & $0.400$ & $0.000171035528182983$ \\ $0.89$ & $1.55156152465533$ & $0.400$ & $0.000163167715072632$ \\ $0.89$ & $1.60156152465533$ & $0.400$ & $0.000155717134475708$ \\ $0.89$ & $1.65156152465533$ & $0.400$ & $0.000149309635162354$ \\ $0.89$ & $1.70156152465533$ & $0.400$ & $0.000143021345138550$ \\ $0.89$ & $1.75156152465533$ & $0.400$ & $0.000137194991111755$ \\ $0.89$ & $1.80156152465533$ & $0.400$ & $0.000131785869598389$ \\ $0.89$ & $1.85156152465533$ & $0.400$ & $0.000126823782920837$ \\ $0.89$ & $1.90156152465533$ & $0.400$ & $0.000121921300888062$ \\ $0.89$ & $1.95156152465533$ & $0.400$ & $0.000117585062980652$ \\ $0.89$ & $2.00156152465533$ & $0.400$ & $0.000113427639007568$ \\ $0.89$ & $2.05156152465533$ & $0.400$ & $0.000109493732452393$ \\ $0.89$ & $2.10156152465533$ & $0.400$ & 
$0.000105798244476318$ \\ $0.89$ & $2.15156152465533$ & $0.400$ & $0.000102311372756958$ \\ $0.89$ & $2.20156152465533$ & $0.400$ & $0.0000989884138107300$ \\ $0.89$ & $2.25156152465533$ & $0.400$ & $0.0000958889722824097$ \\ $0.89$ & $2.30156152465533$ & $0.400$ & $0.0000930279493331909$ \\ $0.89$ & $2.35156152465533$ & $0.400$ & $0.0000902861356735229$ \\ $0.89$ & $2.40156152465533$ & $0.400$ & $0.0000875443220138550$ \\ $0.89$ & $2.45156152465533$ & $0.400$ & $0.0000850409269332886$ \\ $0.89$ & $2.50156152465533$ & $0.400$ & $0.0000825822353363037$ \\ $0.89$ & $2.55156152465533$ & $0.400$ & $0.0000803321599960327$ \\ $0.89$ & $2.60156152465533$ & $0.400$ & $0.0000781416893005371$ \\ $0.89$ & $2.65156152465533$ & $0.400$ & $0.0000760406255722046$ \\ $0.89$ & $2.70156152465532$ & $0.400$ & $0.0000741034746170044$ \\ \hline \end{tabular} \end{center} \begin{center} \begin{tabular}{|l|l|l|l|} \hline $p$ & $u$ & $v$ & $C(p, u, v)$ \\ \hline $0.89$ & $2.75156152465532$ & $0.400$ & $0.0000721588730812073$ \\ $0.89$ & $2.80156152465532$ & $0.400$ & $0.0000703781843185425$ \\ $0.93$ & $0.274351630584367$ & $0.400$ & $5.08388570706080 \times 10^{18}$ \\ $0.93$ & $0.324351630584367$ & $0.400$ & $4.02164584183275 \times 10^{18}$ \\ $0.93$ & $0.374351630584367$ & $0.400$ & $3.29029412228838 \times 10^{18}$ \\ $0.93$ & $0.424351630584367$ & $0.400$ & $2.76064216553067 \times 10^{18}$ \\ $0.93$ & $0.474351630584367$ & $0.400$ & $2.36203204644596 \times 10^{18}$ \\ $0.93$ & $0.524351630584367$ & $0.400$ & $2.05283796276531 \times 10^{18}$ \\ $0.93$ & $0.574351630584367$ & $0.400$ & $1.80707983941192 \times 10^{18}$ \\ $0.93$ & $0.624351630584367$ & $0.400$ & $1.60777544694030 \times 10^{18}$ \\ $0.93$ & $0.674351630584367$ & $0.400$ & $1.44339556221551 \times 10^{18}$ \\ $0.93$ & $0.724351630584367$ & $0.400$ & $1.30586151661967 \times 10^{18}$ \\ $0.93$ & $0.774351630584367$ & $0.400$ & $1.18935865797586 \times 10^{18}$ \\ $0.93$ & $0.824351630584367$ & $0.400$ & 
$1.08960438065609 \times 10^{18}$ \\ $0.93$ & $0.874351630584367$ & $0.400$ & $1.00338075849318 \times 10^{18}$ \\ $0.93$ & $0.924351630584367$ & $0.400$ & $9.28227105214006 \times 10^{17}$ \\ $0.93$ & $0.974351630584367$ & $0.400$ & $8.62232402787871 \times 10^{17}$ \\ $0.93$ & $1.02435163058437$ & $0.400$ & $8.03891902568485 \times 10^{17}$ \\ $0.93$ & $1.07435163058437$ & $0.400$ & $7.52006018232347 \times 10^{17}$ \\ $0.93$ & $1.12435163058437$ & $0.400$ & $7.05607724938857 \times 10^{17}$ \\ $0.93$ & $1.17435163058437$ & $0.400$ & $6.63909564478263 \times 10^{17}$ \\ $0.93$ & $1.22435163058437$ & $0.400$ & $6.26264382665951 \times 10^{17}$ \\ $0.93$ & $1.27435163058437$ & $0.400$ & $5.92135844931128 \times 10^{17}$ \\ $0.93$ & $1.32435163058437$ & $0.400$ & $5.61076019967838 \times 10^{17}$ \\ $0.93$ & $1.37435163058437$ & $0.400$ & $5.32708143146652 \times 10^{17}$ \\ $0.93$ & $1.42435163058437$ & $0.400$ & $5.06713224060254 \times 10^{17}$ \\ $0.93$ & $1.47435163058437$ & $0.400$ & $4.82819540336550 \times 10^{17}$ \\ $0.93$ & $1.52435163058437$ & $0.400$ & $4.60794321949106 \times 10^{17}$ \\ $0.93$ & $1.57435163058437$ & $0.400$ & $4.40437114631546 \times 10^{17}$ \\ $0.93$ & $1.62435163058437$ & $0.400$ & $4.21574442377283 \times 10^{17}$ \\ $0.93$ & $1.67435163058437$ & $0.400$ & $4.04055483735086 \times 10^{17}$ \\ $0.93$ & $1.72435163058437$ & $0.400$ & $3.87748545677684 \times 10^{17}$ \\ $0.93$ & $1.77435163058437$ & $0.400$ & $3.72538169700794 \times 10^{17}$ \\ $0.93$ & $1.82435163058437$ & $0.400$ & $3.58322742657385 \times 10^{17}$ \\ $0.93$ & $1.87435163058437$ & $0.400$ & $3.45012513242157 \times 10^{17}$ \\ $0.93$ & $1.92435163058437$ & $0.400$ & $3.32527936550119 \times 10^{17}$ \\ $0.93$ & $1.97435163058437$ & $0.400$ & $3.20798285548969 \times 10^{17}$ \\ $0.93$ & $2.02435163058437$ & $0.400$ & $3.09760480929080 \times 10^{17}$ \\ $0.93$ & $2.07435163058437$ & $0.400$ & $2.99358100573060 \times 10^{17}$ \\ $0.93$ & $2.12435163058437$ & 
$0.400$ & $2.89540537512205 \times 10^{17}$ \\ $0.93$ & $2.17435163058437$ & $0.400$ & $2.80262281222224 \times 10^{17}$ \\ $0.93$ & $2.22435163058437$ & $0.400$ & $2.71482301836819 \times 10^{17}$ \\ $0.93$ & $2.27435163058437$ & $0.400$ & $2.63163520611826 \times 10^{17}$ \\ $0.93$ & $2.32435163058437$ & $0.400$ & $2.55272352970900 \times 10^{17}$ \\ $0.93$ & $2.37435163058437$ & $0.400$ & $2.47778312871162 \times 10^{17}$ \\ $0.93$ & $2.42435163058437$ & $0.400$ & $2.40653669169914 \times 10^{17}$ \\ $0.93$ & $2.47435163058437$ & $0.400$ & $2.33873146248732 \times 10^{17}$ \\ \hline \end{tabular} \end{center} \begin{center} \begin{tabular}{|l|l|l|l|} \hline $p$ & $u$ & $v$ & $C(p, u, v)$ \\ \hline $0.93$ & $2.52435163058437$ & $0.400$ & $2.27413662434387 \times 10^{17}$ \\ $0.93$ & $2.57435163058437$ & $0.400$ & $2.21254100805868 \times 10^{17}$ \\ $0.93$ & $2.62435163058437$ & $0.400$ & $2.15375107839294 \times 10^{17}$ \\ $0.93$ & $2.67435163058437$ & $0.400$ & $2.09758916054065 \times 10^{17}$ \\ $0.93$ & $2.72435163058437$ & $0.400$ & $2.04389187412840 \times 10^{17}$ \\ $0.93$ & $2.77435163058437$ & $0.400$ & $1.99250874717724 \times 10^{17}$ \\ $0.93$ & $2.82435163058437$ & $0.400$ & $1.94330098653627 \times 10^{17}$ \\ $0.93$ & $2.87435163058437$ & $0.400$ & $1.89614038471800 \times 10^{17}$ \\ $0.97$ & $0.50$ & $0.60$ & $9.17733198126610 \times 10^{72}$ \\ $0.97$ & $1.00$ & $0.60$ & $6.05478107453485 \times 10^{72}$ \\ $0.97$ & $3.00$ & $0.60$ & $3.13202840384780 \times 10^{72}$ \\ $0.97$ & $5.00$ & $0.60$ & $2.30524156812013 \times 10^{72}$ \\ \hline \end{tabular} \end{center} \parsecskip This finally proves the lower bound $\tilde G_k(n) \gg F_0$ for every $p\in (\frac 12, 1)$. \label{lastpage} \end{document}
\begin{document} \title[Coalgebraic Trace Semantics for Continuous PTS]{Coalgebraic Trace Semantics for Continuous Probabilistic Transition Systems\rsuper*} \author[Henning Kerstan]{Henning Kerstan} \address{Universität Duisburg-Essen, Duisburg, Germany} \email{\{henning.kerstan, barbara\_koenig\}@uni-due.de} \author[Barbara König]{Barbara König} \keywords{probabilistic transition systems, Markov processes, coalgebra, trace semantics} \subjclass{G.3, F.1.1, F.1.2} \titlecomment{{\lsuper*}This is an extended version of \cite{KK12a} which was presented at CONCUR 2012.} \hypersetup{ pdftitle={Coalgebraic Trace Semantics for Continuous Probabilistic Transition Systems}, pdfauthor={Henning Kerstan, Barbara König}, pdfkeywords={probabilistic transition systems, Markov processes, coalgebra, trace semantics} } \begin{abstract} Coalgebras in a Kleisli category yield a generic definition of trace semantics for various types of labelled transition systems. In this paper we apply this generic theory to generative probabilistic transition systems, short PTS, with arbitrary (possibly uncountable) state spaces. We consider the sub-probability monad and the probability monad (Giry monad) on the category of measurable spaces and measurable functions. Our main contribution is that the existence of a final coalgebra in the Kleisli category of these monads is closely connected to the measure-theoretic extension theorem for sigma-finite pre-measures. In fact, we obtain a practical definition of the trace measure for both finite and infinite traces of PTS that subsumes a well-known result for discrete probabilistic transition systems. Finally we consider two example systems with uncountable state spaces and apply our theory to calculate their trace measures. 
\end{abstract} \maketitle \section{Introduction} Coalgebra \cite{jacobs,r:universal-coalgebra} is a general framework in which several types of transition systems can be studied (deterministic and non-deterministic automata, weighted automata, transition systems with non deterministic and probabilistic branching, etc.). One of the strong points of coalgebra is that it induces -- via the notion of coalgebra homomorphism and final coalgebra -- a notion of behavioral equivalence for all these types of systems. The resulting behavioral equivalence is usually some form of bisimilarity. However, \cite{hasuo} has shown that by modifying the category in which the coalgebra lives, one can obtain different notions of behavioral equivalence, such as trace equivalence. We will shortly describe the basic idea: given an endofunctor $F$ on $\mathbf{Set}$, the category of sets and total functions, describing the branching type of the system, a coalgebra in the category $\mathbf{Set}$ is a function $\alpha\colon X\to FX$, where $X$ is a set. Consider, for instance, the functor $FX = \mathcal{P}_\mathit{fin}(\ensuremath{\mathcal{A}} \times X+\mathbf{1})$, where $\mathcal{P}_\mathit{fin}$ is the finite powerset functor and $\ensuremath{\mathcal{A}}$ is a given alphabet. This setup allows us to specify finitely branching non-deterministic automata where a state $x\in X$ is mapped to a set of tuples of the form $(a,y)$, for $a\in \ensuremath{\mathcal{A}}, y\in X$, describing transitions. The set contains the symbol $\checkmark$ (for termination) -- the only element contained in the one-element set $\mathbf{1}$ -- if and only if $x$ is a final state. A coalgebra homomorphism maps the set of states of a coalgebra to the set of states of another coalgebra, preserving the branching structure. Furthermore, the final coalgebra -- if it exists -- is the final object in the category of coalgebras. 
Every coalgebra has a unique homomorphism into the final coalgebra and two states of a transition system modelled as coalgebra are mapped to the same state in the final coalgebra iff they are behaviorally equivalent. Now, applying this notion to the example above induces bisimilarity, whereas usually the appropriate notion of behavioral equivalence for non-deterministic finite automata is language equivalence. One of the ideas of \cite{hasuo} is to view a coalgebra $X\to\mathcal{P}(\mathcal{A}\times X+\mathbf{1})$ not as an arrow in $\mathbf{Set}$, but as an arrow $X\to \mathcal{A}\times X+\mathbf{1}$ in $\mathbf{Rel}$, the category of sets and relations which is also the Kleisli category of the powerset monad. This induces trace equivalence, instead of bisimilarity, with the underlying intuition that non-determinism is a side-effect that is ``hidden'' within the monad. This side effect is not present in the final coalgebra (which consists of the set $\mathcal{A}^*$ with a suitable coalgebra structure), but in the arrow from a state $x\in X$ to $\mathcal{A}^*$, which is a relation, and relates each state with all words accepted from this state. More generally, coalgebras are given as arrows $X\to TFX$ in a Kleisli category, where a monad $T$ describes implicit branching and an endofunctor $F$ specifies explicit branching with the underlying intuition that the implicit branching (for instance non-determinism or probabilistic branching) is aggregated and abstracted away in the final coalgebra. For several monads this yields a form of trace semantics. In \cite{hasuo} a theorem gives sufficient conditions for the existence of a final coalgebra for Kleisli categories over $\mathbf{Set}$, which -- interestingly -- can be obtained as initial $F$-algebra in $\mathbf{Set}$. In \cite{hasuo} it is also proposed to obtain probabilistic trace semantics for the Kleisli category of the (discrete) subdistribution monad $\mathcal{D}$ on $\mathbf{Set}$. 
The endofunctor of that monad maps a set $X$ to the set $\mathcal{D}(X)$ of all functions $p\colon X \to [0,1]$ satisfying $\sum_{x \in X} p(x) \leq 1$. Coalgebras in this setting are functions of the form $X\to \mathcal{D}(\mathcal{A}\times X+\mathbf{1})$ (modeling probabilistic branching and termination), seen as arrows in the corresponding Kleisli category. From the general result in \cite{hasuo} mentioned above it again follows that the final coalgebra is carried by $\mathcal{A}^*$, where the mapping into the final coalgebra assigns to each state a discrete probability distribution over its traces. In this way one obtains the finite trace semantics of generative probabilistic systems \cite{s:coalg-ps-phd,glabbeek}. The contribution in \cite{hasuo} is restricted to discrete probability spaces, where the probability distributions always have at most countable support \cite{Sokolova20115095}. This might seem sufficient for practical applications at first glance, but it has two important drawbacks: first, it excludes several interesting systems that involve uncountable state spaces (see for instance the examples in Section~\ref{sec:advexamples} or the examples in \cite{Pan09}). Second, it excludes the treatment of infinite traces, as detailed in \cite{hasuo}, since the set of all infinite traces is uncountable and hence needs measure theory to be treated appropriately. This is an intuitive reason for the choice of the subdistribution monad -- instead of the distribution monad -- in \cite{hasuo}: for a given state, it might always be the case that a non-zero ``probability mass'' is associated to the infinite traces leaving this state, which -- in the discrete case -- cannot be specified by a probability distribution over all words. 
Hence, we generalize the results concerning probabilistic trace semantics from \cite{hasuo} to the case of uncountable state spaces, by working in the Kleisli category of the (continuous) sub-probability monad over $\mathbf{Meas}$ (the category of measurable spaces). Unlike in \cite{hasuo} we do not derive the final coalgebra via a generic construction (building the initial algebra of the functor), but we construct the final coalgebra directly. Furthermore we consider the Kleisli category of the (continuous) probability monad (Giry monad) and treat the case with and without termination. In the former case we obtain a coalgebra over the set $\mathcal{A}^\infty$ (finite and infinite traces over $\mathcal{A}$) and in the latter over the set $\mathcal{A}^\omega$ (infinite traces), which shows the naturality of the approach. For completeness we also consider the case of the sub-probability monad without termination, which results in a trivial final coalgebra over the empty set. In all cases we obtain the natural trace measures as instances of the generic coalgebraic theory. Since, to our knowledge, there is no generic construction of the final coalgebra for these cases, we construct the respective final coalgebras directly and show their correctness by proving that each coalgebra admits a unique homomorphism into the final coalgebra. Here we rely on the measure-theoretic extension theorem for sigma-finite pre-measures and the identity theorem. In the conclusion we will further compare our approach to \cite{hasuo} and discuss why we took an alternative route. \subsection{Another paper?} This paper is the extended version of the paper \cite{KK12a} first published at CONCUR 2012 and thus it necessarily contains all results of that paper. 
Due to page limitations some of the proofs were omitted in the published version and hence in the technical report \cite{KK12TR} we provided a version which is identical to the original paper but contains an appendix with the missing proofs. In contrast to that, the paper at hand contains all the proofs in place and also some corrections. Moreover, more details are presented, mainly taken from \cite{kerstan}, which was the starting point for everything. Last but not least the paper at hand includes the new Section \ref{sec:advexamples} containing two examples with uncountable state spaces and some additional theory needed in order to understand them. \section{Background Material and Preliminaries} \label{sec:prelim} We assume that the reader is familiar with the basic definitions of category theory. However, we will provide a brief introduction to notation, measure theory and integration, coalgebra, coalgebraic trace semantics and Kleisli categories -- of course all geared to our needs. \subsection{Notation} By $\mathbf{1}$ we denote a singleton set, its unique element is $\checkmark$. For arbitrary sets $X, Y$ we write $X \setminus Y$ for set complement, $X \times Y$ for the usual cartesian product and the disjoint union $X + Y$ is the set $\set{(x,0), (y,1)\mid x \in X, y \in Y}$. Whenever $X \cap Y = \emptyset$ this coincides with (is isomorphic to) the usual union $X \cup Y$ in an obvious way. For set inclusion we write $\subset$ for strict inclusion and $\subseteq$ otherwise. The set of real numbers is denoted by $\mathbb{R}$, the set of extended reals is the set $\overline{\mathbb{R}} := \mathbb{R} \cup \set{\pm\infty}$ and $\mathbb{R}_+$ and $\overline{\mathbb{R}}_+$ are their restrictions to the non-negative (extended) reals. We require $0 \cdot \pm \infty = \pm \infty \cdot 0 = 0$. For a function $f\colon X \to Y$ and a set $A \subseteq X$ the restriction of $f$ to $A$ is the function $f|_A\colon A \to Y$. 
\subsection{A Brief Introduction to Measure Theory} Within this section we want to give a very brief introduction to measure theory. For a more thorough treatment there are many standard textbooks, see e.g.~\cite{ash,Els07}. Measure theory generalizes the idea of length, area or volume. Its most basic definition is that of a \emph{$\sigma$-algebra (sigma-algebra)}. Given an arbitrary set $X$ we call a set $\Sigma$ of subsets of $X$ a \emph{$\sigma$-algebra} iff it contains the empty set and is closed under complement and countable union. The tuple $(X, \Sigma)$ is called a \emph{measurable space}. We will sometimes call the set $X$ itself a measurable space, keeping in mind that there is an associated $\sigma$-algebra which we will then denote by $\Sigma_X$. For any subset $\mathcal{G} \subseteq \powerset{X}$ we can always uniquely construct the smallest $\sigma$-algebra on $X$ containing $\mathcal{G}$ which is denoted by $\sigalg[X]{\mathcal{G}}$. We call $\mathcal{G}$ the \emph{generator} of $\sigalg[X]{\mathcal{G}}$, which in turn is called \emph{the $\sigma$-algebra generated by $\mathcal{G}$}. It is known (and easy to show) that $\sigma_X$ is a monotone and idempotent operator. The elements of a $\sigma$-algebra on $X$ are called the \emph{measurable sets} of $X$. Among all possible generators for $\sigma$-algebras, there are special ones, so-called \emph{semirings of sets}. \begin{defi}[Semiring of Sets] Let $X$ be an arbitrary set. A subset $\mathcal{S} \subseteq \powerset{X}$ is called a \emph{semiring of sets} if it satisfies the following three properties. \begin{enumerate}[label=(\alph*)] \item $\mathcal{S}$ contains the empty set, i.e. $\emptyset \in \mathcal{S}$. \item $\mathcal{S}$ is closed under pairwise intersection, i.e. for $A, B \in \mathcal{S}$ we always require $(A \cap B)\in \mathcal{S}$. \item The set difference of any two sets in $\mathcal{S}$ is the disjoint union of finitely many sets in $\mathcal{S}$, i.e. 
for any $A, B \in \mathcal{S}$ there is an $N \in \mathbb{N}$ and pairwise disjoint sets $C_1,\ldots,C_N \in \mathcal{S}$ such that $A\setminus B = \bigcup_{n=1}^N C_n$. \end{enumerate} \end{defi} \noindent It is easy to see that every $\sigma$-algebra is a semiring of sets but the reverse is false. Please note that a semiring of sets is different from a semiring in algebra. For our purposes, we will consider special semirings containing a countable cover of the base set. \begin{defi}[Countable Cover, Covering Semiring] Let $\mathcal{S}$ be a semiring. A countable sequence $(S_n)_{n \in \mathbb{N}}$ of sets in $\mathcal{S}$ such that $\bigcup_{n \in \mathbb{N}}S_n = X$ is called a \emph{countable cover of $X$ (in $\mathcal{S}$)}. If such a countable cover exists we call $\mathcal{S}$ a \emph{covering} semiring. \end{defi} With these basic structures at hand, we can now define pre-measures and measures. A non-negative function $\mu \colon \mathcal{S} \to \overline{\mathbb{R}}_+$ defined on a semiring $\mathcal{S}$ is called a \emph{pre-measure} on $X$ if it assigns $0$ to the empty set and is \emph{$\sigma$-additive}, i.e. for a sequence $(S_n)_{n \in \mathbb{N}}$ of pairwise disjoint sets in $\mathcal{S}$ where $\left(\bigcup_{n \in \mathbb{N}}S_n\right) \in \mathcal{S}$ we must have \begin{align} \mu\left(\bigcup_{n \in \mathbb{N}}S_n\right) = \sum_{n \in \mathbb{N}}\mu\left(S_n\right). \end{align} A pre-measure $\mu$ is called \emph{$\sigma$-finite} if there is a countable cover $(S_n)_{n \in \mathbb{N}}$ of $X$ in $\mathcal{S}$ such that $\mu\left(S_n\right) < \infty$ for all $n \in \mathbb{N}$. Whenever $\mathcal{S}$ is a $\sigma$-algebra we call $\mu$ a \emph{measure} and the tuple $(X, \mathcal{S}, \mu)$ a \emph{measure space}. In that case $\mu$ is said to be \emph{finite} iff $\mu(X) < \infty$ and for the special cases $\mu(X) = 1$ (or $\mu(X) \leq 1$) $\mu$ is called a \emph{probability measure} (or \emph{sub-probability measure} respectively). 
Measures are \emph{monotone}, i.e. if $A,B$ are measurable $A \subseteq B$ implies $\mu(A) \leq \mu(B)$ and \emph{continuous}, i.e. for measurable $A_1 \subseteq A_2 \subseteq \cdots \subseteq A_n \subseteq \cdots$ we always have $\mu\left(\bigcup_{n=1}^\infty A_n\right) = \lim_{n \to \infty} \mu(A_n)$ and for measurable $B_1 \supseteq B_2 \supseteq \cdots \supseteq B_n \supseteq \cdots$ with $\mu(B_1) < \infty$ we have $\mu\left(\bigcap_{n=1}^\infty B_n\right) = \lim_{n \to \infty} \mu(B_n)$ \cite[1.2.5 and 1.2.7]{ash}. Given a measurable space $(X, \Sigma_X)$, a simple and well-known probability measure is the so-called \emph{Dirac measure}, which we will use later. It is defined as $\delta_x^X\colon \Sigma_X \to [0,1]$, and is $1$ on $S \in \Sigma_X$ iff $x \in S$ and $0$ otherwise. The most significant theorems from measure theory which we will use in this paper are the identity theorem and the extension theorem for $\sigma$-finite pre-measures, for which a proof can be found e.g. in~\cite[II.5.6 and II.5.7]{Els07}. \begin{prop}[Identity Theorem] Let $X$ be a set, $\mathcal{G} \subseteq \powerset{X}$ be a set which is closed under pairwise intersection and $\mu, \nu \colon \sigalg[X]{\mathcal{G}} \to \overline{\mathbb{R}}_+$ be measures. If $\mu|_\mathcal{G} = \nu|_\mathcal{G}$ and $\mathcal{G}$ contains a countable cover $(G_n)_{n \in \mathbb{N}}$ of $X$ satisfying $\mu(G_n) = \nu(G_n) < \infty$ for all $n \in \mathbb{N}$ then $\mu = \nu$.\qed \end{prop} \begin{prop}[Extension Theorem for $\sigma$-finite Pre-Measures] \label{prop:extension} Let $X$ be a set, \linebreak $\mathcal{S} \subseteq \powerset{X}$ be a semiring of sets and $\mu\colon \mathcal{S} \to \overline{\mathbb{R}}_+$ be a $\sigma$-finite pre-measure. Then there exists a uniquely determined measure $\hat{\mu} \colon \sigalg[X]{\mathcal{S}} \to \overline{\mathbb{R}}_+$ such that $\hat{\mu}|_\mathcal{S} = \mu$. 
\qed \end{prop} As we are only interested in finite measures, we provide a result, which can be derived easily from the identity theorem. \begin{cor}[Equality of Finite Measures on Covering Semirings] \label{cor:equality_of_measures} Let $X$ be an arbitrary set, $\mathcal{S} \subseteq \powerset{X}$ be a covering semiring and $\mu, \nu \colon \sigalg[X]{\mathcal{S}} \to \overline{\mathbb{R}}_+$ be finite measures. Then $\mu = \nu$ if and only if $\mu|_\mathcal{S} = \nu|_\mathcal{S}$. \end{cor} \proof Obviously we get $\mu|_\mathcal{S} = \nu|_\mathcal{S}$ if $\mu = \nu$. For the other direction let $(S_n)_{n \in \mathbb{N}}$ be a countable cover of $X$. Then finiteness of $\mu$ and $\nu$ together with the fact that measures are continuous and $\mu|_\mathcal{S} = \nu|_\mathcal{S}$ yield $\mu(S_n) = \nu(S_n) \leq \nu(X)< \infty$ for all $n \in \mathbb{N}$. Since $\mathcal{S}$ is a semiring of sets, it is closed under pairwise intersection which allows us to apply the identity theorem yielding $\mu = \nu$. \qed \subsection{The Category of Measurable Spaces and Functions} Let $X$ and $Y$ be measurable spaces. A function $f \colon X \to Y$ is called \emph{measurable} iff the pre-image of any measurable set of $Y$ is a measurable set of $X$. The category $\mathbf{Meas}$ has measurable spaces as objects and measurable functions as arrows. Composition of arrows is function composition and the identity arrows are the identity functions. The product of two measurable spaces $(X, \mathcal{S}igma_X)$ and $(Y, \mathcal{S}igma_Y)$ is the set $X \times Y$ endowed with the $\sigma$-algebra generated by $\mathcal{S}igma_X \ast \mathcal{S}igma_Y$, the set of so-called ``rectangles'' of measurable sets which is $\set{S_X \times S_Y\mid S_X \in \mathcal{S}igma_X, S_Y \in \mathcal{S}igma_Y}$. It is called the \emph{product $\sigma$-algebra} of $\mathcal{S}igma_X$ and $\mathcal{S}igma_Y$ and is denoted by $\mathcal{S}igma_X \otimes \mathcal{S}igma_Y$. 
Whenever $\mathcal{S}igma_X$ and $\mathcal{S}igma_Y$ have suitable generators, we can also construct a possibly smaller generator for the product $\sigma$-algebra by taking only the ``rectangles'' of the generators. \begin{prop}[Generators for the Product $\sigma$-Algebra] \label{prop:generator_product} Let $X, Y$ be arbitrary sets and $\mathcal{G}_X \subseteq \powerset{X}, \mathcal{G}_Y \subseteq \powerset{Y}$ such that $X \in \mathcal{G}_X$ and $Y \in \mathcal{G}_Y$. Then the following holds: \[ \sigalg[X\times Y]{\mathcal{G}_X \ast \mathcal{G}_Y} = \sigalg[X]{\mathcal{G}_X} \otimes \sigalg[Y]{\mathcal{G}_Y}\,. \eqno{\qEd} \] \end{prop} A proof of this proposition can be found in many standard textbooks on measure theory, e.g. in \cite{Els07}. We remark that there are (obvious) product endofunctors on the category of measurable spaces and functions. \begin{defi}[Product Functors] Let $(Z, \mathcal{S}igma_Z)$ be a measurable space. The endofunctor $Z \times \mathrm{Id}_\mathbf{Meas}$ maps a measurable space $(X, \mathcal{S}igma_X)$ to $\left(Z \times X, \mathcal{S}igma_Z \otimes \mathcal{S}igma_X\right)$ and a measurable function $f\colon X \to Y$ to the measurable function $Z \times f \colon Z \times X \to Z \times Y, (z,x) \mapsto \left(z,f(x)\right)$. The functor $\mathrm{Id}_\mathbf{Meas} \times Z$ is constructed analogously. \end{defi} The coproduct of two measurable spaces $(X, \mathcal{S}igma_X)$ and $(Y, \mathcal{S}igma_Y)$ is the set $X + Y$ endowed with $\mathcal{S}igma_X \oplus \mathcal{S}igma_Y := \set{S_X + S_Y\mid S_X \in \mathcal{S}igma_X, S_Y \in \mathcal{S}igma_Y}$ as $\sigma$-algebra, the \emph{disjoint union $\sigma$-algebra}. Note that in contrast to the product no $\sigma$-operator is needed because $\mathcal{S}igma_X \oplus \mathcal{S}igma_Y$ itself is already a $\sigma$-algebra whereas $\mathcal{S}igma_X \ast \mathcal{S}igma_Y$ is usually no $\sigma$-algebra. 
For generators of the disjoint union $\sigma$-algebra we provide and prove a comparable result to the one given above for the product $\sigma$-algebra. \begin{prop}[Generators for the Disjoint Union $\sigma$-Algebra] \label{prop:generator_union} Let $X, Y$ be arbitrary sets and $\mathcal{G}_X \subseteq \powerset{X}, \mathcal{G}_Y \subseteq \powerset{Y}$ such that $\emptyset \in \mathcal{G}_X$ and $Y \in \mathcal{G}_Y$. Then the following holds: \begin{align} \sigalg[X + Y]{\mathcal{G}_X \oplus \mathcal{G}_Y} = \sigalg[X]{\mathcal{G}_X} \oplus \sigalg[Y]{\mathcal{G}_Y}\label{eq:generator_union}\,. \end{align} \end{prop} \noindent In order to prove this, we cite another result from \cite[I.4.5 Korollar]{Els07}. \begin{lem} \label{lem:trace_sigma_algebra} Let $X$ be an arbitrary set, $\mathcal{G} \subseteq \powerset{X}$ and $S \subseteq X$. Then $\sigalg[S]{\mathcal{G}|S} = \sigalg[X]{\mathcal{G}}|S$ where $\mathcal{G} | S := \set{G \cap S \mid G \in \mathcal{G}}$ and analogously $\sigalg[X]{\mathcal{G}}|S := \set{G \cap S \mid G \in \sigalg[X]{\mathcal{G}}}$. \end{lem} \proof[Proof of Proposition~\ref{prop:generator_union}] Without loss of generality we assume that $X$ and $Y$ are disjoint. Hence for any subsets $A \subseteq X$, $B \subseteq Y$ we have $A \cap B = \emptyset$ and thus $A + B \cong A \cup B$. In order to prove equation \eqref{eq:generator_union} we show both inclusions. \begin{itemize}[label=$\subseteq$] \item We have $\mathcal{G}_X \oplus \mathcal{G}_Y \subseteq \sigalg[X]{\mathcal{G}_X} \oplus \sigalg[Y]{\mathcal{G}_Y}$ and thus monotonicity and idempotence of the $\sigma$-operator immediately yield $\sigalg[X\cup Y]{\mathcal{G}_X \oplus \mathcal{G}_Y} \subseteq \sigalg[X]{\mathcal{G}_X} \oplus \sigalg[Y]{\mathcal{G}_Y}$. \item [$\supseteq$] Let $G \in \sigalg[X]{\mathcal{G}_X} \oplus \sigalg[Y]{\mathcal{G}_Y}$. Then $G = G_X\cup G_Y$ with $G_X \in \sigalg[X]{\mathcal{G}_X}$ and $G_Y \in \sigalg[Y]{\mathcal{G}_Y}$. 
\linebreak We observe that $\mathcal{G}_X = (\mathcal{G}_X \oplus \mathcal{G}_Y) | X$ and by applying Lemma~\ref{lem:trace_sigma_algebra} we obtain that $\sigalg[X\cup Y]{\mathcal{G}_X \oplus \mathcal{G}_Y} | X = \sigalg[X]{\mathcal{G}_X}$. Thus there must be a $G'_Y \in \powerset{Y}$ such that \linebreak $G_X \cup G'_Y \in \sigalg[X\cup Y]{\mathcal{G}_X \oplus \mathcal{G}_Y}$. Analogously there must be a $G'_X \in \powerset{X}$ such that $G'_X \cup G_Y \in \sigalg[X\cup Y]{\mathcal{G}_X \oplus \mathcal{G}_Y}$. We have $Y = \emptyset \cup Y \in \sigalg[X\cup Y]{\mathcal{G}_X \oplus \mathcal{G}_Y}$ and hence we also have $X = (X\cup Y)\setminus Y \in \sigalg[X\cup Y]{\mathcal{G}_X \oplus \mathcal{G}_Y}$. Thus we calculate \begin{align*} G = G_X \cup G_Y = \big( (G_X \cup G'_Y) \cap X \big) \cup \big( (G'_X \cup G_Y) \cap Y\big) \in \sigalg[X\cup Y]{\mathcal{G}_X \oplus \mathcal{G}_Y} \end{align*} and hence can conclude that $\sigalg[X\cup Y]{\mathcal{G}_X \oplus \mathcal{G}_Y} \supseteq \sigalg[X]{\mathcal{G}_X} \oplus \sigalg[Y]{\mathcal{G}_Y}$.\qed \end{itemize} \noindent As before we have endofunctors for the coproduct, the coproduct functors. \begin{defi}[Co-Product Functors] Let $(Z, \Sigma_Z)$ be a measurable space. The endofunctor $\mathrm{Id}_\mathbf{Meas} + Z$ maps a measurable space $(X, \Sigma_X)$ to $\left(X+Z, \Sigma_X \oplus \Sigma_Z\right)$ and a measurable function $f\colon X \to Y$ to the measurable function $f + Z \colon X + Z\to Y + Z$, $(x,0) \mapsto (f(x),0)$, $(z,1) \mapsto (z,1)$. The functor $Z + \mathrm{Id}_\mathbf{Meas}$ is constructed analogously. \end{defi} For isomorphisms in $\mathbf{Meas}$ we provide the following characterization which we will need later for our main result. 
\begin{prop}[Isomorphisms in $\mathbf{Meas}$] \label{prop:isomorphisms} Two measurable spaces $X$ and $Y$ are isomorphic in $\mathbf{Meas}$ iff there is a bijective function $\varphi\colon X \to Y$ such that\footnote{For $\mathcal{S} \subseteq \powerset{X}$ and a function $\varphi \colon X \to Y$ let $\varphi(\mathcal{S}) = \set{\varphi\left(S_X\right) \mid S_X \in \mathcal{S}} = \set{ \set{\varphi(x) \mid x \in S_X} \mid S_X \in \mathcal{S}}$.} $\varphi\left(\mathcal{S}igma_X\right) = \mathcal{S}igma_Y$. If $\mathcal{S}igma_X$ is generated by a set $\mathcal{S} \subseteq \powerset{X}$ then $X$ and $Y$ are isomorphic iff there is a bijective function $\varphi\colon X \to Y$ such that $\mathcal{S}igma_Y$ is generated by ${\varphi\left(\mathcal{S}\right)}$. In this case $\mathcal{S}$ is a (covering) semiring of sets [a $\sigma$-algebra] iff $\varphi(\mathcal{S})$ is a (covering) semiring of sets [a $\sigma$-algebra]. \end{prop} Again, we need a result from measure theory for the proof. This auxiliary result and its proof can be found e.g. in \cite[I.4.4 Satz]{Els07}. \begin{lem} \label{lem:generator_inverse} Let $X, Y$ be sets, $f\colon X \to Y$ be a function. Then for every subset $\mathcal{S} \subseteq \powerset{Y}$ it holds that $\sigalg[X]{f^{-1}(\mathcal{S})} = f^{-1}\left(\sigalg[Y]{\mathcal{S}}\right)$.\qed \end{lem} \proof[Proof of Proposition~\ref{prop:isomorphisms}] Since the identity arrows in $\mathbf{Meas}$ are the identity functions, we can immediately derive that any isomorphism $\varphi\colon X \to Y$ must be a bijective function. Measurability of $\varphi$ and its inverse function $\varphi^{-1}\colon Y \to X$ yield $\varphi\left(\mathcal{S}igma_X\right) = \mathcal{S}igma_Y$. The equality $\sigalg[Y]{\varphi(\mathcal{S})} = \varphi\left(\sigalg[X]{\mathcal{S}}\right)$ follows from Lemma~\ref{lem:generator_inverse} by taking $f = \varphi^{-1}$. 
The last equivalence is easy to verify using bijectivity of $\varphi$ and $\varphi^{-1}$.\qed \subsection{Kleisli Categories and Liftings of Endofunctors} Recall that a monad on a category $\mathbf{C}$ is a triple $(T, \eta, \mu)$ where $T\colon \mathbf{C} \to \mathbf{C}$ is an endofunctor together with two natural transformations\footnote{This is the second meaning of the symbol $\mu$. Until now, $\mu$ was used as a symbol for a (pre-)measure.} $\eta \colon \mathrm{Id}_\mathbf{C} \mathbb{R}ightarrow T$ and $\mu\colon T^2 \mathbb{R}ightarrow T$ such that the following diagrams commute for all $\mathbf{C}$-objects $X$. \[\xymatrix@C+20 pt{ TX \ar[r]^{T\eta_X} \ar[dr]_{\,\mathrm{id}_{TX}\!} \ar[d]_{\eta_{TX}} & T^2X \ar[d]^{\mu_X} & & T^3X \ar[r]^{T\mu_X} \ar[d]_{\mu_{TX}} & T^2X \ar[d]^{\mu_X}\\ T^2X \ar[r]_{\mu_X} & TX & & T^2X \ar[r]_{\mu_X} & TX }\] \noindent Given a monad $(T, \eta, \mu)$ on a category $\mathbf{C}$ we can define a new category, the Kleisli category of $T$, where the objects are the same as in $\mathbf{C}$ but every arrow in the new category corresponds to an arrow $f\colon X \to TY$ in $\mathbf{C}$. Thus, arrows in the Kleisli category incorporate side effects specified by a monad~\cite{hasuo,abhkms:coalgebra-min-det}. Formally we will use the following definition. \begin{defi}[Kleisli Category] Let $(T, \eta, \mu)$ be a monad on a category $\mathbf{C}$. The \emph{Kleisli category of $T$} has the same objects as $\mathbf{C}$. For any two such objects $X$ and $Y$, the Kleisli arrows with domain $X$ and codomain $Y$ are exactly the $\mathbf{C}$-arrows $f\colon X \to TY$. Composition of Kleisli arrows $f \colon X \to TY$ and $g \colon Y\to TZ $ is defined as $g\circ_T f := \mu_Z \circ T(g)\circ f$, the identity arrow for any Kleisli object $X$ is $\eta_X$. 
\end{defi} Given an endofunctor $F$ on $\mathbf{C}$, we want to construct an endofunctor $\overline{F}$ on $\mathcal{K}\ell(T)$ that ``resembles'' $F$: Since objects in $\mathbf{C}$ and objects in $\mathcal{K}\ell(T)$ are the same, we want $\overline{F}$ to coincide with $F$ on objects i.e. we want $\overline{F}X = FX$. It remains to define how $\overline{F}$ shall act on Kleisli arrows $f\colon X \to TY$ such that it ``resembles'' $F$. Formally we require $\overline{F}$ to be a \emph{lifting} of $F$ in the following sense: Given a monad $(T,\eta,\mu)$ and its Kleisli category $\mathcal{K}\ell(T)$, there is a canonical adjunction\footnote{Explicitly: The left-adjoint $L\colon \mathbf{C} \to \mathcal{K}\ell(T)$ is given by $LX = X$ for all $\mathbf{C}$-objects $X$ and $L(f) = \eta_Y \circ f$ for all $\mathbf{C}$-arrows $f\colon X \to Y$. The right-adjoint $R\colon \mathcal{K}\ell(T) \to \mathbf{C}$ is given by $RX = TX$ for all $\mathcal{K}\ell(T)$-objects $X$ and $R(f)=\mu_Y \circ Tf$ for all $\mathcal{K}\ell(T)$-arrows $f \colon X \to TY$.} \begin{align*} \big(L\colon \mathbf{C} \to \mathcal{K}\ell(T)\big) \quad \dashv \quad \big(R\colon \mathcal{K}\ell(T) \to \mathbf{C}\big) \end{align*} with unit $\eta'\colon \mathrm{Id}_\mathbf{C} \mathbb{R}ightarrow RL$ and counit $\varepsilon\colon LR \mathbb{R}ightarrow \mathrm{Id}_{\mathcal{K}\ell(T)}$ giving rise to the monad, i.e. $T = RL$, $\eta=\eta'$, $\mu = R\varepsilon L$. Then an endofunctor $\overline{F}$ on $\mathcal{K}\ell(T)$ is called a \emph{lifting of $F$} if it satisfies $\overline{F}L = LF$. We will use the fact that these liftings are in one-to-one correspondence with distributive laws \cite{mulry-lifting}. \begin{defi}[Distributive Law] Let $(T, \eta, \mu)$ be a monad on a category $\mathbf{C}$ and $F$ be an endofunctor on $\mathbf{C}$. 
A natural transformation $\lambda\colon FT \mathbb{R}ightarrow TF$ is called a \emph{distributive law} if for all $\mathbf{C}$-objects $X$ the following diagrams commute in $\mathbf{C}$: \[\xymatrix{ FX \ar[r]^{F\eta_X} \ar[dr]_{\eta_{FX}} & FTX \ar[d]^{\lambda_X} & & FT^2X \ar[r]^{\lambda_{TX}} \ar[d]_{F\mu_X} & TFTX \ar[r]^{T\lambda_X} & T^2FX \ar[d]^{\mu_{FX}}\\ & TFX & & FTX \ar[rr]_{\lambda_X} & & TFX }\] or equivalently $\lambda_X \circ F\eta_X = \eta_{FX}$ and $\mu_{FX} \circ T\lambda_X \circ \lambda_{TX} = \lambda_X \circ F\mu_X$. \end{defi} Whenever we have such a distributive law we get the lifting of a functor as defined above in the following way \cite{mulry-lifting}. \begin{prop}[Lifting via Distributive Law] Let $(T, \eta, \mu)$ be a monad on a category $\mathbf{C}$ and $F$ be an endofunctor on $\mathbf{C}$ with a distributive law $\lambda\colon FT \mathbb{R}ightarrow TF$. The distributive law induces a lifting of $F$ to an endofunctor $\overline{F}\colon \mathcal{K}\ell(T) \to \mathcal{K}\ell(T)$ if we define $\overline{F}X =FX$ for each object $X$ of $\mathcal{K}\ell(T)$ and $\overline{F}(f) := \lambda_Y \circ Ff$ for each Kleisli arrow $f \colon X \to TY$. \qed \end{prop} \subsection{Coalgebraic Trace Semantics} We first recall the central notions of coalgebra, coalgebra homomorphism and final coalgebra. \begin{defi}[Coalgebra, Coalgebra-Homomorphism, Final Coalgebra] \label{def:coalgebra} For an endofunctor $F$ on a category $\mathbf{D}$ an $F$-coalgebra is a pair $(X, \alpha)$ where $X$ is an object and $\alpha\colon X \to FX$ is an arrow of $\mathbf{D}$. An $F$-coalgebra homomorphism between two $F$-coalgebras $(X, \alpha), (Y, \beta)$ is an arrow $\varphi \colon X \to Y$ in $\mathbf{D}$ such that $\beta \circ \varphi = F(\varphi)\circ \alpha$. We call an $F$-coalgebra $(\Omega, \kappa)$ final if and only if for every $F$-coalgebra $(X,\alpha)$ there is a unique $F$-coalgebra-homomorphism $\varphi_\alpha \colon X \to \Omega$. 
\end{defi} By choosing a suitable category and a suitable endofunctor, many (labelled) transition systems can be modelled as $F$-coalgebras. The final coalgebra -- if it exists -- can be seen as the ``universe of all possible behaviors'' and the unique map into it yields a behavioral equivalence: Two states are equivalent iff they have the same image in the final coalgebra. Whenever transition systems incorporate side-effects, these can be ``hidden'' in a monad $T$. This leads to the following setting: the category $\mathbf{D}$ of Definition~\ref{def:coalgebra} is $\mathcal{K}\ell(T)$, i.e., the Kleisli category for the monad $T$ and a functor $\overline{F}\colon \mathcal{K}\ell(T)\to \mathcal{K}\ell(T)$ is obtained by suitably lifting a functor $F$ of the underlying category (such that $\overline{F}X = FX$ on objects, see above). Then coalgebras are defined as arrows $\alpha\colon X\to\overline{F}X$ in the Kleisli category, which can be regarded as arrows $X\to TFX$ in the base category. As indicated in the introduction, the monad can be seen as describing implicit branching (side effects), whereas $F$ describes the explicit branching structure. In this setup the final coalgebra in the Kleisli category often yields a notion of trace semantics \cite{hasuo,Sokolova20115095}. The side effects specified via the monad are not part of the final coalgebra, but are contained in the unique map into the final coalgebra (which is again a Kleisli arrow). In our case $T$ is either the sub-probability or the probability monad on $\mathbf{Meas}$ (which will be defined later), whereas $F$ is defined as $F = \ensuremath{\mathcal{A}} \times \mathrm{Id}_\mathbf{Meas} + \mathbf{1}$ or $F = \ensuremath{\mathcal{A}} \times \mathrm{Id}_\mathbf{Meas}$ for a given finite alphabet $\ensuremath{\mathcal{A}}$. That is, the monad $T$ describes probabilistic branching, whereas the endofunctor $F$ specifies (explicitly observable) labels and possibly termination. 
\subsection{Borel-Sigma-Algebras and the Lebesgue Integral} Before we can define the probability and the sub-probability monad, we give a crash course in integration loosely based on \cite{ash,Els07}. For that purpose let us fix a measurable space $X$ and a measure $\mu$ on $X$. We want to integrate numerical functions $f\colon X \to \overline{\mathbb{R}}$ and in order to do that we need a suitable $\sigma$-algebra on $\overline{\mathbb{R}}$ to define measurability of such functions. Recall that a topological space is a tuple $(Y, \mathcal{T})$, where $Y$ is a set and $\mathcal{T} \subseteq \powerset{Y}$ is a set that contains the empty set and the set $Y$ itself and is closed under arbitrary unions and finite intersections. The set $\mathcal{T}$ is called the \emph{topology} of $Y$ and its elements are called \emph{open sets}. The \emph{Borel $\sigma$-algebra} on $Y$, denoted $\mathcal{B}(Y)$, is the $\sigma$-algebra generated by the open sets $\mathcal{T}$ of the topology, i.e. $\mathcal{B}(Y) = \sigalg[Y]{\mathcal{T}}$. Thus the Borel $\sigma$-algebra provides a connection of topological aspects and measurability. For the set of real numbers, it can be shown (\cite[I.4.3 Satz]{Els07}) that the Borel $\sigma$-algebra $\mathcal{B}(\mathbb{R})$ is generated by the semiring of all left-open intervals \begin{align*} \mathcal{B}(\mathbb{R}) = \sigalg[\mathbb{R}]{\set{\,(a,b]\mid a,b \in \mathbb{R}, a \leq b}}. \end{align*} With this definition at hand, we now equip the set $\overline{\mathbb{R}}$ of extended reals with its Borel \linebreak $\sigma$-algebra which can be defined as \begin{align*} \mathcal{B}(\overline{\mathbb{R}}) = \sigalg[\overline{\mathbb{R}}]{\set{B \cup E \mid B \in \mathcal{B}(\mathbb{R}), E \subseteq \set{-\infty, \infty}}}. \end{align*} A function $f\colon X \to \overline{\mathbb{R}}$ is called \emph{(Borel-)measurable} if it is measurable with respect to this Borel $\sigma$-algebra. 
Given two Borel-measurable functions $f,g \colon Y \to \overline{\mathbb{R}}$ and real numbers $\alpha, \beta$ also $\alpha f+\beta g$ is Borel-measurable \cite[III.4.7]{Els07} and thus are all finite linear combinations of Borel-measurable functions. Moreover, if $(f_n)_{n \in \mathbb{N}}$ is a sequence of Borel-measurable functions $f_n\colon X \to \overline{\mathbb{R}}$ converging pointwise to a function $f\colon X \to \overline{\mathbb{R}}$, then also $f$ is Borel-measurable \cite[1.5.4]{ash}. In the remainder of this section we will just consider Borel-measurable functions. We call $f$ \emph{simple} iff it attains only finitely many values, say $f(X) = \set{\alpha_1, \mathbf{hd}ots, \alpha_N}$. The integral of such a simple function $f$ is then defined to be the $\mu$-weighted sum of the $\alpha_n$, formally $\Int{f}[\mu] = \sum_{n=1}^{N}\alpha_n\mu(S_n)$ where $S_n = f^{-1}(\alpha_n) \in \mathcal{S}igma_X$. Whenever $f$ is non-negative we can approximate it from below using non-negative simple functions. In this case we define the integral to be \[\Int{f}[\mu] := \sup\set{\Int{s}[\mu]\mid s \mbox{ non-negative and simple s.t. } 0 \leq s \leq f}.\] For arbitrary Borel-measurable $f$ we decompose it into its positive part $f^+ := \max\set{f,0}$ and negative part $f^- := \max\set{-f, 0}$ which are both non-negative and Borel-measurable. We note that $f = f^+ - f^{-}$ and consequently we define the integral of $f$ to be the difference $\Int{f}[\mu] := \Int{f^+}[\mu] - \Int{f^-}[\mu]$ if not both integrals on the right hand side are $+\infty$. In the latter case we say that the integral does not exist. Whenever it exists and is finite we call $f$ a \emph{$\mu$-integrable function} or simply an \emph{integrable function} if the measure $\mu$ is obvious from the context. 
For every measurable set $S \in \mathcal{S}igma_X$ its characteristic function $\chi_S \colon X \to \mathbb{R}$, which is \linebreak $1$ if $x \in S$ and $0$ otherwise, is $\mu$-integrable and for $\mu$-integrable $f$ the product $\chi_S \cdot f$ is also $\mu$-integrable and we write \begin{align*} \Int[S]{f}[\mu] := \Int{\chi_S \cdot f}[\mu] \,. \end{align*} Instead of $\Int[S]{f}[\mu]$ we will sometimes write $\Int[S]{f(x)}[\mu(x)]$ or $\Int[{x \in S}]{f(x)}[\mu(x)]$ which is useful if we have functions with more than one argument or multiple integrals. Note that this does not imply that singleton sets are measurable. Some useful properties of the integral are that it is \emph{linear}, i.e. for $\mu$-integrable functions $f,g\colon X \to \overline{\mathbb{R}}$ and real numbers $\alpha, \beta$ we have \begin{align*} \Int{\alpha f + \beta g}[\mu] = \alpha \Int{f}[\mu] + \beta\Int{g}[\mu] \end{align*} and the integral is \emph{monotone}, i.e. $f \leq g$ implies $\Int{f}[\mu] \leq \Int{g}[\mu] $. We will state one result explicitly which we will use later in our proofs. This result and its proof can be found e.g. in \cite[Theorem 1.6.12]{ash}. \begin{prop}[Image Measure] Let $X, Y$ be measurable spaces, $\mu$ be a measure on $X$, $f\colon Y \to \overline{\mathbb{R}}$ be a Borel-measurable function and $g\colon X \to Y$ be a measurable function. Then $\mu \circ g^{-1}$ is a measure\footnote{This notation is a bit lax, if we wanted to be really precise we would have to write $\mu \circ \left(g^{-1}|_{\mathcal{S}igma_Y}\right)$.} on $Y$, the so-called \emph{image-measure} and $f$ is $(\mu \circ g^{-1})$-integrable iff $f \circ g$ is $\mu$-integrable and in this case we have $\Int[S]{f}[(\mu \circ g^{-1})] = \Int[{g^{-1}(S)}]{f \circ g}[\mu]$ for all $S \in \mathcal{S}igma_Y$.\qed \end{prop} \subsection{The Probability and the Sub-Probability Monad} We will now introduce the probability monad (Giry monad) and the sub-probability monad as e.g. 
presented in \cite{Gir82} and \cite{Pan09}. First, we take a look at the endofunctors of these monads. \begin{defi}[The Sub-Probability and the Probability Functor] The \emph{sub-probability-functor} $\mathbb{S} \colon \mathbf{Meas} \to \mathbf{Meas}$ maps a measurable space $(X, \mathcal{S}igma_X)$ to the measurable space $\big(\mathbb{S}(X), \mathcal{S}igma_{\mathbb{S}(X)}\big)$ where $\mathbb{S}(X)$ is the set of all sub-probability measures on $\mathcal{S}igma_X$ and $\mathcal{S}igma_{\mathbb{S}(X)}$ is the smallest $\sigma$-algebra such that for all $S \in \mathcal{S}igma_X$ the \emph{evaluation maps}: \begin{align} \quad p_S\colon \mathbb{S}(X) \to [0,1],\quad p_S(P) = P(S) \label{eq:evaluation_map} \end{align} are Borel-measurable. For any measurable function $f \colon X \to Y$ between measurable spaces $(X, \mathcal{S}igma_X)$, $(Y, \mathcal{S}igma_Y)$ the arrow $\mathbb{S}(f)$ maps a probability measure $P$ to its image measure: \begin{align} \mathbb{S}(f) \colon \mathbb{S}(X) \to \mathbb{S}(Y), \quad \mathbb{S}(f)(P) := P \circ f^{-1}. \end{align} If we take full probabilities instead of sub-probabilities we get another endofunctor, the probability functor $\mathbb{P}$, analogously. \end{defi} Both the sub-probability functor $\mathbb{S}$ and the probability functor $\mathbb{P}$ are functors of monads with the following unit and multiplication natural transformations. \begin{defi}[Unit and Multiplication] Let $T$ be either the sub-probability functor $\mathbb{S}$ or the probability functor $\mathbb{P}$. 
We obtain two natural transformations $\eta \colon \mathrm{Id}_\mathbf{Meas} \mathbb{R}ightarrow T$ and $\mu \colon T^2\mathbb{R}ightarrow T$ by defining for every measurable space $(X,\mathcal{S}igma_X)$: \begin{align} \eta_X \colon X \to TX,\ & \quad \eta_X(x) = \delta_x^X \label{eq:giry_unit}\\ \mu_X \colon T^2X \to TX,\ & \quad \mu_X(P)(S) := \Int{p_S}[P] \quad \text{for } S \in \mathcal{S}igma_X\label{eq:giry_mult} \end{align} where $\delta_x^X\colon \mathcal{S}igma_X \to [0,1]$ is the Dirac measure and $p_S$ is the evaluation map \eqref{eq:evaluation_map} from above. \end{defi} If we combine all the ingredients we obtain the following result which also guarantees the soundness of the previous definitions. \begin{prop}[\cite{Gir82,Pan09}] $(\mathbb{S}, \eta, \mu)$ and $(\mathbb{P}, \eta, \mu)$ are monads on $\mathbf{Meas}$.\qed \end{prop} \subsection{A Category of Stochastic Relations} The Kleisli category of the sub-probability monad $(\mathbb{S}, \eta, \mu)$ is sometimes called \emph{category of stochastic relations \cite{Pan09}} and denoted by $\mathbf{SRel}$. Let us briefly analyze the arrows of this category: Given two measurable spaces $(X,\mathcal{S}igma_X)$, $(Y, \mathcal{S}igma_Y)$ a Kleisli arrow $h \colon X \to \mathbb{S}Y$ maps each $x \in X$ to a sub-probability measure $h(x) \colon \mathcal{S}igma_Y \to [0,1]$. By uncurrying we can regard $h$ as a function $h\colon X \times \mathcal{S}igma_Y\to [0,1]$. Certainly for each $x \in X$ the function $S \mapsto h(x,S)$ is a (sub-)probability measure and one can show that for each $S \in \mathcal{S}igma_Y$ the function $x \mapsto h(x,S)$ is Borel-measurable. Any function $h \colon X \times \mathcal{S}igma_Y\to [0,1]$ with these properties is called a \emph{Markov kernel} or a \emph{stochastic kernel} and it is known \cite[Proposition 2.7]{Dob07b} that these Markov kernels correspond exactly to the Kleisli arrows $h \colon X \to \mathbb{S}Y$. 
We will later need the following, simple result about Borel-measurable functions and Markov kernels: \begin{lem} \label{lem:measMarkovKernel} Let $(X, \mathcal{S}igma_X)$ and $(Y, \mathcal{S}igma_Y)$ be measurable spaces, $g \colon Y \to [0,1]$ be a Borel-measurable function and $h\colon X \times \mathcal{S}igma_Y \to [0,1]$ be a Markov kernel. Then the function\linebreak $f\colon X \to [0,1]$, $f(x) := \Int[y \in Y]{g(y)}[h(x,y)]$ is Borel-measurable. \end{lem} \proof If $g$ is a simple and Borel-measurable function, say $g(Y) = \set{\alpha_1,..., \alpha_N}$, then $f(x) = \sum_{n=1}^N \alpha_n h(x,A_n)$ where $A_n = g^{-1}(\set{\alpha_n})$ and hence $f$ is Borel-measurable as a linear combination of Borel-measurable functions. If $g$ is an arbitrary, Borel-measurable function we approximate it from below with simple functions $s_i$, $i \in \mathbb{N}$ and define $f_i\colon X \to [0,1]$ with $f_i(x) = \Int[y \in Y]{s_i(y)}[h(x,y)]$. Then by the monotone convergence theorem (\cite[1.6.2]{ash}) we have $f(x) = \Int[y \in Y]{\lim_{i \to \infty}s_i(y)}[h(x,y)] = \lim_{i \to \infty}f_i(x)$. As shown before, each of the $f_i$ is Borel-measurable and thus also the function $f$ is Borel-measurable as pointwise limit of Borel-measurable functions. \qed \section{Main Results} \subsection{Continuous Probabilistic Transition Systems} There is a big variety of probabilistic transition systems \cite{Sokolova20115095,glabbeek}. We will deal with four slightly different versions of so-called \emph{generative} PTS. The underlying intuition is that, according to a sub-probability measure, an action from the alphabet $\mathcal{A}$ and a set of possible successor states are chosen. We distinguish between probabilistic branching according to sub-probability and probability measures and furthermore we treat systems without and with termination. 
\begin{defi}[Probabilistic Transition System] A \emph{probabilistic transition system}, short \emph{PTS}, is a tuple $(\ensuremath{\mathcal{A}}, X, \alpha)$ where $\ensuremath{\mathcal{A}}$ is a finite alphabet (endowed with $\powerset{\ensuremath{\mathcal{A}}}$ as $\sigma$-algebra), $X$ is the \emph{state space}, an arbitrary measurable space with $\sigma$-algebra $\mathcal{S}igma_X$ and $\alpha$ is the \emph{transition function} which has one of the following forms and determines the type\footnote{The reason for choosing these symbols as type-identifiers will be revealed later in this paper.} of the PTS. \begin{center}\begin{tabular}{p{5cm}|c} \hline Transition Function $\alpha$ & Type $\diamond$ of the PTS\\ \hline $\alpha\colon X \to \mathbb{S}(\ensuremath{\mathcal{A}} \times X)$ & $0$\\ $\alpha\colon X \to \mathbb{S}(\ensuremath{\mathcal{A}} \times X + \mathbf{1})$ & $*$\\ $\alpha\colon X \to \mathbb{P}(\ensuremath{\mathcal{A}} \times X)$ & $\omega$\\ $\alpha\colon X \to \mathbb{P}(\ensuremath{\mathcal{A}} \times X + \mathbf{1})$ & $\infty$\\ \hline \end{tabular}\end{center} For every symbol $a \in \ensuremath{\mathcal{A}}$ we define a Markov kernel $\mathbf{P}_{a}\colon X \times \mathcal{S}igma_X \to [0,1]$ where \begin{align} \P{a}{x}{S} := \alpha(x)(\set{a} \times S)\,. \end{align} Intuitively, $\P{a}{x}{S}$ is the probability of making an $a$-transition from the state $x \in X$ to any state $y \in S$. Whenever $X$ is a countable set and $\mathcal{S}igma_X = \powerset{X}$ we call the PTS \emph{discrete}. The unique state $\checkmark \in \mathbf{1}$ -- whenever it is present -- denotes termination of the system. \end{defi} We will now take a look at a small example $\infty$-PTS before we continue with our theory. 
\begin{exa}[Discrete PTS with Finite and Infinite Traces] \label{ex:pts} Let $\ensuremath{\mathcal{A}} = \set{a,b}$, $X = \set{0,1,2}$, $\mathcal{S}igma_X = \powerset{X}$ and $\alpha \colon X \to \mathbb{P}(\ensuremath{\mathcal{A}} \times X + \mathbf{1})$ such that we obtain the following system. \begin{center} \begin{tikzpicture}[node distance=1.4 and 2.8, on grid, shorten >=1pt, >=stealth', semithick] \node[state, inner sep=2pt, minimum size=20pt,draw](q0) {$0$}; \node[state, inner sep=2pt, minimum size=20pt,draw, right=of q0] (q1) {$1$}; \node[state, inner sep=2pt, minimum size=20pt,draw, below=of q1] (q2) {$2$}; \node[state, inner sep=2pt, minimum size=20pt,draw, right=of q1, accepting] (q3) {$\checkmark$}; \draw[->] (q0) edge[loop left] node[left] {$b,1$} (q0); \draw[->] (q1) edge node[above] {$b, 1/3$} (q0); \draw[->] (q1) edge node[left] {$a, 1/3$} (q2); \draw[->] (q1) edge node[above] {$1/3$} (q3); \draw[->] (q2) edge[loop left] node[left] {$a, 2/3$} (q2); \draw[->] (q2) edge node[below] {$1/3$} (q3); \end{tikzpicture} \end{center} \noindent As stated in the definition, $\checkmark$ is the unique final state. It has only incoming transitions bearing probabilities and no labels. The intuitive interpretation of these transitions can be stated as follows: ``From state $1$ the system terminates immediately with probability $1/3$''. \end{exa} \subsection{Towards Measurable Sets of Words: Cones and Semirings} \label{sec:cones} In order to define a trace measure on these probabilistic transition systems we need suitable $\sigma$-algebras on the sets of words. While the set of all finite words, $\ensuremath{\mathcal{A}}star$, is rather simple -- we will take $\powerset{\ensuremath{\mathcal{A}}star}$ as $\sigma$-algebra -- the set of all infinite words, $\ensuremath{\mathcal{A}}omega$, and also the set of all finite and infinite words, $\ensuremath{\mathcal{A}}infty$, needs some consideration. 
For a word $u \in \ensuremath{\mathcal{A}}star$ we call the set of all infinite words that have $u$ as a prefix the \emph{$\omega$-cone} of $u$, denoted by $\cone{\omega}{u}$, and similarly we call the set of all finite and infinite words having $u$ as a prefix the \emph{$\infty$-cone} \cite[p.~23]{Pan09} of $u$ and denote it with $\cone{\infty}{u}$. Sometimes, e.g. in \cite{baierkatoen2008}, these sets are also-called ``cylinder sets''. A cone can be visualized in the following way: For a given alphabet $\ensuremath{\mathcal{A}} \not = \emptyset$ we consider the undirected, rooted and labelled tree given by $\mathcal{T} := (V, E,\varepsilon, l)$ with infinitely many vertices $V := \ensuremath{\mathcal{A}}star$, edges $E := \set{ \set{u, ua}\mid u \in \mathcal{A}^*, a \in \mathcal{A}}$, root $\varepsilon \in \mathcal{A}^*$ and edge-labeling function $l \colon E \to \mathcal{A}, \set{u, ua} \mapsto a$. For $\mathcal{A} = \set{a,b,c}$ the first three levels of the tree can be depicted as follows: \[\begin{xy}\xymatrix{ & &&& \ar@{-}[dlll]_a\varepsilon\ar@{-}[d]_b\ar@{-}[drrr]^c\\ & \ar@{-}[dl]_a a \ar@{-}[d]_b\ar@{-}[dr]^c &&& \ar@{-}[dl]_a b \ar@{-}[d]_b\ar@{-}[dr]^c &&& \ar@{-}[dl]_a c \ar@{-}[d]_b\ar@{-}[dr]^c\\ aa & ab & ac & ba & bb & bc & ca & cb & cc }\end{xy} \] Given a finite word $u \in \mathcal{A}^*$, the $\omega$-cone of $u$ is represented by the set of all infinite paths\footnote{Within this paper a path of an undirected graph $(V,E)$ is always considered to be \emph{simple}, i.e. any two vertices in a path are different.} that begin in $\varepsilon$ and contain the vertex $u$ and the $\infty$-cone of $u$ is represented by the set of all finite and infinite paths that begin in $\varepsilon$ and contain the vertex $u$ (and thus necessarily have a length which is greater or equal to the length of $u$). 
\begin{defi}[Cones] Let $\ensuremath{\mathcal{A}}$ be a finite alphabet and let $\sqsubseteq \, \subset \ensuremath{\mathcal{A}}star \times \ensuremath{\mathcal{A}}infty$ denote the usual prefix relation on words. For $u \in \ensuremath{\mathcal{A}}star$ we define its $\omega$-\emph{cone} to be the set $\cone{\omega}{u}:=\set{v \in \ensuremath{\mathcal{A}}omega\mid u \sqsubseteq v }$ and analogously we define $\cone{\infty}{u}:=\set{v \in \ensuremath{\mathcal{A}}infty\mid u \sqsubseteq v }$, the $\infty$-\emph{cone} of $u$. \end{defi} With this definition at hand, we can now define the semirings we will use to generate $\sigma$-algebras on $\emptyset$, $\ensuremath{\mathcal{A}}star$, $\ensuremath{\mathcal{A}}omega$ and $\ensuremath{\mathcal{A}}infty$. \begin{defi}[Semirings of Sets of Words] \label{def:semirings_of_words} Let $\ensuremath{\mathcal{A}}$ be a finite alphabet. We define \begin{align*} \mathcal{S}_0 &:= \set{\emptyset} \subset \powerset{\emptyset},\\ \mathcal{S}_* &:= \set{\emptyset}\cup \set{\set{u}\mid u \in \ensuremath{\mathcal{A}}star} \subset \powerset{\ensuremath{\mathcal{A}}^*},\\ \mathcal{S}_\omega &:= \set{\emptyset}\cup \set{\cone{\omega}{u}\mid u \in \ensuremath{\mathcal{A}}star} \subset \powerset{\ensuremath{\mathcal{A}}^\omega},\\ {\mathcal{S}_\infty} &:= \set{\emptyset}\cup \set{\set{u}\mid u \in \ensuremath{\mathcal{A}}star} \cup \set{\cone{\infty}{u}\mid u \in \ensuremath{\mathcal{A}}star}\subset \powerset{\ensuremath{\mathcal{A}}infty}. \end{align*} \end{defi} \noindent For the next proposition the fact that $\ensuremath{\mathcal{A}}$ is a finite alphabet is crucial. \begin{prop} \label{prop:semirings_of_words} The sets $\mathcal{S}_0$, ${\mathcal{S}_*}$, ${\mathcal{S}_\omega}$ and ${\mathcal{S}_\infty}$ are covering semirings of sets. \end{prop} \proof For $\mathcal{S}_0 = \set{\emptyset}$ nothing has to be shown. 
Obviously we have $\emptyset \in {\mathcal{S}_*}$ and for elements $\set{u}, \set{v} \in {\mathcal{S}_*}$ we remark that $\set{u} \cap \set{v}$ is either $\set{u}$ iff $u = v$ or $\emptyset$ else. Moreover, $\set{u} \setminus \set{v}$ is either $\emptyset$ iff $u=v$ or $\set{u}$ else. We proceed with the proof for ${\mathcal{S}_\infty}$, the proof for ${\mathcal{S}_\omega}$ can be carried out almost analogously (in fact, it is simpler). By definition we have $\emptyset \in {\mathcal{S}_\infty}$. An intersection $\cone{\infty}{u} \,\cap \cone{\infty}{v}$ is non-empty iff either $u \sqsubseteq v$ or $v \sqsubseteq u$ and is then equal to $\cone{\infty}{v}$ or to $\cone{\infty}{u}$ and thus an element of $\mathcal{S}_\infty$. Similarly an intersection $\cone{\infty}{u} \cap \set{v}$ is non-empty iff $u \sqsubseteq v$ and is then equal to $\set{v} \in {\mathcal{S}_\infty}$. As before we have $\set{u} \cap \set{v} = \set{u}$ for $u=v$ and $\set{u} \cap \set{v} = \emptyset$ else. For the set difference $\cone{\infty}{u} \,\setminus \cone{\infty}{v}$ we note that this is either $\emptyset$ (iff $v \sqsubseteq u$) or $\cone{\infty}{u}$ (iff $v \not \sqsubseteq u$ and $u \not \sqsubseteq v$) or otherwise ($u \sqsubseteq v$) the following union\footnote{For $n \in \mathbb{N}$ we define $\ensuremath{\mathcal{A}}^{<n} := \set{u \in \ensuremath{\mathcal{A}}star \mid |u| < n}$.} of finitely many disjoint sets in ${\mathcal{S}_\infty}$: \begin{align*} \cone{\infty}{u} \,\setminus \cone{\infty}{v} = \left(\bigcup\limits_{v' \in \ensuremath{\mathcal{A}}^{|v|}\setminus\set{v}, u \sqsubseteq v'} \! \cone{\infty}{v'}\right) \cup \left(\bigcup\limits_{v' \in \ensuremath{\mathcal{A}}^{< |v|},~u \sqsubseteq v'}\set{v'}\right). \end{align*} As before we get $\set{u}\setminus\set{v}=\emptyset$ iff $u=v$ and $\set{u}\setminus\set{v} = \set{u}$ else. For $\set{u} \setminus \cone{\infty}{v}$ we observe that this is either $\set{u}$ iff $v \not \sqsubseteq u$ or $\emptyset$ else.
Finally, $\cone{\infty}{u} \,\setminus \set{v}$ is either $\cone{\infty}{u}$ (iff $u \not \sqsubseteq v$) or ($u \sqsubseteq v$) the following union of finitely many disjoint sets in ${\mathcal{S}_\infty}$: \begin{align*} \cone{\infty}{u} \,\setminus \set{v} = \left(\bigcup\limits_{v' \in \ensuremath{\mathcal{A}}^{|v|}\setminus\set{v}, u \sqsubseteq v'} \! \cone{\infty}{v'}\right) \cup \left(\bigcup\limits_{v' \in \ensuremath{\mathcal{A}}^{< |v|},~u \sqsubseteq v'}\set{v'}\right) \cup \left(\bigcup\limits_{a \in \ensuremath{\mathcal{A}}} \! \cone{\infty}{va}\right) \end{align*} which completes the proof that the given sets are semirings. The countable (and even disjoint) covers are: $\emptyset = \emptyset$, $\ensuremath{\mathcal{A}}star = \cup_{u \in \ensuremath{\mathcal{A}}star}\set{u}$, $\ensuremath{\mathcal{A}}omega =\, \cone{\omega}{\varepsilon}$ and $\ensuremath{\mathcal{A}}infty =\, \cone{\infty}{\varepsilon}$. \qed We remark that many interesting sets will be measurable in the $\sigma$-algebra generated by these cones. The singleton-set $\set{u}$ will be measurable for every $u \in \ensuremath{\mathcal{A}}omega$ because $\set{u} = \bigcap_{v \sqsubseteq u}\cone{\omega}{v} = \bigcap_{v \sqsubseteq u}\cone{\infty}{v}$ which are countable intersections, and (for $\infty$-cones only) the set $\ensuremath{\mathcal{A}}star = \cup_{u \in \ensuremath{\mathcal{A}}star}\set{u}$ and consequently also the set $\ensuremath{\mathcal{A}}omega = \ensuremath{\mathcal{A}}infty \setminus \ensuremath{\mathcal{A}}star$ will be measurable. The latter will be useful to check to what ``extent'' a state of an $\infty$-PTS accepts finite or infinite behavior. \subsection{Measurable Sets of Words} Let us now take a closer look at the $\sigma$-algebras generated by the semirings which we defined in the last section. We obviously obtain the trivial $\sigma$-algebra $\sigalg[\emptyset]{\mathcal{S}_0} = \set{\emptyset}$.
Since $\ensuremath{\mathcal{A}}$ is finite, $\ensuremath{\mathcal{A}}star$ is countable and we can easily conclude $\sigalg[\ensuremath{\mathcal{A}}star]{{\mathcal{S}_*}} = \powerset{\ensuremath{\mathcal{A}}star}$. The other two cases need a more thorough treatment. For the remainder of this section let thus $\diamond \in \set{\omega,\infty}$. We will use the concepts of transfinite induction (cf. e.g. \cite{Dud89} for an introduction) to extend the semi-ring $\mathcal{S}_\diamond$ to the $\sigma$-algebra it generates. A similar construction is well-known and presented e.g. in \cite{Els07}. Usually this explicit construction is not needed but for our proofs it will turn out to be useful. \begin{defi} For any set $X$ and $\mathcal{G} \subseteq \powerset{X}$ let $\Unions{\mathcal{G}}$ and $\Intersections{\mathcal{G}}$ be the closure of $\mathcal{G}$ under countable unions and intersections. We define $\mathbb{R}diamond(0) :=\set{\cup_{n=1}^N S_n \mid N \in \mathbb{N}, S_n \in \mathcal{S}_\diamond \text{ disjoint}}$, $\mathbb{R}diamond(\alpha+1) := \Unions{\Intersections{\mathbb{R}diamond(\alpha)}}$ for every ordinal $\alpha$ and $\mathbb{R}diamond(\gamma) := \cup_{\alpha < \gamma} \mathbb{R}diamond(\alpha)$ for every limit ordinal $\gamma$. \end{defi} Obviously we have $\mathbb{R}diamond(\alpha) \subseteq \mathbb{R}diamond(\beta)$ for all ordinals $\alpha < \beta$. Since $\mathcal{S}_\diamond$ is a semiring of sets, it is easy to see that $\mathbb{R}diamond(0)$ is an \emph{algebra}, i.e. it contains the base set $\ensuremath{\mathcal{A}}^\diamond$, is closed under complement and binary (and hence all finite) unions and intersections. \begin{lem} \label{lem:complementsLimit} $A \in \mathbb{R}diamond(\gamma) \implies \compl{A} \in \mathbb{R}diamond(\gamma)$ for every limit ordinal $\gamma$. \end{lem} \proof We will show that $A \in \mathbb{R}diamond(\alpha) \implies \compl{A} \in \Intersections{\mathbb{R}diamond(\alpha)}$ for every ordinal $\alpha$.
This is true for the algebra $\mathbb{R}diamond(0)$. Now let $\alpha$ be an ordinal satisfying the implication and let $A \in \mathbb{R}diamond(\alpha+1)$. Then $A = \cup_{m=1}^\infty \cap_{n=1}^\infty A_{m,n}$ with $A_{m,n} \in \mathbb{R}diamond(\alpha)$ and by deMorgan's rules $\compl{A} = \cap_{m=1}^\infty \cup_{n=1}^\infty \compl{A_{m,n}}$ where by hypothesis $\compl{A_{m,n}} \in \Intersections{\mathbb{R}diamond(\alpha)}$, thus $\cup_{n=1}^\infty \compl{A_{m,n}} \in \Unions{\Intersections{\mathbb{R}diamond(\alpha)}} = \mathbb{R}diamond(\alpha+1)$ and therefore $\compl{A} \in \Intersections{\mathbb{R}diamond(\alpha+1)}$. Finally, let $\gamma$ be a limit ordinal and suppose the implication holds for all ordinals $\alpha < \gamma$. For any $B\in \mathbb{R}diamond(\gamma)$ there is a $\beta < \gamma$ such that $B \in \mathbb{R}diamond(\beta)$. Hence we have $\overline{B} \in \Intersections{\mathbb{R}diamond(\beta)} \subseteq \Intersections{\mathbb{R}diamond(\gamma)} \subseteq \mathbb{R}diamond(\gamma)$. \qed \begin{lem} \label{lem:finite_union_intersection} $A, B \in \mathbb{R}diamond(\alpha) \implies A\cup B, A \cap B \in \mathbb{R}diamond(\alpha)$ for every ordinal $\alpha$. \end{lem} \proof This is true for the algebra $\mathbb{R}diamond(0)$. Let $\alpha$ be an ordinal satisfying the implication and $A, B \in \mathbb{R}diamond(\alpha+1)$, then $A = \cup_{k=1}^\infty \cap_{l=1}^\infty A_{k,l}$ and $B = \cup_{m=1}^\infty \cap_{n=1}^\infty B_{m,n}$ with $A_{k,l}, B_{m,n} \in \mathbb{R}diamond(\alpha)$. Obviously $A \cup B = \cup_{k,m=1}^\infty \cap_{l,n=1}^\infty ( A_{k,l} \cup B_{m,n})$ and $A \cap B = \cup_{k,m=1}^\infty \cap_{l,n=1}^\infty ( A_{k,l} \cap B_{m,n})$ where by hypothesis $A_{k,l} \cup B_{m,n}, A_{k,l} \cap B_{m,n} \in \mathbb{R}diamond(\alpha)$. Let $\gamma$ be a limit ordinal and suppose the statement is true for all $\alpha < \gamma$ and let $A, B \in \mathbb{R}diamond(\gamma)$. 
There must be ordinals $\alpha, \beta < \gamma$ such that $A \in \mathbb{R}diamond(\alpha)$ and $B \in \mathbb{R}diamond(\beta)$. Assume wlog $\alpha \leq \beta$ then $A \in \mathbb{R}diamond(\beta)$, hence $A \cup B, A \cap B \in \mathbb{R}diamond(\beta) \subseteq \mathbb{R}diamond(\gamma)$ which completes the proof. \qed \begin{lem} \label{lem:IntersectionIsClosedUnderFiniteUnion} $A, B \in \Intersections{\mathbb{R}diamond(\alpha)} \implies A\cup B \in \Intersections{\mathbb{R}diamond(\alpha)}$ for every ordinal $\alpha$. \end{lem} \proof Let $A, B \in \Intersections{\mathbb{R}diamond(\alpha)}$ then $A:= \cap_{m=1}^\infty A_m$ and $B:= \cap_{n=1}^\infty B_n$ with $A_m, B_n \in \mathbb{R}diamond(\alpha)$. Then $A \cup B = \cap_{m,n=1}^\infty (A_m \cup B_n)$ where $A_m \cup B_n \in \mathbb{R}diamond(\alpha)$ by Lemma \ref{lem:finite_union_intersection} and thus \linebreak $A \cup B \in \Intersections{\mathbb{R}diamond(\alpha)}$. \qed \begin{prop} \label{prop:TransFiniteSigAlg} $\sigalg[\ensuremath{\mathcal{A}}^\diamond]{\mathbb{R}diamond(0)} = \mathbb{R}diamond(\omega_1)$ where $\omega_1$ is the smallest uncountable limit ordinal. \end{prop} \proof[Proof (adapted from \cite{Els07}).] We first show $\mathbb{R}diamond(\omega_1) \subseteq \sigalg[X]{\mathbb{R}diamond(0)}$. We know that \linebreak $\mathbb{R}diamond(0) \subseteq \sigalg[X]{\mathbb{R}diamond(0)}$. For an ordinal $\alpha$ with $\mathbb{R}diamond(\alpha) \subseteq \sigalg[X]{\mathbb{R}diamond(0)}$ let $A \in \mathbb{R}diamond(\alpha+1)$. Then $A = \cup_{m=1}^\infty\cap_{n=1}^\infty A_{m,n}$ with $A_{m,n} \in \mathbb{R}diamond(\alpha)$ yielding $A \in \sigalg[X]{\mathbb{R}diamond(0)}$. 
If $\gamma$ is a limit ordinal with $\mathbb{R}diamond(\alpha) \subseteq \sigalg[X]{\mathbb{R}diamond(0)}$ for all ordinals $\alpha < \gamma$ then for any $A \in \mathbb{R}diamond(\gamma)$ there must be an ordinal $\alpha < \gamma$ such that $A \in \mathbb{R}diamond(\alpha)$ and hence $A \in \sigalg[X]{\mathbb{R}diamond(0)}$. In order to show $\mathbb{R}diamond(\omega_1) \supseteq \sigalg[X]{\mathbb{R}diamond(0)}$ it suffices to show that $\mathbb{R}diamond(\omega_1)$ is a $\sigma$-algebra. We have $X \in \mathbb{R}diamond(0) \subseteq \mathbb{R}diamond(\omega_1)$ and Lemma \ref{lem:complementsLimit} yields closure under complements. Let $A_n \in \mathbb{R}diamond(\omega_1)$ for $n \in \mathbb{N}$. Then for each $A_n$ we have an $\alpha_n$ such that $A_n \in \mathbb{R}diamond(\alpha_n)$. Since $\omega_1$ is the first uncountable ordinal, we must find an $\alpha < \omega_1$ such that $\alpha_n < \alpha$ for all $n \in \mathbb{N}$. Hence we have $A_n \in \mathbb{R}diamond(\alpha)$ for all $n \in \mathbb{N}$. Thus $\cup_{n=1}^\infty A_n \in \mathbb{R}diamond(\alpha+1) \subseteq \mathbb{R}diamond(\omega_1)$. \qed \subsection{The Trace Measure} We will now define the trace measure which can be understood as the behavior of a state: it measures the probability of accepting a set of words. \begin{defi}[The Trace Measure] \label{def:trace_premeasure} Let $(\ensuremath{\mathcal{A}}, X, \alpha)$ be a $\diamond$-PTS. For every state $x \in X$ we define the trace (sub-)probability measure $\mathbf{tr}(x) \colon \sigalg[\ensuremath{\mathcal{A}}^\diamond]{\mathcal{S}_\diamond} \to [0,1]$ as follows: In all four cases we require $\mathbf{tr}(x)(\emptyset) = 0$.
For $\diamond \in \set{*, \infty}$ we define \begin{align} \mathbf{tr}(x)(\set{\varepsilon}) = \alpha(x)(\mathbf{1}) \label{eq:trace_emptyword} \end{align} and \begin{align} \mathbf{tr}(x)\big(\set{au}\big) := \Int[{x' \in X}]{\mathbf{tr}(x')(\set{u})}[\P{a}{x}{x'}] \label{eq:trace_main_equation} \end{align} for all $a \in \ensuremath{\mathcal{A}}$ and all $u \in \ensuremath{\mathcal{A}}star$. For $\diamond \in \set{\omega, \infty}$ we define \begin{align} \mathbf{tr}(x)(\cone{\diamond}{\varepsilon}) = 1\label{eq:trace_wholespace} \end{align} and \begin{align} \mathbf{tr}(x)\big(\cone{\diamond}{au}\big) := \Int[{x' \in X}]{\mathbf{tr}(x')(\cone{\diamond}{u})}[\P{a}{x}{x'}] \label{eq:trace_main_equation2} \end{align} for all $a \in \ensuremath{\mathcal{A}}$ and all $u \in \ensuremath{\mathcal{A}}star$. \end{defi} We need to verify that everything is well-defined and sound. In the next proposition we explicitly state what has to be shown. \begin{prop} \label{prop:trace_premeasure} For all four types $\diamond \in \set{0,*,\omega, \infty}$ of PTS the equations in Definition~\ref{def:trace_premeasure} yield a $\sigma$-finite pre-measure $\mathbf{tr}(x)\colon \mathcal{S}_\diamond \to [0,1]$ for every $x \in X$. Moreover, the unique extension of this pre-measure is a (sub-)probability measure. \end{prop} Before we prove this proposition, let us try to get a more intuitive understanding of Definition~\ref{def:trace_premeasure} and especially equation \eqref{eq:trace_main_equation}. First we check how the above definition reduces when we consider discrete systems. \begin{rem} Let $(\ensuremath{\mathcal{A}}, X, \alpha)$ be a discrete\footnote{If $Z$ is a countable set and $\mu\colon \powerset{Z} \to [0,1]$ is a measure, we write $\mu(z)$ for $\mu(\set{z})$.} $*$-PTS, i.e. $X$ is a countable set with \linebreak $\sigma$-algebra $\powerset{X}$ and the transition probability function is $\alpha \colon X \to \mathbb{S}(\ensuremath{\mathcal{A}} \times X + \mathbf{1})$.
Then $\mathbf{tr}(x)(\varepsilon) := \alpha(x)(\checkmark)$ and \eqref{eq:trace_main_equation} is equivalent to \begin{align} \quad \mathbf{tr}(x)(au) := \sum_{x' \in X} \mathbf{tr}(x')(u) \cdot \P{a}{x}{x'} \end{align} for all $a \in \ensuremath{\mathcal{A}}$ and all $u \in \ensuremath{\mathcal{A}}star$ which in turn is equivalent to the discrete ``trace distribution'' presented in \cite{Hasuo06generictrace} for the sub\--dis\-tri\-bu\-tion monad $\mathcal{D}$ on $\mathbf{Set}$. \end{rem} Having seen this coincidence with known results, we proceed to calculate the trace measure for our example (Example \ref{ex:pts}) which we can only do in our more general setting because this $\infty$-PTS is a discrete probabilistic transition system which exhibits both finite and infinite behavior. \begin{exa}[Example \ref{ex:pts} continued.] We calculate the trace measures for the $\infty$-PTS from Example \ref{ex:pts}. We have $\mathbf{tr}(0) = \delta_{b^\omega}^\ensuremath{\mathcal{A}}infty$ because \begin{align*} \mathbf{tr}(0)(\set{b^\omega}) &= \mathbf{tr}(0)\left(\bigcap_{k=0}^{\infty}\cone{\infty}{b^k}\right)=\mathbf{tr}(0)\left(\ensuremath{\mathcal{A}}infty \setminus \bigcup_{k=0}^{\infty}\left(\ensuremath{\mathcal{A}}infty \setminus \cone{\infty}{b^k}\right)\right) \\ &= \mathbf{tr}(0)\left(\ensuremath{\mathcal{A}}infty\right) - \mathbf{tr}(0)\left(\bigcup_{k=0}^{\infty}\left(\ensuremath{\mathcal{A}}infty \setminus \cone{\infty}{b^k}\right)\right) \geq 1 - \sum_{k=0}^{\infty}\mathbf{tr}(0)\left(\ensuremath{\mathcal{A}}infty \setminus \cone{\infty}{b^k}\right)\\ &= 1 - \sum_{k=0}^{\infty} \left(1- \mathbf{tr}(0)\left(\cone{\infty}{b^k}\right)\right) = 1-\sum_{k=0}^{\infty}(1-1) = 1 \end{align*} Thus we have $\mathbf{tr}(0)(\ensuremath{\mathcal{A}}star) = \mathbf{tr}(0)\left(\cup_{u \in \ensuremath{\mathcal{A}}star}\set{u}\right) = 0$ and $\mathbf{tr}(0)(\ensuremath{\mathcal{A}}omega) = 1$. 
By induction we can show that $\mathbf{tr}(2)(\set{a^k}) = (1/3) \cdot (2/3)^k$ and thus $\mathbf{tr}(2)(\ensuremath{\mathcal{A}}star) = 1$ because \begin{align*} 1 \geq \mathbf{tr}(2)(\ensuremath{\mathcal{A}}star) = \mathbf{tr}(2)\left(\bigcup_{u \in \ensuremath{\mathcal{A}}star} \set{u}\right)\geq \mathbf{tr}(2)\left(\bigcup_{k=0}^\infty \set{a^k}\right)= \frac{1}{3}\cdot\sum_{k=0}^\infty\left(\frac{2}{3}\right)^k = 1 \end{align*} and hence $\mathbf{tr}(2)(\ensuremath{\mathcal{A}}omega) = 0$. Furthermore we calculate $\mathbf{tr}(1)(\set{b^\omega})= 1/3$, $\mathbf{tr}(1)(\cone{\infty}{a}) = 1/3$ and $\mathbf{tr}(1)(\set{\varepsilon}) = 1/3$ yielding $\mathbf{tr}(1)(\ensuremath{\mathcal{A}}star) = 2/3$ and $\mathbf{tr}(1)(\ensuremath{\mathcal{A}}omega) = 1/3$. \end{exa} Recall that we still have to prove Proposition~\ref{prop:trace_premeasure}. In order to simplify this proof, we provide a few technical results about the sets ${\mathcal{S}_*}$, ${\mathcal{S}_\omega}$, ${\mathcal{S}_\infty}$. For all these results remember again that $\ensuremath{\mathcal{A}}$ is required to be a \emph{finite} alphabet. This is a crucial point, particularly in the next lemma. \begin{lem}[Countable Unions] \label{lem:union_cones} Let $(S_n)_{n \in \mathbb{N}}$ be a sequence of pairwise disjoint sets in ${\mathcal{S}_\omega}$ or in ${\mathcal{S}_\infty}$ such that their union, $\cup_{n \in \mathbb{N}}S_n$, is itself an element of ${\mathcal{S}_\omega}$ or ${\mathcal{S}_\infty}$. Then $S_n = \emptyset$ for all but finitely many $n$.
\end{lem} \proof We have several cases to consider.\\ \emph{Case 1:} If $\cup_{n \in \mathbb{N}}S_n = \emptyset \in \mathcal{S}_\diamond$ for $\diamond \in \{\omega, \infty\}$, we have $S_n = \emptyset$ for all $n \in \mathbb{N}$.\\ \emph{Case 2:} If $\cup_{n \in \mathbb{N}}S_n = \{u\} \in {\mathcal{S}_\infty}$ with suitable $u \in \ensuremath{\mathcal{A}}star$ we get $S_n=\emptyset$ for all but one $n \in \mathbb{N}$ since the $S_n$ are disjoint.\\ \emph{Case 3:} Let $\cup_{n \in \mathbb{N}}S_n =~ \cone{\diamond}{u}$ with a suitable $u \in \ensuremath{\mathcal{A}}star$ for $\diamond \in \{\omega, \infty\}$. Suppose there are infinitely many $n \in \mathbb{N}$ such that $S_n \not = \emptyset$. Without loss of generality we can assume $S_n\not=\emptyset$ for all $n \in \mathbb{N}$ and thus there is an infinite set $U :=\set{u_n\mid n \in \mathbb{N}}$ of words such that for each $n \in \mathbb{N}$ we either have $S_n = \{u_n\}$ (only for $\diamond=\infty$) or $S_n =~\cone{\diamond}{u_n}$ (for $\diamond \in \{\omega, \infty\}$). Necessarily we have $u \sqsubseteq u_n$ for all $n \in \mathbb{N}$. We will now revive our tree metaphor from Section \ref{sec:cones}: The prefix-closure $\mathrm{pref}(U) = \set{v \in \ensuremath{\mathcal{A}}star \mid \exists n \in \mathbb{N}: v \sqsubseteq u_n}$ of $U$ is the set of vertices contained in the paths from the root $\varepsilon$ (via $u$) to $u_n$. We consider the subtree $\mathcal{T'} = (\mathrm{pref}(U), E', \varepsilon, l|_{E'})$ with $E' = \set{\set{u,ua} \mid a \in \ensuremath{\mathcal{A}}, u, ua \in \mathrm{pref}(U)}$. Since the set $U$ and hence also $\mathrm{pref}(U)$ is infinite, we have thus constructed an infinite, connected graph where every vertex has finite degree (because $\ensuremath{\mathcal{A}}$ is finite). By König's Lemma \cite[Satz 3]{koenigd} there is an infinite path starting at the root $\varepsilon$.
Let $v \in \ensuremath{\mathcal{A}}omega$ be the unique, infinite word associated to that path (which we get by concatenating all the labels along this path). Since $u \sqsubset v$ we must have $v \in ~\cone{\diamond}{u}$. Moreover, we know that $~\cone{\diamond}{u} = \cup_{n \in \mathbb{N}} S_n$ and due to the fact that the $S_n$ are pairwise disjoint we must find a unique $m \in \mathbb{N}$ with $v \in S_m$. This necessarily requires $S_m$ to be a cone of the form $S_m = ~\cone{\diamond}{u_m}$ with $u_m \in U$ and $u_m \sqsubset v$. Again due to the fact that the $S_n$ are disjoint we know that there cannot be a $u' \in U$ with $u_m \sqsubset u'$ and hence there also cannot be a $u' \in \mathrm{pref}(U)$ with $u_m \sqsubset u'$. Thus the vertex $u_m$ is a leaf of the tree $\mathcal{T}'$ and therefore the finite path from $\varepsilon$ to $u_m$ is the only path from $\varepsilon$ that contains $u_m$. This contradicts the existence of $v$ because this path is infinite and contains $u_m$. Hence our assumption must have been wrong and there cannot be infinitely many $n \in \mathbb{N}$ with $S_n \not = \emptyset$. \qed \begin{lem} \label{lem:premeas_singletons} Any map $\mu \colon {\mathcal{S}_*} \to \overline{\mathbb{R}}_+$ where $\mu(\emptyset) = 0$ is $\sigma$-additive and thus a pre-measure. \end{lem} \proof Let $\left(S_n\right)_{n \in \mathbb{N}}$ be a family of disjoint sets from ${\mathcal{S}_*}$ with $\left(\cup_{n \in \mathbb{N}}S_n\right) \in {\mathcal{S}_*}$, then we have $S_n = \emptyset$ for all but at most one $n \in \mathbb{N}$. \qed \begin{lem} \label{lem:premeas_omega_cone} A map $\mu\colon \mathcal{S}_\omega \to \overline{\mathbb{R}}_+$ where $\mu(\emptyset) = 0$ is $\sigma$-additive and thus a pre-measure if and only if the following equation holds for all $u \in \ensuremath{\mathcal{A}}star$. 
\begin{align} \mu\left(\cone{\omega}{u}\right) = \sum_{a \in \ensuremath{\mathcal{A}}}\mu\left(\cone{\omega}{ua}\right)\label{eq:premeas_omega_cone} \end{align} \end{lem} \noindent We omit the proof of this lemma as it is very similar to the proof of the following lemma. \begin{lem} \label{lem:premeas_infty_cone} A map $\mu\colon {\mathcal{S}_\infty} \to \overline{\mathbb{R}}_+$ where $\mu(\emptyset) = 0$ is $\sigma$-additive and thus a pre-measure if and only if the following equation holds for all $u \in \ensuremath{\mathcal{A}}star$. \begin{align} \mu\left(\cone{\infty}{u}\right) = \mu\left(\set{u}\right) + \sum_{a \in \ensuremath{\mathcal{A}}}\mu\left(\cone{\infty}{ua}\right)\label{eq:premeas_infty_cone} \end{align} \end{lem} \proof Obviously $\sigma$-additivity of $\mu$ implies equality \eqref{eq:premeas_infty_cone}. Let now $\left(S_n\right)_{n \in \mathbb{N}}$ be a family of disjoint sets from ${\mathcal{S}_\infty}$ with $\left(\cup_{n \in \mathbb{N}}S_n\right) \in {\mathcal{S}_\infty}$. Using Lemma~\ref{lem:union_cones} we know that (after resorting) we can assume that there is an $N \in \mathbb{N}$ such that $S_n \not= \emptyset$ for $1 \leq n \leq N$ and $S_n = \emptyset$ for $n > N$. For non-trivial cases (trivial means $S_n = \emptyset$ for all but one set) there must be a word $u \in \ensuremath{\mathcal{A}}star$ such that $\cone{\infty}{u} = \left(\cup_{n = 1}^NS_n\right)$. Because $u$ is an element of $\cone{\infty}{u}$ there must be a natural number $m$ with $u \in S_m$ which is unique because the family is disjoint. Without loss of generality we assume that $u \in S_1$. By construction of ${\mathcal{S}_\infty}$ and the fact that $\cup_{n=1}^NS_n =~\cone{\infty}{u}$ there are two cases to consider: either $S_1 = \set{u}$ or $S_1=~\cone{\infty}{u}$. The latter cannot be true since this would imply $S_n = \emptyset$ for $n\geq 2$ which we explicitly excluded. Thus we have $S_1 = \set{u}$. 
We remark that \begin{align*} \bigcup_{a \in \ensuremath{\mathcal{A}}} \cone{\infty}{ua} =~\cone{\infty}{u}\setminus \set{u} = \left(\bigcup_{n=2}^NS_n\right). \end{align*} Again by construction of ${\mathcal{S}_\infty}$ we must be able to select sets $S_k^a \in \set{S_n \mid 2 \leq n \leq N}$ for all $a \in \ensuremath{\mathcal{A}}$ and all $k$ where $1 \leq k \leq K_a < N$ for a constant $K_a$ such that $\cup_{k =1}^{K_a} S_k^a =~\cone{\infty}{ua}$. This selection is unique in the following manner: For $a,b\in\ensuremath{\mathcal{A}}$ where $a \not = b$ and $1\leq k \leq K_a$, $1\leq l \leq K_b$ we have $S_k^a \not= S_l^b$. Additionally it is complete in the sense that $\set{S_k^a\mid a \in \ensuremath{\mathcal{A}}, 1 \leq k \leq K_a} = \set{S_n\mid 2 \leq n \leq N}$. We apply our equation \eqref{eq:premeas_infty_cone} to get \begin{align*} \mu\left(\bigcup_{n=1}^NS_n\right)= \mu\left(\cone{\infty}{u}\right) = \mu\left(S_1\right) + \sum_{a \in \ensuremath{\mathcal{A}}}\mu\left(\bigcup_{k =1}^{K_a} S_k^a\right) \end{align*} and note that we can repeat the procedure for each of the disjoint unions $\cup_{k=1}^{K_a} S_k^a$. Since $K_a < N$ for all $a$ this procedure stops after finitely many steps yielding $\sigma$-additivity of $\mu$. \qed Using these results, we can now finally prove Proposition~\ref{prop:trace_premeasure}. \proof[Proof of Proposition~\ref{prop:trace_premeasure}] We will look at the different types of PTS separately. For $\diamond = 0$ nothing has to be shown because $\sigalg[\emptyset]{\set{\emptyset}} = \set{\emptyset}$ and $\mathbf{tr}(x)\colon \set{\emptyset} \to [0,1]$ is already uniquely defined by $\mathbf{tr}(x)(\emptyset) = 0$. For $\diamond = *$ Lemma~\ref{lem:premeas_singletons} yields immediately that the equations define a pre-measure. For $\diamond = \infty$ we have to check validity of equation \eqref{eq:premeas_infty_cone} of Lemma~\ref{lem:premeas_infty_cone}. 
We will do so using induction on the length of the word $u \in \ensuremath{\mathcal{A}}star$ in that equation. We have \begin{align*} &\mathbf{tr}(x)(\cone{\infty}{\varepsilon}) = 1 = \alpha(x)(\ensuremath{\mathcal{A}} \times X + \mathbf{1}) = \alpha(x)(\mathbf{1}) + \sum_{a \in \ensuremath{\mathcal{A}}}\P{a}{x}{X} \\ &= \mathbf{tr}(x)(\set{\varepsilon}) + \sum_{a \in \ensuremath{\mathcal{A}}}\Int[x'\in X]{1}[\P{a}{x}{x'}]\\ &= \mathbf{tr}(x)(\set{\varepsilon}) + \sum_{a \in \ensuremath{\mathcal{A}}}\Int[x'\in X]{\mathbf{tr}(x')(\cone{\infty}{\varepsilon})}[\P{a}{x}{x'}]\\ &=\mathbf{tr}(x)(\set{\varepsilon}) + \sum_{a \in \ensuremath{\mathcal{A}}}\mathbf{tr}(x)(\cone{\infty}{a\varepsilon}) = \mathbf{tr}(x)(\set{\varepsilon}) + \sum_{a \in \ensuremath{\mathcal{A}}}\mathbf{tr}(x)(\cone{\infty}{\varepsilon a}) \end{align*} for all $x \in X$. Now let us assume that for all $x \in X$ and all words $u \in \ensuremath{\mathcal{A}}^{\leq{n}}$ of length less or equal to a fixed $n \in \mathbb{N}$ the induction hypothesis \begin{align*} \mathbf{tr}(x)(\cone{\infty}{u}) = \mathbf{tr}(x)(\set{u}) + \sum_{b \in \ensuremath{\mathcal{A}}} \mathbf{tr}(x)(\cone{\infty}{ub}) \end{align*} is fulfilled. 
Then for all $x \in X$, all $a \in \ensuremath{\mathcal{A}}$ and all $u \in \ensuremath{\mathcal{A}}^{\leq n}$ we calculate \begin{align*} &\mathbf{tr}(x)(\cone{\infty}{au}) = \Int[{x' \in X}]{\mathbf{tr}(x')(\cone{\infty}{u})}[\P{a}{x}{x'}] \\ &\quad = \Int[{x' \in X}]{\left(\mathbf{tr}(x')(\set{u}) + \sum_{b \in \ensuremath{\mathcal{A}}} \mathbf{tr}(x')(\cone{\infty}{ub})\right)}[\P{a}{x}{x'}]\\ &\quad = \Int[{x' \in X}]{\mathbf{tr}(x')(\set{u})}[\P{a}{x}{x'}] + \sum_{b \in \ensuremath{\mathcal{A}}} \Int[x'\in X]{\mathbf{tr}(x')(\cone{\infty}{ub})}[\P{a}{x}{x'}]\\ &\quad = \mathbf{tr}(x)(\set{au}) + \sum_{b \in \ensuremath{\mathcal{A}}}\mathbf{tr}(x)(\cone{\infty}{aub}) \end{align*} and hence also for $au \in \ensuremath{\mathcal{A}}^{\leq {n+1}}$ equation \eqref{eq:premeas_infty_cone} is fulfilled and by induction we conclude that it is valid for all $u \in \ensuremath{\mathcal{A}}star$. The only difficult case is $\diamond = \omega$ where we will, of course, apply Lemma~\ref{lem:premeas_omega_cone}. Let $u = u_1\dots u_m$ with $u_k \in \ensuremath{\mathcal{A}}$ for every $k \in \mathbb{N}$ with $k \leq m$, then multiple application of the defining equation \eqref{eq:trace_main_equation2} yields \begin{align*} \mathbf{tr}(x)\big(\cone{\omega}{u}\big) &= \int\limits_{x_1 \in X}\cdots\int\limits_{x_m \in X}\!1\,\mathrm{d}\P{u_m}{x_{m-1}}{x_m}\cdots\mathrm{d}\P{u_1}{x}{x_1} \end{align*} and for arbitrary $a \in \mathcal{A}$ we obtain analogously: \begin{align*} \mathbf{tr}(x)\big(\cone{\omega}{ua}\big) &= \int\limits_{x_1 \in X}\cdots\int\limits_{x_m \in X}\!\P{a}{x_m}{X}\,\mathrm{d}\P{u_m}{x_{m-1}}{x_m}\cdots\mathrm{d}\P{u_1}{x}{x_1}\,. \end{align*} All integrals exist and are bounded above by $1$ so we can use the linearity and monotonicity of the integral to exchange the finite sum and the integrals.
Using the fact that \begin{align*} \sum_{a \in \mathcal{A}}\P{a}{x_m}{X} = \sum_{a \in \mathcal{A}}\alpha(x_m)(\set{a} \times X) = \alpha(x_m)(\ensuremath{\mathcal{A}} \times X) = 1 \end{align*} we obtain that indeed the necessary and sufficient equality \begin{align*} \mathbf{tr}(x)\big(\cone{\omega}{u}\big) = \sum_{a \in \mathcal{A}}\mathbf{tr}(x)\big(\cone{\omega}{ua}\big) \end{align*} is valid for all $u \in \mathcal{A}^*$ and thus Lemma~\ref{lem:premeas_omega_cone} yields that also $\mathbf{tr}(x)\colon {\mathcal{S}_\omega} \to \overline{\mathbb{R}}_+$ is $\sigma$-additive and thus a pre-measure. Now let us check that the pre-measures for $\diamond \in \set{*, \omega, \infty}$ are $\sigma$-finite and that their unique extensions must be (sub-)probability measures. For $\diamond \in \set{\omega, \infty}$ this is obvious and in these cases the unique extension must be a probability measure because by definition we have $\mathbf{tr}(x)(\ensuremath{\mathcal{A}}omega) = 1$ and $\mathbf{tr}(x)(\ensuremath{\mathcal{A}}infty) = 1$ respectively. For the remaining case ($\diamond = *$) we will use induction. We have $\mathbf{tr}(x)(\{\varepsilon\}) = \alpha(x)(\mathbf{1}) \leq 1$ for every $x \in X$. Let us now assume that for a fixed but arbitrary $n \in \mathbb{N}$ the inequality $\mathbf{tr}(x)(\{u\}) \leq 1$ is valid for all $x \in X$ and all words $u \in \mathcal{A}^{\leq n}$ with length less or equal to $n$. Then for any word $u' \in \mathcal{A}^{n+1}$ of length $n+1$ we have $u' = au$ with $a \in \mathcal{A}$ and $u \in \mathcal{A}^n$. We observe that \begin{align*} \mathbf{tr}(x)(\{au\}) = \Int[{x' \in X}]{\underbrace{\mathbf{tr}(x')(\{u\})}_{\leq 1}}[\P{a}{x}{x'}] \leq \Int{1}[\P{a}{x}{x'}]=\P{a}{x}{X} \leq 1 \end{align*} and conclude by induction that $\mathbf{tr}(x)(\{u\}) \leq 1$ is valid for all $u \in \mathcal{A}^*$ and all $x \in X$. Due to the fact that $\mathcal{A}^* = \cup_{u \in \mathcal{A}^*} \{u\}$ this yields that $\mathbf{tr}$ is $\sigma$-finite. 
Again by induction we will show that $\mathbf{tr}$ is bounded above by $1$ and thus a sub-probability measure. We have $\mathbf{tr}(x)\left(\mathcal{A}^{\leq 0}\right) = \mathbf{tr}(x)(\{\varepsilon\})\leq 1$ for all $x \in X$. Suppose that for a fixed but arbitrary $n \in \mathbb{N}$ the inequality $\mathbf{tr}(x)\left(\mathcal{A}^{\leq n-1}\right) \leq 1$ holds for all $x \in X$. We conclude with the following calculation \begin{align*} \mathbf{tr}(x)\left(\mathcal{A}^{\leq n}\right) &= \mathbf{tr}(x)\left( \cup_{u \in \mathcal{A}^{\leq n}} \{u\}\right) = \sum\limits_{u \in \mathcal{A}^{\leq n}} \mathbf{tr}(x)\left( \{u\}\right)\\ &= \mathbf{tr}(x)(\{\varepsilon\}) + \sum_{a \in \mathcal{A}} \sum_{u \in \mathcal{A}^{\leq n-1}} \mathbf{tr}(x)\left(\{au\}\right) \\ &= \alpha(x)(\mathbf{1}) + \sum\limits_{a \in \mathcal{A}} \sum\limits_{u \in \mathcal{A}^{\leq n-1}} \Int{\mathbf{tr}(x')\left(\set{u}\right)}[\P{a}{x}{x'}] \\ &= \alpha(x)(\mathbf{1}) + \sum\limits_{a \in \mathcal{A}} \Int{\sum_{u \in \mathcal{A}^{\leq n-1}}\!\mathbf{tr}(x')(\{u\})}[\P{a}{x}{x'}]\\ &= \alpha(x)(\mathbf{1}) + \sum\limits_{a \in \mathcal{A}} \Int{\underbrace{\left(\mathbf{tr}(x')\left(\mathcal{A}^{\leq n-1}\right)\right)}_{\leq 1}}[\P{a}{x}{x'}]\\ &\leq \alpha(x)(\mathbf{1}) + \sum\limits_{a \in \mathcal{A}}\Int{1}[\P{a}{x}{x'}]=\alpha(x)(\mathbf{1}) + \sum\limits_{a \in \mathcal{A}} \P{a}{x}{X}\\ &= \alpha(x)(\mathbf{1}) + \sum\limits_{a \in \mathcal{A}} \alpha(x)(\{a\} \times X)= \alpha(x)(\mathcal{A} \times X + \mathbf{1}) \leq 1 \end{align*} using the linearity and monotonicity of the integral which can be applied here since $\mathcal{A}$ is finite which in turn implies that $\mathcal{A}^{\leq n-1}$ is finite and all the integrals $\Int{\mathbf{tr}(x')\left(\set{u}\right)}[\P{a}{x}{x'}]$ exist because $\mathbf{tr}(x')\left(\set{u}\right)$ is bounded above by $1$. 
By induction we can thus conclude that \begin{align*} \forall x \in X\ \forall n \in \mathbb{N}_0: \mathbf{tr}(x)\left(\mathcal{A}^{\leq n} \right) \leq 1 \end{align*} which is equivalent to \begin{align*} \forall x \in X\ \sup_{n \in \mathbb{N}_0}\left(\mathbf{tr}(x)\left(\mathcal{A}^{\leq n} \right)\right) \leq 1\,. \end{align*} Since $\mathbf{tr}(x)$ is a measure (and thus non-negative and $\sigma$-additive), the sequence given by $\left(\mathbf{tr}(x)\left(\mathcal{A}^{\leq n}\right)\right)_{n \in \mathbb{N}_0}$ is a monotonically increasing sequence of real numbers bounded above by $1$. Furthermore, $\mathbf{tr}(x)$ is continuous from below as a measure and we have $\mathcal{A}^{\leq n} \subseteq \mathcal{A}^{\leq n+1}$ for all $n \in \mathbb{N}_0$ and thus we obtain \begin{align*} \mathbf{tr}(x)\left(\mathcal{A}^*\right) = \mathbf{tr}(x)\left( \bigcup\limits_{n =1}^\infty \mathcal{A}^{\leq n}\right) = \lim_{n \to \infty} \mathbf{tr}(x)\left( \mathcal{A}^{\leq n}\right) = \sup_{n \in\mathbb{N}_0}\mathbf{tr}(x)\left(\mathcal{A}^{\leq n} \right) \leq 1\,. \end{align*} \qed \subsection{The Trace Function is a Kleisli Arrow} Now that we know that our definition of a trace measure is mathematically sound, we remember that we wanted to show that it is ``natural'', meaning that it arises from the final coalgebra in the Kleisli category of the (sub-)probability monad. We start by showing that the function $\mathbf{tr}\colon X \to T\ensuremath{\mathcal{A}}^\diamond$ we get from Definition \ref{def:trace_premeasure} is a Kleisli arrow by proving that it is a Markov kernel. Since $\mathbf{tr}(x)$ is a sub-probability measure for each $x \in X$ by Proposition \ref{prop:trace_premeasure} we just have to show that for each $S \in \sigalg[\ensuremath{\mathcal{A}}^\diamond]{\mathcal{S}_\diamond}$ the function $x \mapsto \mathbf{tr}(x)(S)$ is Borel-measurable. 
This is easy for elements $S$ of the previously defined semirings: \begin{lem} \label{lem:measurabilityGenerators} For every $S \in \mathcal{S}_\diamond$ the function $x \mapsto \mathbf{tr}(x)(S)$ is Borel-measurable. \end{lem} \proof For $\diamond=0$ nothing has to be shown. For the other cases we will use induction on the length of a word $u$. For $\diamond \in \set{*,\infty}$ measurability of $x \mapsto \mathbf{tr}(x)(\set{\varepsilon})$ follows from measurability of $x \mapsto \alpha(x)(\mathbf{1})$ and for $\diamond \in \set{\omega,\infty}$ the function $x \mapsto \mathbf{tr}(x)\left(\cone{\diamond}{\varepsilon}\right)$ is the constant function with value $1$ and thus is measurable. Suppose now that for an $n \in \mathbb{N}$ we have established that for all $u \in \ensuremath{\mathcal{A}}^n$ the functions $x \mapsto \mathbf{tr}(x)(\set{u})$ and $x \mapsto \mathbf{tr}(x)(\cone{\diamond}{u})$ (whenever they are meaningful) are measurable. Then for all $a \in \ensuremath{\mathcal{A}}$ and all $u \in \ensuremath{\mathcal{A}}^n$ we have $\mathbf{tr}(x)(\set{au}) = \Int[x' \in X]{\mathbf{tr}(x')(\set{u})}[{\mathbf{P}_{a}(x,x')}]$ and also $\mathbf{tr}(x)(\cone{\diamond}{au}) = \Int[x' \in X]{\mathbf{tr}(x')(\cone{\diamond}{u})}[{\mathbf{P}_{a}(x,x')}]$ and by applying Lemma \ref{lem:measMarkovKernel} we get the desired measurability. \qed Without any more complicated tools we get the complete result for any $*$-PTS: \begin{prop} \label{prop:MeasurabilityFiniteTrace} For every $S \in \powerset{\ensuremath{\mathcal{A}}star}$ the function $x \mapsto\mathbf{tr}(x)(S)$ is Borel-measurable. \end{prop} \proof We know from Lemma \ref{lem:measurabilityGenerators} that $x\mapsto\mathbf{tr}(x)(S)$ is measurable for every $S \in {\mathcal{S}_*}$. 
Recall that $\sigalg[\ensuremath{\mathcal{A}}star]{{\mathcal{S}_*}} = \powerset{\ensuremath{\mathcal{A}}star}$ and every $S \in \powerset{\ensuremath{\mathcal{A}}star}$ is at most countably\footnote{For finite $S$ the proof works analogously but simpler!} infinite, say $S := \set{u_1, u_2,\dots}$ and we have the trivial, disjoint decomposition $S = \cup_{n=1}^\infty\set{u_n}$. If we define $T_N := \cup_{n=1}^N \set{u_n}$ we get an increasing sequence of sets converging to $S$. Hence by continuity of the sub-probability measures $S' \mapsto \mathbf{tr}(x)(S')$ we get $\mathbf{tr}(x)(S) = \lim_{N\to \infty}\mathbf{tr}(x)(T_N) = \lim_{N\to \infty}\sum_{n=1}^N \mathbf{tr}(x)(\set{u_n})$. Thus $x \mapsto\mathbf{tr}(x)(S)$ is the pointwise limit of finite sums of measurable functions and therefore measurable. \qed From here until the end of this subsection we restrict $\diamond$ to be either $\omega$ or $\infty$ if not indicated otherwise. As before, we will rely on transfinite induction for our proof. \begin{lem} For every $S \in \mathbb{R}diamond(0)$ the function $x \mapsto\mathbf{tr}(x)(S)$ is measurable. \end{lem} \proof We know from Lemma \ref{lem:measurabilityGenerators} that $x\mapsto\mathbf{tr}(x)(S)$ is measurable for every $S \in \mathcal{S}_\diamond$. Let $S \in \mathbb{R}diamond(0)$, then $S = \cup_{n=1}^N S_n$ with $S_n \in \mathcal{S}_\diamond$ disjoint for $1 \leq n \leq N \in \mathbb{N}$. We have $\mathbf{tr}(x)(S) = \sum_{n=1}^N \mathbf{tr}(x)(S_n)$ which is measurable as a finite sum of measurable functions. \qed \begin{lem} Let $\alpha$ be an ordinal s.t. the function $x \mapsto \mathbf{tr}(x)(S)$ is measurable for each $S \in \mathbb{R}diamond(\alpha)$. Then $x \mapsto \mathbf{tr}(x)(S)$ is measurable for each $S \in \Intersections{\mathbb{R}diamond(\alpha)}$. \end{lem} \proof Let $S \in \Intersections{\mathbb{R}diamond(\alpha)}$, then $S = \cap_{n=1}^\infty S_n$ with $S_n \in \mathbb{R}diamond(\alpha)$. 
We define $T_N := \cap_{n=1}^N S_n$ for all $N \in \mathbb{N}$, then $T_N \in \mathbb{R}diamond(\alpha)$ by Lemma \ref{lem:finite_union_intersection}. We have $T_N \supseteq T_{N+1}$ for all $N \in \mathbb{N}$ and $S = \cap_{N=1}^\infty T_N$. Continuity of $S' \mapsto \mathbf{tr}(x)(S')$ for every $x \in X$ yields $\mathbf{tr}(x)(S) = \lim_{N \to \infty} \mathbf{tr}(x)\left(T_N\right)$. Hence $x \mapsto \mathbf{tr}(x)(S)$ is measurable as the pointwise limit of measurable functions. \qed \begin{lem} Let $\alpha$ be an ordinal s.t. the function $x \mapsto \mathbf{tr}(x)(S)$ is measurable for each $S \in \Intersections{\mathbb{R}diamond(\alpha)}$. Then $x \mapsto \mathbf{tr}(x)(S)$ is measurable for each $S \in \mathbb{R}diamond(\alpha+1)$. \end{lem} \proof Let $S \in \mathbb{R}diamond(\alpha+1)$, then $S = \cup_{n=1}^\infty S_n$ with $S_n \in \Intersections{\mathbb{R}diamond(\alpha)}$. We define $T_N:= \cup_{n=1}^N S_n$ for all $N \in \mathbb{N}$. Then we know that $T_N \in \Intersections{\mathbb{R}diamond(\alpha)}$ by Lemma \ref{lem:IntersectionIsClosedUnderFiniteUnion}. We have $T_N \subseteq T_{N+1}$ for all $N \in \mathbb{N}$ and $S = \cup_{N=1}^\infty T_N$. Continuity of the sub-probability measures $S' \mapsto \mathbf{tr}(x)(S')$ yields for every $x \in X$ that $\mathbf{tr}(x)(S) = \lim_{N \to \infty} \mathbf{tr}(x)\left(T_N\right)$. Hence the function $x \mapsto \mathbf{tr}(x)(S)$ is measurable as the pointwise limit of measurable functions. \qed \begin{lem} Let $\gamma$ be a limit ordinal s.t. for all ordinals $\alpha < \gamma$ the function $x \mapsto \mathbf{tr}(x)(S)$ is measurable for each $S \in \mathbb{R}diamond(\alpha)$. Then $x \mapsto \mathbf{tr}(x)(S)$ is measurable for each $S \in \mathbb{R}diamond(\gamma)$. \end{lem} \proof Let $S \in \mathbb{R}diamond(\gamma)$, then there is an $\alpha < \gamma$ such that $S \in \mathbb{R}diamond(\alpha)$ and hence $x \mapsto\mathbf{tr}(x)(S)$ is measurable for this $S$. 
\qed By using the characterization $\sigalg[\ensuremath{\mathcal{A}}^\diamond]{\mathcal{S}_\diamond} = \mathbb{R}diamond(\omega_1)$ of Proposition \ref{prop:TransFiniteSigAlg} and combining the four preceding lemmas we get the desired result: \begin{prop} \label{prop:TraceIsMeasurable} For every $S \in \sigalg[\ensuremath{\mathcal{A}}^\diamond]{\mathcal{S}_\diamond}$ the function $x \mapsto \mathbf{tr}(x)(S)$ is measurable. \qed \end{prop} Finally, combining this result with Proposition \ref{prop:trace_premeasure} and the fact that Markov kernels are in one-to-one correspondence with Kleisli arrows \cite[Proposition 2.7]{Dob07b} yields: \begin{prop} \label{prop:TraceIsKleisli} Let $\diamond \in \set{0, *, \omega, \infty}$ and $(T, \eta, \mu)$ be the (sub-)probability monad. Then the function $\mathbf{tr}\colon X \to T\ensuremath{\mathcal{A}}^\diamond$ given by Definition \ref{def:trace_premeasure} is a Kleisli arrow. \qed \end{prop} \subsection{The Trace Measure and Final Coalgebra} Before stating the next proposition which presents a close connection between the unique existence of the map into the final coalgebra and the unique extension of a family of $\sigma$-finite pre-measures, we first give some intuition: in order to show that a coalgebra is final it is enough to show that every other coalgebra admits a unique homomorphism into it. Commutativity of the square underlying the homomorphism and uniqueness have to be shown for every element of a $\sigma$-algebra and one of our main contributions is to reduce the proof obligations to a smaller set of generators, which form a covering semiring. This proposition will later be applied to our four types of transition systems by using the semirings defined earlier and showing that there can be only one way to assign probabilities to their elements. 
\begin{prop} \label{thm_finalcoalg} Let $(T, \eta, \mu)$ be either the sub-probability monad $(\mathbb{S}, \eta, \mu)$ or the probability monad $(\mathbb{P}, \eta, \mu)$, $F$ be an endofunctor on $\mathbf{Meas}$ with a distributive law $\lambda \colon FT \mathbb{R}ightarrow TF$ and $(\Omega, \kappa)$ be an $\overline{F}$-coalgebra where $\mathcal{S}igma_{F\Omega} = \sigalg[F\Omega]{\mathcal{S}_{F\Omega}}$ for a covering semiring $\mathcal{S}_{F\Omega}$. Then the following statements are equivalent: \begin{enumerate} \item $(\Omega, \kappa)$ is a final $\overline{F}$-coalgebra in $\mathcal{K}\ell(T)$. \item For every $\overline{F}$-coalgebra $(X, \alpha)$ in $\mathcal{K}\ell(T)$ there is a unique Kleisli arrow $\mathbf{tr}\colon X \to T\Omega$ satisfying the following condition: \begin{align} \forall x \in X, \forall S \in \mathcal{S}_{F\Omega}: \quad \Int[\Omega]{p_S \circ \kappa}[\mathbf{tr}(x)] = \Int[{FX}]{p_S \circ \lambda_\Omega \circ F(\mathbf{tr})}[\alpha(x)]\,. \label{eq:giry_final_coalgebra} \end{align} \end{enumerate} \end{prop} \proof We consider the final coalgebra diagram in $\mathcal{K}\ell(T)$. \begin{align*}\begin{xy}\xymatrix{ X \ar[rr]^{\alpha} \ar[d]_{\mathbf{tr}} && \overline{F}X \ar[d]^{\overline{F}(\mathbf{tr}) = \lambda_\Omega \circ F(\mathbf{tr})}\\ \Omega \ar[rr]^{\kappa} && \overline{F}\Omega }\end{xy}\end{align*} By definition $(\Omega, \kappa)$ is final iff for every $\overline{F}$-coalgebra $(X , \alpha)$ there is a unique Kleisli arrow $\mathbf{tr} \colon X \to T\Omega$ making the diagram commute. 
We define \begin{align*} g := \mu_{F\Omega} \circ T(\kappa)\circ \mathbf{tr}\ \mbox{(down, right)} \quad \text{and}\quad h:= \mu_{F\Omega} \circ T\left(\overline{F}(\mathbf{tr})\right) \circ \alpha\ \mbox{(right, down)} \end{align*} and note that commutativity of the final coalgebra diagram is equivalent to \begin{align} \forall x \in X,\forall S \in \mathcal{S}_{F\Omega}: \quad g(x)(S) &= h(x)(S) \label{eq:g_equals_h} \end{align} because $\mathcal{S}_{F\Omega}$ is a covering semiring and for all $x \in X$ both $g(x)$ and $h(x)$ are sub-probability measures and thus finite measures which allows us to apply Corollary \ref{cor:equality_of_measures}. We calculate \begin{align*} g(x) (S) &= (\mu_{F\Omega} \circ T(\kappa) \circ \mathbf{tr}) (x) (S) =\mu_{F\Omega}\left(T(\kappa)(\mathbf{tr}(x))\right)(S)\\ & =\mu_{F\Omega} \left(\mathbf{tr}(x) \circ \kappa^{-1}\right) (S) = \Int{p_S}[{\left(\mathbf{tr}(x) \circ \kappa^{-1}\right)}] = \Int{p_S\circ\kappa} [\mathbf{tr}(x)] \end{align*} and if we define $\rho := \overline{F}(\mathbf{tr}) = \lambda_\Omega \circ F(\mathbf{tr}) \colon FX \to TF\Omega$ we obtain \begin{align*} h(x)(S) &= (\mu_{F\Omega} \circ T(\rho) \circ \alpha) (x) (S) = \mu_{F\Omega} \left(T(\rho) (\alpha (x))\right) (S) =\mu_{F\Omega} \left(\alpha(x)\circ \rho^{-1}\right) (S) \\ &= \int\! p_S\, \mathrm{d}\left(\alpha(x)\circ \rho^{-1}\right) = \int\! p_S \circ \rho\, \mathrm{d}\alpha(x) = \int\! p_S \circ \lambda_\Omega \circ F(\mathbf{tr})\, \mathrm{d}\alpha(x) \end{align*} and thus \eqref{eq:g_equals_h} is equivalent to \eqref{eq:giry_final_coalgebra}. \qed We immediately obtain the following corollary. \begin{cor} \label{cor:main_corollary} Let in Proposition \ref{thm_finalcoalg} $\kappa = \eta_{F\Omega} \circ \varphi$, for an isomorphism $\varphi \colon \Omega \to F\Omega$ in $\mathbf{Meas}$, and let $\mathcal{S}_\Omega \subseteq \powerset{\Omega}$ be a covering semiring such that $\mathcal{S}igma_\Omega = \sigalg[\Omega]{\mathcal{S}_\Omega}$. 
Then equation \eqref{eq:giry_final_coalgebra} is equivalent to \begin{align} \forall x \in X, \forall S \in \mathcal{S}_\Omega: \quad \mathbf{tr}(x)(S) = \Int{p_{\varphi(S)} \circ \lambda_\Omega \circ F(\mathbf{tr})}[\alpha(x)]\,. \label{eq:giry_final_coalgebra_semiring} \end{align} \end{cor} \proof Since $\varphi$ is an isomorphism in $\mathbf{Meas}$ we know from Proposition~\ref{prop:isomorphisms} that $\mathcal{S}igma_{F\Omega} = \sigalg[F\Omega]{\varphi(\mathcal{S}_\Omega)}$. For every $S \in \mathcal{S}_{\Omega}$ and every $u \in \Omega$ we calculate \[p_{\varphi(S)}\circ \kappa (u) = p_{\varphi(S)}\ \circ\eta_{F\Omega} \circ \varphi (u) = \delta_{\varphi(u)}^{F\Omega} (\varphi(S))= \chi_{\varphi(S)}(\varphi(u)) = \chi_{S}({u}) \] and hence we have $\int\! p_{\varphi(S)}\circ\kappa\, \mathrm{d}\mathbf{tr}(x) = \int\! \chi_S \, \mathrm{d}\mathbf{tr}(x) = \mathbf{tr}(x)(S)$. \qed Since we want to apply this corollary to sets of words, we now define the necessary isomorphism $\varphi$ using the characterization given in Proposition~\ref{prop:isomorphisms}. \begin{prop} \label{prop:words_iso} Let $\mathcal{A}$ be an arbitrary alphabet and let \begin{align} \varphi\colon \ensuremath{\mathcal{A}}infty \to \ensuremath{\mathcal{A}} \times \ensuremath{\mathcal{A}}infty + \mathbf{1}, \quad \varepsilon \mapsto \checkmark, \quad au \mapsto (a,u)\,. 
\end{align} Then $\varphi$, $\varphi|_{\ensuremath{\mathcal{A}}star}\colon \ensuremath{\mathcal{A}}star \to \varphi(\ensuremath{\mathcal{A}}star)$ and $\varphi|_\ensuremath{\mathcal{A}}omega\colon \ensuremath{\mathcal{A}}omega \to \varphi(\ensuremath{\mathcal{A}}omega)$ are isomorphisms in $\mathbf{Meas}$ because they are bijective functions\footnote{Note that we restrict not only the domain of $\varphi$ here but also its codomain.} and we have \begin{align} \sigalg[\ensuremath{\mathcal{A}} \times \ensuremath{\mathcal{A}}omega]{\varphi({\mathcal{S}_\omega})} &= \powerset{\ensuremath{\mathcal{A}}} \otimes \sigalg[\ensuremath{\mathcal{A}}omega]{{\mathcal{S}_\omega}}\,, \\ \sigalg[\ensuremath{\mathcal{A}} \times \ensuremath{\mathcal{A}}star + \mathbf{1}]{\varphi({\mathcal{S}_*})} &= \powerset{\ensuremath{\mathcal{A}}} \otimes \sigalg[\ensuremath{\mathcal{A}}star]{{\mathcal{S}_*}} \oplus \powerset{\mathbf{1}}\,, \label{eq:sigalg_eq_Astar}\\ \sigalg[\ensuremath{\mathcal{A}} \times \ensuremath{\mathcal{A}}infty + \mathbf{1}]{\varphi({\mathcal{S}_\infty})} &= \powerset{\ensuremath{\mathcal{A}}} \otimes \sigalg[\ensuremath{\mathcal{A}}infty]{{\mathcal{S}_\infty}} \oplus \powerset{\mathbf{1}}\,. \label{eq:sigalg_eq_Ainfty} \end{align} \end{prop} \proof Bijectivity is obvious. 
We will now show validity of \eqref{eq:sigalg_eq_Ainfty}, the other equations can be verified analogously.\footnote{For proving \eqref{eq:sigalg_eq_Astar} we can use Proposition \ref{prop:generator_product} because $\sigalg[\ensuremath{\mathcal{A}}star]{{\mathcal{S}_*}} = \sigalg[\ensuremath{\mathcal{A}}star]{{\mathcal{S}_*} \cup \set{\ensuremath{\mathcal{A}}star}}$.} Let $\mathcal{S}_\ensuremath{\mathcal{A}} := \set{\emptyset} \cup \set{\set{a} \mid a \in \ensuremath{\mathcal{A}}} \cup \set{\ensuremath{\mathcal{A}}}$, then it is easy to show that we have $\sigalg[\ensuremath{\mathcal{A}}]{\mathcal{S}_\ensuremath{\mathcal{A}}} = \powerset{\ensuremath{\mathcal{A}}}$ and Propositions \ref{prop:generator_product} and \ref{prop:generator_union} yield that \begin{align*} \powerset{\ensuremath{\mathcal{A}}} \otimes \sigalg[\ensuremath{\mathcal{A}}infty]{{\mathcal{S}_\infty}} \oplus \powerset{\mathbf{1}} = \sigalg[\ensuremath{\mathcal{A}} \times \ensuremath{\mathcal{A}}infty + \mathbf{1}]{\mathcal{S}_\ensuremath{\mathcal{A}} \ast {\mathcal{S}_\infty} \oplus \powerset{\mathbf{1}}}\,. \end{align*} We calculate $\varphi\left(\emptyset\right) = \emptyset$, $\varphi\left(\set{\varepsilon}\right) = \mathbf{1}$, $\varphi\left(\cone{\omega}{\varepsilon}\right) = \ensuremath{\mathcal{A}} \times \ensuremath{\mathcal{A}}omega$, $\varphi\left(\cone{\infty}{\varepsilon}\right) = \ensuremath{\mathcal{A}} \times \ensuremath{\mathcal{A}}infty + \mathbf{1}$, and for all $a \in \ensuremath{\mathcal{A}}$ and all $u \in \ensuremath{\mathcal{A}}star$ we have $\varphi\left(\set{au}\right) = \set{(a,u)}$ and also $\varphi\left(\cone{\infty}{au}\right) = \set{a} \times \cone{\infty}{u}$. 
This yields \begin{align*} \varphi({\mathcal{S}_\infty}) &= \set{\emptyset, \emptyset + \mathbf{1}, \ensuremath{\mathcal{A}} \times \ensuremath{\mathcal{A}}infty + \mathbf{1}} \cup \set{\set{a} \times \set{u} + \emptyset, \set{a} \times \cone{\infty}{u}+\emptyset\mid a \in \ensuremath{\mathcal{A}}, u \in \ensuremath{\mathcal{A}}star} \end{align*} and furthermore we have \begin{align*} \mathcal{S}_\ensuremath{\mathcal{A}} \ast {\mathcal{S}_\infty} \oplus \powerset{\mathbf{1}} = \set{\emptyset, \emptyset + \mathbf{1}} &\cup \set{\set{a} \times \set{u} + \emptyset, \set{a} \times \cone{\infty}{u}+\emptyset\mid a \in \ensuremath{\mathcal{A}}, u \in \ensuremath{\mathcal{A}}star}\\ &\cup \set{\set{a} \times \set{u} + \mathbf{1}, \set{a} \times \cone{\infty}{u}+\mathbf{1}\mid a \in \ensuremath{\mathcal{A}}, u \in \ensuremath{\mathcal{A}}star}\\ &\cup \set{\ensuremath{\mathcal{A}}\, \times \set{u} + \emptyset, \ensuremath{\mathcal{A}}\, \times \cone{\infty}{u}+\emptyset\mid u \in \ensuremath{\mathcal{A}}star}\\ &\cup \set{\ensuremath{\mathcal{A}}\, \times \set{u} + \mathbf{1}, \ensuremath{\mathcal{A}}\, \times \cone{\infty}{u}+\mathbf{1}\mid u \in \ensuremath{\mathcal{A}}star}. \end{align*} Due to the fact that $\ensuremath{\mathcal{A}} \times \ensuremath{\mathcal{A}}infty + \mathbf{1} = \ensuremath{\mathcal{A}} \times \cone{\infty}{\varepsilon} + \mathbf{1}$ we have $\varphi({\mathcal{S}_\infty}) \subseteq \mathcal{S}_\ensuremath{\mathcal{A}} \ast {\mathcal{S}_\infty} \oplus \powerset{\mathbf{1}}$ and the monotonicity of the $\sigma$-operator yields \begin{align*} \sigalg[\ensuremath{\mathcal{A}} \times \ensuremath{\mathcal{A}}infty + \mathbf{1}]{\varphi({\mathcal{S}_\infty})} \subseteq \sigalg[\ensuremath{\mathcal{A}} \times \ensuremath{\mathcal{A}}infty + \mathbf{1}]{\mathcal{S}_\ensuremath{\mathcal{A}} \ast {\mathcal{S}_\infty} \oplus \powerset{\mathbf{1}}}\,. 
\end{align*} For the other inclusion we remark that \begin{align*} \set{a} \times \set{u} + \mathbf{1} &= (\set{a} \times \set{u} + \emptyset) \cup (\emptyset + \mathbf{1})\\ \set{a} \times \cone{\infty}{u} + \mathbf{1} &= (\set{a} \times \cone{\infty}{u} + \emptyset) \cup (\emptyset + \mathbf{1}) \end{align*} and together with the countable decomposition $\ensuremath{\mathcal{A}} = \cup_{a \in \ensuremath{\mathcal{A}}} \set{a}$ it is easy to see that \begin{align*} \mathcal{S}_\ensuremath{\mathcal{A}} \ast {\mathcal{S}_\infty} \oplus \powerset{\mathbf{1}} \subseteq \sigalg[\ensuremath{\mathcal{A}} \times \ensuremath{\mathcal{A}}infty + \mathbf{1}]{\varphi({\mathcal{S}_\infty})} \end{align*} and monotonicity and idempotence of the $\sigma$-operator complete the proof. \qed We recall that -- in order to get a lifting of an endofunctor on $\mathbf{Meas}$ -- we also need a distributive law for the functors and the monads we are using to define PTS. In order to define such a law we first provide two technical lemmas. \begin{lem} \label{lem:semirings} Let $\ensuremath{\mathcal{A}}$ be an alphabet and $(X, \mathcal{S}igma_X)$ be a measurable space. \begin{enumerate} \item The sets $\powerset{\ensuremath{\mathcal{A}}} \ast \mathcal{S}igma_X$ and $\powerset{\ensuremath{\mathcal{A}}} \ast \mathcal{S}igma_X \oplus \powerset{\mathbf{1}}$ are covering semirings of sets.\label{itm:semirings:one} \item $\powerset{\ensuremath{\mathcal{A}}} \otimes \mathcal{S}igma_X = \sigalg[\ensuremath{\mathcal{A}} \times X]{\powerset{\ensuremath{\mathcal{A}}} \ast \mathcal{S}igma_X}$. \item $\powerset{\ensuremath{\mathcal{A}}} \otimes \mathcal{S}igma_X \oplus \powerset{\mathbf{1}} = \sigalg[\ensuremath{\mathcal{A}} \times X+\mathbf{1}]{\powerset{\ensuremath{\mathcal{A}}} \ast \mathcal{S}igma_X\oplus \powerset{\mathbf{1}}}$. \end{enumerate} \end{lem} \proof Showing property \eqref{itm:semirings:one} is straightforward and will thus be omitted. 
The rest follows by Propositions \ref{prop:generator_product} and \ref{prop:generator_union}. \qed \begin{lem}[Product Measures] \label{lem:productmeasures} Let $\ensuremath{\mathcal{A}}$ be an alphabet, $a \in \ensuremath{\mathcal{A}}$ and $(X, \mathcal{S}igma_X)$ be a measurable space with a sub-probability measure $P \colon \mathcal{S}igma_X \to [0,1]$. Then the following holds: \begin{enumerate} \item The \emph{product measure} $\delta_a^\ensuremath{\mathcal{A}} \otimes P\colon \powerset{\ensuremath{\mathcal{A}}} \otimes \mathcal{S}igma_X \to \mathbb{R}_+$ of $\delta_a^\ensuremath{\mathcal{A}}$ and $P$ which is the unique extension of the pre-measure satisfying \begin{align} (\delta_a^\ensuremath{\mathcal{A}} \otimes P)(S_\ensuremath{\mathcal{A}} \times S_X) := \delta_a^\ensuremath{\mathcal{A}}(S_\ensuremath{\mathcal{A}}) \cdot P(S_X)\label{eq:product_measure} \end{align} for all $S_\ensuremath{\mathcal{A}} \times S_X \in \powerset{\ensuremath{\mathcal{A}}} \ast \mathcal{S}igma_X$ is a sub-probability measure on $\ensuremath{\mathcal{A}} \times X$. If $P$ is a probability measure on $X$, then also $\delta_a^\ensuremath{\mathcal{A}} \otimes P$ is a probability measure on $\ensuremath{\mathcal{A}} \times X$. \item The measure $\delta_a^\ensuremath{\mathcal{A}} \odot P\colon \powerset{\ensuremath{\mathcal{A}}} \otimes \mathcal{S}igma_X \oplus \powerset{\mathbf{1}} \to \mathbb{R}_+$ which is defined via the equation \begin{align} \quad (\delta_a^\ensuremath{\mathcal{A}} \odot P)(S) := (\delta_a^\ensuremath{\mathcal{A}} \otimes P) \left(S \cap (\ensuremath{\mathcal{A}} \times X)\right)\label{eq:product_coproduct_measure} \end{align} for all $S \in \powerset{\ensuremath{\mathcal{A}}} \otimes \mathcal{S}igma_X \oplus \powerset{\mathbf{1}}$ is a sub-probability measure on $\ensuremath{\mathcal{A}} \times X + \mathbf{1}$. 
If $P$ is a probability measure on $X$, then also $\delta_a^\ensuremath{\mathcal{A}} \odot P$ is a probability measure on $\ensuremath{\mathcal{A}} \times X+\mathbf{1}$. \end{enumerate} \end{lem} \proof Before proving the statement, we check that the two equations yield unique measures. \begin{enumerate} \item Existence and uniqueness of the product measure is a well-known fact from measure theory and follows immediately by Proposition~\ref{prop:extension} because equation \eqref{eq:product_measure} defines a $\sigma$-finite pre-measure on $\powerset{\ensuremath{\mathcal{A}}} \ast \mathcal{S}igma_X$ which by Lemma~\ref{lem:semirings} is a covering semiring of sets and a generator for the product-$\sigma$-algebra. \item We obviously have non-negativity and $(\delta_a^\ensuremath{\mathcal{A}} \odot P)(\emptyset)=0$. Let $(S_n)_{n \in \mathbb{N}}$ be a family of pairwise disjoint sets in $\powerset{\ensuremath{\mathcal{A}}} \otimes \mathcal{S}igma_X \oplus \powerset{\mathbf{1}}$. Then the following holds \begin{align*} &(\delta_a^\ensuremath{\mathcal{A}} \odot P)\left(\bigcup_{n \in \mathbb{N}}S_n\right) = (\delta_a^\ensuremath{\mathcal{A}} \otimes P)\left(\bigcup_{n \in \mathbb{N}}(S_n\cap (\ensuremath{\mathcal{A}} \times X))\right)\\ &\quad =\sum_{n \in \mathbb{N}}(\delta_a^\ensuremath{\mathcal{A}} \otimes P)(S_n\cap (\ensuremath{\mathcal{A}} \times X)) =\sum_{n \in \mathbb{N}}(\delta_a^\ensuremath{\mathcal{A}} \odot P)\left(S_n\right) \end{align*} and hence $\delta_a^\ensuremath{\mathcal{A}} \odot P$ as defined by equation \eqref{eq:product_coproduct_measure} is $\sigma$-additive and thus a measure. 
\end{enumerate} For the proof of the Lemma we observe that \begin{align*} (\delta_a^\ensuremath{\mathcal{A}} \odot P)(\ensuremath{\mathcal{A}} \times X+\mathbf{1}) = (\delta_a^\ensuremath{\mathcal{A}} \otimes P)(\ensuremath{\mathcal{A}} \times X) = \delta_a^\ensuremath{\mathcal{A}}(\ensuremath{\mathcal{A}}) \cdot P(X) = P(X) \end{align*} which immediately yields that both measures are sub-probability measures and if $P$ is a probability measure they are probability measures. \qed With the help of the preceding lemmas, we can now state and prove the distributive laws for the endofunctors $\ensuremath{\mathcal{A}} \times \mathrm{Id}_\mathbf{Meas}$, $\ensuremath{\mathcal{A}} \times \mathrm{Id}_\mathbf{Meas} + \mathbf{1}$ on $\mathbf{Meas}$ and the sub-probability monad and the probability monad. \begin{prop}[Distributive Laws] \label{prop:distributive_law_giry} Let $(T, \eta, \mu)$ be either the sub-probability monad $(\mathbb{S}, \eta, \mu)$ or the probability monad $(\mathbb{P}, \eta, \mu)$ and $\ensuremath{\mathcal{A}}$ be an alphabet with $\sigma$-algebra $\powerset{\ensuremath{\mathcal{A}}}$. \begin{enumerate} \item Let $F = \mathcal{A} \times \mathrm{Id}_\mathbf{Meas}$. For every measurable space $(X, \mathcal{S}igma_X)$ we define \begin{align} \lambda_X&\colon \ensuremath{\mathcal{A}} \times TX \to T(\ensuremath{\mathcal{A}} \times X),~ (a,P) \mapsto \delta_a^\ensuremath{\mathcal{A}} \otimes P\,.\label{eq:distributivelaw_noterm} \end{align} Then $\lambda\colon FT \mathbb{R}ightarrow TF$ is a distributive law. \item Let $F = \mathcal{A} \times \mathrm{Id}_\mathbf{Meas} + \mathbf{1}$. 
For every measurable space $(X, \mathcal{S}igma_X)$ we define \begin{align} &\lambda_X\colon \ensuremath{\mathcal{A}} \times TX + \mathbf{1} \to T(\ensuremath{\mathcal{A}} \times X + \mathbf{1})\nonumber\\ &(a,P) \mapsto \delta_a^\ensuremath{\mathcal{A}} \odot P \label{eq:distributivelaw_term}, \quad \checkmark \mapsto \delta_\checkmark^{\ensuremath{\mathcal{A}} \times X + \mathbf{1}}\, . \end{align} Then $\lambda\colon FT \mathbb{R}ightarrow TF$ is a distributive law. \end{enumerate} \end{prop} \proof In order to show that the given maps are distributive laws we have to check commutativity of the following three diagrams \[\xymatrix{ FTY \ar[r]^{\lambda_Y} \ar[d]_{FTf} & TFY \ar[d]^{TFf} & FX \ar[r]^{F\eta_X} \ar[dr]_{\eta_{FX}} & FTX \ar[d]^{\lambda_X} & FT^2X \ar[r]^{\lambda_{TX}} \ar[d]_{F\mu_X} & TFTX \ar[r]^{T\lambda_X} & T^2FX \ar[d]^{\mu_{FX}}\\ FTX \ar[r]^{\lambda_X} & TFX & & TFX & FTX\ar[rr]_{\lambda_X} & & TFX }\] for all measurable spaces $(X, \mathcal{S}igma_X)$, $(Y, \mathcal{S}igma_Y)$ and all measurable functions $f \colon Y \to X$. By Lemma~\ref{lem:semirings} we know that $\powerset{\ensuremath{\mathcal{A}}} \ast \mathcal{S}igma_X$ and $\powerset{\ensuremath{\mathcal{A}}} \ast \mathcal{S}igma_X \oplus \powerset{\mathbf{1}}$ are covering semirings of sets and that they are generators for the $\sigma$-algebras $\powerset{\ensuremath{\mathcal{A}}} \otimes \mathcal{S}igma_X$ and $\powerset{\ensuremath{\mathcal{A}}} \otimes \mathcal{S}igma_X\oplus \powerset{\mathbf{1}}$. Moreover, we know from Lemma~\ref{lem:productmeasures} that the measures assigned in equations~\eqref{eq:distributivelaw_noterm} and \eqref{eq:distributivelaw_term} are sub-probability measures and thus finite. We can therefore use Corollary \ref{cor:equality_of_measures} to check the equality of the various (sub-)probability measures. 
We will provide the proofs for the second distributive law only, the proofs for the first law are simpler and can in fact be derived from the given proofs. Let $S := S_\mathcal{A} \times S_X + S_\mathbf{1} \in \powerset{\ensuremath{\mathcal{A}}} \ast \mathcal{S}igma_X \oplus \powerset{\mathbf{1}}$. \begin{enumerate} \item Let $f \colon Y \to X$ be a measurable function. For $(a,P) \in \mathcal{A} \times TY$ we calculate \begin{align*} (TFf \circ \lambda_Y)(a,P)(S) &= (\delta_a^{\mathcal{A}}\odot P)\left((Ff)^{-1}(S)\right) =(\delta_a^{\mathcal{A}}\odot P) (S_{\mathcal{A}} \times f^{-1}(S_X) + S_\mathbf{1})\\ &= \delta_a^{\mathcal{A}} (S_{\mathcal{A}}) \cdot P\left(f^{-1}(S_X)\right) = (\delta_a^{\mathcal{A}} \odot (P\circ f^{-1}))(S_{\mathcal{A}} \times S_X + S_\mathbf{1}) \\ &= (\lambda_X \circ FTf)(a,P)(S) \end{align*} and analogously we obtain \begin{align*} &(TFf \circ \lambda_Y) (\checkmark) (S) =\delta_\checkmark^{\mathcal{A}\times Y + \mathbf{1}}\left((Ff)^{-1} (S)\right) \\ &\quad=\delta_\checkmark^{\mathcal{A}\times Y + \mathbf{1}}\left(S_{\mathcal{A}} \times f^{-1}(S_X) + S_\mathbf{1}\right) = \delta_\checkmark^{\mathcal{A}\times X + \mathbf{1}} (S)= (\lambda_X \circ FTf)(\checkmark)(S)\,. \end{align*} \item For $(a,x) \in \ensuremath{\mathcal{A}} \times X$ we calculate \begin{align*} \eta_{FX}(a,x)(S) &= \delta_{(a,x)}^{FX}(S_\mathcal{A} \times S_X + S_\mathbf{1}) = \delta_a^\mathcal{A}(S_\mathcal{A}) \cdot \delta_x^X(S_X)\\ &= (\delta_a^\mathcal{A} \odot \delta_x^X)(S) =\lambda_X(a,\delta_x^X)(S) = \big(\lambda_X \circ F\eta_X\big)(a,x)(S) \end{align*} and also \begin{align*} \eta_{FX} (\checkmark) = \delta_\checkmark^{FX} = \lambda_X(\checkmark) = \lambda_X\left(F\eta_X(\checkmark)\right) = \big(\lambda_X\circ F\eta_X\big)(\checkmark)\,. 
\end{align*} \item For $(a,P) \in FT^2X$ we calculate \begin{align*} \left(\lambda_X \circ F\mu_X\right)(a,P)(S) &= \left(\lambda_X\left(a, \mu_X(P)\right)\right)(S) = \left(\delta_a^\mathcal{A}\odot \mu_X(P)\right)(S) \\ &=\delta_a^\mathcal{A}(S_{\mathcal{A}}) \cdot \mu_X(P)(S_X) = \delta_a^\mathcal{A}(S_{\mathcal{A}}) \cdot \Int{p_{S_X}}[P] \end{align*} and \begin{align*} &\left(\mu_{FX} \circ T\lambda_X\circ \lambda_{TX}\right)\!(a,P)(S) = \mu_{FX}\left(\left(\delta_a^\mathcal{A} \odot P\right) \circ \lambda^{-1}_X\right)(S) \\ &\quad = \Int[TFX]{p_S}[{\left(\left(\delta_a^\mathcal{A}\odot P\right) \circ \lambda^{-1}_X\right)}] = \Int[\lambda^{-1}_X(TFX)]{p_S\circ \lambda_X}[\big(\delta_a^\mathcal{A} \odot P\big)] \\ &\quad = \Int[\set{a} \times TX]{p_S\circ \lambda_X}[\big(\delta_a^\mathcal{A} \odot P\big)] = \Int[P' \in TX]{\big(\delta_a^\mathcal{A} \otimes P')(S)}[P(P')]\\ &\quad = \Int[P' \in TX]{\delta_a^\mathcal{A}(S_{\mathcal{A}})\cdot P'(S_X)}[P(P')] = \delta_a^\mathcal{A}(S_{\mathcal{A}})\cdot \Int{p_{S_X}}[P]\,. \end{align*} Analogously we obtain \begin{align*} \left(\lambda_X \circ F\mu_X \right)(\checkmark) = \lambda_X(\checkmark) = \delta_{\checkmark}^{\mathcal{A}\times X+1} \end{align*} and \begin{align*} &\left(\mu_{FX} \circ T\lambda_X\circ \lambda_{TX}\right)(\checkmark)(S) = \mu_{FX}\left(\delta_\checkmark^{\mathcal{A} \times TX+\mathbf{1}} \circ \lambda^{-1}_X\right)(S)\\ &\quad=\Int[TFX]{p_S}[\left(\delta_\checkmark^{\mathcal{A}\times TX+\mathbf{1}}\circ \lambda^{-1}_X\right) ] = \Int[\lambda^{-1}_X(TFX)]{p_S \circ \lambda_X}[\delta_\checkmark^{\mathcal{A}\times TX+\mathbf{1}}]\\ &\quad = (p_S \circ \lambda_X)(\checkmark)= \delta_\checkmark^{\mathcal{A}\times X+\mathbf{1}}(S)\,. 
\end{align*} \end{enumerate} \qed \noindent With this result at hand we can finally apply Corollary \ref{cor:main_corollary} to the measurable spaces $\emptyset$, $\ensuremath{\mathcal{A}}star$, $\ensuremath{\mathcal{A}}omega$, $\ensuremath{\mathcal{A}}infty$, each of which is of course equipped with the $\sigma$-algebra generated by the covering semirings $\mathcal{S}_0$, ${\mathcal{S}_*}$, ${\mathcal{S}_\omega}$, ${\mathcal{S}_\infty}$ as defined in Proposition~\ref{prop:semirings_of_words}, to obtain the final coalgebra and the induced trace semantics for PTS as presented in the following theorem. \begin{thm}[Final Coalgebra and Trace Semantics for PTS] Let $(T, \eta, \mu)$ be either the sub-probability monad $(\mathbb{S}, \eta, \mu)$ or the probability monad $(\mathbb{P}, \eta, \mu)$ and $F$ be either $\ensuremath{\mathcal{A}} \times \mathrm{Id}_\mathbf{Meas}$ or $\ensuremath{\mathcal{A}} \times \mathrm{Id}_\mathbf{Meas} + \mathbf{1}$. A PTS $(\ensuremath{\mathcal{A}}, X, \alpha)$ is an $\overline{F}$-coalgebra $(X , \alpha)$ in $\mathcal{K}\ell(T)$ and vice versa. In the following table we present the (carriers of) final $\overline{F}$-coalgebras $\left(\Omega, \kappa\right)$ in $\mathcal{K}\ell(T)$ for all suitable choices of $T$ and $F$ (depending on the type of the PTS). 
\begin{align*}\begin{tabular}{c|c|l|c} \hline Type $\diamond$~ & ~Monad $T$ ~ & ~Endofunctor $F$ ~ & Carrier $\Omega$ \\\hline $0$ & $\mathbb{S}$ & ~$\ensuremath{\mathcal{A}} \times \mathrm{Id}_\mathbf{Meas}$ & $(\emptyset, \set{\emptyset})$ \\ $*$ & $\mathbb{S}$ & ~$\ensuremath{\mathcal{A}} \times \mathrm{Id}_\mathbf{Meas} + \mathbf{1}$ & $\left(\ensuremath{\mathcal{A}}star, \sigalg[\ensuremath{\mathcal{A}}star]{{\mathcal{S}_*}}\right) $ \\ $\omega$ & $\mathbb{P}$ & ~$\ensuremath{\mathcal{A}} \times \mathrm{Id}_\mathbf{Meas}$ & $\left(\ensuremath{\mathcal{A}}omega, \sigalg[\ensuremath{\mathcal{A}}omega]{{\mathcal{S}_\omega}}\right) $\\ $\infty$ & $\mathbb{P}$ & ~$\ensuremath{\mathcal{A}} \times \mathrm{Id}_\mathbf{Meas} + \mathbf{1}$ & ~$\left(\ensuremath{\mathcal{A}}infty, \sigalg[\ensuremath{\mathcal{A}}infty]{{\mathcal{S}_\infty}}\right)$\\\hline \end{tabular}\end{align*} where for $\diamond \in \set{*, \omega, \infty}$ we have $\kappa = \eta_{F\Omega}\circ \varphi$ where $\varphi$ is the isomorphism as defined in Proposition~\ref{prop:words_iso} and for $\diamond = \emptyset$ we take $\kappa = \eta_{F\emptyset} \circ \varphi$ with $\varphi$ being the empty function $\varphi \colon \emptyset \to \emptyset$. The unique arrow into the final coalgebra is the map $\mathbf{tr}\colon X \to T\Omega$ given by Definition~\ref{def:trace_premeasure}. \end{thm} \proof For the whole proof we always assume that the combinations of the type $\diamond$ of the PTS, the monad $T$, the endofunctor $F$ and the carrier $(\Omega, \Sigma_\Omega)$ are chosen as presented in the table given in the theorem. Thus e.g.\ $\diamond = *$ automatically yields $T = \mathbb{S}$, $F = \ensuremath{\mathcal{A}} \times \mathrm{Id}_\mathbf{Meas} + \mathbf{1}$, $\Omega=\ensuremath{\mathcal{A}}star$, $\Sigma_\Omega = \sigalg[\ensuremath{\mathcal{A}}star]{{\mathcal{S}_*}}$ and we automatically work in the Kleisli category $\mathcal{K}\ell(\mathbb{S})$ of the sub-probability monad.
The first statement of the theorem is obvious by construction of the transition function $\alpha$. For $\diamond \in \set{*, \omega, \infty}$ we remark that the preconditions of Corollary \ref{cor:main_corollary} are fulfilled and aim at applying this corollary, and especially at evaluating equation \eqref{eq:giry_final_coalgebra_semiring} for the covering semirings ${\mathcal{S}_*}, {\mathcal{S}_\omega}, {\mathcal{S}_\infty}$. Let us carry out these calculations in various steps to obtain all the equations of Definition \ref{def:trace_premeasure}. For all $(b,x') \in \ensuremath{\mathcal{A}} \times X$ we calculate \begin{align*} (\lambda_\Omega \circ F(\mathbf{tr})) (b,x') = \begin{cases} \delta_b^\ensuremath{\mathcal{A}} \otimes \mathbf{tr}(x'), & \diamond = \omega\\ \delta_b^\ensuremath{\mathcal{A}} \odot \mathbf{tr}(x'), & \diamond \in \set{*, \infty}. \end{cases} \end{align*} Now suppose $S$ is chosen as $S=\set{au}$, $S=~\cone{\omega}{au}$ or $S=~\cone{\infty}{au}$ respectively for an arbitrary $a \in \ensuremath{\mathcal{A}}$ and an arbitrary $u \in \ensuremath{\mathcal{A}}star$. Then $\varphi(S) = \set{a} \times S'$ with $S'=\set{u}$, $S'=~\cone{\omega}{u}$ or $S'=~\cone{\infty}{u}$ respectively and hence we obtain \begin{align*} &(p_{\varphi(S)} \circ \lambda_\Omega \circ F(\mathbf{tr})) (b,x') = \delta_b^\ensuremath{\mathcal{A}} \otimes \mathbf{tr}(x')(\set{a} \times S') \\ &\quad = \delta_b^\ensuremath{\mathcal{A}}(\set{a}) \cdot \mathbf{tr}(x')(S') = \chi_{\set{a} \times X}(b,x') \cdot \mathbf{tr}(x')(S')\,. \end{align*} Using this, we evaluate equation \eqref{eq:giry_final_coalgebra_semiring} of Corollary \ref{cor:main_corollary} for these sets and get \begin{align*} \mathbf{tr}(x)(S) = \Int[(b,x') \in \set{a} \times X]{\mathbf{tr}(x')(S')}[\alpha(x)] = \Int[x' \in X]{\mathbf{tr}(x')(S')}[\P{a}{x}{x'}] \end{align*} which yields equations \eqref{eq:trace_main_equation} and \eqref{eq:trace_main_equation2} of Definition~\ref{def:trace_premeasure}. 
For $\diamond \in \set{*,\infty}$ we calculate \begin{align*} (\lambda_\Omega \circ F(\mathbf{tr})) (\checkmark) = \delta_\checkmark^{\ensuremath{\mathcal{A}} \times \Omega + \mathbf{1}} \end{align*} and conclude that for $z \in \ensuremath{\mathcal{A}} \times X + \mathbf{1}$ we have $(p_{\varphi(\set{\varepsilon})} \circ \lambda_\Omega \circ F(\mathbf{tr})) (z) = 1$ if and only if $z = \checkmark$. Hence evaluating equation \eqref{eq:giry_final_coalgebra_semiring} in this case yields \begin{align*} \mathbf{tr}(x)(\set{\varepsilon}) = \Int{p_{\varphi(\set{\varepsilon})} \circ \lambda_\Omega \circ F(\mathbf{tr})}[\alpha(x)] = \Int{\chi_\mathbf{1}}[\alpha(x)] = \alpha(x)(\mathbf{1}) \end{align*} which is equation \eqref{eq:trace_emptyword}. For $\diamond \in \set{\omega, \infty}$ we have $\mathbf{tr}(x)(\ensuremath{\mathcal{A}}^\diamond) = 1$ due to the fact that $\mathbf{tr}(x)$ must be a probability measure. This is already equation \eqref{eq:trace_wholespace} because $\ensuremath{\mathcal{A}}^\diamond=\varepsilon \ensuremath{\mathcal{A}}^\diamond$. Moreover $\varphi(\cone{\diamond}{\varepsilon}) = \varphi(\Omega) = F\Omega$ and since also $\lambda_\Omega \circ F(\mathbf{tr})$ must be a probability measure evaluating \eqref{eq:giry_final_coalgebra_semiring} yields the same: \begin{align*} \mathbf{tr}(x)(\cone{\diamond}{\varepsilon}) &= \Int{p_{\varphi(\cone{\diamond}{\varepsilon})} \circ \lambda_\Omega \circ F(\mathbf{tr})}[\alpha(x)] =\Int{1}[\alpha(x)] = \alpha(x)(FX) = 1\,. \end{align*} Finally, for $\diamond=0$ we remark that the $\mathcal{K}\ell(\mathbb{S})$-object $(\emptyset, \set{\emptyset})$ is the unique final object of $\mathcal{K}\ell(\mathbb{S})$: Given any $\mathcal{K}\ell(\mathbb{S})$-object $(X, \Sigma_X)$, the unique map into the final object is given as $f \colon X \to \mathbb{S}(\emptyset) = \set{(p \colon \set{\emptyset} \to [0,1], p(\emptyset) = 0)}$ mapping any $x \in X$ to the unique element of that set.
Moreover, $(\emptyset, \set{\emptyset})$ together with $\kappa = \eta_{F\emptyset} \circ \varphi$, where the map $\varphi \colon \emptyset \to \ensuremath{\mathcal{A}} \times \emptyset$ is the obvious and unique isomorphism $(\emptyset, \powerset{\emptyset}) \cong (\ensuremath{\mathcal{A}} \times \emptyset, \powerset{\ensuremath{\mathcal{A}}} \otimes \powerset{\emptyset})$, is a $\overline{F}$-coalgebra and thus final. In all cases we have obtained exactly the equations from Definition~\ref{def:trace_premeasure} which by Proposition \ref{prop:trace_premeasure} yield a unique function $\mathbf{tr}\colon X \to T\ensuremath{\mathcal{A}}^\diamond$. From Proposition \ref{prop:TraceIsKleisli} we know that this function is indeed a Kleisli arrow.\qed \section{Examples} \label{sec:advexamples} In this section we will define and examine two truly continuous probabilistic systems and calculate their trace measures or parts thereof. However, in order to deal with these systems, we first need to provide some additional measure theoretic results and tools. At first, we will explain the \emph{counting measure} on countable sets and also the \emph{Lebesgue measure} as this is ``the'' standard measure on the reals. Afterwards we will take a quick look into the theory of measures with \emph{densities}. With these tools at hand we can finally present the examples. All of the presented results should be contained in any standard textbook on measure and integration theory. We use \cite{Els07} as our primary source for this short summary. \begin{defi}[Counting Measure] Let $X$ be a countable set. The \emph{counting measure} on $(X, \powerset{X})$ is the cardinality map \begin{align} \#\colon \powerset{X} \to \overline{\mathbb{R}}_+, \quad A \mapsto |A| \end{align} assigning to each finite subset of $X$ its number of elements and $\infty$ to each infinite subset of $X$. 
It is uniquely defined as the extension of the $\sigma$-finite pre-measure on the set of all singletons (and $\emptyset$) which is $1$ on every singleton and $0$ on $\emptyset$. \end{defi} \subsection{Completion and the Lebesgue Measure} The (one-dimensional) \emph{Lebesgue-Borel measure} is the unique measure $\lambda'$ on the reals equipped with the Borel $\sigma$-algebra $\mathcal{B}(\mathbb{R})$ satisfying $\lambda'\left((a,b]\right) = b-a$ for every $a, b \in \mathbb{R}$, $a \leq b$. In order to obtain the \emph{Lebesgue measure}, we will refine both the measure and the set of measurable sets by \emph{completion}. We call a measure space $(X, \Sigma, \mu)$ \emph{complete} if every subset of a $\mu$-null-set (i.e. a measurable set $S \in \Sigma$ such that $\mu(S) = 0$) is measurable (and necessarily also a $\mu$-null-set). For any measure space $(X, \Sigma, \mu)$ there is always a smallest complete measure space $(X, \tilde{\Sigma}, \tilde{\mu})$ such that $\Sigma \subseteq \tilde{\Sigma}$ and $\tilde{\mu}|_\Sigma = \mu$ called the \emph{completion} (\cite[II.~§6]{Els07}). The completion of the Lebesgue-Borel measure yields the \emph{Lebesgue $\sigma$-algebra} $\mathcal{L}$ and the \emph{Lebesgue measure}\footnote{This is the second meaning of the symbol $\lambda$. Until here, $\lambda$ was used as symbol for a distributive law.} $\lambda \colon \mathcal{L} \to \overline{\mathbb{R}}$. For the Lebesgue measure we will use the following notation for integrals: \begin{align*} \Int[a][b]{f} := \Int[{[a,b]}]{f}[\lambda]\,. \end{align*} \subsection{Densities} When dealing with measures on arbitrary measurable spaces -- especially in the context of probability measures -- it is sometimes useful to describe them using so-called \emph{densities}. We will give a short introduction into the theory of densities here which is sufficient for understanding the upcoming examples.
Given a measurable space $(X, \Sigma_X)$ and measures $\mu, \nu \colon \Sigma_X \to \overline{\mathbb{R}}_+$ we call a Borel-measurable function $f \colon X \to \overline{\mathbb{R}}$ satisfying \begin{align} \nu (S) = \Int[S]{f}[\mu]\label{eq:density} \end{align} for all measurable sets $S \in \Sigma_X$ a \emph{$\mu$-density of $\nu$}. In that case $\mu(S) = 0$ implies $\nu(S)=0$ for all measurable sets $S \in \Sigma_X$ and we say that $\nu$ is \emph{absolutely continuous} with respect to $\mu$ and write $\nu \ll \mu$. Densities are neither unique nor do they always exist. However, if $\nu$ has two $\mu$-densities $f,g$ then $f = g$ holds $\mu$-almost everywhere, i.e. there is a $\mu$ null set $N \in \Sigma_X$ such that for all $x \in X\setminus N$ we have $f(x) = g(x)$. Moreover, any such $\mu$-density uniquely defines the measure $\nu$. If $\mu = \lambda$, i.e. $\mu$ is the Lebesgue-measure, and \eqref{eq:density} holds for a measure $\nu$ and a function $f$, then $f$ is called \emph{Lebesgue density} of $\nu$. For our examples we will make use of the following Proposition which can be found e.g. in \cite[IV.2.12 Satz]{Els07}. \begin{prop}[Integration and Measures with Densities] Let $(X, \Sigma_X)$ be a measurable space and let $\mu, \nu \colon \Sigma_X \to \mathbb{R}_+$ be measures such that $\nu$ has a $\mu$-density $f$. If $g \colon X \to \mathbb{R}_+$ is $\nu$-integrable, then $\int\!g\,\mathrm{d}\nu = \int\!gf\,\mathrm{d}\mu.$\qed \end{prop} \subsection{Examples} With all the previous results at hand, we can now present our two continuous examples using densities to describe the transition functions. \begin{exa} We will first give an informal description of this example as a kind of one-player-game which is played in the closed real interval $[0,1]$.
The player, who is in any point $z \in [0,1]$, can jump up and will afterwards touch down on a new position $x \in [0,1]$ which is determined probabilistically. After a jump, the player announces whether he is left ``$L$'' or right ``$R$'' of his previous position. The total probability of jumping from $z$ to the left is $z$ and the probability of jumping to the right is $1-z$. In both cases, we have a continuous uniform probability distribution. As we are within the set of reals, the probability of hitting a specific point $x_0 \in [0,1]$ is always zero. Let us now continue with the precise definition of our example. Let $\mathcal{A} := \set{L,R}$. We consider the PTS $(\set{L,R}, [0,1], \alpha)$ where $[0,1]$ is equipped with the Lebesgue $\sigma$-algebra of the reals, restricted to that interval denoted $\mathcal{L}([0,1])$. The transition probability function $\alpha \colon [0,1] \to \mathbb{P}(\set{L,R} \times [0,1])$ is given as \begin{align*} \alpha(z)(S) = \Int[S]{f_z}[(\# \otimes \lambda)] \end{align*} for every $z \in [0,1]$ and all sets $S \in \powerset{\set{L,R}} \otimes \mathcal{L}([0,1])$ with the $(\# \otimes \lambda)$-densities \begin{align*} f_z \colon \set{L,R} \times [0,1] \to \mathbb{R}^+, \quad (a,x) \mapsto \chi_{\set{L} \times [0,z]}(a,x) + \chi_{\set{R} \times [z,1]}(a,x)\,. \end{align*} We observe that $S \mapsto \P{L}{z}{S}, S \mapsto \P{R}{z}{S} \colon \mathcal{L}([0,1]) \to \mathbb{R}^+$ thus have Lebesgue-densities \begin{align*} \P{L}{z}{S} = \Int[S]{\chi_{[0,z]}}[\lambda] = \Int[S]{\chi_{[0,z]}(x)}, \quad \P{R}{z}{S} = \Int[S]{\chi_{[z,1]}}[\lambda] = \Int[S]{\chi_{[z,1]}(x)}\,. \end{align*} with the following graphs (in the real plane) \begin{center} \begingroup \makeatletter \providecommand\color[2][]{ \GenericError{(gnuplot) \space\space\space\@spaces}{ Package color not loaded in conjunction with terminal option `colourtext' }{See the gnuplot documentation for explanation.
}{Either use 'blacktext' in gnuplot or load the package color.sty in LaTeX.}%
  \renewcommand\color[2][]{}%
}%
\providecommand\includegraphics[2][]{%
  \GenericError{(gnuplot) \space\space\space\@spaces}{%
    Package graphicx or graphics not loaded%
  }{See the gnuplot documentation for explanation.%
  }{The gnuplot epslatex terminal needs graphicx.sty or graphics.sty.}%
  \renewcommand\includegraphics[2][]{}%
}%
\providecommand\rotatebox[2]{#2}%
\@ifundefined{ifGPcolor}{%
  \newif\ifGPcolor
  \GPcolortrue
}{}%
\@ifundefined{ifGPblacktext}{%
  \newif\ifGPblacktext
  \GPblacktexttrue
}{}%
\let\gplgaddtomacro\g@addto@macro
\gdef\gplbacktext{}%
\gdef\gplfronttext{}%
\makeatother
\ifGPblacktext
  % all text is typeset in black
  \def\colorrgb#1{}%
  \def\colorgray#1{}%
\else
  \ifGPcolor
    \def\colorrgb#1{\color[rgb]{#1}}%
    \def\colorgray#1{\color[gray]{#1}}%
    \expandafter\def\csname LTw\endcsname{\color{white}}%
    \expandafter\def\csname LTb\endcsname{\color{black}}%
    \expandafter\def\csname LTa\endcsname{\color{black}}%
    \expandafter\def\csname LT0\endcsname{\color[rgb]{1,0,0}}%
    \expandafter\def\csname LT1\endcsname{\color[rgb]{0,1,0}}%
    \expandafter\def\csname LT2\endcsname{\color[rgb]{0,0,1}}%
    \expandafter\def\csname LT3\endcsname{\color[rgb]{1,0,1}}%
    \expandafter\def\csname LT4\endcsname{\color[rgb]{0,1,1}}%
    \expandafter\def\csname LT5\endcsname{\color[rgb]{1,1,0}}%
    \expandafter\def\csname LT6\endcsname{\color[rgb]{0,0,0}}%
    \expandafter\def\csname LT7\endcsname{\color[rgb]{1,0.3,0}}%
    \expandafter\def\csname LT8\endcsname{\color[rgb]{0.5,0.5,0.5}}%
  \else
    % color disabled: map every line type to black
    \def\colorrgb#1{\color{black}}%
    \def\colorgray#1{\color[gray]{#1}}%
    \expandafter\def\csname LTw\endcsname{\color{white}}%
    \expandafter\def\csname LTb\endcsname{\color{black}}%
    \expandafter\def\csname LTa\endcsname{\color{black}}%
    \expandafter\def\csname LT0\endcsname{\color{black}}%
    \expandafter\def\csname LT1\endcsname{\color{black}}%
    \expandafter\def\csname LT2\endcsname{\color{black}}%
    \expandafter\def\csname LT3\endcsname{\color{black}}%
    \expandafter\def\csname LT4\endcsname{\color{black}}%
    \expandafter\def\csname LT5\endcsname{\color{black}}%
    \expandafter\def\csname LT6\endcsname{\color{black}}%
    \expandafter\def\csname LT7\endcsname{\color{black}}%
    \expandafter\def\csname LT8\endcsname{\color{black}}%
  \fi
\fi
\setlength{\unitlength}{0.0500bp}%
\begin{picture}(5668.00,1700.00)%
\gplgaddtomacro\gplbacktext{%
  \csname LTb\endcsname%
  \put(330,457){\makebox(0,0)[r]{\strut{}}}%
  \put(330,1098){\makebox(0,0)[r]{\strut{}$1$}}%
  \put(462,174){\makebox(0,0){\strut{}$0$}}%
  \put(1803,174){\makebox(0,0){\strut{}$z$}}%
  \put(4644,174){\makebox(0,0){\strut{}$1$}}%
  \csname LTb\endcsname%
  \put(880,1354){\makebox(0,0)[l]{\strut{}$\chi_{[0,z]}$}}%
  \put(880,713){\makebox(0,0)[l]{\strut{}$\chi_{[z,1]}$}}%
  \put(2887,1354){\makebox(0,0)[l]{\strut{}$\chi_{[z,1]}$}}%
  \put(2887,713){\makebox(0,0)[l]{\strut{}$\chi_{[0,z]}$}}%
}%
\gplgaddtomacro\gplfronttext{%
}%
\gplbacktext
\put(0,0){\includegraphics{img_square}}%
\gplfronttext
\end{picture}%
\endgroup
\end{center}
Evaluating these measures on $[0,1]$ yields
\begin{align*}
\P{L}{z}{[0,1]} = \int_0^z\!1\,\mathrm{d}x = z, \quad \P{R}{z}{[0,1]} = \int_z^1\! 1\,\mathrm{d}x = 1-z\,.
\end{align*}
With these preparations at hand we calculate the trace measure on some cones.
\begin{align*} \mathbf{tr}(z)(\cone{\omega}{\varepsilon}) &= 1\\ \mathbf{tr}(z)(\cone{\omega}{L}) &= \Int[{[0,1]}]{1}[\P{L}{z}{z'}] = \P{L}{z}{[0,1]} = z\\ \mathbf{tr}(z)(\cone{\omega}{R}) &= \Int[{[0,1]}]{1}[\P{R}{z}{z'}] = \P{R}{z}{[0,1]} = 1-z\\ \mathbf{tr}(z)(\cone{\omega}{LL}) &= \Int[{[0,1]}]{x}[\P{L}{z}{x}] = \Int[0][1]{x \cdot \chi_{[0,z]}(x)} = \Int[0][z]{x} = \left[\frac{1}{2} x^2\right]_0^z = \frac{1}{2}z^2\\ \mathbf{tr}(z)(\cone{\omega}{LR}) &= \Int[{[0,1]}]{1-x}[\P{L}{z}{x}] = \Int[0][z]{(1-x)} = \left[x-\frac{1}{2} x^2\right]_0^z = z- \frac{1}{2}z^2\\ \mathbf{tr}(z)(\cone{\omega}{RL}) &= \Int[{[0,1]}]{x}[\P{R}{z}{x}] = \Int[0][1]{x \cdot \chi_{[z,1]}(x)} = \Int[z][1]{x} = \left[\frac{1}{2} x^2\right]_z^1 = \frac{1}{2} - \frac{1}{2} z^2\\ \mathbf{tr}(z)(\cone{\omega}{RR}) &= \Int[{[0,1]}]{1-x}[\P{R}{z}{x}] = \Int[z][1]{(1-x)} = \left[x-\frac{1}{2} x^2\right]_z^1 = \frac{1}{2} - z + \frac{1}{2} z^2 \end{align*} Thus for any word $u \in \ensuremath{\mathcal{A}}star$ of length $n$ there is a polynomial $p_u \in \mathbb{R}[Z]$ in one variable $Z$ with degree $\mathop{deg}(p_u) = n$. Evaluating this polynomial for an arbitrary $z \in [0,1]$ yields the value of the trace measure $\mathbf{tr}(z)$ on the cone $\cone{\omega}{u}$ generated by $u$, i.e. $\mathbf{tr}(z)(\cone{\omega}{u}) = p_u(z)$. \end{exa} While the previous example provides some understanding on how to describe a continuous PTS and also on how to calculate its trace measure, we are interested in trace equivalence. The second example will thus be a system which is trace equivalent to a finite state system. \begin{exa} As before, we will give an informal description as a kind of one-player-game first. There is exactly one player, who starts in any point $z \in \mathbb{R}$, jumps up and touches down somewhere on the real line announcing whether he is left ``$L$'' or right ``$R$'' of his previous position or has landed back on his previous position ``$N$''. 
The probability of landing is initially given via a normal distribution centered on the original position $z$. Thus, the probability of landing in close proximity of $z$, i.e. in the interval $[z-\varepsilon, z + \varepsilon]$, is high for sufficiently big $\varepsilon \in \mathbb{R}_+\setminus\set{0}$ whereas the probability of landing far away, i.e. outside of that interval, is negligible. The player has a finite amount of energy and each jump drains that energy so that after finitely many jumps he will not be able to jump again resulting in an infinite series of ``$N$'' messages. Before that the energy level determines the likelihood of his jump width, i.e. the standard deviation of the normal distributions. Now let us give a formal description of such a system. Recall that the density function of the normal distribution with expected value\footnote{This is the third meaning of $\mu$. Until here, $\mu$ was used as symbol for a measure and also as a symbol for the multiplication natural transformation of a monad.} $\mu \in \mathbb{R}$ and standard deviation $\sigma \in \mathbb{R}^+ \setminus\set{0}$ is the Gaussian function \begin{align*} \varphi_{\mu, \sigma}\colon \mathbb{R} \to \mathbb{R}^+, \quad \varphi_{\mu, \sigma}(x) = \frac{1}{\sigma \sqrt{2\pi}} \cdot \exp\left(-\frac{1}{2}\left(\frac{x-\mu}{\sigma}\right)^2\right) \end{align*} with the following graph (in the real plane), often called the ``bell curve''. \begin{center} \begingroup \makeatletter \providecommand\color[2][]{ \GenericError{(gnuplot) \space\space\space\@spaces}{ Package color not loaded in conjunction with terminal option `colourtext' }{See the gnuplot documentation for explanation. }{Either use 'blacktext' in gnuplot or load the package color.sty in LaTeX.} \renewcommand\color[2][]{} } \providecommand\includegraphics[2][]{ \GenericError{(gnuplot) \space\space\space\@spaces}{ Package graphicx or graphics not loaded }{See the gnuplot documentation for explanation.
}{The gnuplot epslatex terminal needs graphicx.sty or graphics.sty.}%
  \renewcommand\includegraphics[2][]{}%
}%
\providecommand\rotatebox[2]{#2}%
\@ifundefined{ifGPcolor}{%
  \newif\ifGPcolor
  \GPcolortrue
}{}%
\@ifundefined{ifGPblacktext}{%
  \newif\ifGPblacktext
  \GPblacktexttrue
}{}%
\let\gplgaddtomacro\g@addto@macro
\gdef\gplbacktext{}%
\gdef\gplfronttext{}%
\makeatother
\ifGPblacktext
  % all text is typeset in black
  \def\colorrgb#1{}%
  \def\colorgray#1{}%
\else
  \ifGPcolor
    \def\colorrgb#1{\color[rgb]{#1}}%
    \def\colorgray#1{\color[gray]{#1}}%
    \expandafter\def\csname LTw\endcsname{\color{white}}%
    \expandafter\def\csname LTb\endcsname{\color{black}}%
    \expandafter\def\csname LTa\endcsname{\color{black}}%
    \expandafter\def\csname LT0\endcsname{\color[rgb]{1,0,0}}%
    \expandafter\def\csname LT1\endcsname{\color[rgb]{0,1,0}}%
    \expandafter\def\csname LT2\endcsname{\color[rgb]{0,0,1}}%
    \expandafter\def\csname LT3\endcsname{\color[rgb]{1,0,1}}%
    \expandafter\def\csname LT4\endcsname{\color[rgb]{0,1,1}}%
    \expandafter\def\csname LT5\endcsname{\color[rgb]{1,1,0}}%
    \expandafter\def\csname LT6\endcsname{\color[rgb]{0,0,0}}%
    \expandafter\def\csname LT7\endcsname{\color[rgb]{1,0.3,0}}%
    \expandafter\def\csname LT8\endcsname{\color[rgb]{0.5,0.5,0.5}}%
  \else
    % color disabled: map every line type to black
    \def\colorrgb#1{\color{black}}%
    \def\colorgray#1{\color[gray]{#1}}%
    \expandafter\def\csname LTw\endcsname{\color{white}}%
    \expandafter\def\csname LTb\endcsname{\color{black}}%
    \expandafter\def\csname LTa\endcsname{\color{black}}%
    \expandafter\def\csname LT0\endcsname{\color{black}}%
    \expandafter\def\csname LT1\endcsname{\color{black}}%
    \expandafter\def\csname LT2\endcsname{\color{black}}%
    \expandafter\def\csname LT3\endcsname{\color{black}}%
    \expandafter\def\csname LT4\endcsname{\color{black}}%
    \expandafter\def\csname LT5\endcsname{\color{black}}%
    \expandafter\def\csname LT6\endcsname{\color{black}}%
    \expandafter\def\csname LT7\endcsname{\color{black}}%
    \expandafter\def\csname LT8\endcsname{\color{black}}%
  \fi
\fi
\setlength{\unitlength}{0.0500bp}%
\begin{picture}(5668.00,1700.00)%
\gplgaddtomacro\gplbacktext{%
  \csname LTb\endcsname%
  \put(2801,223){\makebox(0,0){\strut{}$z$}}%
  \csname LTb\endcsname%
  \put(3233,1234){\makebox(0,0)[l]{\strut{}$\varphi$}}%
}%
\gplgaddtomacro\gplfronttext{%
}%
\gplbacktext \put(0,0){\includegraphics{img_gauss}} \gplfronttext \end{picture} \endgroup \end{center} Let now the finite ``energy level'' or ``time horizon'' (which is the maximal number of jumps) $T \in \mathbb{N}$, $T \geq 2$ be given. We consider the PTS with alphabet $\mathcal{A} := \set{L,N,R}$, state space $(\mathbb{N}_0 \times \mathbb{R}, \powerset{\mathbb{N}_0} \otimes \mathcal{L})$ and transition probability function $\alpha\colon \mathbb{N}_0 \times \mathbb{R} \to \Prob{\mathcal{A} \times \mathbb{N}_0 \times \mathbb{R}}$ which we define in two steps. For all $(t,z) \in \mathbb{N}_0 \times \mathbb{R}$ with $t < T$ and all measurable sets $S \in \powerset{\mathcal{A}} \otimes \powerset{\mathbb{N}_0} \otimes \mathcal{L}$ we set \begin{align*} \alpha(t,z)(S) := \Int[S]{f_{(t,z)}}[(\# \otimes \# \otimes \lambda)] \end{align*} where the $(\#\otimes\#\otimes\lambda)$-density $f_{(t,z)}$ is \begin{align*} f_{(t,z)}\colon \mathcal{A} \times \mathbb{N}_0 \times \mathbb{R} \to \mathbb{R}^+, (a, t', x) \mapsto \begin{cases} \chi_{(-\infty,z]}(x) \cdot \varphi_{z, 1/(t+1)}(x), & a = L \wedge t' = t+1\\ \chi_{[z, +\infty)}(x) \cdot \varphi_{z, 1/(t+1)}(x), & a = R \wedge t' = t+1\\ 0, & \text{else.} \end{cases} \end{align*} Thus in the first two cases the density is the left (or right) half of the Gaussian density function with expected value $\mu = z$ and standard deviation $\sigma = 1/(t+1)$ and the constant zero function in all other cases. For the remaining $(t,z) \in \mathbb{N}_0 \times \mathbb{R}$ with $t \geq T$ we define the transition probability function to be \begin{align*} \alpha(t,z):= \delta_{(N, t+1, z)}^{\mathcal{A} \times \mathbb{N}_0 \times \mathbb{R}}\,. 
\end{align*} We observe that for $(t,z) \in \mathbb{N}_0 \times \mathbb{R}$ with $t < T$ we have $\P{N}{(t,z)}{\mathbb{N}_0 \times \mathbb{R}} = 0$ and \begin{align*} \P{L}{(t,z)}{\mathbb{N}_0 \times \mathbb{R}} = \Int[-\infty][z]{\varphi_{z,1/(t+1)}(x)} = \frac{1}{2} = \Int[z][\infty]{\varphi_{z,1/(t+1)}(x)} = \P{R}{(t,z)}{\mathbb{N}_0\times \mathbb{R}}. \end{align*} For $t \geq T$ we have $\P{N}{(t,z)}{\mathbb{N}_0 \times \mathbb{R}} = 1$ and $\P{L}{(t,z)}{\mathbb{N}_0 \times \mathbb{R}} = \P{R}{(t,z)}{\mathbb{N}_0 \times \mathbb{R}} = 0$. When we combine these results we obtain the trace measure. For $t <T$ we get \begin{align*} \mathbf{tr}(t,z) = \sum\limits_{u \in \set{L,R}^{T-t}} \left(\frac{1}{2}\right)^{T-t} \!\cdot \delta_{uN^\omega}^{\mathcal{A}^\omega} \end{align*} and for $t \geq T$ the trace measure is $\mathbf{tr}(t,z) = \delta_{N^\omega}^{\mathcal{A}^\omega}$. Obviously the trace measure does not depend on $z$, i.e. $\mathbf{tr}(t,z_1) = \mathbf{tr}(t, z_2)$ for all $t \in \mathbb{N}_0$ and all $z_1, z_2 \in \mathbb{R}$. Moreover, there is a simple finite state system which is trace equivalent to this system.
The finite system has the same alphabet $\ensuremath{\mathcal{A}}$, its state space is $(\set{0,\dots,T},\powerset{\set{0, \dots, T}})$, and the transition function $\alpha\colon \set{0,\dots,T} \to \Prob{\ensuremath{\mathcal{A}}\, \times \set{0,\dots, T}}$ is given as follows \begin{center}\begin{tikzpicture}[node distance=1.8 and 2.5, on grid, shorten >=1pt, >=stealth', semithick] \begin{scope}[state, inner sep=2pt, minimum size=32pt] \draw node [draw] (q0) {$0$}; \draw node [draw, right=of q0] (q1) {$1$}; \draw node [draw, right=of q1] (q2) {$2$}; \draw node [draw, right=of q2] (q3) {$T-1$}; \draw node [draw, right=of q3] (q4) {$T$}; \end{scope} \begin{scope}[->] \draw (q0) edge[bend left] node[above] {$L, 1/2$} (q1); \draw (q0) edge[bend right] node[below] {$R, 1/2$} (q1); \draw (q1) edge[bend left] node[above] {$L, 1/2$} (q2); \draw (q1) edge[bend right] node[below] {$R, 1/2$} (q2); \draw (q3) edge[bend left] node[above] {$L, 1/2$} (q4); \draw (q3) edge[bend right] node[below] {$R, 1/2$} (q4); \draw (q4) edge[loop right] node[right] {$N, 1$} (q4); \end{scope} \draw (q2) edge[dashed] node {} (q3); \end{tikzpicture}\end{center} i.e. for $t < T$ we define \begin{align*} \alpha(t) = \frac{1}{2} \cdot \left(\delta_{(L, t+1)}^{\ensuremath{\mathcal{A}} \times \set{0,\dots,T}} + \delta_{(R, t+1)}^{\ensuremath{\mathcal{A}} \times \set{0,\dots,T}}\right) \end{align*} and for $t = T$ we define $\alpha(t) = \delta_{(N,T)}^{\ensuremath{\mathcal{A}} \times \set{0,\dots,T}}$. \end{exa} \section{Conclusion, Related and Future Work} We have shown how to obtain coalgebraic trace semantics for generative probabilistic transition systems in a general measure-theoretic setting, thereby allowing uncountable state spaces and infinite trace semantics. Especially we have presented final coalgebras for four different types of probabilistic systems.
There is a huge body of work on Markov processes and probabilistic transition systems, but only part of it deals with behavioral equivalences, as in our setting. Even when the focus is on behavioral equivalences, so far usually bisimilarity and related equivalences have been studied (see for instance \cite{larsenskou89}), neglecting the very natural notion of trace equivalence. Furthermore many papers restrict to countable state spaces and discrete probability theory. Our work is clearly inspired by \cite{hasuo}, which presents the idea to obtain trace equivalence by considering coalgebras in suitable Kleisli categories, generalizing their instantiation of generative probabilistic systems to a general measure-theoretic setting and considering new types of systems. Different from the route we took in this paper, another option might have been to extend the general theorem (Theorem~3.3) of \cite{hasuo}. The theorem gives sufficient conditions under which a final coalgebra in a Kleisli category coincides with an initial algebra in the underlying category $\mathbf{Set}$. This theorem is given for Kleisli categories over $\mathbf{Set}$ and requires that the Kleisli category is $\mathbf{Cppo}$-enriched, i.e., each homset carries a complete partial order with bottom and some additional conditions hold. This theorem is non-trivial to generalize. First, it would be necessary to extend it to $\mathbf{Meas}$ and second -- and even more importantly -- the requirement of the Kleisli category being $\mathbf{Cppo}$-enriched is quite restrictive. For the case of the sub-probability monad a bottom element exists (the arrow which maps everything to the constant $0$-measure), but this is not the case for the probability monad, which is the more challenging part, giving rise to infinite words.
Hence we would require a different approach, which can also be seen by the fact that in the case of the probability monad the final coalgebra is \emph{not} the initial algebra in $\mathbf{Meas}$. The study of probabilistic systems using coalgebra is not a new approach. An extensive survey on the coalgebraic treatment of these systems can be found in \cite{Sokolova20115095} including an overview of various different types of transition systems containing probabilistic effects alongside user-input, non-determinism and termination, extensions that we did not consider in this paper (apart from termination). A thorough consideration of coalgebras and especially theorems guaranteeing the existence of final coalgebras for certain functors on $\mathbf{Meas}$ is given in \cite{viglizzofinal} but since all these are coalgebras in $\mathbf{Meas}$ and not in the Kleisli category over a suitable monad, the obtained behavioral equivalence is probabilistic Larsen-Skou \cite{larsenskou89} bisimilarity instead of trace equivalence and the results do not directly apply to our setting. Also, in \cite{doberkat2007stochastic} and \cite{Pan09} a very thorough and general overview of properties of labelled Markov processes including the treatment of and the evaluation of temporal logics on probabilistic systems is given. However, the authors do not explicitly cover a coalgebraic notion of trace semantics. Infinite traces in a general coalgebraic setting have already been studied in \cite{Cirstea:2010:GIT:1841982.1842047}. However, this generic theory, once applied to probabilistic systems, is restricted to coalgebras with countable carrier while our setting, which is undoubtedly specific and covers only certain functors and branching types, allows arbitrary carriers for coalgebras of probabilistic systems. 
As future work we plan to apply the minimization algorithm introduced in \cite{abhkms:coalgebra-min-det} and adapt it to this general setting, by working out the notion of canonical representatives for probabilistic transition system. We are especially interested in comparing this to the canonical representatives for weak and strong bisimilarity presented recently in \cite{eisentrautetal2013}. Furthermore we plan to define and study a notion of probabilistic trace distance, similar to the distance measure (for bisimilarity) considered in \cite{bw:behavioural-distances,bw:behavioural-pseudometric}. We are also interested in algorithms for calculating this distance, perhaps similar to what has been proposed in \cite{chen} for probabilistic bisimilarity or the more recent on-the-fly algorithm presented in \cite{baccietal2013}. \end{document}
\begin{document} \title{Stable Synchronous Propagation of Signals\\ by Feedforward Networks} \begin{abstract} \noindent We analyse the dynamics of networks in which a central pattern generator (CPG) transmits signals along one or more feedforward chains in a synchronous or phase-synchronous manner. Such propagating signals are common in biology, especially in locomotion and peristalsis, and are of interest for continuum robots. We construct such networks as feedforward lifts of the CPG. If the CPG dynamics is periodic, so is the lifted dynamics. Synchrony with the CPG manifests as a standing wave, and a regular phase pattern creates a travelling wave. We discuss Liapunov, asymptotic, and Floquet stability of the lifted periodic orbit and introduce transverse versions of these conditions that imply stability for signals propagating along arbitrarily long chains. We compare these notions to a simpler condition, transverse stability of the synchrony subspace, which is equivalent to Floquet stability when nodes are $1$-dimensional. \end{abstract} \section{Introduction} \label{S:intro} Many aspects of animal physiology involve the longitudinal propagation of rhythmic time-periodic patterns in which linear chains of neurons oscillate in synchrony or with specific phase relations. These two types of behaviour can be interpreted as standing waves and travelling waves, respectively. A common mechanism for such propagating chains involves a network of neurons, often called a Central Pattern Generator (CPG), which generates the basic rhythms. This lies at the start of a feedforward network along which the CPG signals propagate. Similar waves of motion are used to propel snake-like robots for exploration (including other planets) \cite{MWXLW17}; there are also numerous medical applications, see \cite{JC19,SOWRK10,ZHX20} and references therein. This field of `continuum robots' is advancing rapidly and the literature is huge.
Both types of application can be modelled using networks of coupled dynamical systems. We work in the general formalism of \cite{GS23,GST05,SGP03}, see Section~\ref{S:NAO}. We say that two nodes are {\em synchronous} if their waveforms (time series) are identical. More generally, two nodes are {\em phase-synchronous} if their waveforms (time series) are identical except for a phase shift (time translation). These definitions are idealisations, but they open up a powerful mathematical approach with useful implications. Real systems can be considered as perturbations of idealised ones, and much of the interesting structure persists in an appropriately approximate form. The main aim of this paper is to describe a general method for constructing networks in which periodic dynamics of a specified CPG propagates synchronously, or phase-synchronously with a regular pattern of phase shifts, along a feedforward chain, tree, or any other feedforward structure. (For simplicity we often use the term `chain' without implying linear topology.) This is achieved by constructing the rest of the network as a {\em feedforward lift} of the CPG. Of course, the use of chains to propagate signals is not a new idea, as even a cursory glance at the literature shows. Indeed, it is arguably the simplest, most natural, and most obvious method. However, the formal setting in which we carry out the analysis makes it possible to prove some general stability results and helps to unify the area. \subsubsection{Stability} A key issue is to ensure that these propagating states are stable. This term has many technically different meanings, see \cite{BS70} and Section~\ref{S:BStab}. More recently, chaotic dynamics has extended the diversity of meanings. Stability of synchronous states has been widely studied for special models, such as the Kuramoto model \cite{K84,MBBP19}. 
Other approaches and related results can be found in \cite{BPP00,PC98,PK17}, and a version for random dynamical systems is analysed in \cite{HLC13}. We consider several notions of stability for equilibria and periodic orbits, concentrating on the periodic case. Roughly speaking, Liapunov stability means that a small perturbation of the initial conditions has a small effect on the orbit; asymptotic stability means that the state converges to the orbit after a small perturbation; and for exponential stability the convergence has an exponential bound. For an equilibrium, exponential stability is equivalent to linear stability; for a periodic orbit it is equivalent to stability in the Floquet sense \cite{HKW81}, which for brevity we call `Floquet stability'. For formal definitions and further discussion, see Section \mbox{Re}f{S:BStab}. \subsubsection{Transverse Stability} The feedforward structure implies that if the lifted periodic orbit is stable, for a given stability notion, then the CPG orbit must be stable. However, this condition is not sufficient for stability of the lifted periodic orbit, because synchrony might be destroyed by perturbations transverse to the synchrony subspace, that is, by {\em synchrony-breaking} perturbations. The main point of this paper is to find necessary and sufficient conditions for the lifted state to be stable in each of the three senses above. This is achieved by defining associated conditions of `transverse' Liapunov, asymptotic, and linear/Floquet stability. For each of the three stability notions ${\mathcal S}$, we prove that the lifted periodic state is ${\mathcal S}$-stable if and only if the CPG periodic orbit is ${\mathcal S}$-stable on the CPG state space and the orbit is transversely ${\mathcal S}$-stable at every node of the chain. These results are stated and proved in Theorem \mbox{Re}f{T:FFStab} for Floquet stability and in Theorem \mbox{Re}f{T:FFLSstab} for Liapunov stability. 
There is also a version for asymptotic stability; we omit a statement and proof since these are similar to, and simpler than, those for Liapunov stability. Theorem \mbox{Re}f{T:TWtranseigen} generalises the Floquet stability result to signals that propagate according to a specified phase pattern. A related, and simpler, notion is transverse stability of the synchrony subspace. Intuitively, this states that the vector field is attracting towards the synchrony subspace at every point on that subspace. Technically, it means that at any point on the synchrony subspace (or, more generally, in a neighbourhood of the CPG periodic orbit) all eigenvalues of the Jacobian, for eigenvectors transverse to the synchrony subspace, have negative real parts. If the state of the CPG is Floquet stable and node spaces are 1-dimensional, this condition implies Floquet stability of the lifted state. For node spaces of dimension 2 or more, this implication trivially remains valid for equilibria, but it can fail for periodic orbits, as the celebrated Markus-Yamabe counterexample (Example \mbox{Re}f{ex:notFloquet}) shows. Despite this, transverse stability of the synchrony subspace retains some heuristic value, and can sometimes be given rigorous justification. It is therefore worth examining in its own right, independently of its relation to overall stability. An important feature of feedforward lifts is that all of these transverse stability notions are determined by dynamics associated with individual nodes of the CPG. In consequence, our results show that if the propagating signal is stable one step along the chain, then it remains stable however long the chain is, or if the chain branches like a tree. A side effect of this feature is the generic occurrence of multiple Floquet multipliers, except for very short chains, even when the overall network has no symmetry. 
This multiplicity is an advantage for all forms of transverse stability, but may cause problems for bifurcation analysis when it occurs for a critical eigenvalue. \subsection{Biological Motivation} To set the scene, we begin with two examples of propagating phase-synchronous signals in biological systems: peristalsis in the gut and peristaltic waves in crawling movement in {\em Drosophila} larvae. Further examples include the heartbeat of the medicinal leech \cite{BP04,CP83,CNO95}, legged locomotion \cite{B01,B19,CS93a,CS93b,GSBC98,GSCB99,PG06,S14}, and the motion of the nematode worm {\em Caenorhabditis elegans} \cite{BBC12,IB18,OIB21,SSSIT21}. These networks are similar, but not identical, to feedforward lifts, and are presented solely as motivation. \begin{example}\em \label{ex:persistalsis} Peristalsis in the intestine is a travelling wave of muscular contractions controlled by the enteric nervous system, which contains millions of neurons, mainly bunched into ganglia of two types: myenteric and submucosal. Successive ganglia are connected together, and the large scale topology for each type is that of a chain. Submucosal ganglia are spaced more closely than myenteric ones. General information is in \cite{F08,Gr03}. Mathematical models of enteric neural motor patterns are surveyed in~\cite{CTB13}, which contains few mathematical details but a large number of references. We also mention \cite{CBT08} on a model of intestinal segmentation and \cite{TBB00} on a recurrent excitatory network model, both in guinea pigs. Figure~\mbox{Re}f{F:KF99fig} shows a schematic network from \cite{KF99} with modular feedforward structure. 
\begin{figure} \caption{Schematic `cartoon' of the model of \cite{KF99}.} \label{F:KF99fig} \end{figure} \end{example} \begin{example}\em \label{ex:drosophila} Gjorgjieva {\em et al.}~\cite{GBEE13} study neural networks for crawling movement in {\em Drosophila} larvae, which is driven by a peristaltic wave propagating from the rear (posterior) to the front (anterior). Figure~\ref{F:Gjorg1} (top) shows these contractions in the larva, in snapshots taken every 200 ms; A1--A8/9 indicate the segments; arrows illustrate simultaneous contraction of neighbouring segments; lines across the larva show the denticle belts, which approximate segment boundaries. Figure~\ref{F:Gjorg1} (bottom) shows the model network studied in that paper. The equations for the dynamics are of Wilson--Cowan (rate model) type \cite{ET10,WC72}. The segments are connected with nearest-neighbour excitatory connections (triangular arrowheads) and inhibitory connections (barred arrowheads). Forward waves are initiated by providing a time-varying external input $P_{\mathrm{ext}}$ into the excitatory population of segment A8. When $P_{\mathrm{ext}}$ is a short pulse of suitable amplitude a single wave is excited; longer pulses excite more waves. \end{example} \begin{figure} \caption{{\em Top}: peristaltic contraction wave in the crawling larva. {\em Bottom}: the model network.} \label{F:Gjorg1} \end{figure} Many networks in the literature have a similar repetitive feedforward structure; see for example \cite{MHKDA03,SMG18}. These biological examples are based on networks of neurons, which control muscle groups, but the general theory applies more widely. A standard evolutionary pathway is to make multiple copies of an existing structure, and to modify the result through adaptation to different environments. \subsection{Feedforward Propagation} In general, firing signals can propagate naturally along chains of neurons if each neuron sends an excitatory signal to the next. 
However, these signals can lose synchrony with each other, or phase relations can change, because of random time delays or other accumulating differences between distinct chains. Similar remarks apply to other areas of application. A more robust way to propagate dynamic patterns, in general networks, has its merits. To set up such a propagation method, we work in a general context for network dynamics introduced in~\cite{GST05, SGP03}, with slight modifications in \cite{GS23}, which provides a formal framework for analysing networks of coupled dynamical systems (ODEs). These can be viewed as directed networks in which nodes and directed edges (`arrows') are labelled with `types'. Nodes of the same type have the same state space, and arrows of the same type represent identical types of coupling. Nodes with isomorphic sets of input arrows obey identical ODEs when corresponding couplings are identified. We outline this formalism in Section~\mbox{Re}f{S:NAO}. The main object of this paper is to use this formalism to construct, for any small CPG network, a larger network in which the dynamics of the CPG can be transmitted synchronously along chains, trees, or other feedforward cascades of modules. The same construction, applied to a CPG with cyclic group symmetry, can lead to stable propagation of signals with `phase synchrony' --- identical waveforms except for regular phase shifts. We call such behaviour a {\em phase pattern}. Cyclic group symmetry is intimately involved in such patterns \cite{GRW12,S20overdet,SP08} and \cite[Chapter 17]{GS23}; see Section~\mbox{Re}f{S:RPPCGS}. In the context of a chain of successive nodes, such states can be viewed as travelling waves. In this construction the nodes of the modules correspond to, and have the same types, as the nodes of the CPG --- or, more generally, some subset of the CPG. Moreover, any specified synchrony or phase pattern on the CPG can be extended to the new modules. 
Their inputs also correspond to input arrows within the CPG, except that the tail node for an arrow may be any copy of the corresponding tail node in the CPG that lies further back along the feedforward cascade. This structure implies that any dynamical state of the CPG (or a subset) can be `lifted' to the entire cascade by requiring corresponding nodes to be synchronous. That is, the CPG is a quotient network of the cascade in the sense of~\cite{GS23,GST05, SGP03}. Conversely, the cascade is a lift of the CPG, so the dynamics of the CPG lifts to the feedforward network, and the modules copy the CPG dynamics. \subsection{Summary of Paper} Section~\mbox{Re}f{S:NAO} summarises basic concepts and theorems in the formalism for network dynamics employed here, with particular emphasis on balanced colorings, quotient networks, and associated lifts. We introduce a running example: a 7-node network in which a directed ring of 3 nodes feed forward into a 4-node chain as in Figure \mbox{Re}f{F:7nodeFFZ3} of Section \mbox{Re}f{S:AME}. (The numbers 3 and 4 are chosen for convenience and similar remarks apply for any two positive integers.) Section~\mbox{Re}f{S:FFL} defines feedforward lifts and establishes their main properties, especially in the construction of synchronous patterns. We show that the Jacobian (derivative) has a block-triangular form, and use the 7-node example to illustrate this result. Section \mbox{Re}f{S:BStab} reviews various notions of stability, and the relations between them, for equilibria and periodic orbits. In particular we discuss Liapunov stability, asymptotic stability, linear stability, hyperbolicity, and Floquet theory for periodic orbits. Section \mbox{Re}f{S:TS} deals with analogous `transverse' stability notions for synchrony-breaking perturbations of a periodic orbit $\{a(t)\}$ on a CPG network ${\mathcal G}$ giving rise to a lifted periodic orbit $\{\tilde a(t)\}$ on a feedforward lift $\widetilde {\mathcal G}$. 
In Theorem~\ref{T:FFStab} we use the block-triangular structure of a feedforward lift to provide a necessary and sufficient condition for a lifted periodic state to be Floquet-stable, hence asymptotically stable. This condition is stated in terms of `transverse Floquet multipliers', which are analogous to the Floquet multipliers for smaller dynamical systems based on the internal dynamics of individual nodes. Moreover, we need consider only the nodes of the CPG. Theorem \ref{T:FFLSstab} provides a similar result for transverse Liapunov stability. Section~\ref{S:RTJ} defines the similar but different condition of `transverse stability' of a synchrony subspace, and relates this to the diagonal entries of the Jacobian for the CPG. Theorem \ref{T:tstab} shows that transverse stability implies Floquet-stability for equilibria, and for periodic orbits when node spaces are $1$-dimensional. A famous example of \cite{MY60} shows that this can be false for the periodic case when node spaces have dimension greater than $1$. We briefly discuss additional conditions that avoid this problem, together with a related issue: synchronisation of chaotic states. This involves a more general concept: `transverse stability on average'. Again this is more satisfactory when node spaces are $1$-dimensional, and even then, some aspects are conjectural. Section~\ref{S:PTW} generalises the transverse stability conditions to phase-synchronous travelling waves, using a feedforward lift whose CPG has cyclic group symmetry $\mathbb{Z}_k$. General results in network dynamics imply that such a network can support states with phase synchrony, in which the phase shifts are integer multiples of $T/k$ where $T$ is the overall period \cite[Chapter 3]{GS02}. In a feedforward lift, these states can be viewed as travelling waves. Finally, we summarise the main conclusions in Section~\ref{S:C}. 
\section{Networks and Admissible ODEs} \label{S:NAO} We briefly review the formalism for network dynamics of~\cite{GST05, SGP03}, taking into account minor improvements introduced in the monograph \cite{GS23}. A {\em network} is a directed graph whose nodes and directed edges (`arrows') are classified into types. A {\em node space} --- usually a finite-dimensional real vector space --- is assigned to each node, defining a {\em node variable}, which may be multidimensional. The network then encodes a class of {\em admissible ODEs}, coupled in a manner that respects the network topology and the node- and edge-types. We give an example in Section~\ref{S:AME} and a precise definition in Section~\ref{S:AMGC}. The {\em nodes} (previously called `cells', a term we avoid because of potential confusion with biological cells) form a (usually finite) set ${\mathcal C} = \{1, 2, \ldots, n\}$, connected by a set ${\mathcal E}$ of {\em arrows}. Each arrow $e$ has a {\em head node} $\mathcal{H}(e)$ and a {\em tail node} $\mathcal{T}(e)$. Nodes are classified into {\em node-types}, and in the associated admissible ODEs, nodes of the same type have the same internal dynamic. They also have the same state space, but this property is best treated separately using the notion of {\em state type}, \cite[Section 9.3]{GS23}. Arrows are also classified into {\em arrow-types}, and arrows of the same type determine the same coupling structure. \begin{remark}\em In contrast to the conventions in some areas of application where there are standard model ODEs, the network diagram does not encode a specific ODE (subject perhaps to choices of parameters such as reaction rates), and individual nodes and arrows do not correspond to specific {\em terms} in a model ODE. Instead, the network diagram encodes the class of {\em all} ODEs whose couplings model the network architecture. This convention is chosen for mathematical reasons, notably generality \cite[Section 8.10]{GS23}. 
\end{remark} \subsection{Network Diagrams} A network can be represented graphically by its {\em diagram}, which is an elaboration of a directed graph. In graph-theoretic terms it is a {\em coloured digraph}, with colours of nodes and edges to represent their node-types, but we use colours in a different manner so we avoid this terminology. Instead, nodes are drawn as dots, circles, squares, hexagons, and so on, with a different symbol for each type; arrows are similarly decorated to distinguish arrow-types by using dotted or wavy lines, different shapes of arrowhead, and so on. Each arrow $e$ runs from the {\em tail node} $\mathcal{T}(e)$ to the {\em head node} $\mathcal{H}(e)$. An arrow can have the same head and tail, forming a {\em self-loop}. (A biological term is `autoregulation'.) Two distinct arrows can have the same head and the same tail, giving {\em multiple arrows} between the two nodes. This convention is motivated by some applications and by a basic theoretical construction, the `quotient network', related to synchrony; see Section~\ref{S:QNL}. A network ${\mathcal G}'$ is a {\em subnetwork} of ${\mathcal G}$ if the nodes of ${\mathcal G}'$ are a subset ${\mathcal C}' \subseteq {\mathcal C}$ and the arrows of ${\mathcal G}'$ are precisely those of ${\mathcal G}$ whose head and tail both lie in ${\mathcal C}'$. \subsection{State Spaces} In order to set up an ODE, we must choose its variables, and the functions that determine their derivatives. In dynamical systems theory the variables determine points in the {\em state space} or {\em phase space} of the system, which is usually a manifold or more generally a metric space. Because the term `phase' has other meanings in dynamics, we prefer the former term. For each node $c \in {\mathcal C}$, choose a {\em node (state) space} $P_c$. In general, this can be a smooth manifold, and the basic theory of admissible ODEs and quotient networks remains valid in this context \cite{AF10a,AF10b}. 
For simplicity we follow \cite{GS23,GST05,SGP03} and assume that $P_c = \mathbb{R}^{n_c}$ is a real vector space. (This assumption is sufficient for local bifurcation analysis, even if node spaces are manifolds.) Systems of {\em phase oscillators}~\cite{K88,KE88,KE90,K84}, another standard choice, correspond to $P_c = {\mathbb S}^1$, the circle. Phenomena such as synchrony require comparison between distinct node variables, and this makes sense only when the corresponding state spaces are equal. State types encode this information: if nodes $c,d$ are state-equivalent then we require $P_c = P_d$. The {\em total state space} of the network is the direct sum \[ P = \bigoplus_{c\in{\mathcal C}} P_c \] and a state is represented by a vector \[ x = (x_c)_{c\in{\mathcal C}} \] The entries $x_c$ are themselves vectors when $n_c>1$. \subsection{Input Sets} The dynamics of a node depends on the dynamics of its inputs. We therefore define the {\em input set} of node $c$ to be the set $I(c)$ of all arrows $e$ such that $\mathcal{H}(e) = c$. Arrows are used here because networks can have self-loops and multiple arrows, so specifying the head and tail does not single out a unique input arrow. An {\em input isomorphism} $\beta:I(c) \rightarrow I(d)$ is a one-to-one correspondence between their input sets that preserves arrow-type. That is, $e$ has the same arrow-type as $\beta(e)$ for all $e \in I(c)$. Nodes $c, d$ are {\em input isomorphic} if there exists an input isomorphism $\beta:I(c) \rightarrow I(d)$. Equivalently, $c$ and $d$ have the same node-type and the same number of input arrows of each arrow-type. \subsection{Admissible Maps: Example} \label{S:AME} To each network ${\mathcal G}$ and choice of node spaces $P_c = \mathbb{R}^{n_c}$, we associate the class of all ODEs that are compatible with the network architecture. Such ODEs are called {\em network ODEs} (previously {\em coupled cell systems}). 
They are determined by the space of {\em admissible vector fields}. When all $P_c$ are real vector spaces we refer to these as {\em admissible maps}. For simplicity we work throughout in the $C^\infty$ category, but most results hold for $C^r$ with $r \mbox{\bf g}eq 1$. \begin{example}\em \label{ex:Z3chain1} We introduce an example which is revisited several times for different purposes. Figure~\mbox{Re}f{F:7nodeFFZ3} is a 7-node network, forming a feedforward chain with a single feedback connection from node 3 to node 1. (Later, nodes $\{1,2,3\}$ and connecting arrows are interpreted as a CPG with $\mathbb{Z}_3$ symmetry, and the rest of the network is a feedforward lift.) There is one state-type (all nodes have the same node space), one node-type (all nodes have the same type of internal dynamic), and one-arrow type (all couplings are identical in form but relate to different pairs of nodes). The `colours' of the nodes (white, grey, black) are explained in Section~\mbox{Re}f{S:SBC} and can be ignored here. The numbers $3$ and $7$ are for purposes of illustration, and have no special significance apart from convenience. Similar examples can be constructed for any positive integers $p < q$. \begin{figure} \caption{A 7-node feedforward chain with one node-type and one-arrow type. Colours show a synchrony pattern.} \label{F:7nodeFFZ3} \end{figure} Admissible ODEs for this network have the following form: \begin{equation} \label{E:7nodeFFZ3} \begin{array}{rcl} \dot{x}_1 &=& f(x_1,x_3) \\ \dot{x}_2 &=& f(x_2,x_1) \\ \dot{x}_3 &=& f(x_3,x_2) \\ \dot{x}_4 &=& f(x_4,x_3) \\ \dot{x}_5 &=& f(x_5,x_4) \\ \dot{x}_6 &=& f(x_6,x_5) \\ \dot{x}_7 &=& f(x_7,x_6) \end{array} \end{equation} The same function $f$ is used for all components because all nodes have the same node-type and all arrows have the same arrow-type. The $x_c$ for $1 \leq c \leq 7$ all belong to the same node space $\mathbb{R}^k$ because all nodes have the same state type. 
(Indeed, using the same $f$ throughout requires all node spaces to be the same.) The component for node $c$ is $f(x_c,x_{i(c)})$ where $i(c)$ is the tail of the (here unique) input arrow to $c$. In this manner, the admissible ODEs are precisely those that respect the network structure, including preserving node- and arrow-types. \end{example} \subsection{Admissible Maps: General Case} \label{S:AMGC} We now describe, informally, a procedure for writing down admissible maps for general networks. Formal definitions are given in \cite[Section 9.4]{GS23} and \cite[Section 3]{GST05}. For each node $c$ choose {\em node coordinates} $x_c$ on $P_c$. Nodes of the same state-type have the same coordinate system. In general, $x_c$ may be multidimensional ($n_c > 1$). Let $P = \oplus_c P_c$ be the total state space. A map $f = (f_1, \ldots, f_n)$ from $P$ to itself has components \[ f_c: P \rightarrow P_c \qquad 1 \leq c \leq n \] For admissibility we impose extra conditions on the $f_c$ that reflect network architecture, as follows: \begin{definition}\em \label{D:admiss} Let ${\mathcal G}$ be a network. A map $f: P \rightarrow P$ is ${\mathcal G}$-{\em admissible} if: \begin{itemize} \item[\rm (1)] {\em Domain Condition}: For every node $c$, the component $f_c$ depends only on the node variable $x_c$ and the input variables $x_{\mathcal{T}(e)}$ where $e \in I(c)$. \item[\rm (2)] {\em Symmetry Condition}: If $c$ is a node, $f_c$ is invariant under all permutations of tail node coordinates for equivalent input arrows. \item[\rm (3)] {\em Pullback Condition}: If nodes $c \neq d$ are input isomorphic, the components $f_c, f_d$ are identical as functions. The variables to which they are applied correspond under some (hence any, by condition (2)) input isomorphism. \end{itemize} \end{definition} Formally, conditions (2) and (3) are combined into a single {\em pullback condition} applying to any pair $c, d$ of nodes, equal or different \cite[Remarks 9.20]{GST05}. 
Each admissible map $f$ determines an {\em admissible ODE} \begin{equation} \label{E:admissODE} \dot x = f(x) \end{equation} where the dot indicates the time-derivative. If $f$ also depends on a (possibly multidimensional) parameter $\lambda$, and is admissible as a function of $x$ for any fixed $\lambda$, we have an {\em admissible family} of maps $f(x,\lambda)$ and ODEs $\dot x =f(x,\lambda)$. Such families arise in bifurcation theory. \subsection{Synchrony and Balanced Colourings} \label{S:SBC} Nodes $c,d$ are {\em synchronous} on a solution $x(t)$ of an admissible ODE if \[ x_c(t) \equiv x_d(t) \quad \forall t \in \mathbb{R} \] This equation makes sense only when $P_c = P_d$; that is, $c$ and $d$ have the same state-type. Patterns of synchrony that arise naturally and robustly for {\em any} admissible ODE for a given network are characterised by a property known as balance, which we now define. \begin{definition}\em \label{D:balance} (a) A {\em colouring} of a network ${\mathcal G}$ is a map $\kappa: {\mathcal C} \rightarrow {\mathcal K}$, where ${\mathcal K}$ is a finite set of {\em colours}. (b) Nodes $c, d$ {\em have the same colour} if $\kappa(c) = \kappa(d)$. (c) The colouring $\kappa$ is {\em balanced} if there exists a {\em colour-preserving} input isomorphism for any two nodes of the same colour. That is, whenever nodes $c, d$ have the same colour, there exists an input isomorphism $\beta : I(c) \rightarrow I(d)$ such that $\mathcal{T}(e)$ and $\mathcal{T}(\beta(e))$ have the same colour for all arrows $e \in I(c)$. In symbols, $\kappa(\mathcal{T}(e))= \kappa(\mathcal{T}(\beta(e)))$. \end{definition} In particular, this definition requires nodes of the same colour to be input isomorphic. However, the relation of input isomorphism need not be balanced. 
\begin{definition}\em \label{D:synchspace} The {\em synchrony subspace} defined by a colouring $\kappa$ of ${\mathcal G}$ is the vector subspace \[ \Delta_\kappa = \{x \in P : \kappa(c) = \kappa(d) \implies x_c = x_d \} \] That is, nodes of the same colour are synchronous for $x \in \Delta_\kappa$. \end{definition} \begin{example}\em \label{ex:Z3chain2} Continuing Example~\ref{ex:Z3chain1}, we again consider the 7-node chain of Figure~\ref{F:7nodeFFZ3}. The colouring $\kappa$ illustrated in Figure~\ref{F:7nodeFFZ3} has three colours ${\mathcal K}= \{\mbox{B,G,W}\}$, using the initials of the colours black, grey, and white. We have \[ \kappa(1) = \kappa(4) = \kappa(7) = \mathrm{W} \qquad \kappa(2) = \kappa(5) = \mathrm{G} \qquad \kappa(3) = \kappa(6) = \mathrm{B} \] All nodes have the same node-type and a single input arrow, and all arrows have the same arrow-type, so the nodes are input isomorphic. The colouring is balanced because: \begin{equation} \label{E:Z3chain_balance} \begin{array}{l} \mbox{Every black node has a single input from a grey node.}\\ \mbox{Every grey node has a single input from a white node.}\\ \mbox{Every white node has a single input from a black node.} \end{array} \end{equation} All nodes have the same state-type so $P_1 = \cdots = P_7$. The synchrony subspace is \begin{equation} \label{E:Delta_kappa} \Delta_\kappa = \{(x,y,z,x,y,z,x): x,y,z \in P_1 \} \end{equation} \end{example} The basic theorem on balanced colourings and flow-invariance is: \begin{theorem} \label{T:bal_poly} A subspace $V \subseteq P$ is invariant under every admissible map if and only if $V$ is a synchrony space $\Delta_\kappa$ where $\kappa$ is balanced. \end{theorem} \begin{proof} See \cite[Theorem 10.21]{GS23}. 
\end{proof} Theorem~\ref{T:bal_poly} implies that when $\kappa$ is balanced, initial conditions that have the synchrony pattern defined by $\kappa$ (that is, lie in $\Delta_\kappa$) give rise to solutions with the same synchrony pattern. However, this result does not guarantee that the synchrony pattern is stable: perturbations that break synchrony could cause the orbit to deviate from $\Delta_\kappa$. This kind of stability depends on the admissible map and the orbit concerned. \subsection{Quotient Networks and Lifts} \label{S:QNL} Balanced colourings give rise to an important construction in which synchronous nodes are identified in {\em clusters} (or {\em synchrony classes} or {\em colour classes}). \begin{definition}\em \label{D:quot} Let $\kappa$ be a balanced colouring on a network ${\mathcal G}$ with colour set ${\mathcal K}$. The {\em quotient network} ${\mathcal G}_\kappa$ has ${\mathcal K}$ as its set of nodes (that is, there is one node per colour). The node type of node $i \in {\mathcal K}$ is that of any node $c \in {\mathcal C}$ such that $\kappa(c) = i$. The arrows in $I(i)$ in ${\mathcal G}_\kappa$ are obtained from the input set $I(c)$ of any node $c$ with colour $i$ by copying each arrow $e$ to create an arrow with head $\kappa(\mathcal{H}(e))$ and tail $\kappa(\mathcal{T}(e))$, of the same type as $e$. The set of arrows of ${\mathcal G}_\kappa$ is the union of the $I(i)$ as $i$ runs through ${\mathcal K}$. \end{definition} \begin{example}\em \label{ex:Z3chain3} The quotient network for the balanced colouring $\kappa$ of Figure~\ref{F:7nodeFFZ3} has three nodes $\{\mbox{B,G,W}\}$. All nodes have the same node type. From \eqref{E:Z3chain_balance} there is a single arrow-type, with arrows from B to W, W to G, and G to B. In other words, the quotient network is a $\mathbb{Z}_3$-symmetric unidirectional ring, Figure~\ref{F:Z3quotring}. 
In this case it is isomorphic to the subnetwork of ${\mathcal G}$ with nodes $\{1,2,3\}$ and their connecting arrows. In general, quotient networks need not be subnetworks. \end{example} \begin{figure} \caption{Quotient network for the balanced colouring of Figure~\ref{F:7nodeFFZ3}.} \label{F:Z3quotring} \end{figure} The state space $P_\kappa$ for the quotient network is not the same as $\Delta_\kappa$, but they can be canonically identified by the map \begin{equation} \label{E:nu} \nu:\Delta_\kappa \to P_\kappa \qquad (\nu(x))_{\kappa(c)} = x_c \end{equation} For example, in \eqref{E:Delta_kappa}, $\nu(x,y,z,x,y,z,x) = (x,y,z)$. The projection $\nu$ preserves the synchronous dynamics for any admissible ODE. \begin{theorem} \label{T:quot} Let $\kappa$ be a balanced colouring of ${\mathcal G}$. Then \begin{itemize} \item[\rm (1)] The restriction of any ${\mathcal G}$-admissible map to $\Delta_\kappa$ is ${\mathcal G}_\kappa$-admissible. \item[\rm (2)] Every ${\mathcal G}_\kappa$-admissible map is the restriction to $\Delta_\kappa$ of a ${\mathcal G}$-admissible map. \end{itemize} \end{theorem} Another way to say (2) is that every ${\mathcal G}_\kappa$-admissible map on $\Delta_\kappa$ {\em lifts} to a ${\mathcal G}$-admissible map on $P$. If $f$ is ${\mathcal G}$-admissible, the restricted map $f|_{\Delta_\kappa}$ determines the dynamics under $f$ of the synchronous clusters determined by the colouring $\kappa$. Quotient networks can have self-loops and multiple arrows, even if the original network does not. This feature is required to prove property (2); see \cite[Section 8.10]{GS23}. \begin{example}\em \label{ex:Z3chain4} Again consider the balanced colouring $\kappa$ of Figure~\ref{F:7nodeFFZ3}. By Example \ref{ex:Z3chain3} the quotient network has three nodes $\{\mbox{B,G,W}\}$ forming a $\mathbb{Z}_3$-symmetric ring. We can write the corresponding coordinates as $x, y, z$ respectively. 
Substitute these coordinates, as in \eqref{E:Delta_kappa}, into the admissible ODE \eqref{E:7nodeFFZ3}: \begin{equation} \label{E:7nodeFFZ3_quot} \begin{array}{rcl} \dot{x} &=& f(x,z) \\ \dot{y} &=& f(y,x) \\ \dot{z} &=& f(z,y) \\ \dot{x} &=& f(x,z) \\ \dot{y} &=& f(y,x) \\ \dot{z} &=& f(z,y) \\ \dot{x} &=& f(x,z) \end{array} \end{equation} This list of equations appears to be overdetermined, because there are three equations for $\dot x$, and two for each of $\dot y$ and $\dot z$. However, these equations repeat the same equation three or two times. (This happens precisely because the colouring is balanced. If it were not, some equations would disagree with others.) The dynamics therefore reduces to an ODE with one equation for each coordinate: \begin{equation} \label{E:7nodeFFZ3_quot2} \begin{array}{rcl} \dot{x} &=& f(x,z) \\ \dot{y} &=& f(y,x) \\ \dot{z} &=& f(z,y) \end{array} \end{equation} This is the most general admissible ODE on the quotient network, in accordance with the lifting property. \end{example} \section{Feedforward Lifts} \label{S:FFL} In this section we define feedforward lifts. We observe that (as is well known) the Jacobian of any admissible map is block-triangular, with one block for the CPG and separate blocks for each node in the chain. We discuss the construction of feedforward lifts of a given CPG. (In alternative terminology \cite{BV02,DL14,MLM20}: the CPG is the {\em base} of a {\em graph fibration}, whose {\em fibres} are the synchrony classes of nodes; `feedforward' means that the base receives no inputs from the rest of the directed graph.) We begin by summarising some standard results on feedforward networks; see \cite[Chapter 4]{GS23} for proofs. The usual graph-theoretic term for `feedforward' is {\em acyclic}: no closed directed path exists. In dynamics, the term `feedforward' is more common. By a {\em path} in a network we mean a directed path. 
In such a path, each node inputs a signal to the next one, so signals propagate along paths, but usually they do so without being synchronous or phase-synchronous. \begin{definition} \rm (a) Node $q$ is {\em downstream} from node $p$ if there exists a path from $p$ to $q$. (b) Node $p$ is {\em upstream} from node $q$ if $q$ is downstream from $p$. (c) Nodes $p$ and $q$ are {\em path equivalent} if node $p$ is both upstream and downstream from node $q$. (d) A {\em path component} or {\em transitive component} of a network is an equivalence class of nodes under path equivalence. We use the same term for the subnetwork obtained by including all arrows between the nodes in the equivalence class. (e) Path component $Q$ is {\em downstream} from path component $P$ if there exist a node $p \in P$ and a node $q \in Q$ such that $q$ is downstream from $p$. Path component $P$ is {\em upstream} from component $Q$ if $Q$ is downstream from $P$. (f) The directed graph induced on the components is the {\em component graph} or {\em condensation} of the original network, Eppstein~\cite{E16}. \end{definition} The following result about the feedforward structure of the component graph is well known in the theory of directed graphs \cite{S02}. \begin{theorem} \label{T:PCFF} The path components are connected in a feedforward manner; that is, the component graph is acyclic. Moreover, there is a total order on the nodes that is compatible with the feedforward structure. 
\qed\end{theorem} With a compatible order on the nodes, the Jacobian of any admissible map is block lower triangular, with the blocks determined by the path components: \begin{proposition}\label{P:J-lower-block}\rm The Jacobian\index{Jacobian} matrix of any admissible map at any point $x$ is block lower triangular, of the form \begin{equation} \label{Jac_block} J=\Matrix{J_1 & {\bf 0} & {\bf 0} & \cdots & {\bf 0} \\ * & J_2 & {\bf 0} & \cdots & {\bf 0} \\ * & * & J_3 & \cdots & {\bf 0} \\ \vdots & \vdots &\vdots & \ddots & \vdots \\ * & * & * & \cdots & J_m} \end{equation} where $J_j$ is the Jacobian matrix of $f$ on the $j$th path component and each ${\bf 0}$ is a block of zeros of the appropriate size. \end{proposition} \begin{proof} The value of $f_c(x_c, x_{i_1}, \ldots, x_{i_l})$, where the $i_j$ are the tails of input arrows to node $c$, is independent of all $x_d$ for $d>c$. \end{proof} The triangular form of~\eqref{Jac_block} implies that the eigenvalues of the Jacobian, including multiplicity, are determined by the diagonal blocks $J_j$. The same is true for the generalised eigenspaces. \begin{definition}\em \label{D:FFL} Let ${\mathcal G}$ be a network with a set of nodes ${\mathcal C}$ and a balanced colouring $\kappa$. A {\em feedforward lift} of ${\mathcal G}$ is a network $\widetilde{{\mathcal G}}$ with nodes $\widetilde{{\mathcal C}}\supseteq {\mathcal C}$ and a balanced colouring $\tilde\kappa$ such that: (a) ${\mathcal G}$ is a subnetwork of $\widetilde{{\mathcal G}}$. (b) Every node $d \in \widetilde{{\mathcal C}} \setminus {\mathcal C}$ is downstream from some node $c \in {\mathcal C}$. (c) The only loops of $\widetilde{{\mathcal G}}$ are those that lie in ${\mathcal G}$. (d) The colouring $\widetilde{\kappa}$ on $\widetilde{{\mathcal G}}$ has the same set of colours as $\kappa$, and restricts to $\kappa$ on ${\mathcal G}$.
\end{definition} \begin{example}\em \label{ex:Z3chain5} Yet again, consider the 7-node chain of Figure~\ref{F:7nodeFFZ3}, in which a CPG with $\mathbb{Z}_3$ symmetry feeds forward into four additional nodes. Let ${\mathcal G}$ be the subnetwork whose nodes are $\{1,2,3\}$ together with the arrows that connect them. Let $\kappa$ assign different colours to each of these nodes. Let $\widetilde{{\mathcal G}}$ be the full 7-node network. Then $\widetilde{{\mathcal G}}$ is a feedforward lift of ${\mathcal G}$ for the colouring $\tilde\kappa$ illustrated. It is easy to check properties (a--d). \end{example} There is always at least one balanced colouring on a network ${\mathcal G}$, namely the {\em trivial colouring} in which all nodes have distinct colours. It is easy to see that if $\widetilde{{\mathcal G}}$ is a feedforward lift of ${\mathcal G}$ and the colouring $\kappa$ is trivial, then the quotient network ${\mathcal G}_\kappa$ is isomorphic to ${\mathcal G}$. \begin{proposition} {\rm (a)} Every admissible map $f$ for ${\mathcal G}$ lifts to an admissible map $\tilde f$ for $\widetilde{{\mathcal G}}$. {\rm (b)} The map $\tilde f$ leaves the synchrony subspace for $\widetilde{\kappa}$ invariant. {\rm (c)} The quotient network $\widetilde{{\mathcal G}}_{\widetilde{\kappa}}$ is isomorphic to the quotient network ${\mathcal G}_\kappa$. {\rm (d)} Let $\lambda$ be any balanced colouring of ${\mathcal G}$ that is coarser than $\kappa$ (meaning that $\kappa(c)=\kappa(d) \Rightarrow \lambda(c) =\lambda(d)$) and let $\widetilde{{\mathcal G}}$ be a feedforward lift of ${\mathcal G}$. Then $\lambda$ lifts to a balanced colouring $\widetilde{\lambda}$ of $\widetilde{{\mathcal G}}$ with the same set of colours, and this colouring is coarser than $\widetilde{\kappa}$. \end{proposition} \begin{proof} These are general properties of lifts; see \cite[Theorem 10.27, Proposition 10.38]{GS23}.
\end{proof} We call any such $\tilde f$ a {\em synchronous lift} of $f$ with {\em pattern of synchrony} $\tilde\kappa$. By construction, when the colouring on ${\mathcal C}$ is trivial, every node $i \in \widetilde{{\mathcal C}} \setminus {\mathcal C}$ has the same colour as a unique node $c \in {\mathcal C}$. We denote this node by $[i]$. \subsection{Path Components of Feedforward Lifts} \begin{lemma} \label{L:pcFFlift} Every path component of $\widetilde{{\mathcal G}}$ is either a path component of ${\mathcal G}$ or a single node of $\widetilde{{\mathcal C}} \setminus {\mathcal C}$. \end{lemma} \begin{proof} Nodes $c, d$ are path-equivalent if and only if they lie on a closed loop. All such loops lie in ${\mathcal G}$. \end{proof} Denote the partial derivative of a function $F$ with respect to a (multidimensional) variable $x_c$ by $\mathrm{D}_c F$. We have: \begin{corollary} \label{C:pcFFlift1} Let $f$ be admissible for ${\mathcal G}$, and let $\tilde{f}$ be a lift of $f$ to $\widetilde{{\mathcal G}}$. Order nodes in a manner that is compatible with the partial order on the component graph, with nodes $1, \ldots, m$ in ${\mathcal C}$ and nodes $m+1, \ldots, n$ in $\widetilde{{\mathcal C}} \setminus {\mathcal C}$. Let $J$ be the Jacobian of $f = (f_1, \ldots, f_m)$ on $P_1 \oplus \cdots \oplus P_m$. Then at any given point the Jacobian $\tilde{J}$ of $\tilde{f}$ is block lower triangular, of the form \begin{equation} \label{Jac_block2} \tilde{J}=\Matrix{J & {\bf 0} & {\bf 0} & \cdots & {\bf 0} \\ * & \mathrm{D}_{m+1} f_{m+1} & {\bf 0} & \cdots & {\bf 0} \\ * & * & \mathrm{D}_{m+2} f_{m+2} & \cdots & {\bf 0} \\ \vdots & \vdots &\vdots & \ddots & \vdots \\ * & * & * & \cdots & \mathrm{D}_n f_n } \end{equation} evaluated at that point.
\end{corollary} \begin{corollary} \label{C:pcFFlift2} {\rm (a)} The eigenvalues of $\tilde{J}$ at any point in $P$ are those of $J$ together with those of the $\mathrm{D}_c f_c$, for $m+1 \leq c \leq n$. {\rm (b)} At any point in the synchrony space $\Delta_{\kappa}$, and for $m+1 \leq c \leq n$, we have \begin{equation} \label{E:Dc=D[c]} \mathrm{D}_c f_c = \mathrm{D}_{[c]}f_{[c]} \end{equation} where $[c]$ is the unique node of ${\mathcal G}$ with the same colour as $c$. In particular, the eigenvalues of $\mathrm{D}_c f_c$ are the same as the eigenvalues of $\mathrm{D}_{[c]}f_{[c]}$, when evaluated at the same point. \end{corollary} \begin{proof} (a) This follows from the block-triangular structure. (b) The pullback condition and the synchrony pattern induced by $\kappa$ easily imply that $\mathrm{D}_c f_c = \mathrm{D}_{[c]}f_{[c]}$. \end{proof} \begin{remark}\em \label{r:selfloop} There is a minor complication concerning self-loops. We assume that all self-loops of the CPG are lifted to feedforward arrows in $\widetilde{{\mathcal G}} \setminus {\mathcal G}$. Thus the matrix $\mathrm{D}_c f_c$ is the Jacobian for the internal dynamics on node $c$, ignoring all inputs from other nodes and all self-loops at $c$ (if any exist). Hence the eigenvalues of $\tilde{J}$ are those of $J$ together with those for the internal part of each diagonal block of $J$. \end{remark} \begin{example}\em \label{ex:Z3chain7} Any admissible map $F(x)$ for the 7-node chain of Figure~\ref{F:7nodeFFZ3} has the form \eqref{E:7nodeFFZ3}.
Thus the Jacobian at a general point $u =(u_1, ,\ldots,ots, u_7) \in \mathbb{R}^{7k}$ has the block form \begin{equation} \label{E:tildeJ7node} \widetilde{J}|_u = \left[ \begin{array}{ccc|c|c|c|c} f_1(u_1,u_3) & 0 & f_2 (u_1,u_3) & 0 & 0 & 0 & 0 \\ f_2(u_2,u_1)& f_1(u_2,u_1) & 0 & 0 & 0 & 0 & 0 \\ 0 & f_2(u_3,u_2)& f_1(u_3,u_2) & 0 & 0 & 0 & 0 \\ \hline 0 & 0 & f_2(u_4,u_3)& f_1(u_4,u_3) & 0 & 0 & 0 \\ \hline 0 & 0 & 0 & f_2(u_5,u_4)& f_1(u_5,u_4) & 0 & 0 \\ \hline 0 & 0 & 0 & 0 & f_2(u_6,u_5)& f_1(u_6,u_5) & 0 \\ \hline 0 & 0 & 0 & 0 & 0 & f_2(u_7,u_6) & f_1(u_7,u_6) \end{array} \right] \end{equation} Here we write $f_1, f_2$ for the partial derivatives of $f$ with respect to its first and second variables. In our usual notation, $f_i = \mathrm{D}_i f$ for $i = 1, 2$. The lines indicate the block-triangular structure, with a $3 \times 3$ block at top left, which we recognise as the Jacobian of $F$ restricted to ${\mathcal G}$, and a series of four blocks $f_1$. These blocks are evaluated at $u$ and need not be equal, but when evaluated at a point in $\mathbb{D}elta_\kappa$ they are equal for nodes of the same colour, by \eqref{E:Dc=D[c]}. \end{example} \subsection{Construction of Feedforward Lifts} Feedforward lifts of a given network ${\mathcal G}$ are easy to construct. Informally, add new nodes one at a time, choosing a colour from those in the CPG. Copy the set of input arrows from the node of this colour in the CPG, wiring each so that its head is the new node and its tails is any old node with the same colour as the tail of the corresponding arrow in ${\mathcal G}$. Repeat. More formally, let ${\mathcal G}$ be a network with nodes $\mathbb{C}C = \{1,,\ldots,ots, m\}$. Colour all of its nodes differently, so we can identify the colour set ${\mathcal K}$ with $\{1,,\ldots,ots, m\}$. The construction of a feedforward lift with CPG ${\mathcal G}$ is simple and obvious. It can be described inductively, one new node at a time. 
Let ${\mathcal G}_0 = {\mathcal G}$ and ${\mathcal C}_0 = {\mathcal C}$. This is a trivial feedforward lift of ${\mathcal G}$ with no extra nodes, and starts the induction process. Assume that $k$ extra nodes have been added, to obtain a feedforward lift ${\mathcal G}_k$ of ${\mathcal G}$ with nodes ${\mathcal C}_k = \{1,\ldots, m+k\}$. Add a new node $m+k+1$ to get ${\mathcal C}_{k+1}$. Assign this node the same colour as some node $d \in {\mathcal C}_0$. It remains to define the input arrows of node $m+k+1$ in a manner that makes the colouring balanced. To do so, copy the input set $I(d)$ via an input isomorphism $\beta$, assigning all these arrows the new head node $m+k+1$. Now $I(m+k+1) = \beta(I(d))$. Rewire the tail node of each arrow $\beta(e) \in I(m+k+1)$ so that its tail $\mathcal{T}(\beta(e))$ is any node in ${\mathcal C}_k$ with the same colour as $\mathcal{T}(e)$. (This can be $\mathcal{T}(e)$ itself, but to obtain short-range connections we can use any node further along the chain with the required colour.) Then $\beta$ is a colour-preserving input isomorphism from $I(d)$ to $I(m+k+1)$. Since all tail nodes of the new arrows lie in ${\mathcal C}_k$, the resulting network ${\mathcal G}_{k+1}$ is a feedforward lift of ${\mathcal G}$. For example, in Figure \ref{F:7nodeFFZ3} we have ${\mathcal C}_0 = \{1,2,3\}$ and ${\mathcal G}_0$ is the $\mathbb{Z}_3$-symmetric ring on those nodes. We want the colouring with colour-classes $\{1,4,7\}, \{2,5\},\{3,6\}$. To obtain ${\mathcal G}_1$ we add node $4$, which has the same colour as node $1$. Node $1$ has a single input arrow with tail node $3$; copy this arrow so that its head is node $4$, and the tail remains at node $3$ since this is the only node that is earlier than node $4$ in the ordering and has the correct colour. To get ${\mathcal G}_2$ add node $5$ and copy the input arrow to node $2$. This time there are two choices for the tail node: either node $1$ or node $4$. The figure chooses $4$.
To get ${\mathcal G}_3$ we need an arrow with head node $6$ and tail node either $2$ or $5$, and similarly for ${\mathcal G}_4$. One set of such choices (with arrows of the shortest possible range) gives Figure~\mbox{Re}f{F:7nodeFFZ3} with the colouring illustrated. \begin{remarks}\em (a) As this description makes clear, feedforward lifts are not unique. (b) A similar construction can be applied when ${\mathcal G}$ has a nontrivial balanced colouring $\kappa$. Its description is essentially identical because the only change is to $\kappa$. Now $\kappa$ lifts to a balanced colouring $\tilde\kappa$ with the same set of colours. This is a general property of colourings \cite[Proposition 10.38]{GS23}. \end{remarks} \subsection{Notation for Feedforward Lift} \label{S:NFL} We use the following notation for a feedforward lift. In general, given a symbol $s$ for an object defined by ${\mathcal G}$, we denote its lift by $\tilde s$ and (where appropriate) the complementary object by $s^*$. Thus we denote the CPG network by ${\mathcal G}$ with nodes $\mathbb{C}C = \{1, ,\ldots,ots, m\}$. The feedforward lift is $\widetilde{\mathcal G}$ with nodes $\widetilde\mathbb{C}C = \{1, ,\ldots,ots, n\}$, where $n > m$. We let $\mathbb{C}C^\ast = \{m+1,,\ldots,ots,n\}$ be the nodes of the feedforward chain. Denote the total state space for ${\mathcal G}$ by $P$, and that for $\widetilde{\mathcal G}$ by $\widetilde P$. For any subset $\mathbb{Q}Q \subseteq \widetilde\mathbb{C}C$ let $P_\mathbb{Q}Q = \oplus_{c \in \mathbb{Q}Q} P_c$. Exceptionally, denote the node space of node $c$ by $P_c$ for all $c \in \widetilde \mathbb{C}C$, since this introduces no ambiguity. Similarly, coordinates of $P = P_\mathbb{C}C$ are denoted by $(x_1,,\ldots,ots, x_m)$, those on $\widetilde P= P_{\widetilde \mathbb{C}C}$ by $(x_1,,\ldots,ots, x_n)$, and those on $P^* = P_{\mathbb{C}C^*}$ by $(x_{m+1},,\ldots,ots, x_n)$. 
If $f:P \to P$ is admissible for ${\mathcal G}$, its lift is denoted by $\tilde f: \widetilde P \to \widetilde P$. If $x(t)$ is a solution of the ODE $\dot x = f(x)$ on $P$, then its lift is $\tilde x(t)$, and this is a solution of the ODE $\dot x = \tilde f(x)$ on $\widetilde P$. Colour all nodes of $\mathbb{C}C$ differently and let the corresponding synchrony subspace be $\mathbb{D}elta$. With the chosen ordering of nodes, the natural identification $\nu$ of $\mathbb{D}elta$ with $P_\mathbb{C}C$ in \eqref{E:nu} satisfies \[ \nu(v) = (v_1, ,\ldots,ots, v_m) \quad v \in \mathbb{D}elta \] Its inverse is $\nu^{-1} (v_1, ,\ldots,ots, v_m) = V$ where $V_c = v_{[c]}$. The feedforward structure, combined with the balance condition, implies that the quotient dynamics on $\mathbb{D}elta$ identifies with the dynamics of the CPG ${\mathcal G}$ on $P_\mathbb{C}C$. That is, the dynamics of $f|_\mathbb{D}elta$ on $\mathbb{D}elta$ is conjugate to that of $f_\mathbb{C}C$ on $P_\mathbb{C}C$ by the identification $\nu$. \section{Background on Stability} \label{S:BStab} The stability of a state of a dynamical system was defined and analysed by Liapunov in 1892--93; see \cite{L92}. Several different concepts of stability are analysed systematically in \cite{BS70} for a continuous flow on a metric space. The modern treatment mainly focuses on flows and diffeomorphisms on smooth (mostly compact) manifolds; it was initiated by Smale~\cite{S67} and Arnold \cite{A89}, and developed extensively by their students and others. We recall some basic concepts related to stability; see for example \cite{HS74}. We restrict attention to equilibria and periodic orbits. \subsection{Equilibria} First we recall four stability notions for equilibria, two of which are equivalent. For further information see \cite[Chapter 4]{MLS93} and \cite{L64,LL61,L92}. 
Let $x^*$ be an equilibrium point of the ODE \begin{equation} \label{E:usualODE} \dot x = f(x) \qquad x \in \mathbb{R}^n \end{equation} where $f:\mathbb{R}^n \to \mathbb{R}^n$ is smooth (usually we take this to mean $C^\infty$, but often $C^r$ for $r \geq 1$ suffices). \subsubsection{Stability Notions for Equilibria} \paragraph{Liapunov Stability} The notion of Liapunov stability goes back to Liapunov~\cite{L92} and is the central topic of \cite{LL61}. The equilibrium $x^*$ is {\em Liapunov stable} if, for every $\varepsilon > 0$, there exists $\delta>0$ such that if $\|x(0)-x^*\| < \delta$ then $\|x(t)-x^*\| < \varepsilon$ for all $t > 0$. This notion applies unchanged to non-autonomous ODEs if an equilibrium exists. \paragraph{Asymptotic Stability} The equilibrium $x^*$ is {\em asymptotically stable} if it is Liapunov stable, and in addition $\delta$ can be chosen so that $\|x(t)-x^*\| \to 0$ as $t \to +\infty$. \paragraph{Exponential Stability} The equilibrium $x^*$ is {\em exponentially stable} if there is a neighbourhood $V$ of $x^*$ and constants $K, \alpha > 0$ such that $\|x(t)-x^*\| < K \mathrm{e}^{-\alpha t}$ for all $x(0) \in V$. (For some norm, not necessarily the Euclidean one, we can assume $K=1$.) \paragraph{Linear Stability} The equilibrium $x^*$ is {\em linearly stable} if all eigenvalues of the Jacobian $\mathrm{D}f$ evaluated at $x^*$ have negative real part. Exponential stability implies asymptotic stability, which in turn implies Liapunov stability. Neither converse is valid in general. Linear stability is equivalent to exponential stability. \paragraph{Stable and Unstable Manifolds} In nonlinear dynamics, emphasis is placed on the concept of hyperbolicity. An equilibrium $x^*$ is {\em hyperbolic} if no eigenvalue of $\mathrm{D} f|_{x^*}$ has zero real part.
The state space $P$ decomposes as a direct sum $P=E^s \oplus E^u$, where the {\em stable subspace} $E^s$ is the sum of all generalised eigenspaces for eigenvalues with negative real parts, and the {\em unstable subspace} $E^u$ is the sum of all generalised eigenspaces for eigenvalues with positive real parts. Near $x^*$ there exists a smooth {\em stable manifold} $W^s$ and an {\em unstable manifold} $W^u$, tangent respectively to $E^s$ and $E^u$. These manifolds are unique. If $x^*$ is not hyperbolic there is also a {\em centre subspace} $E^c$ with a tangent {\em centre manifold} $W^c$; in general it is not unique and only $C^k$ smooth. \subsection{Periodic Orbits} The theory for periodic orbits is analogous, but there are minor complications. Consider a periodic orbit $A= \{a(t)\}$ for some initial condition $a(0)$. Stability concepts for a periodic orbit are generally obtained by considering a {\em Poincar\'e section} $\Sigma$ transverse to the orbit, so the orbit intersects $\Sigma$ at a point $a(0)$. There is a corresponding {\em Poincar\'e map} or {\em first return map} $\sigma:\widetilde{\Sigma} \to \Sigma$. Here $\widetilde{\Sigma}$ is a neighbourhood of $a(0)$ such that $\sigma(\widetilde{\Sigma}) \subseteq \Sigma$. Now $a(0)$ is a fixed point of $\sigma$, and we can think of $\sigma$ as a {\em discrete} dynamical system on $\Sigma$. In particular, a periodic orbit $A$ is {\em hyperbolic} if the derivative of a Poincar\'e map at the fixed point corresponding to the orbit has no eigenvalues on the unit circle. (The corresponding Floquet operator has an eigenvalue 1 corresponding to the direction tangent to the orbit, but the Poincar\'e map drops the dimension by 1.) In general there are stable and unstable subspaces with associated smooth tangent manifolds. The smoothness properties of the centre manifold are more technical \cite{K67}.
\subsubsection{Floquet Theory} Classically, the main notion of stability for a periodic orbit $A = \{a(t)\}$ is defined via Floquet theory \cite[Chapter 1 Section 4]{HKW81}. Linearise the ODE about the periodic orbit $A$ to obtain a time-dependent ODE \begin{equation} \label{E:Lin} \dot y = M(t)y \end{equation} where $M(t) = \mathbb{D}D_yf |_{a(t)}$ is $T$-periodic. Floquet's Theorem \cite{F83} states that there is a {\em fundamental matrix} $Y(t)$ such that any solution $y(t) = Y(t)v$ for a constant vector $v$. Moreover, there exists a $T$-periodic matrix function $P(t)$ and a constant matrix $B$ such that every fundamental matrix has the form \begin{equation} \label{E:compFloq} Y(t) = P(t)\mathrm{e}^{Bt}K \end{equation} for a constant matrix $K$. The eigenvalues $\beta_i$ of $B$ are the {\em Floquet exponents}, and the eigenvalues $\rho_i$ of $\mathrm{e}^{BT}$ are the {\em Floquet multipliers}. The matrix $\mathrm{e}^{BT}$ is uniquely determined by $M(t)$, so the $\rho_i$ are unique. The real parts of the $\beta_i$ are unique, but their imaginary parts are unique only modulo $2\pi/T$; see \cite[Note 2 p.40]{HKW81}. The stability condition is that all $\beta_i$ have negative real part except for a single eigenvalue $0$ given by the orbit itself; equivalently, all $\rho_i$ lie strictly inside the unit circle except for a single eigenvalue $1$. The lack of uniqueness does not affect these statements. In a more modern treatment \cite{GH83} the matrix $\mathrm{e}^{BT}$ is essentially the Jacobian of a Poincar\'e map at the fixed point corresponding to the periodic orbit, reduced by one dimension to exclude the eigenvalue $1$ along the periodic orbit. 
Stability in this sense implies {\em asymptotic stability}, where now we let $A= \{a(t)\}$ for some initial condition $a(0)$; then there is a neighbourhood $U \supseteq A$ such that if $x(0) \in U$ with orbit $\{x(t)\}$ then \begin{equation} \label{E:asymp_stab} \lim_{t \to \infty} d(x(t),A) = 0 \end{equation} where $d(x,A) = \inf_{a \in A} \|x-a\|$. Again the convergence is exponential. Moreover, for each $x(0) \in U$ there exists $\theta \in \mathbb{R}$, depending on $x(0)$, such that \begin{equation} \label{E:asymp_theta} \lim_{t \to \infty} \| x(t) - a(t+\theta)\| = 0 \end{equation} and the convergence is exponential. See \cite[Theorem (3) p.42]{HKW81}. The periodic orbit is then said to have {\em asymptotic phase}. The submanifold of initial conditions leading to a given asymptotic phase $\theta$ is called an {\em isochron}. The isochrons fill out a neighbourhood of the stable periodic orbit. \begin{remark}\em Every Floquet-stable periodic orbit is hyperbolic. By \cite[Theorem 4.1(f)]{HPS77}, this implies that the orbit persists after any sufficiently small $C^1$ perturbation of the ODE (admissible or not), in the sense that there exists a unique periodic orbit close to the original one. (The theorem is proved there for a discrete dynamical system, but at the end of the proof it is stated that the result is also valid for a continuous one.) This shows that existence and stability of periodic orbits, deduced from idealised models, persist when the ideal assumptions are only approximately valid --- provided the approximation is close enough. In practice quite large perturbations often preserve existence and stability; see \cite{SW23} for some numerical experiments on feedforward lifts. \end{remark} \subsubsection{Liapunov Stability} The notion of Liapunov stability transfers to a periodic orbit $\{a(t)\}$ via a Poincar\'e map. More generally, for any orbit $\{a(t)\}$, define $y(t)=x(t)-a(t)$.
Then the non-autonomous ODE (called a {\em system of deviations}) \[ \dot y = f(y+a(t))-\dot a(t) \] has an equilibrium at $y=0$. The orbit $a(t)$ is defined to be Liapunov stable if this equilibrium is Liapunov stable. Since $y(t) = x(t)-a(t)$ we can unravel this definition: \begin{definition}\em \label{D:LSorbit} The orbit $a(t)$ is {\em Liapunov stable} if, for any $\varepsilon > 0$, there exists $\delta >0$ such that whenever $\|x(0)-a(0)\| < \delta$ we have $\|x(t)-a(t)\| < \varepsilon$ for all $t \geq 0$. \end{definition} Here the norm can be any norm on $\mathbb{R}^n$, since these are all equivalent. In this paper we use the Euclidean norm. \section{Transverse Stability for a Feedforward Lift} \label{S:TS} We now come to the central results of this paper. We show that this type of feedforward synchrony can be very robust if the node dynamics on the CPG has certain features that are common in models. Not only is it dynamically stable: it is structurally stable, preserved when connection strengths, the forms of couplings, and the dynamical equations for nodes are perturbed slightly. \subsection{Floquet Exponents for Forced Systems} We begin with a general result. It is presumably well known, but we give the proof for completeness. Let $P=\mathbb{R}^k, Q=\mathbb{R}^l$ and consider a forced ODE (skew-product) on $P \oplus Q$: \begin{eqnarray} \label{E:dotX} \dot X &=& F(X) \\ \label{E:dotY} \dot Y &=& G(X,Y) \end{eqnarray} having a periodic orbit $(X(t),Y(t)) = (a(t),b(t))$ of period $T$. The linearised ODE around this orbit (that is, the Floquet equation) is then \begin{equation} \label{E:FFlinODE} \Matrix{\dot U \\ \dot V} = \Matrix{\mathrm{D}_1F|_{(a(t),b(t))} & 0 \\ \mathrm{D}_1G|_{(a(t),b(t))} & \mathrm{D}_2G|_{(a(t),b(t))}} \Matrix{ U \\V} \qquad U \in P, V \in Q \end{equation} where the notation $\mathrm{D}_kH|_{c(t)}$ indicates the partial derivative of $H$ with respect to the $k$th variable, considering $c(t)$ as a parameter.
\begin{lemma} \label{L:FFFloq} With the above notation, the Floquet multipliers of $(a(t),b(t))$ on $P\oplus Q$ are those of $a(t)$ on $P$, together with those for the time-dependent ODE \begin{equation} \label{E:dotVG} \dot V = \mathrm{D}_2G|_{(a(t),b(t))}V \end{equation} \end{lemma} \begin{proof} By the feedforward structure, $a(t)$ is a $T$-periodic orbit of \eqref{E:dotX}, and $\dot U = \mathrm{D}_1F|_{(a(t),b(t))}U$ is the Floquet equation for $a(t)$ on $P$. The solutions of this equation give the Floquet multipliers for $a(t)$ on $P$. The subspace $0 \oplus Q$ is invariant under the flow of \eqref{E:FFlinODE}, and when restricted to this subspace the linearised equation \eqref{E:FFlinODE} becomes \eqref{E:dotVG}. Since $0 \oplus Q$ is a complement to $P$, solutions of this equation yield the remaining Floquet multipliers for $(a(t),b(t))$ on $P\oplus Q$. \end{proof} \subsection{Transverse Floquet Multipliers and Exponents} Lemma~\ref{L:FFFloq} implies that for any feedforward lift of a fixed CPG feeding forward into a chain with an arbitrary number of nodes, the computation of Floquet multipliers can be reduced to simple computations involving only the CPG. Indeed, the rest of the network need not be a chain: the same remark applies to any feedforward lift. This simplification arises for two reasons. First, the balanced colouring involved in a feedforward lift creates multiple eigenvalues of the Floquet operator. Second, the feedforward structure of $\widetilde{\mathcal G}$ induces the block-triangular structure \eqref{Jac_block2} on the Jacobian, hence on the Floquet equation. This structure pervades the entire dynamics. In detail, we first need: \begin{definition}\em \label{D:TFE} (a) With the above notation, the {\em transverse Floquet equation} for node $c \in {\mathcal C}^*$ is \begin{equation} \label{E:TFE} \dot y_c = \mathrm{D}_{[c]}f_{[c]}|_{a(t)} y_c \end{equation} where $\mathrm{D}_{[c]}$ is the partial derivative with respect to $x_{[c]}$.
Observe that this depends only on the ODE for the CPG and the periodic orbit for those equations. By Floquet theory, every solution has the form \[ y_c(t) = P_c(t)\mathrm{e}^{B_ct}v \] for a constant vector $v$. Here $P_c(t)$ is $T$-periodic and $B_c$ is a constant matrix. Then: (b) The matrix $M_c = \mathrm{e}^{B_cT}$ is the {\em transverse Floquet matrix} for node $c$. (c) The matrix $B_c$ is the {\em transverse Floquet exponent matrix} for node $c$. (d) The periodic orbit $A=\{a(t)\}$ is {\em transversely Floquet-stable} at node $c$ if all eigenvalues of $M_c$ have absolute value $<1$. Equivalently, all eigenvalues of $B_c$ have negative real part. (e) The eigenvalues of $M_c$ are the {\em transverse Floquet multipliers} for node $c$. (f) The eigenvalues of $B_c$ are the {\em transverse Floquet exponents} for node $c$. (We use the word `the' in (b,c) even though these matrices are not unique, because the eigenvalues in (d,e,f) {\em are} unique.) \end{definition} \begin{example}\em \label{ex:7nodeFHNTFE} We find the transverse Floquet equations for the network in Figure \ref{F:7nodeFFZ3}, for FitzHugh--Nagumo neurons with voltage coupling. The nodes in the CPG are $\{1,2,3\}$ and the nodes concerned are those in ${\mathcal C}^* = \{4,5,6,7\}$. The equations are: \begin{eqnarray*} \dot V_c &=& V_c(a-V_c)(V_c-1) - W_c + I + \mu V_{c-1} \\ \dot W_c &=& bV_c - \gamma W_c \end{eqnarray*} for $4 \leq c \leq 7$. Here $\mu$ is the coupling strength. The corresponding diagonal blocks of the Jacobian are \[ J_c(t) = \Matrix{-3V_c^2+2(a+1)V_c-a & -1 \\ b & -\gamma} \] Evaluated at $a(t) = (\alpha(t),\beta(t))$ these become \[ J_c(t)|_{a(t)} = \Matrix{-3\alpha_{[c]}(t)^2+2(a+1)\alpha_{[c]}(t)-a & -1 \\ b & -\gamma} \] which is independent of $\beta$.
Setting $y_c = (v_c,w_c)$ the transverse Floquet equations are \[ \Matrix{\dot v_c \\ \dot w_c} = J_c(t)|_{a(t)}\Matrix{ v_c \\ w_c} = \Matrix{(-3\alpha_{[c]}(t)^2+2(a+1)\alpha_{[c]}(t)-a)v_c-w_c \\ bv_c-\gamma w_c} \] Although $\mu$ does not appear explicitly, it affects the periodic orbit $A$, and so affects the transverse Floquet equations. \end{example} \subsection{Stability Theorem for Feedforward Lift} \label{S:STFL} We can now give a sufficient condition for a feedforward lift of an equilibrium or periodic orbit to be (Floquet) stable in the full state space; that is, stable to perturbations that break synchrony as well as those that preserve synchrony. \begin{theorem} \label{T:FFStab} Let $\{\tilde a(t)\}$ be a feedforward lift of the periodic orbit $\{a(t)\}$ on $P_{\mathcal C}$. Then: {\rm (a)} The Floquet multipliers for $\{\tilde a(t)\}$ are the Floquet multipliers for $a(t)$, together with the transverse Floquet multipliers for all $c \in {\mathcal C}^*$. {\rm (b)} The transverse Floquet multipliers for $c \in {\mathcal C}^*$ are the same as those for $[c] \in {\mathcal C}$. {\rm (c)} $\{\tilde a(t)\}$ is stable on $\widetilde P$ if and only if $\{a(t)\}$ is stable on $P_{\mathcal C}$ and, for all nodes in ${\mathcal C}$, all transverse Floquet multipliers have absolute value $<1$. \end{theorem} \begin{proof} Order the nodes so that the CPG has nodes ${\mathcal C} = \{1, \ldots, m\}$ and the rest of the network has nodes ${\mathcal C}^*=\{m+1, \ldots, n\}$. Let ${\mathcal G}_k$ be the subnetwork with nodes $\{1, \ldots, m+k\}$ together with all arrows linking those nodes. Then ${\mathcal G}_{k+1}$ is a feedforward lift of ${\mathcal G}_k$ for $0 \leq k \leq n-m$. To prove (a) we argue by induction on $k$. The statement is trivial for $k=0$.
The step from $k$ to $k+1$ follows from Lemma~\ref{L:FFFloq}, bearing in mind that when $c \in \mathbb{C}C^*$ the domain of $f_c$ is a subspace of $P_1 \oplus \cdots \oplus P_c$, so the time-dependent parameter $a(t)$ restricts onto this subspace. To prove (b), observe that because the state $\tilde a(t)$ is a lift of $a(t)$, the functions $f_c(x)$ and $f_{[c]}(x)$ are equal when $x \in \Delta$. The same holds for their derivatives at points $a(t) \in \Delta$. Part (c) now follows from (a) and (b). \end{proof} This theorem shows that stability of a lifted periodic state depends only on the ODE for the CPG, and is independent of the number of nodes in $\mathbb{C}C^*$. Roughly speaking, the full CPG equation determines the Floquet multipliers for the CPG, and its diagonal terms determine all the transverse Floquet multipliers, because these are the same as those for nodes in $\mathbb{C}C$ of the appropriate colour. The theorem also implies that when two nodes in the feedforward chain $\mathbb{C}C^*$ are synchronous, their transverse eigenvalues are equal. In other words, the Floquet matrix can have multiple eigenvalues generically, within the world of network admissible ODEs, even when the network has no symmetry. This phenomenon is well known for steady states (indeed, it happens for the 7-node network); feedforward lifts provide a wide range of examples for periodic orbits. The same goes when two nodes in the feedforward chain $\mathbb{C}C^*$ are phase-synchronous, since the Floquet multipliers are invariant under phase shifts; see Theorem \ref{T:TWtranseigen} below. \subsection{Isochrons} It is easy to see that when $A$ is stable, isochrons $I_\theta$ for $A$ in $P$ extend trivially to isochrons $\tilde I_\theta$ for $\widetilde A$ in $\widetilde P$: \begin{theorem} \label{T:isochron_lift} Let $\pi:\widetilde P \to P$ be projection onto the first $m$ coordinates. Then $\pi(\tilde I_\theta) = I_\theta$ for any $\theta$. 
\end{theorem} \begin{proof} By the feedforward structure, any orbit $x(t)$ for $\tilde f$ projects to an orbit $\pi(x(t))$ for $f$. \end{proof} Using conjugacy by the natural isomorphism $\nu:\Delta \to P_\mathbb{C}C$, we obtain a related projection onto isochrons of $\widetilde A \subseteq \Delta$. \subsection{Transverse Liapunov Stability} An analogous result to Theorem~\ref{T:FFStab} can be proved for Liapunov stability. Suppose that $\widetilde{\mathcal G}$ is a feedforward lift of ${\mathcal G}$, and let $\preceq$ be the partial ordering of $\widetilde\mathbb{C}C$ such that $c \preceq d$ whenever $d$ is downstream from $c$. \begin{definition}\em \label{D:transLS} A lifted periodic orbit $\{\tilde{a}(t)\}$ of $\{a(t)\}$ is {\em transversely Liapunov stable} if, for all $d \in \mathbb{C}C^\ast$ and for all $x(t)$ satisfying \begin{equation} \label{E:perLS1} x_c(t)=a_c(t) \quad \mbox{for} \quad c \preceq d\ \mbox{and}\ c \neq d \end{equation} then for any $\varepsilon_d > 0$ there exists $\delta_d > 0$ such that: \begin{equation} \label{E:perLS2} \mbox{if}\ \|x_d(0) - a_{[d]}(0)\| < \delta_d\ \mbox{then}\ \|x_d(t) - a_{[d]}(t)\| < \varepsilon_d \ \mbox{for all}\ t \geq 0 \end{equation} Here we can replace $a_{[d]}(t)$ by $a_d(t)$, because of the synchrony pattern. \end{definition} Let $P_{\mathcal G}$ be the state space for ${\mathcal G}$ and $P_{\widetilde {\mathcal G}}$ be the state space for $\widetilde{\mathcal G}$. \begin{theorem} \label{T:FFLSstab} The lifted periodic orbit $\widetilde A$ is Liapunov stable in $P_{\widetilde {\mathcal G}}$ if and only if $A$ is Liapunov stable in $P_{\mathcal G}$ and $\widetilde A$ is transversely Liapunov stable. \end{theorem} \begin{proof} As in the proof of Theorem~\ref{T:FFStab} we can proceed by induction, so without loss of generality $\mathbb{C}C = \{1, \ldots, m\}$ and $\widetilde \mathbb{C}C = \{1, \ldots, m+1\}$. Therefore $\mathbb{C}C^*=\{m+1\}$. 
To simplify notation let $d=m+1$. Then $P_{\widetilde {\mathcal G}} = P_{\mathcal G} \oplus P_d$, so if $x \in P_{\widetilde {\mathcal G}}$ we can write \[ x = x_\mathbb{C}C \oplus x_d = (x_1, \ldots, x_m, 0) + (0, \ldots, 0, x_d) \] Since the norm is Euclidean and $x_d$ is orthogonal to $x_\mathbb{C}C$, \begin{eqnarray*} &&\| x_\mathbb{C}C \| \leq \| x\| \leq \|x_\mathbb{C}C\|+\|x_d\| \\ &&\| x_d \| \leq \| x\| \end{eqnarray*} First, suppose that $\widetilde A$ is Liapunov stable. We prove that: (a) $A$ is Liapunov stable in $P_{\mathcal G}$. (b) $\widetilde A$ is transversely Liapunov stable. To prove (a), let $\varepsilon >0$. Since $\widetilde A$ is Liapunov stable, for any orbit $x(t)$ and $\varepsilon >0$ there exists $\delta > 0$ such that \[ \|x(0)-\tilde a(0)\| < \delta \Rightarrow \|x(t)-\tilde a(t)\| < \varepsilon\ \mbox{for all}\ t \geq 0 \] First, we show that $A$ is Liapunov stable in $P_{\mathcal G}$. Choose $\{x(t)\} \subseteq P_{\mathcal G}$ such that $x(0) = (x_\mathbb{C}C(0),a_{[d]}(0))$. Then \[ \|x_\mathbb{C}C(0)-a_\mathbb{C}C(0)\| = \|x(0)-a(0)\| < \delta \] so $\|x(t)-a(t)\| < \varepsilon$. But $x= x_\mathbb{C}C+a_{[d]}$, so \[ \|x_\mathbb{C}C(t)-a_\mathbb{C}C(t)\| \leq \|x(t)-a(t)\| < \varepsilon \] so $A$ is Liapunov stable on $P_{\mathcal G}$. Second, we show that $\widetilde A$ is transversely Liapunov stable. Since we are proceeding by induction, the only node in $\mathbb{C}C^*$ is $d$. Suppose $\varepsilon >0$. Choose $x(t)$ of the form $x(t) = (x_\mathbb{C}C(t), x_d(t))$. Then \[ \|x(0)-\tilde a(0)\| = \|x_d(0)-a_d(0)\| \] By Liapunov stability of $\widetilde A$, there exists $\delta$ such that $\|x(0)-\tilde a(0)\| < \delta$ implies $\|x(t)-\tilde a(t)\| < \varepsilon$. Therefore $\|x_d(0)-a_d(0)\| < \delta$ implies $\|x(t)-\tilde a(t)\| < \varepsilon$. But $\|x_d(t)-a_{[d]}(t)\| \leq \|x(t)-\tilde a(t)\|$, so $\|x_d(t)-a_{[d]}(t)\| < \varepsilon$. 
Therefore $\widetilde A$ is transversely Liapunov stable. Second, we show that if $A$ is Liapunov stable in $P_{\mathcal G}$ and $\widetilde A$ is transversely Liapunov stable, then $\widetilde A$ is Liapunov stable in $P_{\widetilde {\mathcal G}}$. Given $\varepsilon >0$, there exists $\delta$ such that \begin{eqnarray*} \|x_c(0)-a_{[c]}(0) \| < \delta &\implies& \|x_c(t)-a_{[c]}(t) \| <\varepsilon/2\ \mbox{for all}\ t \geq 0 \\ \|x_d(0)-a_{[d]}(0) \| < \delta &\implies& \|x_d(t)-a_{[d]}(t) \| <\varepsilon/2\ \mbox{for all}\ t \geq 0 \end{eqnarray*} If $\|x(0)-a(0)\| < \delta$ then the left-hand side of both implications is true. Therefore both right-hand sides are true, so \[ \|x(t)-a(t)\| \leq \|x_c(t)-a_{[c]}(t) \| + \|x_d(t)-a_{[d]}(t) \| < \varepsilon \] and $\widetilde A$ is Liapunov stable. \end{proof} \subsection{Transverse Asymptotic Stability} There is an analogous notion of {\em transverse asymptotic stability}, obtained by replacing `Liapunov' by `asymptotic' in the definition. This leads to a result analogous to Theorem~\ref{T:FFLSstab}. The proof runs along similar lines, but is simpler, so we omit it. \section{Relation to the Transverse Jacobian} \label{S:RTJ} The main difficulty when applying Theorem~\ref{T:FFStab} is the calculation of the Floquet exponents. As remarked in Section \ref{S:intro}, these exponents must be calculated numerically. Of course, this remains the case for the CPG dynamics alone, but it is useful to have a general criterion for condition (b) of the theorem to be valid, even if only heuristically. We now discuss one approach to this issue. We use the notation of Section \ref{S:NFL}. 
\begin{definition}\em \label{D:trans_stab} The synchrony subspace $\mathbb{D}elta_\kappa$ is {\em globally transversely stable} if for all $c \in \mathbb{C}C$ and all $x \in \mathbb{D}elta_\kappa$, all eigenvalues of each diagonal partial derivative $\mathbb{D}D_c f_c|_x$ have negative real part for all times $t$. The lifted periodic state $\widetilde A \subseteq \mathbb{D}elta_\kappa$ is {\em transversely stable} if for all $c \in \mathbb{C}C$, all eigenvalues of each diagonal partial derivative $\mathbb{D}D_c f_c|_{a_c(t)}$ have negative real part for all times $t$. \end{definition} If the lift is constructed so that self-loops become feedforward, the diagonal block $\mathbb{D}D_c f_c|_x$ refers only to the `internal dynamics' of node $c$. See Remark \mbox{Re}f{r:selfloop}. We can consider only nodes in $\mathbb{C}C$ because, on $\mathbb{D}elta_\kappa$, all other nodes are synchronous with nodes in $\mathbb{C}C$ via the balanced colouring $\kappa$. Therefore $f_c$ is the same as $f_{[c]}$ on $\mathbb{D}elta_\kappa$. \begin{remark}\em \label{r:notFloquet} This condition is motivated by \eqref{Jac_block2}. Historically, it was conjectured for some time that transverse stability for a stable periodic orbit implies stability in the usual Floquet sense. However, despite the terminology, this conjecture is false in general. The reason is that although a matrix whose eigenvalues all have negative real parts is a contraction in some norm \cite[Section 9.1 Theorem (a)]{HS74}, the relevant norm can change along the periodic orbit. In some circumstances this can create a Floquet multiplier outside the unit circle. \end{remark} \begin{example}\em \label{ex:MY} An explicit instance is the celebrated {\em Markus--Yamabe counterexample} \cite[Example p.310]{MY60}. 
Consider the ODE $\dot x = A(t)x$ on $\mathbb{R}^2$ where \[ A(t) = \Matrix{-1+\frac{3}{2}\cos^2 t & 1- \frac{3}{2}\sin t \cos t\\ -1- \frac{3}{2}\sin t \cos t & -1+\frac{3}{2}\sin^2 t } \] For any $t$, the trace of $A(t)$ is $-\shf$ and the determinant is $\shf$, so the eigenvalues have negative real part. In fact, they are $\frac{1}{4} (-1\pm \mbox{i} \sqrt{7})$, for any $t$. However, a solution is \[ x(t) = \mathrm{e}^{t/2}\Matrix{-\cos t \\ \sin t} \] so the zero solution (which is trivially periodic) is unstable. \end{example} \begin{example}\em \label{ex:notFloquet} A simpler example justifying Remark~\ref{r:notFloquet} uses a discontinuous family of maps $M(t)$. This can then be smoothed without changing the main conclusion. Let $A, B$ be two constant matrices. Define \[ M(t) = \left\{ \begin{array}{lcl} B & \mbox{if} & 0 \leq t < 1 \\ A & \mbox{if} & 1 \leq t < 2 \end{array}\right. \] and extend periodically to a family of matrices with period $T=2$. The solution of \eqref{E:Lin} on $[0,2]$ is then \[ x(t) = \left\{ \begin{array}{lcl} \mathrm{e}^{Bt} x(0) & \mbox{if} & 0 \leq t < 1 \\ \mathrm{e}^{A(t-1)} \mathrm{e}^{B} x(0) & \mbox{if} & 1 \leq t < 2 \end{array}\right. \] Thus the Floquet operator is $\mathrm{e}^A\mathrm{e}^B$. When the dimension is 1, this equals $\mathrm{e}^{A+B}$, but when the dimension is 2 or more and $A$ and $B$ do not commute, this expression no longer holds. The Campbell-Hausdorff formula \cite[V.5 Proposition 1]{J62} applies instead. Let \[ A=\Matrix{-0.5 & 0\\ 2& -0.7} \qquad B = \Matrix{-0.5 & 2 \\0 &-0.7} = A^\mathrm{T} \] Both $A$ and $B$ have eigenvalues $-0.5,-0.7 < 0$. Numerically, \[ \mathrm{e}^A = \Matrix{0.606 & 0 \\1.099 & 0.496} \qquad \mathrm{e}^B = \Matrix{0.606 & 1.099 \\0 & 0.496} \] Now \[ \mathrm{e}^A\mathrm{e}^B = \Matrix{0.367 & 0.666 \\ 0.666 & 1.455} \] whose eigenvalues are $1.772, 0.051$. The first of these lies outside the unit circle. 
This example can be made smooth by decreasing the off-diagonal terms of $A$ to zero and then increasing the off-diagonal term of $B$, over an arbitrarily short interval of time. The eigenvalues change by an arbitrarily small amount, so the periodic state remains unstable. \end{example} \begin{remark}\em Heuristically, this phenomenon arises because the flow near $A$ travels roughly parallel to $A$, as well as contracting towards $A$ locally in {\em some} norm. However, the `parallel' flow changes the local norm in which contraction occurs. The contraction slows down near $A$, while the flow parallel to $A$ remains roughly constant, and this can prevent overall contraction. The change in the norm required for the flow to be contracting is mainly caused by changes in the (generalised) eigenvectors of the transverse linearised flow. This is why it is not picked up by the eigenvalues. \end{remark} This phenomenon does not occur for equilibria. It can also be avoided in the context of a feedforward lift if the node spaces are $1$-dimensional: \begin{theorem} \label{T:tstab} Let $\widetilde{\mathcal G}$ be a feedforward lift of a network ${\mathcal G}$. Let $f$ be an admissible map for ${\mathcal G}$. Let $A$ be either an equilibrium, for node spaces of any dimension, or a periodic orbit for node spaces of dimension $1$. Assume that $A$ is stable in $P_\mathbb{C}C$. Let $\tilde f$ be the admissible map for $\widetilde{\mathcal G}$ obtained as a lift of $f$, with lifted periodic orbit $\widetilde A$. If $A$ is transversely stable, then $\widetilde A$ is stable for $\tilde f$ in $P_{\widetilde\mathbb{C}C}$. \end{theorem} \begin{proof} The equilibrium case is trivial because the transverse eigenvalues are eigenvalues of the Jacobian at the equilibrium point. The periodic case follows directly from Theorem~\ref{T:FFStab}(b). 
It is well known that on a 1-dimensional space the Floquet equation can be solved analytically; indeed, the (unique) Floquet exponent is the time-average of the transverse exponent round the periodic orbit. The argument is so simple we give it here. Consider a homogeneous linear equation $\dot y = M(t)y$ where $M:\mathbb{R} \to \mathbb{R}$ is $T$-periodic. The solution for given $y(0)$ is found by separation of variables, and is \[ y(t) = \left(\exp \int_0^t M(s)\,\mathrm{d} s \right) y(0) \] Since $M(t)<0$ for all $t$, we have $\int_0^T M(s)\,\mathrm{d} s < 0$. Thus the transverse Floquet exponent is negative. \end{proof} \subsection{Higher-Dimensional Nodes} \label{S:HDN} Example~\ref{ex:notFloquet} can be realised in a feedforward lift without much difficulty. It shows that transverse stability need not imply Floquet stability (hence asymptotic stability) when node spaces have dimension greater than $1$. Now the situation is more delicate. Because transverse stability can often be tackled analytically, we discuss these issues briefly. Additional hypotheses can sometimes be used to establish stability. An extreme case is when all Jacobians $\mathbb{D}D f|_{a(t)}$ have the same eigenspaces. Then we can decompose according to the eigenspaces and use a uniform estimate on each eigenspace to prove that the flow is uniformly exponentially contracting in a suitable norm. More generally, if the Jacobians $\mathbb{D}D f|_{a(t)}$ have approximately the same eigenspaces, in some reasonable sense, then provided the approximation is sufficiently close, transverse stability should imply that the lifted state is stable. Transverse stability implies that the trace of the Floquet exponent matrix is negative, by \cite[Note 3, p.41]{HKW81}. Equivalently, the product of the Floquet multipliers (CPG and transverse) lies inside the unit circle. If the CPG is Floquet-stable, this implies that the product of the transverse Floquet multipliers lies inside the unit circle. 
\subsection{Synchronisation of Chaotic Signals} \label{S:SCS} We digress to discuss analogous issues when the equilibrium or periodic cycle $A$ is replaced by a chaotic attractor, a setting widely used in studies of synchronisation of chaotic signals. There is a vast literature on this topic, in part because of applications to secure communication. General references include \cite{BPP00,GM04,PRK01}. Theoretical results are presented in \cite{BR97,PC90,PC98,PCJM97}. Applications to communications include in \cite{C95,PCKHS92,PC95}. For chaotic states, there are many notions of stability, and the mathematics is far more technical. A stable chaotic state is an {\em attractor}, but there are many distinct definitions of this notion \cite{M85}. Transverse stability for synchronous chaotic dynamics also relies on ideas that are to some extent conjectural, such as the existence of a Sinai-Bowen-Ruelle (SBR or SRB) measure \cite{B71,KH95}. Some of the issues involved are discussed for discrete dynamics in \cite{ABS94,ABS96}. Here we resort to a heuristic description because the chaotic case is a side-issue for this paper --- though an interesting one. Suppose that $S$ is an invariant submanifold, $A\subseteq S$, and $A$ is an attractor for $f|_S$ in $S$, for any reasonable definition of `attractor'. Let $\mu$ be an invariant measure on $A$. Then we might expect $A$ to be an attractor for $f$ provided that \[ \int_{u\in A} \mathbb{D}D_{c} f_c|_{u}\, d\mu < 0 \] for all $c \in \mathbb{C}C$. That is, the transverse flow is {\em attracting on average} near $A$. The hope is that any local expansion is quickly counteracted by a contraction, and on average the contractions win. However, the same problem with invariant manifolds of codimension greater than $1$ occurs. Moreover --- and worse --- there can be many distinct invariant measures, including Dirac measures supported on unstable periodic orbits inside $A$. Stability can also be defined in several ways. 
If $\mu$ is an SBR measure, hence absolutely continuous with respect to Lebesgue measure, then we expect almost all (in the sense of Lebesgue measure) initial points near $A$ to be attracted to $A$. Some nearby points may be repelled, but these form a set whose measure tends to zero near the attractor. Two such behaviours are on-off intermittency \cite{PST93} and bubbling \cite{ABS94,ABS96}. Also associated with this set-up is the concept of a riddled basin \cite{AYYK92}. These ideas are discussed rigorously in \cite{ABS94,ABS96}, but only for discrete dynamics and an invariant submanifold of codimension $1$. Even the existence of SBR measures is itself largely conjectural, proved mainly for Axiom A systems in the sense of Smale~\cite{S67} and for more recent generalisations \cite{Y02}, although it is supported by much numerical evidence for other dynamical systems. \section{Propagation of Travelling Waves} \label{S:PTW} We now generalise the setting of Figure~\mbox{Re}f{F:7nodeFFZ3} so that Theorems \mbox{Re}f{T:FFStab} and \mbox{Re}f{T:tstab} apply to certain generic classes of discrete rotating wave in a CPG with cyclic group symmetry, which, as previously remarked, causes the lifted state to resemble a travelling wave. Moreover, the sufficient condition can be applied to just one set of orbit representatives in the CPG, simplifying the calculations involved. \subsection{Rigid Phase Patterns and Cyclic Group Symmetries} \label{S:RPPCGS} Patterns of phase relations in periodic states for network dynamics are intimately related to cyclic group symmetries, either of the network or of its quotient by a balanced colouring. This topic originated in equivariant dynamics \cite{GSS88}; more recent network analogues are discussed comprehensively in \cite[Chapter 17]{GS23}. 
In particular, there are good reasons to suppose that, subject to some technical conditions, the quotient network by synchrony must have cyclic group symmetry to support a discrete rotating wave \cite{GRW12,S20overdet,SP08} in a structurally stable manner. We summarise some pertinent results. Suppose that the CPG ${\mathcal G}$ has a cyclic symmetry group $\mathbb{Z}_n$. Then the $H/K$ Theorem \cite{BG01,GS02,GS23} implies that there exist admissible ODEs $\dot x = f(x)$ whose solutions include a {\em discrete rotating wave} with spatiotemporal symmetry induced from $\mathbb{Z}_n$. Such states have a `phase shift symmetry' of the form \[ x_{\alpha(i)}(t) = x_i (t+ kT/n) \] where $T$ is the period, $\alpha$ is a generator of $\mathbb{Z}_n$, and $0 \leq k < n$. When such a state is lifted to ${\mathcal G}'$ the rotating wave structure more closely resembles a travelling wave, because the dynamics of successive nodes along the lifted chain are identical except for a fixed phase shift $kT/n$. The stability results of Theorems \mbox{Re}f{T:FFStab} and \mbox{Re}f{T:tstab} apply in particular to such travelling waves. Moreover, the nodes for which we must check the transverse Floquet exponents and transverse eigenvalues can be reduced to those in a single set of orbit representatives for the $\mathbb{Z}_k$-action. \subsection{Motivating Example} Consider the 7-node chain $\widetilde{{\mathcal G}}$ of Figure~\mbox{Re}f{F:7nodeFFZ3}. All nodes have the same state-type: let all node spaces be $P_c=\mathbb{R}$ so node variables $x_c$ are $1$-dimensional. The network is feedforward except for the backward arrow from node $3$ to node $1$. The subnetwork ${\mathcal G}$ with nodes $\{1,2,3\}$ and all arrows connecting those nodes can be considered as a CPG with $\mathbb{Z}_3$ symmetry, which feeds forward into the chain $\{4,5,6,7\}$. 
Admissible ODEs take the form \eqref{E:7nodeFFZ3}, and the Jacobian at a general point $u =(u_1, ,\ldots,ots, u_7) \in \mathbb{R}^7$ has the block form \eqref{E:tildeJ7node}. For suitable $f$ the CPG ${\mathcal G}$ supports a $T$-periodic $\mathbb{Z}_3$ rotating wave of the form \[ U(t) = (u(t), u(t+T/3), u(t+2T/3)) \] (or its reversal, which we obtain by replacing $T$ with $-T$). Lift this periodic state to ${\mathcal G}'$; as remarked earlier this can be considered as a travelling wave of the form \[ (u(t), u(t+T/3), u(t+2T/3),u(t), u(t+T/3), u(t+2T/3),u(t)) \] The last four diagonal blocks are then \begin{eqnarray*} B_4(t) = \mathbb{D}D_1f|_{(u(t),u(t+2T/3))} \\ B_5(t) = \mathbb{D}D_1f|_{(u(t+T/3),u(t))} \\ B_6(t) = \mathbb{D}D_1f|_{(u(t+2T/3),u(t+T/3))} \\ B_7(t) = \mathbb{D}D_1f|_{(u(t),u(t+2T/3))} \end{eqnarray*} As $t$ runs through $[0,T]$, these are all phase-shifted versions of $B_4$. Indeed, $B_5(t) = B_4(t+T/3), B_6(t) = B_4(t+2T/3), B_7(t)=B_4(t)$. In particular, if all eigenvalues of $B_4(t)$ have negative real part on the periodic orbit $\{u(t)\}$, the same holds for $B_5(t),B_6(t)$, and $B_7(t)$. Theorem~\mbox{Re}f{T:tstab} now implies that the lifted periodic state is stable provided the rotating wave $\{U(t)\}$ on ${\mathcal G}$ is stable on $P_1 \times P_2 \times P_3$, and all eigenvalues of $B_4(t)= \mathbb{D}D_1f|_{(u(t),u(t+2T/3))}$ have negative real part. Thus the transverse eigenvalues (which here determine stability since nodes are $1$-dimensional) depend only on the internal dynamic of one node. This idea generalises to Theorem~\mbox{Re}f{T:TWtranseigen} below. \subsection{Schematic of Construction} \label{S:SC} Figure~\mbox{Re}f{F:Z3FFchain} is a schematic illustration of the four steps involved in constructing a feedforward lift from a rotating wave state to a travelling wave along a chain. (a) Consider a CPG ${\mathcal G}$ with $\mathbb{Z}_k$ symmetry, generated by a bijection $\alpha:\mathbb{C}C \to \mathbb{C}C$. (Here $k=3$.) 
For simplicity, assume that $\alpha$ is a product of $k$-cycles and all orbits of $\alpha$ have size $k$. Choose an admissible map $f$ so that the ODE $\dot x = f(x)$ has a discrete rotating wave state $u(t)$ satisfying the phase relation \begin{equation} \label{E:phaserel} \alpha u(t) = u(t+T/k) \end{equation} (Relative phases marked inside node symbols.) (b) Choose a {\em module} ${\mathcal M}$ whose nodes are a set of representatives of the $\mathbb{Z}_k$-orbits. Assign phase $0$ to these nodes, so that the other $\mathbb{Z}_k$-orbits correspond to phase shifts $T/k, 2T/k, ,\ldots,ots, (k-1)T/k$. Copy the module (along with any arrows whose heads and tails lie in the module) to obtain ${\mathcal M}_{k+1}, {\mathcal M}_{k+2}, ,\ldots,ots, {\mathcal M}_l$. (Here $l=7$.) (c) Assign phases $0, T/k, ,\ldots,ots, (l-k)T/k$ to nodes in ${\mathcal M}_{k+1}, {\mathcal M}_{k+2}, ,\ldots,ots, {\mathcal M}_l$. Assign input arrows to these nodes, preserving the arrow type and the phase relations in ${\mathcal G}$. Do so in a manner that makes all new arrows feedforward. (d) Rewire internal arrows in ${\mathcal M}_{k+1}, {\mathcal M}_{k+2}, ,\ldots,ots, {\mathcal M}_l$, preserving the arrow type and the phase relations in ${\mathcal G}$. Do so in a manner that makes all rewired arrows feedforward. (This stage is optional: it simplifies the calculation of Jacobians but may be less realistic biologically. For example, if modules correspond to segments of an organism, neuronal connections within segments are likely to be the same in each segment.) \begin{figure} \caption{Four steps in the construction of a feedforward lift for a CPG with cyclic symmetry supporting a periodic orbit with a rotating wave phase pattern. (a) Initial CPG ${\mathcal G} \label{F:Z3FFchain} \end{figure} \subsection{General Theorem} We now state a general theorem for such constructions, and prove that the transverse eigenvalues depend only on the internal dynamic of one module. 
\begin{theorem} \label{T:TWtranseigen} Assume that ${\mathcal G}$ has nodes $\mathbb{C}C = \{1, \ldots, m\}$ with a cyclic automorphism group $\mathbb{Z}_k = \langle \alpha \rangle$, such that $n=mk$ and $\alpha$ acts like the cycle $(1\, 2\, \ldots\, k)$ on all of its orbits on ${\mathcal G}$. Let $u(t)$ be a $T$-periodic solution of an admissible ODE with discrete rotating wave phase pattern~\eqref{E:phaserel}. Choose a module ${\mathcal M}$ of orbit representatives. Let $\widetilde{{\mathcal G}}$ be obtained by lifting appropriate copies of translates of this module by $\mathbb{Z}_k$, as described in Section~{\rm\ref{S:SC}}. Then {\rm (a)} The periodic state $u(t)$ on ${\mathcal G}$ lifts to a $T$-periodic travelling wave state $\tilde{u}(t)$ for $\widetilde{{\mathcal G}}$ with phases corresponding to the extra copies ${\mathcal M}_{k+1}, {\mathcal M}_{k+2}, \ldots, {\mathcal M}_l$ of ${\mathcal M}$. {\rm(b)} The Floquet exponents (evaluated at any point) are those on the module ${\mathcal M}$, together with the transverse Floquet exponents for ${\mathcal M}$. {\rm(c)} If the Floquet exponents and the transverse Floquet exponents on ${\mathcal M}$ have negative real part, then $\tilde{u}(t)$ is stable. \end{theorem} \begin{proof} (a) This follows because ${\mathcal G}'$ is a lift of ${\mathcal G}$. (b) Let $\tilde{f}$ be the lift of $f$. The transverse Floquet exponents must have negative real part for Theorem \ref{T:FFStab} to apply. The Floquet matrix $\mathrm{e}^{BT}$ is independent of the initial time chosen for one period of the flow, hence its eigenvalues are the same after any phase shift. Therefore phase-synchronous nodes have the same transverse Floquet exponents. (c) This follows from Theorem~\ref{T:FFStab}. 
\end{proof} By Theorem~\mbox{Re}f{T:tstab} we immediately deduce: \begin{corollary} \label{C:} {\rm(a)} The transverse eigenvalues (evaluated at any point) are the same as the eigenvalues of the Jacobian $J^{\mathcal M}$ on the module ${\mathcal M}$, including only the arrows whose heads and tails lie in ${\mathcal M}$ (evaluated at the same point). {\rm(b)} If all eigenvalues of $J^{\mathcal M}$ have negative real part when evaluated on $\{u(t)\}$, then $\tilde{u}(t)$ is globally transversely stable. {\rm(c)} If $u(t)$ is stable on $P^{\mathcal G}$, nodes are $1$-dimensional, and all eigenvalues of $J^{\mathcal M}$ have negative real part when evaluated on $\{u(t)\}$, then $\tilde{u}(t)$ is stable on $P^{\widetilde{{\mathcal G}}}$. \qed \end{corollary} Again we emphasise that transverse stability in this sense applies to the synchrony subspace $\mathbb{D}elta$, and does not guarantee stability in the Floquet sense, except when nodes are 1-dimensional or the state is an equilibrium. More general results of the same kind can be derived for other actions of $\mathbb{Z}_k$ on ${\mathcal G}$, such as those leading to multirhythms \cite{GNS04,GS23}. In each case the connections in the lift must be tailored to the phase relations of the periodic state concerned. We do not state such generalisations but in principle the same ideas apply. Similar remarks to those in Section~\mbox{Re}f{S:HDN} apply to the phase-synchronous case. There is a natural analogue of Theorem~\mbox{Re}f{T:TWtranseigen}(c) for Liapunov stability, proved in the same manner. We do not state it here. \section{Conclusions} \label{S:C} Propagation of synchronous or phase-synchronous states along linear chains is important in biology, medicine, and robotics, among other areas of application. A simple, effective, and robust way to propagate signals with specific synchrony and phase patterns in a stable manner is to use a CPG to generate the underlying patterns and propagate them along a feedforward chain. 
Suitably constructed, such a feedforward lift preserves the waveform of the signal as it propagates. An important issue is the stability of the propagating signals. Specifically, maintaining the synchrony or phase pattern requires stability to synchrony-breaking perturbations --- transverse stability. We give a necessary and sufficient condition for stability (in the Floquet sense) that depends only on the internal dynamics of the CPG nodes. This implies that if adding a single copy of the CPG leads to a stable periodic orbit, the chain can be extended arbitrarily far, and even branch into a tree, with the lifted orbit remaining stable. Transverse Floquet multipliers for a lifted periodic orbit are generically multiple whenever nodes in the lift, but not in the CPG, are synchronous. Analogous results hold for Liapunov stability. A simpler condition `transverse stability of the synchrony subspace' implies linear stability of equilibria, and Floquet-stability of periodic orbits when nodes are $1$-dimensional. The latter implication can fail for higher-dimensional nodes, but has some heuristic value. There is a straightforward generalisation of these results to propagating phase patterns, where the CPG is a symmetric ring of identical modules, and generates a rotating wave with regularly spaced phase shifts. The lifted periodic orbit can be viewed as a travelling wave along the chain. Again transverse stability need be verified only for a single module in the chain. As a final, more speculative remark: Feedforward lifts have a simple modular structure, capable of generating stable propagating signals with specific phase patterns. This combination of repetitive modules and potentially useful dynamical patterns can evolve naturally from simpler structures, especially in the context of muscle groups driven by a network of neurons. This could be one reason why such architectures are common in living organisms. 
\paragraph{Acknowledgements} We thank Peter Ashwin, Marty Golubitsky, and John Guckenheimer for helpful discussions. \end{document}
\begin{document} \maketitle \begin{center} {\it $^\star$ St. Petersburg Branch of Steklov Mathematical Institute, Fontanka 27, St. Petersburg 191011, Russia, [email protected] $^\diamond$Department of Mathematics, University of North Carolina at Chapel Hill, Chapel Hill, NC 27599-3250, USA, [email protected]} \end{center} \centerline{February, 2000} \centerline{\sl To the memory of Anatoly Izergin} \begin{abstract} The trigonometric KZ equations associated with a Lie algebra ${{\mathfrak g}\,}$ depend on a parameter $\la\in{{\mathfrak h\,}}$ where ${{\mathfrak h\,}}\subset{{\mathfrak g}\,}$ is the Cartan subalgebra. We suggest a system of dynamical difference equations with respect to $\la$ compatible with the KZ equations. The dynamical equations are constructed in terms of intertwining operators of ${{\mathfrak g}\,}$-modules. \end{abstract} \thispagestyle{empty} \section{Introduction} The trigonometric KZ equations associated with a Lie algebra ${{\mathfrak g}\,}$ depend on a parameter $\la\in{{\mathfrak h\,}}$ where ${{\mathfrak h\,}}\subset{{\mathfrak g}\,}$ is the Cartan subalgebra. We suggest a system of dynamical difference equations with respect to $\la$ compatible with the trigonometric KZ differential equations. The dynamical equations are constructed in terms of intertwining operators of ${{\mathfrak g}\,}$-modules. Our dynamical difference equations are a special example of the difference equations introduced by Cherednik. In \cite{Ch1, Ch2} Cherednik introduces a notion of an affine R-matrix associated with the root system of a Lie algebra and taking values in an algebra $F$ with certain properties. Given an affine R-matrix, he defines a system of equations for an element of the algebra $F$. In this paper we construct an example of an affine R-matrix and call the corresponding system of equations the dynamical equations. 
In our example, $F$ is the algebra of functions of complex variables $z_1,...,z_n$ and $\la\in{{\mathfrak h\,}}$ taking values in the tensor product of $n$ copies of the universal enveloping algebra of ${{\mathfrak g}\,}$. The fact that our dynamical difference equations are compatible with the trigonometric KZ differential equations is a remarkable property of our affine R-matrix. There is a similar construction of dynamical difference equations compatible with the qKZ difference equations associated with a quantum group. The dynamical difference equations in that case are constructed in the same way in terms of interwining operators of modules over the quantum group. We will describe this construction in a forthcoming paper. There is a degeneration of the trigonometric KZ differential equations to the standard (rational) KZ differential equations. Under this limiting procedure the dynamical difference equations constructed in this paper turn into the system of differential equations compatible with the standard KZ differential equations and described in \cite{FMTV}. In \cite{FMTV} we proved that the standard hypergeometric solutions of the standard KZ equations \cite{SV, V} satisfy also the dynamic differential equations of \cite{FMTV}. The trigonometric KZ differential equations also have hypergeometric solutions, see \cite{Ch3, EFK}. We conjecture that the hypergeometric solutions of the trigonometric KZ differential equations also solve the dynamical difference equations of this paper. In Section 2 we study relations between intertwining operators of ${{\mathfrak g}\,}$-modules and the Weyl group ${{{\Bbb B}bb W\,}}$ of ${{\mathfrak g}\,}$. For any finite dimensional ${{\mathfrak g}\,}$-module $V$ and $w\in{{{\Bbb B}bb W\,}}$ we construct a rational function ${\Bbb B}_{w,V} : {\mathbb C} \to {\operatorname{End\,}} (V)$. The operators ${\Bbb B}_{w,V}(\la)$ are used later to construct an affine R-matrix and dynamical equations. 
In Section 3 we define the dynamical difference equations for ${{\mathfrak g}\,}=sl_N$ in terms of operators ${\Bbb B}_{w,V}(\la)$ directly (without introducing affine R-matrices). For ${{\mathfrak g}\,}=sl_N$, we prove that the dynamical equations are compatible with the trigonometric KZ differential equations. We give a formula for the determinant of a square matrix solution of the combined system of KZ and dynamical equations. In Section 4 we review \cite{Ch1, Ch2} and construct the dynamical difference equations for any simple Lie algebra ${{\mathfrak g}\,}$. We show that the dynamical equations are compatible with the trigonometric KZ equations if the Lie algebra ${{\mathfrak g}\,}$ has minuscule weights, i.e. is not of type $E_8, F_4, G_2$. We conjecture that the dynamical difference equations and trigonometric KZ equations are compatible for any simple Lie algebra. We thank I.Cherednik for valuable discussions and explanation of his articles \cite{Ch1, Ch2} and P.Etingof who taught us all about the Weyl group and intertwining operators. \section{Intertwining Operators} \subsection{Preliminaries} Let ${{\mathfrak g}\,}$ be a complex simple Lie algebra with root space decomposition ${{\mathfrak g}\,} = {{\mathfrak h\,}} \oplus(\oplus_{\alpha\in\Si}{{\mathfrak g}\,}_{\alpha})$ where $\Si\subset{{\mathfrak h\,}}^*$ is the set of roots. Fix a system of simple roots $\al_1,...,\al_r$. Let ${{{\Bbb B}bb G\,}}amma$ be the corresponding Dynkin diagram, and $\Si_{\partial}m$ --- the set of positive (negative) roots. Let ${{\mathfrak n}}_{{\partial}m}=\oplus_{\al\in \Si_{{\partial}m}}{{\mathfrak g}\,}_\al$. Then ${{\mathfrak g}\,}={{\mathfrak n}}_+\oplus{{\mathfrak h\,}}\oplus{{\mathfrak n}}_-$. Let $(\,,\,)$ be an invariant bilinear form on ${{\mathfrak g}\,}$. The form gives rise to a natural identification ${{\mathfrak h\,}}\to{{\mathfrak h\,}}^*$. We use this identification and make no distinction between ${{\mathfrak h\,}}$ and ${{\mathfrak h\,}}^*$. 
This identification allows us to define a scalar product on ${{\mathfrak h\,}}^*$. We use the same notation $(\,,\,)$ for the pairing ${{\mathfrak h\,}}\!\otimes\!\, {{\mathfrak h\,}}^*\to{\mathbb C}$. We use the notation: $Q=\oplus_{i=1}^r{\mathbb Z}\al_i$ - root lattice; $Q^+=\oplus_{i=1}^r{\mathbb Z}_{{{\mathfrak g}\,}e 0}\al_i$; $Q^\vee=\oplus_{i=1}^r{\mathbb Z}\al_i^\vee$ - dual root lattice, where $\al^\vee=2\al/(\al,\al)$; $P=\{\la\in{{\mathfrak h\,}}\,|\, (\la,\al^\vee_i)\in{\mathbb Z}\}$ - weight lattice; $P^+=\{\la\in{{\mathfrak h\,}}\,|\, (\la,\al^\vee_i)\in{\mathbb Z}_{{{\mathfrak g}\,}e 0}\}$ - cone of dominant integral weights; $\om_i\in P^+$ - fundamental weights: $(\om_i,\al^\vee_j)=\dl_{ij}$; $\rho={1\over 2}\sum_{\al\in\Si_+}\al=\sum_{i=1}^r\om_i$; $P^\vee=\oplus_{i=1}^r{\mathbb Z}\om^\vee_i$ - dual weight lattice, where $\om^\vee_i$ -dual fundamental weights: $(\om^\vee_i,\al_j)=\dl_{ij}$. Define a partial order on ${{\mathfrak h\,}}$ putting $\mu<\la$ if $\la-\mu\in Q^+$. Let $s_i:{{\mathfrak h\,}}\to{{\mathfrak h\,}}$ denote a simple reflection, defined by $s_i(\la)=\la-(\al_i^\vee,\la)\al_i$; ${{{\Bbb B}bb W\,}}$ - Weyl group, generated by $s_1,...,s_r$. The following relations are defining: \begin{displaymath}an\label{rela} s_i^2=1, \qquad (s_is_j)^m=1 \qquad \text{for}\qquad m=2,3,4,6, {{\mathfrak n}}otag \end{displaymath}an where $m=2$ if $\al_i$ and $\al_j$ are not neighboring in ${{{\Bbb B}bb G\,}}amma$, otherwise, $m=3,4,6$ if 1,2,3 lines respectively connect $\al_i$ and $\al_j$ in ${{{\Bbb B}bb G\,}}amma$. For an element $w\in {{{\Bbb B}bb W\,}}$, denote $l(w)$ the length of the minimal (reduced) presentation of $w$ as a product of generators $s_1,...,s_r$. 
Let $U{{\mathfrak g}\,}$ be the universal enveloping algebra of ${{\mathfrak g}\,}$; $U{{\mathfrak g}\,}^{\!\otimes\!\, n}$ - tensor product of $n$ copies of $U{{\mathfrak g}\,}$; $\Dl^{(n)}:U{{\mathfrak g}\,}\to U{{\mathfrak g}\,}^{\!\otimes\!\, n}$ - the iterated comultiplication (in particular, $\Dl^{(1)}$ is the identity, $\Dl^{(2)}$ is the comultiplication); $U{{\mathfrak g}\,}^{\!\otimes\!\, n}_0 =\{ x\in U{{\mathfrak g}\,}^{\!\otimes\!\, n}\,|\, [\Dl^{(n)}(h),x]=0\,{}\, \text{for any } h\in{{\mathfrak h\,}}\}$ - subalgebra of weight zero elements. For $\al\in\Si$ choose generators $e_\al\in{{\mathfrak g}\,}_\al$ so that $(e_\al,e_{-\al})=1$. For any $\al$, the triple \begin{displaymath}an\label{sl2-al} H_\al=\al^\vee, \qquad E_\al={2\over (\al,\al)}e_\al,\qquad F_\al=e_{-\al} {{\mathfrak n}}otag \end{displaymath}an forms an $sl_2$-subalgebra in ${{\mathfrak g}\,}$, $[H_\al,E_\al]=2E_\al,\, [H_\al,F_\al]=-2F_\al,\, [E_\al,F_\al]=H_\al$. A dual fundamental weight $\om_i^\vee$ is called minuscule if $(\om_i^\vee,\al)$ is 0 or 1 for all $\al\in\Si_+$, i.e. for any positive root $\al=\sum_{i=1}^r m_i \al_i$, the coefficient $m_i$ is either 0 or 1. For a root system of type $A_r$ all dual fundamental weights are minuscule. There is no minuscule dual fundamental weight for $E_8, F_4, G_2$. For a minuscule dual fundamental weight $\om^\vee_i$, define an element $w_{[i]}=w_0w^i_0 \in {{{\Bbb B}bb W\,}}$ where $w_0$ (respectively, $w_0^i$) is the longest element in ${{{\Bbb B}bb W\,}}$ (respectively, in ${{{\Bbb B}bb W\,}}^i$ generated by all simple reflections $s_j$ preserving $\om_i^\vee$). \begin{displaymath}gin{lemma}\label{wi} Let $\al$ be a positive root. Then $w_{[i]}(\al)\in\Si_+$ if $(\om_i^\vee,\al)=0$ and $w_{[i]}(\al)\in\Si_-$ if $(\om_i^\vee,\al)=1$. 
\end{lemma} Let ${{{\Bbb B}bb G\,}}$ be the simply connected complex Lie group with Lie algebra ${{\mathfrak g}\,}$, ${{{\Bbb B}bb G\,}}H \subset {{{\Bbb B}bb G\,}}$ the Cartan subgroup corresponding to ${{\mathfrak h\,}}$, $N({{{\Bbb B}bb G\,}}H)=\{x\in {{{\Bbb B}bb G\,}}\,|\, x{{{\Bbb B}bb G\,}}H x^{-1}={{{\Bbb B}bb G\,}}H\}$ the normalizer of ${{{\Bbb B}bb G\,}}H$. Then the Weyl group is canonically isomorphic to $N({{{\Bbb B}bb G\,}}H)/{{{\Bbb B}bb G\,}}H$. The isomorphism sends $x$ to Ad$_x|_{{\mathfrak h\,}}$. Let $V$ be a finite dimensional ${{\mathfrak g}\,}$-module with weight decomposition $V=\oplus_{\mu\in{{\mathfrak h\,}}}V[\mu]$. ${{{\Bbb B}bb G\,}}$ acts on $V$ so that ${{{\Bbb B}bb G\,}}H$ acts trivially on $V[0]$. Thus the action of ${{{\Bbb B}bb W\,}}$ on $V[0]$ is well defined. For any $n$, the Weyl group in the same way acts also on $U{{\mathfrak g}\,}_0^{\!\otimes\!\, n}$. \begin{displaymath}gin{lemma}\label{ef} For $\al\in \Si$ and $k\in{\mathbb Z}_{{{\mathfrak g}\,}e 0}$, consider $e_\al^ke_{-\al}^k\in U{{\mathfrak g}\,}_0$ and $e_{\al}\!\otimes\!\, e_{-\al}\in U{{\mathfrak g}\,}^{\!\otimes\!\, 2}_0$. Then for any $w\in {{{\Bbb B}bb W\,}}$, \begin{displaymath}an w(e_\al^ke_{-\al}^k)=e_{w(\al)}^ke_{-w(\al)}^k,\qquad w(e_\al\!\otimes\!\, e_{-\al})=e_{w(\al)}\!\otimes\!\, e_{-w(\al)}. {{\mathfrak n}}otag \end{displaymath}an \end{lemma} \begin{displaymath}gin{proof} Let $x\in N({{{\Bbb B}bb G\,}}H)$ be a lifting of $w$. Ad$_{x}:{{\mathfrak g}\,}\to{{\mathfrak g}\,}$ is an automorphism of ${{\mathfrak g}\,}$ preserving the invariant scalar product and sending ${{\mathfrak g}\,}_\begin{displaymath}ta$ to ${{\mathfrak g}\,}_{w(\begin{displaymath}ta)}$ for all $\begin{displaymath}ta$. Thus, Ad$_{x}e_{\begin{displaymath}ta}=c_{x,\begin{displaymath}ta}e_{w(\begin{displaymath}ta)}$ for suitable numbers $c_{x,\begin{displaymath}ta}$ and $c_{x,\al}c_{x,-\al}=1$. 
\end{proof} Let $x_1,...,x_r$ be an orthonormal basis in ${{\mathfrak h\,}}$, set \begin{displaymath}an \Om^0={1\over 2}\sum_{i=1}^r x_i\!\otimes\!\, x_i, \qquad\Om^+=\Om^0+\sum_{\al\in\Si_+}e_\al\!\otimes\!\, e_{-\al},\qquad \Om^-=\Om^0+\sum_{\al\in\Si_+}e_{-\al}\!\otimes\!\, e_{\al}. {{\mathfrak n}}otag \end{displaymath}an Define the Casimir operator $\Om$ and the trigonometric R-matrix $r(z)$ by \begin{displaymath}an \Om=\Om^++\Om^- \,, \qquad r(z)={ \Om^+ z+\Om^- \over z-1}\,. {{\mathfrak n}}otag \end{displaymath}an For any $x\in U{{\mathfrak g}\,}$, we have $\Dl(x)\,\Om\,=\,\Om\,\Dl(x)$. We will use a more symmetric form of the trigonometric R-matrix: $r(z_1/z_2)$. The Weyl group acts on $r(z_1/z_2), \Om\in U{{\mathfrak g}\,}^{\!\otimes\!\, 2}_0$. $\Om$ is Weyl invariant. For any $w\in {{{\Bbb B}bb W\,}}$, \begin{displaymath}an w(r(z_1/z_2))= {1\over z_1-z_2}\,(\,{z_1+z_2\over 2}\sum_{i=1}^rx_i \!\otimes\!\, x_i\,+\, \sum_{\al\in\Si_+} \,(z_1\,e_{w(\al)}\!\otimes\!\, e_{-w(\al)}\,+\,z_2\, e_{w(-\al)}\!\otimes\!\, e_{w(\al)})\,) . {{\mathfrak n}}otag \end{displaymath}an \begin{displaymath}gin{lemma}\label{lemma-2} For a minuscule dual fundamental weight $\om_i^\vee$, \begin{displaymath}an\label{wr} z_1^{-(\om_i^\vee)^{(1)}}z_2^{-(\om_i^\vee)^{(2)}}r(z_1/z_2)z_1^{(\om_i^\vee)^{(1)}}z_2^{(\om_i^\vee)^{(2)}} \,=\, w_{[i]}^{-1}(r(z_1/z_2))\,. 
{{\mathfrak n}}otag \end{displaymath}an \end{lemma} {\bf Proof.} Using Lemma \ref{wi} it is easy to see that both sides of the equation are equal to \begin{displaymath}an {1\over z_1-z_2}\,(\,{z_1+z_2\over 2}\sum_{i=1}^rx_i \!\otimes\!\, x_i\,+\, \sum_{\al\in\Si_+,\,(\al,\om^\vee_i)=0} \,( z_1\,e_{\al}\!\otimes\!\, e_{-\al}\,+\,z_2\, e_{-\al}\!\otimes\!\, e_{\al}) + {{\mathfrak n}}otag \\ \sum_{\al\in\Si_+,\,(\al,\om^\vee_i)=1} \,(z_1\,e_{-\al}\!\otimes\!\, e_{\al}\,+\,z_2\, e_{\al}\!\otimes\!\, e_{-\al})\,)\,.\qquad \square {{\mathfrak n}}otag \end{displaymath}an \subsection{The Trigonometric KZ Equations} Let $V=V_1\!\otimes\!\, ...\!\otimes\!\, V_n$ be a tensor product of ${{\mathfrak g}\,}$-modules. For $\kappa\in {\mathbb C}$ and $\la\in {{\mathfrak h\,}}$, introduce the KZ operators ${{\mathfrak n}}abla_i(\la,\kappa), \,i=1,...,n,$ acting on functions $u(z_1,...,z_n)$ of $n$ complex variables with values in $V$ and defined by \begin{displaymath}an\label{KZ} {{\mathfrak n}}abla_i(\la,\kappa) \,=\, \kappa z_i{{\partial} \over {\partial} z_i}-\sum_{j,\,j{{\mathfrak n}}eq i}r(z_i/z_j)^{(i,j)}-\la^{(i)}. {{\mathfrak n}}otag \end{displaymath}an Here $r^{(i,j)}$, $\la^{(i)}$ denote $r$ acting in the $i$-th and $j$-th factors of the tensor product and $\la$ acting in the $i$-th factor. The trigonometric KZ equations are the equations \begin{displaymath}an\label{KZ-equa} {{\mathfrak n}}abla_i(\la,\kappa)u(z_1,...,z_n,\la)\,=\,0\,,\qquad i=1,...,n\,, \end{displaymath}an see \cite{EFK}. The KZ equations are compatible, $[{{\mathfrak n}}abla_i,{{\mathfrak n}}abla_j]=0$. \subsection{Intertwining Operators, Fusion Matrices, \cite{ES,EV1}} For $\la\in {{\mathfrak h\,}}$, let $M_\la$ be the Verma module over ${{\mathfrak g}\,}$ with highest weight $\la$ and highest weight vector $v_\la$. We have ${{\mathfrak n}}_+ v_\la=0$, and $\,h v_\la=(h,\la)v_\la$ for all $h\in{{\mathfrak h\,}}$. Let $M_\la=\oplus_{\mu\leq\la}M_\la[\mu]$ be the weight decomposition. 
The Verma module is irreducible for a generic $\la$. Define the dual Verma module $M^*_\la$ to be the graded dual space $\oplus_{\mu\leq \la}M^*_\la[\mu]$ equipped with the ${{\mathfrak g}\,}$-action: $\langle u, a v\rangle=- \langle a u, v \rangle$ for all $a\in{{\mathfrak g}\,},\,u\in M_\la,\,v\in M^*_\la$. Let $v^*_\la$ be the lowest weight vector of $M^*_\la$ satisfying $\langle v_\la,v^*_\la\rangle =1$. Let $V$ be a finite dimensional ${{\mathfrak g}\,}$-module with weight decomposition $V=\oplus_{\mu\in{{\mathfrak h\,}}}V[\mu]$. For $\la,\mu\in{{\mathfrak h\,}}$ consider an intertwining operator $\Phi\,:\,M_\la\,\to\,M_\mu\!\otimes\!\, V$. Define its expectation value by $\langle\Phi\rangle =\langle \Phi (v_\la), v_\mu^*\rangle \in V[\la-\mu]$. If $M_\mu$ is irreducible, then the map Hom$_{{\mathfrak g}\,}(M_\la,M_\mu\!\otimes\!\, V)\to V[\la-\mu],\, \Phi\mapsto \langle\Phi\rangle$, is an isomorphism. Thus for any $v\in V[\la-\mu]$ there exists a unique intertwining operator $\Phi^v_\la:M_\la\to M_\mu\!\otimes\!\, V$ such that $\Phi^v_\la(v_\la)\in v_\la\!\otimes\!\, v + \oplus_{{{\mathfrak n}}u<\mu}M_\mu[{{\mathfrak n}}u]\!\otimes\!\, V$. Let $V,W$ be finite-dimensional ${{\mathfrak g}\,}$-modules and $v \in V[\mu],\;w\in W[{{\mathfrak n}}u]$. Consider the composition \begin{displaymath}an \Phi^{w,v}_{\lambda}:\;M_\lambda \stackrel{\Phi^v_\lambda}{\longrightarrow} M_{\lambda-\mu} \otimes V \stackrel{\Phi^w_{\lambda-\mu}}{\longrightarrow} M_{\lambda-\mu-{{\mathfrak n}}u} \otimes W \otimes V. {{\mathfrak n}}otag \end{displaymath}an Then $\Phi^{w,v}_\lambda \in \mathrm{Hom}_{{\mathfrak g}\,}(M_\lambda,M_{\lambda-\mu-{{\mathfrak n}}u} \otimes W \otimes V)$. Hence, for a generic $\lambda$ there exists a unique element $u \in \,(V \otimes W) [\mu+{{\mathfrak n}}u]$ such that $\Phi^u_\lambda=\Phi^{w,v}_\lambda$. 
The assignment $(w, v) \mapsto u$ is bilinear, and defines an ${{\mathfrak h\,}}$-linear map $$ J_{WV}(\lambda):\; W \otimes V \to W \otimes V. $$ The operator $J_{WV}(\lambda)$ is called the fusion matrix of $W$ and $V$. The fusion matrix $J_{WV}(\lambda)$ is a rational function of $\lambda$. $J_{WV}(\lambda)$ is strictly lower triangular, i.e. $J=1+L$ where $ L(W[{{\mathfrak n}}u] \otimes V[\mu]) \subset \oplus_{\tau<{{\mathfrak n}}u, \,\mu<\sigma} W[\tau]\otimes V[\sigma]$. In particular, $J_{WV}(\lambda)$ is invertible. If $V_1,\ldots V_n$ are ${{\mathfrak h\,}}$-modules and $F(\lambda): V_1 \otimes \ldots \otimes V_n \to V_1 \otimes \ldots \otimes V_n$ is a linear operator depending on $\lambda \in {{\mathfrak h\,}}$, then for any homogeneous $u_1,\ldots , u_n$, $u_i\in V_i[{{\mathfrak n}}u_i]$, we define $ F(\lambda-h^{(i)})(u_1 \otimes \ldots \otimes u_n)$ to be $F(\lambda-{{\mathfrak n}}u_i) (u_1 \otimes \ldots \otimes u_n)$. There is a universal fusion matrix $J(\la)\in U{{\mathfrak g}\,}^{\!\otimes\!\, 2}_0$ such that $J_{WV}(\lambda)=J(\lambda)|_{W\otimes V}$ for all $W, V$. The universal fusion matrix $J(\lambda)$ is the unique solution of the \cite{ABRR} equation \begin{displaymath}an J(\lambda)\, (1 \otimes (\la+\rho -{1\over 2}\sum_{i=1}^rx_i^2))= (1 \otimes (\la+\rho -{1\over 2}\sum_{i=1}^rx_i^2) + \sum_{\alpha \in \Si_+} e_{-\alpha}\otimes e_{\alpha}) J(\lambda). {{\mathfrak n}}otag \end{displaymath}an such that $\bigl(J(\la)-1\bigr)\in{{\mathfrak b}}_-(U{{\mathfrak b}}_-)\otimes (U{{\mathfrak b}}_+){{\mathfrak b}}_+$ where ${{\mathfrak b}}_{\partial}m={{\mathfrak h\,}}\oplus{{\mathfrak n}}_{\partial}m$. We transform this equation to a more convenient form. The equation can be written as \begin{displaymath}an\label{abrr-1} J(\lambda)\, (\la+\rho -{1\over 2}\sum_{i=1}^rx_i^2)^{(2)}= ((\la+\rho -{1\over 2}\sum_{i=1}^rx_i^2)^{(2)} -{1\over 2}\sum_{i=1}^rx_i\!\otimes\!\, x_i + \Om^-) J(\la). 
{{\mathfrak n}}otag \end{displaymath}an We make a change of variables: $\la \mapsto \la -\rho + {1\over 2}(h^{(1)}+h^{(2)})$. Then the equation takes the form \begin{displaymath}an\label{abrr-2} {}\,{}\,{}\,J( \la -\rho + {1\over 2}(h^{(1)}+h^{(2)}))\, ( \la + {1\over 2}(h^{(1)}+h^{(2)}) -{1\over 2}\sum_{i=1}^rx_i^2)^{(2)}= {{\mathfrak n}}otag \\ (( \la + {1\over 2}(h^{(1)}+h^{(2)}) -{1\over 2}\sum_{i=1}^rx_i^2)^{(2)} -{1\over 2}\sum_{i=1}^rx_i\!\otimes\!\, x_i + \Om^-) J( \la -\rho + {1\over 2}(h^{(1)}+h^{(2)})). {{\mathfrak n}}otag \end{displaymath}an Notice that $(h^{(1)}+h^{(2)})^{(2)}=\sum_{i=1}^r x_i^{(2)}(x_i^{(1)}+x_i^{(2)})$. Now the equation takes the form \begin{displaymath}an\label{ABRR} J( \la -\rho + {1\over 2}(h^{(1)}+h^{(2)}))\, (\la^{(2)} + \Om^0)\,=\,(\la^{(2)} + \Om^-)\,J( \la -\rho + {1\over 2}(h^{(1)}+h^{(2)})). \end{displaymath}an For $w\in {{{\Bbb B}bb W\,}}$, let $w(J(\la))$ be the image of $J(\la)$ under the action of $w$. Let $x\in N({{{\Bbb B}bb G\,}}H)$ be a lifting of $w$. Let $W,V$ be finite dimensional ${{\mathfrak g}\,}$-modules. Then \begin{displaymath}an\label{wJ} w(J(\la))|_{W\!\otimes\!\, V}=x J_{WV}(\la)x^{-1}, \end{displaymath}an and RHS does not depend on the choice of $x$. \subsection{Main Construction, I}\label{main-I} Introduce a new action of the Weyl group ${{{\Bbb B}bb W\,}}$ on ${{\mathfrak h\,}}$ by $$ w\cdot \la= w(\la+\rho)-\rho. $$ Remind facts from \cite{BGG}. Let $M_\mu, M_\la$ be Verma modules. Two cases are possible: a) Hom$_{{\mathfrak g}\,}(M_\mu,M_\la)=0$, {{\mathfrak n}}ewline b) Hom$_{{\mathfrak g}\,}(M_\mu,M_\la)={\mathbb C}$ and every nontrivial homomorphism $M_\mu\to M_\la$ is an embedding. Let $M_\la$ be a Verma module with dominant weight $\la\in P^+$. Then Hom$_{{\mathfrak g}\,}(M_\mu,M_\la)={\mathbb C}$ if and only if there is $w\in {{{\Bbb B}bb W\,}}$ such that $\mu= w\cdot \la$. Let $w=s_{i_k}\ldots s_{i_1}$ be a reduced presentation. 
Set $\al^{1}=\al_{i_1}$ and $\al^{j}=(s_{i_1}\ldots s_{i_{j-1}})(\al_{i_j})$ for $j=2,\ldots,k$. Let $n_j=(\la+\rho,(\al^{j})^\vee)$. For a dominant $\la\in P^+$, $n_j$ are positive integers. \begin{displaymath}gin{lemma}\label{sing-v} The collection of integers $n_1,\ldots n_k$ and the product $(e_{-\al_{i_k}})^{n_k}\cdots (e_{-\al_{i_1}})^{n_1}$ do not depend on the reduced presentation. \end{lemma} \begin{displaymath}gin{proof} It is known that $\al^{1},\ldots,\al^{k}$ are distinct positive roots and $\{\al^{1},\ldots,\al^{k}\}=\{\al\in\Si_+\ |\ w(\al)\in\Si_-\}\,$. Hence, the collection $n_1,\ldots n_k$ does not depend on the reduced presentation. The vector $(e_{-\al_{i_k}})^{n_k}\cdots (e_{-\al_{i_1}})^{n_1}v_\la$ is a singular vector in $M_\la$. If $w=s_{i'_k}\ldots s_{i'_1}$ is another reduced presentation, then the vectors $(e_{-\al_{i_k}})^{n_k}\ldots (e_{-\al_{i_1}})^{n_1}v_\la$ and \\ $(e_{-\al_{i'_k}})^{n'_k}\ldots (e_{-\al_{i'_1}})^{n'_1}v_\la$ are proportional. Since $M_\la$ is a free ${{\mathfrak n}}_-$-module, we have\\ $(e_{-\al_{i'_k}})^{n'_k}\ldots (e_{-\al_{i'_1}})^{n'_1}\,=\, c\,(e_{-\al_{i_k}})^{n_k}\ldots (e_{-\al_{i_1}})^{n_1}$ in ${{\mathfrak n}}_-$ for a suitable $c\in{\mathbb C}$. $c=1$ since the monomials are equal when projected to the commutative polynomial algebra generated by $e_{-\al_1},\ldots,e_{-\al_r}$. \end{proof} Define a singular vector $v_{w\cdot\la}^\la\in M_\la$ by \begin{displaymath}an v_{w\cdot\la}^\la\,=\, {(e_{-\al_{i_k}})^{n_k}\over n_k!} \ldots {(e_{-\al_{i_1}})^{n_1}\over n_1!}\,v_\la\,. \end{displaymath}an This vector does not depend on the reduced presentation by Lemma \ref{sing-v}. For all $\la\in P^+$, $w\in {{{\Bbb B}bb W\,}}$, fix an embedding $M_{w\cdot \la} {{\mathfrak h\,}}ookrightarrow M_\la$ sending $v_{w\cdot \la}$ to $v_{w\cdot \la}^ \la$. 
Let $V$ be a finite dimensional ${{\mathfrak g}\,}$-module, $V=\oplus_{{{\mathfrak n}}u\in{{\mathfrak h\,}}}V[{{\mathfrak n}}u]$ the weight decomposition, $P(V)=\{{{\mathfrak n}}u \in{{\mathfrak h\,}}\,|\,V[{{\mathfrak n}}u]{{\mathfrak n}}eq 0\}$ the set of weights of $V$. We say that $\la\in P^+$ is generic with respect to $V$ if \begin{displaymath}gin{enumerate} \item[I.] For any ${{\mathfrak n}}u\in P(V)$ there exists a unique intertwining operator $\Phi^v_\la:M_\la\to M_{\la-{{\mathfrak n}}u}\!\otimes\!\, V$ such that $\Phi^v_\la (v_\la)= v_{\la-{{\mathfrak n}}u}\!\otimes\!\, v + $ lower order terms. \item[II.] For any $w,w' \in {{{\Bbb B}bb W\,}},\, w{{\mathfrak n}}eq w'$, and any ${{\mathfrak n}}u \in P(V)$, the vector $w\cdot\la -w'\cdot(\la -{{\mathfrak n}}u)$ does not belong to $P(V)$. \end{enumerate} It is clear that all dominant weights lying far inside the cone of dominant weights are generic with respect to $V$. \begin{displaymath}gin{lemma}\label{a} Let $\la\in P^+$ be generic with respect to $V$. Let $v\in V[{{\mathfrak n}}u]$. Consider the intertwining operator $\Phi^v_\la:M_\la\to M_{\la-{{\mathfrak n}}u}\!\otimes\!\, V$. For $w\in {{{\Bbb B}bb W\,}}$, consider the singular vector $v_{w\cdot \la}^\la\in M_\la$. Then there exists a unique vector $A_{w,V}(\la)(v)\in V[w({{\mathfrak n}}u)]$ such that \begin{displaymath}an\label{A} \Phi_\la^v (v_{w\cdot\la}^\la)= v_{w\cdot(\la-{{\mathfrak n}}u)}^{ \la-{{\mathfrak n}}u}\!\otimes\!\, A_{w,V}(\la)(v)\, +\,\text{lower order terms}\,. {{\mathfrak n}}otag \end{displaymath}an \end{lemma} \begin{displaymath}gin{proof} $\Phi_\la^v (v_{w\cdot\la}^\la)$ is a singular vector in $M_{\la-{{\mathfrak n}}u}\!\otimes\!\, V$. It has to have weight components of the form $v_{w'\cdot (\la-{{\mathfrak n}}u)}^{\la-{{\mathfrak n}}u}\!\otimes\!\, u \,$ for suitable $w'\in{{{\Bbb B}bb W\,}}$ and $u\in V$. 
Since $\la$ is generic, we have $w=w'$ and $\Phi_\la^v (v_{w\cdot \la}^\la)$ is of the required form for a suitable $A_{w,V}(\la)(v)\in V[w({{\mathfrak n}}u)]$. \end{proof} For generic $\la\in P^+$, Lemma \ref{a} defines a linear operator $A_{w, V}(\la): V\to V$ such that $A_{w, V}(\la)(V[{{\mathfrak n}}u]) \subset V[w({{\mathfrak n}}u)]$ for all ${{\mathfrak n}}u\in P(V)$. It follows from calculations in Section \ref{main-sl2} that $A_{w, V}(\la)$ is a rational function of $\la\in{{\mathfrak h\,}}$. The following Lemmas are easy consequences of definitions. \begin{displaymath}gin{lemma}\label{Aww} If $w_1,w_2\in{{{\Bbb B}bb W\,}}$ and $l(w_1w_2)=l(w_1)+l(w_2)$, then \begin{displaymath}a A_{w_1w_2,V}(\la)\,=\,A_{w_1,V}(w_2\cdot\la)A_{w_2,V}(\la)\,. \end{displaymath}a \end{lemma} \begin{displaymath}gin{lemma}\label{com-A} Let $W, V$ be finite dimensional ${{\mathfrak g}\,}$-modules. Let $w\in {{{\Bbb B}bb W\,}}$. Then \begin{displaymath}an\label{Com-A} A_{w,W\!\otimes\!\, V}(\la)J_{WV}(\la)\,=\,J_{WV}(w\cdot\la)(A_{w,W}(\la-h^{(2)}) \!\otimes\!\, A_{w,V}(\la))\,. {{\mathfrak n}}otag \end{displaymath}an \end{lemma} Let $x_w\in N({{{\Bbb B}bb G\,}}H)\subset {{{\Bbb B}bb G\,}}$ be a lifting of $w\in {{{\Bbb B}bb W\,}}$. For a finite dimensional ${{\mathfrak g}\,}$-module $V$, define an operator \begin{displaymath}an\label{def-b} B_{x_w, V}(\la)\,:\,V\,\to\,V\,,\qquad v\,\mapsto \, x_w^{-1}A_{w, V}(\la)v\,. {{\mathfrak n}}otag \end{displaymath}an $B_{x_w, V}$ preserves the weight of elements of $V$. Lemma \ref{com-A} implies \begin{displaymath}an\label{com-xB} B_{x_w,W\!\otimes\!\, V}(\la)J_{WV}(\la)\,=\,(x_w^{-1}J_{WV}(w\cdot\la)x_w)\,(B_{x_w,W}(\la-h^{(2)}) \!\otimes\!\, B_{x_w,V}(\la))\,, {{\mathfrak n}}otag \end{displaymath}an cf. {\mathbb R}ef{wJ}. The operator $B_{x_w, V}$ depends on the choice of $x_w$. If $x_wg,\, g\in {{{\Bbb B}bb G\,}}H$, is another lifting of $w$, then $B_{x_wg, V}\,=\,g^{-1}B_{x_w, V}$. 
The operators $B_{x_w,V}(\la)$, $w\in{{{\Bbb B}bb W\,}}$, are defined now for generic dominant $\la$ and depend on the choice of liftings $x_w$. In the next two Sections we fix a normalization $B_{w,V}(\la)$ of $B_{x_w,V}(\la)$ so that $B_{w,V}(\la)\,\to \,1 $ as $\la\to\infty$. We show that for any $w\in{{{\Bbb B}bb W\,}}$, there is a universal $B_w(\la)\in U{{\mathfrak g}\,}_0$ such that $B_w(\la)|_V=B_{w,V}(\la)$ for every finite dimensional ${{\mathfrak g}\,}$-module $V$. For any $w\in{{{\Bbb B}bb W\,}}$, we present $B_w(\la)$ as a suitable product of operators $B_{s_i}(\la)$ corresponding to simple reflections. \subsection{Operators $B_{x_w, V}(\la)$ for ${{\mathfrak g}\,}=sl_2$}\label{main-sl2} Consider $sl_2$ with generators $H,E,F$ and relations $[H,E]=2E,\, [H,F]=-2F,\, [E,F]=H$. Let $\al_1$ be the positive root. Identifying ${{\mathfrak h\,}}$ and ${{\mathfrak h\,}}^*$, we have $\al_1=\al_1^\vee=H$, $\om_1=\om_1^\vee=H/2$,\, ${{{\Bbb B}bb W\,}}=\{1,s_1\}$. Let $\la=l\om_1$, $l\in{\mathbb Z}_{{{\mathfrak g}\,}e 0}$, be a dominant weight. Then $s_1\cdot \la= -(l+2)\om_1$. For any dominant weight $\la$, fix an embedding \begin{displaymath}an M_{s_1\cdot \la}\,{{\mathfrak h\,}}ookrightarrow \,M_\la,\qquad v_{s_1\cdot\la}\,\mapsto\, v_{s_1\cdot\la}^\la\,=\,{ F^{(\la,\al_1)+1}v_{\la}\over ((\la,\al_1)+1)!}\, {{\mathfrak n}}otag \end{displaymath}an as in Section \ref{main-I}. For $m\in {\mathbb Z}_{{{\mathfrak g}\,}e 0}$, let $L_m$ be the irreducible $sl_2$ module with highest weight $m\om_1$. $L_m$ has a basis $v^m_0,..., v^m_m$ such that $$ Hv^m_k=(m-2k)v^m_k\,,\qquad Fv^m_k=(k+1)v^m_{k+1}\,,\qquad Ev^m_k=(m-k+1)v^m_{k-1}\,. $$ For ${{\mathfrak g}\,}=sl_2$, we have ${{{\Bbb B}bb G\,}}=SL(2,{\mathbb C})$. Then ${{{\Bbb B}bb G\,}}H\subset{{{\Bbb B}bb G\,}}$ is the subgroup of diagonal matrices. Fix a lifting ${ x}\in N({{{\Bbb B}bb G\,}}H)$ of $s_1$, set $x=(x_{ij})$ where $x_{11}=x_{22}=0$, $x_{12}=-1$, $x_{21}=1$. 
Then the action of $x$ in $L_m$ is given by $v^m_k\mapsto (-1)^kv^m_{m-k}$ for any $k$. We have $x\,=\,\text{exp}(-E)\,\text{exp}(F)\,\text{exp}(-E)$. For $t\in{\mathbb C}$, introduce \begin{displaymath}an\label{B-sl2} p(t;\,H,E,F)\, = \,\sum_{k=0}^\infty \,F^kE^k\,{1\over k!}\,{\partial}rod_{j=0}^{k-1} {1\over (t-H-j)}\,. \end{displaymath}an $p(t;\,H,E,F)$ is an element of $U(sl_2)_0$. \begin{displaymath}gin{thm}\label{sl2-B} Let $\la$ be a dominant weight for $sl_2$. Let $L_m,\,x$ be as above. Let $B_{x, L_m}(\la): L_m\to L_m$ be the operator defined in Section \ref{main-I}. Then for $k=0,...,m$, \begin{displaymath}an\label{Bv} \\ B_{x, L_m}(\la)v^m_k\,=\, {((\la,\al^\vee_1)+2)((\la,\al_1^\vee)+3)\cdots ((\la,\al_1^\vee)+k+1) \over ((\la,\al_1^\vee)-m+k+1)((\la,\al_1^\vee)-m+k+2)\cdots ((\la,\al_1^\vee) - m + 2k)}\,v^m_k\, {{\mathfrak n}}otag \end{displaymath}an and \begin{displaymath}an\label{B=B-un} p((\la,\al_1^\vee);\,H,E,F)|_{L_m}\,=\,B_{x, L_m}(\la)\,. \end{displaymath}an \end{thm} \begin{displaymath}gin{corollary} $B_{x, L_m}(\la)$ is a rational function of $(\la,\al_1^\vee)$. $B_{x, L_m}(\la)$ tends to $1$ as $(\la,\al_1^\vee)$ tends to infinity. \end{corollary} The Theorem is proved by direct verification. First we calculate explicitly $\Phi^{v^m_k}_\la \,(v_\la)$, $\Phi^{v^m_k}_\la \,(\,{ F^{(\la,\al_1^\vee)+1}\over ((\la,\al^\vee_1)+1)!}\,v_\la\,)$, and then get an expression for $B_{x, L_m}(\la)v^m_k$ as a sum of a hypergeometric type. Using standard formulas from \cite{GR} we see that $B_{x, L_m}(\la)v^m_k$ is given by {\mathbb R}ef{Bv}. Similarly we check that $p((\la,\al_1^\vee);\,H,E,F)\,v^m_k$ gives the same result. Thus we get {\mathbb R}ef{B=B-un}. 
$\square$ Formula {\mathbb R}ef{Bv} becomes more symmetric if $\la$ is replaced by $\la-\rho+{1\over 2}{{\mathfrak n}}u$ where ${{\mathfrak n}}u=m\om_1-k\al_1$ is the weight of $v^m_k$, then \begin{displaymath}an\label{product} p((\la+{1\over 2}{{\mathfrak n}}u,\,\al_1^\vee) - 1;\,H,E,F) v^m_k\,=\,{\partial}rod_{j=0}^{k-1} {(\la,\al_1^\vee) + {m\over 2} -j \over (\la,\al_1^\vee) - {m\over 2} +j}\, v^m_k\,. \end{displaymath}an \begin{displaymath}gin{thm}\label{b-sl2} \begin{displaymath}an p(-t-2;\,-H,F,E)\,\cdot\,p(t;\,H,E,F)\,= \,{t-H+1\over t+1}\,. {{\mathfrak n}}otag \end{displaymath}an \end{thm} To prove this formula it is enough to check that RHS and LHS give the same result when applied to $v^m_k\in L_m$, which is done using {\mathbb R}ef{product}. $\square$ Notice that $p(t;\,-H,F,E)=s_1(p(t;\,H,E,F))$. {\bf Remark.} Let $J(\la)=\sum_ia_i\!\otimes\!\, b_i$ be the universal fusion matrix of $sl_2$. Following \cite{EV2} introduce $S( Q)(\la)\in U(sl_2)_0$ as $S(Q)(\la)=\sum_i S(a_i)b_i$ where $S(a_i)$ is the antipode of $a_i$. The action of $S( Q)(\la)$ in $L_m$ was computed in \cite{EV2}. Comparing the result with Theorem \ref{sl2-B}, one sees that $p((\la,\al_1^\vee);\,H,E,F)$ is equal to $(S( Q)(\la))^{-1}$ up to a simple change of argument $\la$. \begin{displaymath}gin{corollary}\label{AsL} Let $A_{s_1, L_m}(\la):L_m\to L_m$ be the operator defined in Section \ref{main-I}. Then $A_{s_1, L_m}(\la)\,=\,x\,p((\la,\al_1^\vee);\,H,E,F)|_{L_m}$. $A_{s_1, L_m}(\la)$ is a rational function of $(\la,\al_1^\vee)$. $A_{s_1, L_m}(\la)$ tends to $x$ as $(\la,\al_1^\vee)$ tends to infinity. \end{corollary} \subsection{Main Construction, II}\label{main-II} Return to the situation considered in Section \ref{main-I}. For any simple root $\al_i$, the triple $H_{\al_i}, E_{\al_i}, F_{\al_i}$ defines an embedding $sl_2 {{\mathfrak h\,}}ookrightarrow {{\mathfrak g}\,}$ and induces an embedding $ SL(2,{\mathbb C}) {{\mathfrak h\,}}ookrightarrow {{{\Bbb B}bb G\,}}$. 
Denote $x_i \in{{{\Bbb B}bb G\,}}$ the image under this embedding of the element $x\in SL(2,{\mathbb C})$ defined in Section \ref{main-sl2}. \begin{displaymath}gin{lemma}\label{x-in-N} For $i=1,...,r$, we have $x_i\in N({{{\Bbb B}bb G\,}}H)$ and Ad$_{x_i}:{{\mathfrak g}\,}\to{{\mathfrak g}\,}$ restricted to ${{\mathfrak h\,}}$ is the simple reflection $s_i:{{\mathfrak h\,}}\to{{\mathfrak h\,}}$. \end{lemma} \begin{displaymath}gin{proof} Since $x_i\,=\,\exp(-E_{\al_i})$ $\exp(F_{\al_i})$ $\exp(-E_{\al_i})$, we have that Ad$_{x_i}(H_{\al_i})=-H_{\al_i}$ and Ad$_{x_i}(h)=h$ for any $h\in{{\mathfrak h\,}}$ orthogonal to $\al_i$. Hence $x_i\in N({{{\Bbb B}bb G\,}}H)$ and Ad$_{x_i}|_{{\mathfrak h\,}}=s_i$. \end{proof} For $i=1,...,r$ and $\la\in{{\mathfrak h\,}}$, set \begin{displaymath}an\label{AB-i} B_{s_i}(\la)\,=\,p((\la,\al_i^\vee);\,H_{\al_i},E_{\al_i},F_{\al_i}) {{\mathfrak n}}otag \end{displaymath}an where $p(t;\,H,E,F)$ is defined in {\mathbb R}ef{B-sl2}. Set \begin{displaymath}an A_{s_i}(\la)\,=\,x_i\,B_{s_i}(\la)\,. {{\mathfrak n}}otag \end{displaymath}an For any ${{\mathfrak n}}u\in P(V)$, we have $A_{s_i}(\la)(V[{{\mathfrak n}}u])\subset V[s_i({{\mathfrak n}}u)]$. Let $V$ be a finite dimensional ${{\mathfrak g}\,}$-module. For $w\in{{{\Bbb B}bb W\,}}$, let $w=s_{i_k}...s_{i_1}$ be a reduced presentation. For a generic dominant $\la\in P^+$, consider the operator $A_{w,V}(\la) :V\to V$ defined in Section \ref{main-I}. \begin{displaymath}gin{lemma}\label{A=AAA} \begin{displaymath}an A_{w,V}(\la)\,=\,A_{s_{i_k}}((s_{i_{k-1}}...s_{i_{1}})\cdot\la)|_{V}\, A_{s_{i_{k-1}}}((s_{i_{k-2}}...s_{i_{1}})\cdot\la)|_{V}... A_{s_{i_1}}(\la)|_{V}\,. {{\mathfrak n}}otag \end{displaymath}an \end{lemma} \begin{displaymath}gin{proof} See Corollary \ref{AsL} and Lemma \ref{Aww}. \end{proof} \begin{displaymath}gin{corollary} The operator $A_{w,V}(\la)$ is a rational function of $\la$. $A_{w, V}(\la)$ tends to $x_{i_k}...x_{i_1}$ as $\la$ tends to infinity in a generic direction. 
In particular, the product $x_{i_k}...x_{i_1}$ does not depend on the choice of the reduced presentation. \end{corollary} Set $x_w=x_{i_k}...x_{i_1}$. $x_w\in N({{{\Bbb B}bb G\,}}H)$ is a lifting of $w$. Consider the operator $B_{x_w,V}(\la):V\to V$ defined in Section \ref{main-I} for this lifting $x_w$. Denote this operator $B_{w,V}(\la)$. \begin{displaymath}gin{corollary}\label{B=BBB} \begin{displaymath}an &{}&B_{w,V}(\la)\,= {{\mathfrak n}}otag \\ &{}&(s_{i_{k-1}}...s_{i_{1}})^{-1}( B_{s_{i_k}}((s_{i_{k-1}}...s_{i_{1}})\cdot\la))|_{V}\, (s_{i_{k-2}}...s_{i_{1}})^{-1}(B_{s_{i_{k-1}}}((s_{i_{k-2}}...s_{i_{1}})\cdot\la))|_{V}... B_{s_{i_1}}(\la)|_{V}\,. {{\mathfrak n}}otag \end{displaymath}an $B_{w,V}(\la)$ is a rational function of $\la$. $B_{w, V}(\la)$ tends to $1$ as $\la$ tends to infinity in a generic direction. \end{corollary} For any nontrivial element $w\in{{{\Bbb B}bb W\,}}$ and $\la\in{{\mathfrak h\,}}$, define an element $B_w(\la)\in U{{\mathfrak g}\,}_0$ by \begin{displaymath}an\label{def-B} &{}&B_{w}(\la)\,= {{\mathfrak n}}otag \\ &{}&(s_{i_{k-1}}...s_{i_{1}})^{-1}( B_{s_{i_k}}((s_{i_{k-1}}...s_{i_{1}})\cdot\la))\, (s_{i_{k-2}}...s_{i_{1}})^{-1}(B_{s_{i_{k-1}}}((s_{i_{k-2}}...s_{i_{1}})\cdot\la))... B_{s_{i_1}}(\la)\,. {{\mathfrak n}}otag \end{displaymath}an Set $B_w(\la)=1$ if $w$ is the identity in ${{{\Bbb B}bb W\,}}$. We have $B_w(\la)|_V\,=\,B_{w,V}(\la)$, and $B_w(\la)$ does not depend on the choice of the reduced presentation of $w$. {\bf Properties of $B_w(\la)$.} \begin{displaymath}gin{enumerate} \item[I.] If $w_1,w_2\in{{{\Bbb B}bb W\,}}$ and $l(w_1w_2)=l(w_1)+l(w_2)$, then \begin{displaymath}an\label{B=BB} B_{w_1w_2}(\la)\,=\, (w_2)^{-1}(B_{w_1}(w_2\cdot \la))\,B_{w_2}(\la)\,. {{\mathfrak n}}otag \end{displaymath}an \item[II.] Let $i=1,...,r$, {}\, $\om\in{{\mathfrak h\,}}$, and $(\al_i,\om)=0$, then \begin{displaymath}an B_{s_i}(\la+\om)\,=\,B_{s_i}(\la)\,. {{\mathfrak n}}otag \end{displaymath}an \item[III.] 
For $i=1,...,r$, \begin{displaymath}an s_i(B_{s_i}(s_i\cdot\la))\,\cdot\,B_{s_i}(\la)\,=\,{(\la,\al_i^\vee)-H_{\al_i}+1 \over (\la,\al_i^\vee)+1}\,. {{\mathfrak n}}otag \end{displaymath}an \item[IV.] Every relation $(s_is_j)^m=1$ for $m=2,3,4,6$ in ${{{\Bbb B}bb W\,}}$ is equivalent to a homogeneous relation $s_is_j...=s_js_i...$. Every such a homogeneous relation generates a relation for $B_{s_i}(\la), B_{s_j}(\la)$. Namely, for $m=2$, the relation is \begin{displaymath}an\label{ss=ss} (s_j)^{-1}(B_{s_i}(s_j\cdot\la))\,{}\,B_{s_j}(\la)\,=\, (s_i)^{-1}(B_{s_j}(s_i\cdot\la))\,{}\,B_{s_i}(\la)\,, {{\mathfrak n}}otag \end{displaymath}an for $m=3$, the relation is \begin{displaymath}an\label{ss=ss} (s_js_i)^{-1}(B_{s_i}((s_js_i)\cdot\la))\,{}\, (s_i)^{-1}(B_{s_j}(s_i\cdot\la))\,{}\,B_{s_i}(\la)\,=\,&{}& {{\mathfrak n}}otag \\ (s_is_j)^{-1}(B_{s_j}((s_is_j)\cdot\la))\,{}\, (s_j)^{-1}(B_{s_i}(s_j\cdot\la))\,{}\,B_{s_j}(\la)\,, {{\mathfrak n}}otag \end{displaymath}an and so on. \item[V.] \begin{displaymath}an\label{com-B} \Dl (B_{w}(\la))\,J(\la)\,=\,w^{-1}(J(w\cdot\la))\,(B_{w}(\la-h^{(2)}) \!\otimes\!\, B_{w}(\la))\,. {{\mathfrak n}}otag \end{displaymath}an \end{enumerate} The operators $B_w(\la)$ are closely connected with extremal projectors of Zhelobenko, see \cite{Zh1, Zh2}. \subsection{Operators ${\Bbb B}_{w,V}$}\label{Bbb-B} In order to study interrelations of operators $B_{w,V}(\la)$ with KZ operators it is convenient to change the argument $\la$. Let $V$ be a finite dimensional ${{\mathfrak g}\,}$-module. For $w_1,w_2\in{{{\Bbb B}bb W\,}}$ and $\la\in{{\mathfrak h\,}}$, define $w_1({\Bbb B}_{w_2,V}(\la)):V\to V$ as follows. For any ${{\mathfrak n}}u\in P(V)$ and $v\in V[{{\mathfrak n}}u]$, set \begin{displaymath}an w_1({\Bbb B}_{w_2,V}(\la))\,v\,=\,w_1(B_{w_2}(\la-\rho + {1\over 2}{{\mathfrak n}}u))|_{V}\,v\,. {{\mathfrak n}}otag \end{displaymath}an In particular, \begin{displaymath}an {\Bbb B}_{w,V}(\la)v\,=\,B_{w,V}(\la-\rho+{1\over 2}{{\mathfrak n}}u)v\,. 
{{\mathfrak n}}otag \end{displaymath}an $w_1({\Bbb B}_{w_2,V}(\la))$ is a meromorphic function of $\la$, $w_1({\Bbb B}_{w_2,V}(\la))$ tends to 1 as $\la$ tends to infinity in a generic direction. {\bf Properties of ${\Bbb B}_{w,V}(\la)$.} \begin{displaymath}gin{enumerate} \item[I.] If $w_1,w_2\in{{{\Bbb B}bb W\,}}$ and $l(w_1w_2)=l(w_1)+l(w_2)$, then \begin{displaymath}an\label{B=BB} {\Bbb B}_{w_1w_2,V}(\la))\,=\, w_2^{-1}({\Bbb B}_{w_1,V}(w_2( \la)))\,{\Bbb B}_{w_2,V}(\la)\,. {{\mathfrak n}}otag \end{displaymath}an \item[II.] If $i=1,...,r$, $w\in{{{\Bbb B}bb W\,}}$, $v\in V[{{\mathfrak n}}u]$, then \begin{displaymath}an {\Bbb B}_{s_i,V}(\la)\,v\, =\,p((\la+{1\over 2}{{\mathfrak n}}u,\,\al_i^\vee)- 1;\,H_{\al_i}, E_{\al_i}, F_{\al_i})\,v {{\mathfrak n}}otag \end{displaymath}an and \begin{displaymath}an w({\Bbb B}_{s_i,V}(w^{-1}(\la)))\,v\, =\,p((\la+{1\over 2}{{\mathfrak n}}u,\,w(\al_i^\vee))- 1;\,H_{w(\al_i)}, E_{w(\al_i)}, F_{w(\al_i)})\,v {{\mathfrak n}}otag \end{displaymath}an where $p(t;\,H,E,F)$ is defined in {\mathbb R}ef{B-sl2}. \end{enumerate} For $\al\in\Si,\,\la\in{{\mathfrak h\,}}$, define a linear operator ${\Bbb B}^\al_V(\la):V\to V$ by $$ {\Bbb B}^\al_V(\la)v\,=\,p((\la+{1\over 2}{{\mathfrak n}}u,\al^\vee)-1;\,H_\al,E_\al,F_\al)v $$ for any $v\in V[{{\mathfrak n}}u]$. \begin{displaymath}gin{enumerate} \item[III.] $$ {\Bbb B}^\al_V(\la)\,{\Bbb B}^{-\al}_V(\la)v={(\la-{1\over 2}{{\mathfrak n}}u,\al^\vee)\over (\la+{1\over 2}{{\mathfrak n}}u,\al^\vee)}v $$ for any $v\in V[{{\mathfrak n}}u]$. \item[IV.] Let $\al\in\Sigma$, $\om\in{{\mathfrak h\,}}$, and $(\al,\om)=0$, then \begin{displaymath}an {\Bbb B}_{V}^\al(\la+\om)\,=\,{\Bbb B}_{V}^\al(\la)\,. {{\mathfrak n}}otag \end{displaymath}an \item[V.] Every relation $(s_is_j)^m=1$ for $m=2,3,4,6$ in ${{{\Bbb B}bb W\,}}$ is equivalent to a homogeneous relation $s_is_j...=s_js_i...$. Every such a homogeneous relation generates a relation for ${\Bbb B}_{s_i,V}(\la), {\Bbb B}_{s_j,V}(\la)$. 
Namely, for $m=2$, the relation is \begin{displaymath}an\label{ss=ss} (s_j)^{-1}({\Bbb B}_{s_i,V}(s_j(\la)))\,{}\,{\Bbb B}_{s_j,V}(\la)\,=\, (s_i)^{-1}({\Bbb B}_{s_j,V}(s_i(\la)))\,{}\,{\Bbb B}_{s_i,V}(\la)\,, {{\mathfrak n}}otag \end{displaymath}an for $m=3$, the relation is \begin{displaymath}an\label{ss=ss} (s_js_i)^{-1}({\Bbb B}_{s_i,V}((s_js_i)(\la)))\,{}\, (s_i)^{-1}({\Bbb B}_{s_j,V}(s_i(\la)))\,{}\,{\Bbb B}_{s_i,V}(\la)\,=\,&{}& {{\mathfrak n}}otag \\ (s_is_j)^{-1}({\Bbb B}_{s_j,V}((s_is_j)(\la)))\,{}\, (s_j)^{-1}({\Bbb B}_{s_i,V}(s_j(\la)))\,{}\,{\Bbb B}_{s_j,V}(\la)\,, {{\mathfrak n}}otag \end{displaymath}an and so on. \end{enumerate} These relations can be written in terms of operators ${\Bbb B}^\al_V(\la)$. \begin{displaymath}gin{enumerate} \item[VI.] For $\al,\begin{displaymath}ta\in\Si$, denote ${\mathbb R}\langle \al,\bt\rightarrowngle $ the subspace ${\mathbb R}\al+{\mathbb R}\bt\subset{{\mathfrak h\,}}$. Then \begin{displaymath}an {\Bbb B}^{\al}_V(\la){\Bbb B}^{\bt}_V(\la)&=&{\Bbb B}^{\bt}_V(\la){\Bbb B}^{\al}_V(\la)\,, {{\mathfrak n}}otag \\ {\Bbb B}^{\al}_V(\la){\Bbb B}^{\al+\bt}_V(\la){\Bbb B}^{\bt}_V(\la)&=& {\Bbb B}^{\bt}_V(\la){\Bbb B}^{\al+\bt}_V(\la){\Bbb B}^{\al}_V(\la)\,, {{\mathfrak n}}otag \\ {\Bbb B}^{\al}_V(\la){\Bbb B}^{\al+\bt}_V(\la){\Bbb B}^{\al+2\bt}_V(\la){\Bbb B}^{\bt}_V(\la)&=& {\Bbb B}^{\bt}_V(\la){\Bbb B}^{\al+2\bt}_V(\la){\Bbb B}^{\al+\bt}_V(\la){\Bbb B}^{\al}_V(\la)\,, {{\mathfrak n}}otag \end{displaymath}an \begin{displaymath}an {\Bbb B}^{\al}_V(\la){\Bbb B}^{3\al+\bt}_V(\la){\Bbb B}^{2\al+\bt}_V(\la){\Bbb B}^{3\al+2\bt}_V(\la) {\Bbb B}^{\al+\bt}_V(\la){\Bbb B}^{\bt}_V(\la)= {{\mathfrak n}}otag \\ {\Bbb B}^{\bt}_V(\la){\Bbb B}^{\al+\bt}_V(\la){\Bbb B}^{3\al+2\bt}_V(\la){\Bbb B}^{2\al+\bt}_V(\la)\, {\Bbb B}^{3\al+\bt}_V(\la){\Bbb B}^{\al}_V(\la)\, {{\mathfrak n}}otag \end{displaymath}an under the assumption that ${\mathbb R}\langle\al,\bt\rightarrowngle =\{{\partial}m{{\mathfrak g}\,}m\}$ where ${{\mathfrak g}\,}m$ runs over all 
indices in the corresponding identity. \item[VII.] \begin{displaymath}an\label{} {\Bbb B}_{w,W\!\otimes\!\, V}(\la))\,=&{}&\,x_w^{-1}(J_{WV}(w(\la)-\rho+{1\over 2}(h^{(1)}+h^{(2)})))x_w\,\cdot {{\mathfrak n}}otag \\ &{}&({\Bbb B}_{w,W}(\la-{1\over 2}h^{(2)}) \!\otimes\!\, {\Bbb B}_{w,V}(\la+{1\over 2}h^{(1)}))\,J(\la-\rho+{1\over 2}(h^{(1)}+h^{(2)}))^{-1}\, {{\mathfrak n}}otag \end{displaymath}an \end{enumerate} \begin{displaymath}gin{lemma}\label{r-B} Let $W,V$ be finite dimensional ${{\mathfrak g}\,}$-modules, $\la\in {{\mathfrak h\,}}$, $w\in{{{\Bbb B}bb W\,}}$. Then \begin{displaymath}an\label{1} \Om\,{\Bbb B}_{w,W\!\otimes\!\, V}(\la)\,=\, {\Bbb B}_{w,W\!\otimes\!\, V}(\la) \,\Om {{\mathfrak n}}otag \end{displaymath}an and \begin{displaymath}an\label{2} (w^{-1}(\Om^-)+\la^{(2)}){\Bbb B}_{w,W\!\otimes\!\, V}(\la)\,=\, {\Bbb B}_{w,W\!\otimes\!\, V}(\la) (\Om^-+\la^{(2)})\,. {{\mathfrak n}}otag \end{displaymath}an \end{lemma} \begin{displaymath}gin{proof} The first equation holds since $\Om$ commutes with the comultiplication. 
Now \begin{displaymath}an {\Bbb B}_{w,W\!\otimes\!\, V}(\la) \,(\Om^-+\la^{(2)})\,= x_w^{-1}(J_{WV}(w(\la)-\rho+{1\over 2}(h^{(1)}+h^{(2)})))x_w\,\cdot {{\mathfrak n}}otag \\ ({\Bbb B}_{w,W}(\la-{1\over 2}h^{(2)}) \!\otimes\!\, {\Bbb B}_{w,V}(\la+{1\over 2}h^{(1)}))\,J_{WV}(\la-\rho+{1\over 2}(h^{(1)}+h^{(2)}))^{-1}\, (\Om^-+\la^{(2)})\,= {{\mathfrak n}}otag \\ x_w^{-1}(J_{WV}(w(\la)-\rho+{1\over 2}(h^{(1)}+h^{(2)})))x_w\,\cdot {{\mathfrak n}}otag \\ ({\Bbb B}_{w,W}(\la-{1\over 2}h^{(2)}) \!\otimes\!\, {\Bbb B}_{w,V}(\la+{1\over 2}h^{(1)}))\,(\Om^0+\la^{(2)})\,J_{WV}(\la-\rho+{1\over 2}(h^{(1)}+h^{(2)}))^{-1}\,= {{\mathfrak n}}otag \\ x_w^{-1}(J_{WV}(w(\la)-\rho+{1\over 2}(h^{(1)}+h^{(2)})))x_w\,\,(\Om^0+\la^{(2)})\cdot {{\mathfrak n}}otag \\ ({\Bbb B}_{w,W}(\la-{1\over 2}h^{(2)}) \!\otimes\!\, {\Bbb B}_{w,V}(\la+{1\over 2}h^{(1)}))\,J_{WV}(\la-\rho+{1\over 2}(h^{(1)}+h^{(2)}))^{-1}\,= {{\mathfrak n}}otag \\ x_w^{-1}(J_{WV}(w(\la)-\rho+{1\over 2}(h^{(1)}+h^{(2)})))\,(\Om^0+(w(\la))^{(2)})\,x_w\cdot {{\mathfrak n}}otag \\ ({\Bbb B}_{w,W}(\la-{1\over 2}h^{(2)}) \!\otimes\!\, {\Bbb B}_{w,V}(\la+{1\over 2}h^{(1)}))\,J_{WV}(\la-\rho+{1\over 2}(h^{(1)}+h^{(2)}))^{-1}\,= {{\mathfrak n}}otag \\ x_w^{-1}(\Om^-+(w(\la))^{(2)})(J_{WV}(w(\la)-\rho+{1\over 2}(h^{(1)}+h^{(2)})))\,x_w\cdot {{\mathfrak n}}otag \\ ({\Bbb B}_{w,W}(\la-{1\over 2}h^{(2)}) \!\otimes\!\, {\Bbb B}_{w,V}(\la+{1\over 2}h^{(1)}))\,J_{WV}(\la-\rho+{1\over 2}(h^{(1)}+h^{(2)}))^{-1}\,= {{\mathfrak n}}otag \\ (w^{-1}(\Om^-)+\la^{(2)})x_w^{-1}(J_{WV}(w(\la)-\rho+{1\over 2}(h^{(1)}+h^{(2)})))\,x_w\cdot {{\mathfrak n}}otag \\ ({\Bbb B}_{w,W}(\la-{1\over 2}h^{(2)}) \!\otimes\!\, {\Bbb B}_{w,V}(\la+{1\over 2}h^{(1)}))\,J_{WV}(\la-\rho+{1\over 2}(h^{(1)}+h^{(2)}))^{-1}\,= {{\mathfrak n}}otag \\ (w^{-1}(\Om^-)+\la^{(2)}){\Bbb B}_{w,W\!\otimes\!\, V}(\la)\,. 
{{\mathfrak n}}otag \end{displaymath}an \end{proof} \section{Difference Equations Compatible with KZ Equations for ${{\mathfrak g}\,}=sl_{N}$} \subsection{Statement of Results} Let $e_{i,j}$, $i,j=1,...,N$, be the standard generators of the Lie algebra $gl_N$, $$ [ e_{i,j}\,, \, e_{k,l}]\,=\,\dl_{j,k}\,e_{i,l}\,-\,\dl_{i,l}\,e_{k,j}\,. $$ $sl_N$ is the Lie subalgebra of $gl_N$ such that $sl_N={{\mathfrak n}}_+\oplus{{\mathfrak h\,}}\oplus{{\mathfrak n}}_-$ where $$ {{\mathfrak n}}_+=\oplus_{1\leq i < j\leq N}{\mathbb C}\,e_{i,j}\,,\qquad {{\mathfrak n}}_-=\oplus_{1\leq j < i\leq N}{\mathbb C}\,e_{i,j}\,, $$ and ${{\mathfrak h\,}}=\{ \la=\sum_{i=1}^N\la_ie_{i,i}\,|\,\la_i\in{\mathbb C},\,\,\sum_{i=1}^N\la_i=0\}$. The invariant scalar product is defined by $(e_{i,j}, e_{k,l})=\dl_{i,l}\dl_{j,k}$. The roots are $e_{i,i}-e_{j,j}$ for $i{{\mathfrak n}}eq j$. $\al^\vee=\al$ for any root. For a root $\al=e_{i,i}-e_{j,j}$, we have $H_\al=e_{i,i}-e_{j,j},\, E_\al=e_{i,j},\,F_\al=e_{j,i}$. The simple roots are $\al_i=e_{i,i}-e_{i+1,i+1}$ for $i=1,...,N-1$. ${{{\Bbb B}bb W\,}}$ is the symmetric group $S^N$ permuting coordinates of $\la\in{{\mathfrak h\,}}$. The (dual) fundamental weights are $\om_i=\om_i^\vee=\sum_{j=1}^i(1-{i\over N})e_{j,j} -\sum_{j=i+1}^N{i\over N}e_{j,j}$ for $i=1,...,N-1$. All dual fundamental weights are minuscule. For $i=1,...,N-1$, the permutation $w_{[i]}^{-1}\in S^N$ is $\left( {}^1_{i+1}\,{}^{2}_{i+2}\,{}^{...}_{...}\,{}^{N-i}_{N}\,{}^{N-i+1}_{1}\, {}^{...}_{...}\,{}^{N}_{i} \right)$. For any finite dimensional $sl_N$-module $V$ and $w\in S^N$ consider the operators ${\Bbb B}_{w,V}(\la)\,:V\to V$. Let $V=V_1\!\otimes\!\, ...\!\otimes\!\, V_n$ be a tensor product of finite dimensional $sl_N$-modules. For $\kappa\in {\mathbb C}$ and $\la\in {{\mathfrak h\,}}$, consider the trigonometric KZ equations with values in $V$, \begin{displaymath}an\label{KZ-sl} {{\mathfrak n}}abla_j(\la,\kappa)u(z_1,...,z_n,\la)\,=\,0\,,\qquad j=1,...,n\,. 
\end{displaymath}an Here $u(z_1,...,z_n,\la)\in V$ is a function of complex variables $z_1,...,z_n$ and $\la\in{{\mathfrak h\,}}$. Introduce {\it the dynamical difference equations} on a $V$-valued function $u(z_1,...,z_n,\la)$ as \begin{displaymath}an\label{dyn-sl} {} \\ u(z_1,...,z_n,\la+\kappa \om_i^\vee)\,=\,K_i(z_1,...,z_n,\la)\, u(z_1,...,z_n,\la)\,, \qquad i=1,...,N-1\, {{\mathfrak n}}otag \end{displaymath}an where $$ K_i(z_1,...,z_n,\la)\,=\,{\partial}rod_{k=1}^n z_k^{(\om_i^\vee)^{(k)}}\,{\Bbb B}_{w_{[i]},V}(\la)\,. $$ The operator ${\partial}rod_{k=1}^n z_k^{(\om_i^\vee)^{(k)}}$ is well defined if the argument of $z_1,...,z_n$ is fixed. The dynamical difference equations are well defined on functions of $(z,\la)$ where $\la\in{{\mathfrak h\,}}$ and $z$ belongs to the universal cover of $({\mathbb C}^*)^n$. Notice that the KZ equations are well defined for $V$-valued functions of the same variables. The KZ operators ${{\mathfrak n}}abla_j(\la,\kappa) $ and the operators $K_i(z_1,...,z_n,\la)$ preserve the weight decomposition of $V$. \begin{displaymath}gin{thm}\label{comp-sl} The dynamical equations {\mathbb R}ef{dyn-sl} together with the KZ equations {\mathbb R}ef{KZ-sl} form a compatible system of equations. \end{thm} \subsection{Proof} First prove that $$ {\partial}rod_{k=1}^n z_k^{(\om_i^\vee)^{(k)}}\,{\Bbb B}_{w_{[i]},V}(\la)\,{{\mathfrak n}}abla_j(\la,\kappa) = {{\mathfrak n}}abla_j(\la+\kappa \om_i^\vee,\kappa)\,{\partial}rod_{k=1}^n z_k^{(\om_i^\vee)^{(k)}}\,{\Bbb B}_{w_{[i]},V}(\la) $$ for all $i$ and $j$. Multiplying both sides from the left by ${\partial}rod_{k=1}^n z_k^{-(\om_i^\vee)^{(k)}}$ and using Lemma \ref{lemma-2}, we reduce the equation to \begin{displaymath}an\label{compa-sl} {\Bbb B}_{w_{[i]},V}(\la)\,(\,\sum_{k,\,k{{\mathfrak n}}eq j} r(z_j/z_k)^{(j,k)}+\la^{(j)}\,)\,=\, (\,\sum_{k,\,k{{\mathfrak n}}eq j}w_{[i]}^{-1}( r(z_j/z_k))^{(j,k)}+\la^{(j)}\,)\, {\Bbb B}_{w_{[i]},V}(\la)\,. 
{{\mathfrak n}}otag \end{displaymath}an \begin{displaymath}gin{lemma}\label{nice} For $j=1,...,n$ and $w\in{{{\Bbb B}bb W\,}}$, we have \begin{displaymath}an\label{compa-sl} {\Bbb B}_{w,V}(\la)\,(\,\sum_{k,\,k{{\mathfrak n}}eq j} r(z_j/z_k)^{(j,k)}+\la^{(j)}\,)\,=\, (\,\sum_{k,\,k{{\mathfrak n}}eq j}w^{-1}( r(z_j/z_k))^{(j,k)}+\la^{(j)}\,)\, {\Bbb B}_{w,V}(\la)\,. {{\mathfrak n}}otag \end{displaymath}an \end{lemma} \begin{displaymath}gin{proof} It is sufficient to check the equation for the residues of both sides at $z_j=z_k,\,k{{\mathfrak n}}eq j$, and for the limit of both sides as $z_j\to\infty$. The residue equation $[{\Bbb B}_{w,V}(\la), \Om^{(j,k)}]=0$ is true since the Casimir operator commutes with the comultiplication. The limit equation \begin{displaymath}an\label{compa-sl} {\Bbb B}_{w,V}(\la)\,(\,\sum_{k,\,k{{\mathfrak n}}eq j} (\Om^+)^{(j,k)}+\la^{(j)}\,)\,=\, (\,\sum_{k,\,k{{\mathfrak n}}eq j}w^{-1}(\Om^+)^{(j,k)}+\la^{(j)}\,)\, {\Bbb B}_{w,V}(\la)\, {{\mathfrak n}}otag \end{displaymath}an is a corollary of Lemma \ref{r-B}. \end{proof} The Theorem is proved for $sl_N,\, N=2$. For $N>2$, it remains to prove that \begin{displaymath}an\label{compat-dyn} K_i(z,\la+\kappa\om^\vee_j)\,K_j(z,\la)\,=\, K_j(z,\la+\kappa\om^\vee_i)\,K_i(z,\la)\, \end{displaymath}an for all $i,j$, $0< i<j < N$. We prove {\mathbb R}ef{compat-dyn} for $N=3$. For arbitrary $N$ the proof is similar. Another proof is given in Section \ref{dynamical}. 
For $N=3$, $i=1,\, j=2$, equation {\mathbb R}ef{compat-dyn} takes the form \begin{displaymath}an\label{compatib} {}&{}&{\partial}rod_{k=1}^n z_k^{(\om^\vee_1)^{(k)}} \,{\Bbb B}_{V}^{\al_1+\al_2}(\la+\kappa\om^\vee_2)\, \,{\Bbb B}_{V}^{\al_1}(\la+\kappa\om^\vee_2)\, {\partial}rod_{k=1}^n z_k^{(\om_2^\vee)^{(k)}}\, \,{\Bbb B}_{V}^{\al_1+\al_2}(\la)\, \,{\Bbb B}_{V}^{\al_2}(\la)\,= \\ &{}&{\partial}rod_{k=1}^n z_k^{(\om_2^\vee)^{(k)}}\, \,{\Bbb B}_{V}^{\al_1+\al_2}(\la+\kappa \om^\vee_1)\, \,{\Bbb B}_{V}^{\al_2}(\la+\kappa\om^\vee_1)\, {\partial}rod_{k=1}^n z_k^{(\om^\vee_1)^{(k)}}\ \,{\Bbb B}_{V}^{\al_1+\al_2}(\la)\, \,{\Bbb B}_{V}^{\al_1}(\la)\,. {{\mathfrak n}}otag \end{displaymath}an We have ${\Bbb B}_{V}^{\al_1}(\la+\kappa\om^\vee_2)= {\Bbb B}_{V}^{\al_1}(\la)$ since $(\om^\vee_2,\al_1)=0$. We have $[{\Bbb B}_{V}^{\al_1}(\la), {\partial}rod_{k=1}^n z_k^{(\om^\vee_2)^{(k)}}]=0$ since ${\Bbb B}_{V}^{\al_1}(\la)$ is a power series in $E_{\al_1},\,F_{\al_1}$. Similarly, ${\Bbb B}_{V}^{\al_2}(\la+\kappa\om^\vee_1)={\Bbb B}_{V}^{\al_2}(\la)$ and $[{\Bbb B}_{V}^{\al_2} (\la),{\partial}rod_{k=1}^n z_k^{(\om^\vee_1)^{(k)}}]=0$. Using these remarks and the relation $$ {\Bbb B}_{V}^{\al_2}(\la) {\Bbb B}_{V}^{\al_1+\al_2}(\la) {\Bbb B}_{V}^{\al_1}(\la)= {\Bbb B}_{V}^{\al_1}(\la) {\Bbb B}_{V}^{\al_1+\al_2}(\la) {\Bbb B}_{V}^{\al_2}(\la) $$ we reduce {\mathbb R}ef{compatib} to \begin{displaymath}an {\partial}rod_{k=1}^n z_k^{(\om^\vee_1-\om^\vee_2)^{(k)}}\, \,{\Bbb B}_{V}^{\al_1+\al_2}(\la+\kappa\om^\vee_2)\,= \,{\Bbb B}_{V}^{\al_1+\al_2}(\la+\kappa\om^\vee_1)\, {\partial}rod_{k=1}^n z_k^{(\om^\vee_1-\om^\vee_2)^{(k)}}\,. {{\mathfrak n}}otag \end{displaymath}an This equation holds since $\,{\Bbb B}_{V}^{\al_1+\al_2}(\la+\kappa\om^\vee_2)\,= \,{\Bbb B}_{V}^{\al_1+\al_2}(\la+\kappa\om^\vee_1)$, each of these operators is a power series in $E_{\al_1+\al_2},\,F_{\al_1+\al_2}$, and $(\om^\vee_1-\om^\vee_2, \al_1+\al_2)=0$. 
\subsection{An Equivalent Form of Dynamical Equations for $sl_N$} For $j=1,...,N$, set $\delta_j=\om^\vee_j-\om^\vee_{j-1}$ where $\om^\vee_0=\om^\vee_N=0$. Then the system of equations {\mathbb R}ef{dyn-sl} is equivalent to the system \begin{displaymath}an\label{dyn-sl-mod} u(z_1,...,z_n,\la+\kappa\delta_i)\,=&{}& \left({\Bbb B}_V^{e_{i-1,i-1}-e_{i,i}}(\la+\kappa\delta_i)\right)^{-1}... \left({\Bbb B}_V^{e_{1,1}-e_{i,i}}(\la+\kappa\delta_i)\right)^{-1}\times {{\mathfrak n}}otag \\ &{}&{\partial}rod_{k=1}^n z_k^{(\delta_i)^{(k)}}\, {\Bbb B}_V^{e_{i,i}-e_{N,N}}(\la)...{\Bbb B}_V^{e_{i,i}-e_{i+1,i+1}}(\la) u(z_1,...,z_n,\la) {{\mathfrak n}}otag \end{displaymath}an where $i=1,...,N$. Notice that the inverse powers can be eliminated using property III in Section \ref{Bbb-B}. \subsection{Application to Determinants} Let ${{\mathfrak g}\,}$ be a simple Lie algebra, $V$ a finite dimensional ${{\mathfrak g}\,}$-module, $V[{{\mathfrak n}}u]$ a weight subspace. For a positive root $\al$ fix the $sl_2$ subalgebra in ${{\mathfrak g}\,}$ generated by $H_\al, E_\al, F_\al$. Consider $V$ as an $sl_2$-module. Let $V[{{\mathfrak n}}u]_\al\subset V$ be the $sl_2$-submodule generated by $V[{{\mathfrak n}}u]$, $$ V[{{\mathfrak n}}u]_\al=\oplus_{k\in{\mathbb Z}_{{{\mathfrak g}\,}e 0}}W^\al_k\otimes L_{{{\mathfrak n}}u+k\al} $$ the decomposition into irreducible $sl_2$-modules. Here $L_{{{\mathfrak n}}u+k\al}$ is the irreducible module with highest weight ${{\mathfrak n}}u+k\al$ and $W^\al_k$ the multiplicity space. Let $d^\al_k=$ dim $W^\al_k$. Set \begin{displaymath}an\label{X} X_{\al,V[{{\mathfrak n}}u]}(\la)= {\partial}rod_{k\in{\mathbb Z}_{{{\mathfrak g}\,}e 0}} \left({\partial}rod_{j=1}^k {{{{\Bbb B}bb G\,}}amma\left(1- {(\la-{1\over 2}({{\mathfrak n}}u+j\al),\al)\over \kappa}\right) \over {{{\Bbb B}bb G\,}}amma\left(1-{(\la+{1\over 2}({{\mathfrak n}}u+j\al),\al)\over \kappa}\right)} \right)^{d^\al_k}\,, {{\mathfrak n}}otag \end{displaymath}an cf. formula {\mathbb R}ef{product}. 
Here ${{{\Bbb B}bb G\,}}amma$ is the standard gamma function. Let $V=V_1\!\otimes\!\, ...\!\otimes\!\, V_n$ be a tensor product of finite dimensional ${{\mathfrak g}\,}$-modules. Set $\Lambda_{k}(\la)=\text{tr}_{V[{{\mathfrak n}}u]}\la^{(k)}$, $\epe_{k,l}=\text{tr}_{V[{{\mathfrak n}}u]}\Om^{(k,l)}$, ${{\mathfrak g}\,}amma_k=\sum_{l,\,l{{\mathfrak n}}e k} \epsilon_{k,l}$. Set \begin{displaymath}an\label{Det} D_{V[{{\mathfrak n}}u]}(z_1,...,z_n,\la)= {\partial}rod_{k=1}^nz_k^{{\Lambda_k(\la)\over \kappa} - {{{\mathfrak g}\,}m_k \over 2\kappa}} \,{\partial}rod_{1\leq k<l\leq n}(z_k-z_l)^ {\epe_{k,l}\over \kappa}\, {\partial}rod_{\al\in\Si_+}X_{\al,V[{{\mathfrak n}}u]}(\la)\,. \end{displaymath}an Let ${{\mathfrak g}\,}=sl_N$, $V=V_1\!\otimes\!\, ...\!\otimes\!\, V_n$ a tensor product of finite dimensional $sl_N$-modules. Fix a basis $v_1,...,v_d$ in a weight subspace $V[{{\mathfrak n}}u]$. Suppose that $u_i(z_1,...,z_n,\la)= \sum_{j=1}^d u_{i,j}v_j$, $i=1, \ldots,d$, is a set of $V[{{\mathfrak n}}u]$-valued solutions of the combined system of KZ equations {\mathbb R}ef{KZ-sl} and dynamical equations {\mathbb R}ef{dyn-sl}. \begin{displaymath}gin{corollary}\label{determ} $$ \text{det}\,(u_{i,j})_{1\leq i,j \leq d}\,=\, C_{V[{{\mathfrak n}}u]}(\la) \,D_{V[{{\mathfrak n}}u]}(z_1,...,z_n,\la) $$ where $C_{V[{{\mathfrak n}}u]}(\la)$ is a function of $\la$ (depending also on $V_1,...,V_n$ and ${{\mathfrak n}}u$) such that $$ C_{V[{{\mathfrak n}}u]}(\la+\kappa\om)=C_{V[{{\mathfrak n}}u]}(\la) $$ for all $\om\in P^\vee$. \end{corollary} \begin{displaymath}gin{proof} The Corollary follows from the following simple Lemma. \begin{displaymath}gin{lemma}\label{16} For $i=1,...,N-1$, the operator ${\Bbb B}_{w_{[i]},V}(\la)$ is the product in a suitable order of all operators ${\Bbb B}^\al_V(\la)$ with $\al\in\Si_+$ and $(\om^\vee_i,\al)>0$. 
\end{lemma} \end{proof} Notice that Lemma \ref{16} in particular implies that operators ${\Bbb B}_{w_{[i]},V}(\la)$ and the dynamical equations are well defined in the tensor product of any highest weight $sl_N$-modules. \section{Dynamical Difference Equations}\label{dynamical} In this section we introduce dynamical difference equations for arbitrary simple Lie algebra. The compatibility of the dynamical equations follows from \cite{Ch1}. We prove the compatibility of dynamical and KZ equations. \subsection{Affine Root Systems, \cite{Ch1, Ch2}} Let ${{\mathfrak g}\,}$ be a simple Lie algebra. The vectors $\tilde\al = [\al,j]\in {{\mathfrak h\,}}\times {\mathbb R}$ for $\al\in \Si, j \in{\mathbb Z}$ form the affine root system $\Si^a$ corresponding to the root system $\Si\subset {{\mathfrak h\,}}$. We view $\Si$ as a subset in $\Si^a$ identifying $\al\in{{\mathfrak h\,}}$ with $[\al,0]$. The simple roots of $\Si^a$ are $\al_1,...,\al_r \in \Si$ and $\al_0=[-\theta, 1]$ where $\theta\in\Si$ is the maximal root. The positive roots are $\Si_+^a=\{[\al,j]\in\Si^a\,|\, \al\in \Si,\,j>0 \,{}\,\text{or}\, \,\,\al\in\Si_+,\,j=0\}$. The Dynkin diagram and its affine completion with $\{\al_i\}_{0\leq i\leq r}$ as vertices are denoted ${{{\Bbb B}bb G\,}}amma$ and ${{{\Bbb B}bb G\,}}amma^a$, respectively. The set of the indices of the images of $\al_0$ with respect to all automorphisms of ${{{\Bbb B}bb G\,}}amma^a$ is denoted $O$ ($O=\{0\}$ for $E_8, F_4, G_2$ ). Let $O^*=\{i\in O\,|\, i{{\mathfrak n}}eq 0\}$. For $i=1,...,r$, the dual fundamental weight $\om^\vee_i$ is minuscule if and only if $i\in O^*$. Given $\tilde\al=[\al,j]\in\Si^a$ and $\om\in P^\vee$, set $$ s_{\tilde\al}(\tilde z)=\tilde z - (z,\al^\vee)\tilde\al, \qquad t_{\om}(\tilde z)=[z,\xi - (z,\om)] $$ for $\tilde z=[z,\xi]$. The affine Weyl group ${{{\Bbb B}bb W\,}}^a$ is the group generated by reflections $s_{\tilde\al},\,\tilde\al\in \Si^a_+$. 
One defines the length of elements of ${{{\Bbb B}bb W\,}}^a$ taking the simple reflections $s_i=s_{\al_i},\, i=0,...,r$, as generators of ${{{\Bbb B}bb W\,}}^a$. The group ${{{\Bbb B}bb W\,}}^a$ is the semidirect product ${{{\Bbb B}bb W\,}} \ltimes Q^\vee_t$ of its subgroups ${{{\Bbb B}bb W\,}}=\langle s_\al\,|\,\al\in\Si_+\rightarrowngle$ and $Q^\vee_t=\{t_\om\,|\, \om\in Q^\vee\}$, where for $\al\in \Si$ we have $t_{\al^\vee}=s_\al s_{[\al,1]}=s_{[-\al,1]}s_\al$. Consider the group $P^\vee_t=\{t_\om\,|\, \om\in P^\vee\}$. The {\it extended affine Weyl group} ${{{\Bbb B}bb W\,}}^b$ is the group of transformations of ${{\mathfrak h\,}}\times{\mathbb R}$ generated by ${{{\Bbb B}bb W\,}}$ and $P^\vee_t$. ${{{\Bbb B}bb W\,}}^b$ is isomorphic to ${{{\Bbb B}bb W\,}}\ltimes P^\vee_t$ with action $(w,\om)([z,\xi])=[w(z),\xi-(z,\om)]$. Notice that for any $w\in{{{\Bbb B}bb W\,}}^b$ and $\tilde\al\in\Si^a$, we have $w(\tilde\al)\in \Si^a$. The extended affine Weyl group has a remarkable subgroup $\Pi=\{{\partial}i_i\,|\, i\in O\}$, where ${\partial}i_0\in\Pi$ is the identity element in ${{{\Bbb B}bb W\,}}^b$ and for $i\in O^*$ we have ${\partial}i_i= t_{\om^\vee_i}w_{[i]}^{-1}$. The group $\Pi$ is isomorphic to $P^\vee/Q^\vee$ with the isomorphism sending ${\partial}i_i$ to the minuscle weight $\om^\vee_i$. For $i\in O^*$, the element $w_{[i]}$ preserves the set $\{-\theta,\,\al_1,...,\al_r\}$ and ${\partial}i_i(\al_0)=\al_i=w_{[i]}^{-1}(-\theta)$. We have $$ {{{\Bbb B}bb W\,}}^b=\Pi\ltimes {{{\Bbb B}bb W\,}}^a, \qquad \text{where}\qquad {\partial}i_is_l{\partial}i^{-1}_i=s_k \qquad \text{if} \qquad {\partial}i_i(\al_l)=\al_k\, \qquad \text{and}\qquad 0\leq k\leq r\,. $$ We extend the notion of length to ${{{\Bbb B}bb W\,}}^b$. For $i\in O^*,\, w\in {{{\Bbb B}bb W\,}}^a$, we set the length of ${\partial}i_iw$ to be equal to the length of $w$ in ${{{\Bbb B}bb W\,}}^a$. \subsection{Affine R-matrices, \cite{Ch1, Ch2}} Fix a ${\mathbb C}$-algebra $F$. 
A set $G=\{G^\al\in F\,|\,\al\in \Si\}$ is called a closed R-matrix if \begin{displaymath}an G^{\al}G^{\bt}&=&G^{\bt}G^{\al}\,, {{\mathfrak n}}otag \\ G^{\al}G^{\al+\bt}G^{\bt}&=& G^{\bt}G^{\al+\bt}G^{\al}\,, {{\mathfrak n}}otag \\ G^{\al}G^{\al+\bt}G^{\al+2\bt}G^{\bt}&=& G^{\bt}G^{\al+2\bt}G^{\al+\bt}G^{\al}\,, {{\mathfrak n}}otag \\ G^{\al}G^{3\al+\bt}G^{2\al+\bt}G^{3\al+2\bt} G^{\al+\bt}G^{\bt}&=& G^{\bt}G^{\al+\bt}G^{3\al+2\bt}G^{2\al+\bt} G^{3\al+\bt}G^{\al}\, {{\mathfrak n}}otag \end{displaymath}an under the assumption that $\al,\bt\in\Si$ and ${\mathbb R}\langle\al,\bt\rightarrowngle =\{{\partial}m{{\mathfrak g}\,}m\}$ where ${{\mathfrak g}\,}m$ runs over all indices in the corresponding identity. A set $G^a=\{\tilde G^{\tilde\al}\in F\,|\,\tilde \al\in \Si^a\}$ is called a closed affine R-matrix if $\tilde G^{\tilde\al}$ satisfy the same relations where $\al,\bt$ are replaced with $\tilde\al, \tilde\bt$. If $G^a$ is an affine R-matrix, for any $w\in{{{\Bbb B}bb W\,}}^b$ define an element $ \tilde G_w\in F$ as follows. Given a reduced presentation $w={\partial}i_is_{j_l}...s_{j_1}$, $i\in O$, $0\leq j_1,...,j_l\leq r$, set $\tilde G_w=\tilde G^{\tilde \al^l}...\tilde G^{\tilde \al^1}$ where $\tilde \al^1=\al_{j_1},\, \tilde \al^2=s_{j_1}(\al_{j_2}),\, \tilde \al^3=s_{j_1}s_{j_2}(\al_{j_3})$,... The element $\tilde G_w$ does not depend on the reduced presentation of $w$. We set $\tilde G_{\text{id}}=1$. The unordered set $\{\tilde\al^1,...,\tilde\al^l\}$ is denoted $\tilde A (w)$. There is a useful formula valid for any (not necessarily minuscule) dual fundamental weight $\om^\vee_i$, $i=1,...,r$, \begin{displaymath}an\label{useful} \tilde A(t_{\om^\vee_i})\,=\,\{[\al,j]\,|\,\al\in\Si_+,\,\text{and}\, (\om^\vee_i,\al)>j{{\mathfrak g}\,}eq 0\}\,, \end{displaymath}an Prop. 1.4 \cite{Ch2}. 
Introduce the following formal notation: for $w\in{{{\Bbb B}bb W\,}}^b$, $\tilde \al, \tilde\bt\in\Si^a$, set ${}^w(\tilde G^{\tilde\al})=G^{w(\tilde\al)},\, {}^w(\tilde G^{\tilde\al}\tilde G^{\tilde\bt})=G^{w(\tilde\al)}G^{w(\tilde\bt)}$,... Then the elements $\{ \tilde G_w\,|\,w\in{{{\Bbb B}bb W\,}}^b\}$ form a 1-cocycle: $$ \tilde G_{xy}={}^{y^{-1}}\tilde G_x\,\tilde G_y\, $$ for all $x,y\in{{{\Bbb B}bb W\,}}^b$ such that $l(xy)=l(x)+l(y)$. There is a way to construct a closed affine R-matrix if a closed nonaffine R-matrix $G=\{ G^{\al}\in F\,|\,\al\in \Si\}$ is given. Namely, assume that the group $P^\vee_t$ acts on the algebra $F$ so that ${}^{t_\om} (G^\al)= G^\al$ whenever $(\om,\al)=0$, $\om\in P^\vee$, $\al\in\Si$. Then for $\tilde \al=[\al,j]\in\Si^a$, choose $\om\in P^\vee$ so that $(\om,\al)=-j$ and set $\tilde G^{\tilde\al}= {}^{t_\om} (G^\al)$. The set $G^a=\{\tilde G^{\tilde\al}\in F\,|\,\tilde \al\in \Si^a\}$ is well defined and forms a closed affine R-matrix called the affine completion of the R-matrix $G$. Assume that a closed affine R-matrix $G^a$ is the affine completion of a closed nonaffine R-matrix $G$. Consider the system of equations for an element $\Phi\in F$: \begin{displaymath}an\label{chered} {}^{t_{-\om^\vee_i}} (\Phi)=\tilde G_{t_{\om^\vee_i}}\Phi\,, \qquad i=1,...,r\,, \end{displaymath}an where $\om^\vee_1,...,\om^\vee_r$ are the dual fundamental weights. \begin{displaymath}gin{thm}\label{Chered} \cite{Ch1} The system of equations {\mathbb R}ef{chered} is compatible, \begin{displaymath}an\label{cher-comp} {}^{t_{-\om^\vee_i}} (\tilde G_{t_{\om^\vee_j}})\,\,\tilde G_{t_{\om^\vee_i}}\,= \,{}^{t_{-\om^\vee_j}} (\tilde G_{t_{\om^\vee_i}})\,\tilde G_{t_{\om^\vee_j}} {{\mathfrak n}}otag \end{displaymath}an for $1\leq i<j\leq r$. \end{thm} {\bf Example, \cite{Ch1}.} Let $\al=\al_1, \,\begin{displaymath}ta=\al_2,\, a=-\om^\vee_1, \,b=-\om^\vee_2$. 
Then the system for $A_2$ is \begin{displaymath}an {}^{t_a}(\Phi)=\tilde G^{\al+\bt}\tilde G^{\al}\Phi, \qquad {}^{t_b}(\Phi)=\tilde G^{\al+\bt}\tilde G^{\bt}\Phi. {{\mathfrak n}}otag \end{displaymath}an The system for $B_2$ is \begin{displaymath}an {}^{t_a}(\Phi)=\tilde G^{\al+2\bt}\tilde G^{\al+\bt}\tilde G^{\al}\Phi, \qquad {}^{t_b}(\Phi)=\tilde G^{[\al+2\bt,1]}\tilde G^{\al+\bt}\tilde G^{\al+2\bt} \tilde G^{\bt}\Phi. {{\mathfrak n}}otag \end{displaymath}an The system for $G_2$ is \begin{displaymath}an {}^{t_a}(\Phi)&=&\tilde G^{[3\al+2\bt,2]}\tilde G^{[3\al+\bt,2]}\tilde G^{[2\al+\bt,1]} \tilde G^{[3\al+2\bt,1]}\tilde G^{[3\al+\bt,1]}\times {{\mathfrak n}}otag \\ &{}& \tilde G^{\al+\bt}\tilde G^{3\al+2\bt}\tilde G^{2\al+\bt} \tilde G^{3\al+\bt}\tilde G^{\al}\Phi, {{\mathfrak n}}otag \\ {}^{t_b}(\Phi)&=&\tilde G^{[3\al+2\bt,1]} \tilde G^{3\al+\bt}\tilde G^{2\al+\bt}\tilde G^{3\al+2\bt} \tilde G^{\al+\bt}\tilde G^{\bt}\Phi. {{\mathfrak n}}otag \end{displaymath}an \subsection{Affine R-matrix for Dynamical Equations} Fix $\kappa\in{\mathbb C}$ and a natural number $n$. Let $F$ be the algebra of meromorphic functions of $z_1,...,z_n \in {\mathbb C}$ and $\la\in{{\mathfrak h\,}}$ with values in $U{{\mathfrak g}\,}_0^{\!\otimes\!\, n}$. Define an action of ${{{\Bbb B}bb W\,}}$ on $F$ by $$ {}^wf(z_1,...,z_n,\la)\,=\, w(f(z_1,...,z_n,w^{-1}( \la))) $$ and an action of $P^\vee_t$ on $F$ by $$ {}^{t_\om} f(z_1,...,z_n,\la)\,=\, {\partial}rod_{k=1}^n z_k^{\om^{(k)}}f(z_1,...,z_n,\la-\kappa\om) {\partial}rod_{k=1}^n z_k^{-\om^{(k)}} $$ where $w\in{{{\Bbb B}bb W\,}}$, $\om\in P^\vee$, $f\in F$. \begin{displaymath}gin{lemma} Those actions extend to an action of ${{{\Bbb B}bb W\,}}^b={{{\Bbb B}bb W\,}}\ltimes P^\vee_t$ on $F$, i.e. ${}^{w}({}^{t_\om}f)={}^{t_{w(\om)}}({}^{w}f)$ for $w\in{{{\Bbb B}bb W\,}}$, $\om\in P^\vee$, $f\in F$. 
$\square$ \end{lemma} Define a closed nonaffine $F$-valued R-matrix $G_F=\{ G_F^\al\,|\,\al\in\Si\}$ by $$ G_F^\al(\la)\,=\,\Delta^{(n)}(p((\la,\al^\vee)-1; H_\al, E_\al, F_\al)). $$ Properties of operators ${\Bbb B}^\al_V$ described in Section \ref{Bbb-B} ensure that $G_F$ is a closed R-matrix. The action of $P^\vee_t$ on $F$ defined above clearly has the property: ${}^{t_\om} (G_F^\al)= G_F^\al$ whenever $(\om,\al)=0$, $\om\in P^\vee$, $\al\in\Si$. This allows us to define a closed affine R-matrix $G_F^a=\{\tilde G_F^{\tilde\al}\in F\,|\,\tilde \al\in \Si^a\}$ as the affine completion of the R-matrix $G_F$. Namely, for $\tilde \al=[\al,j]\in\Si^a$, we choose $\om\in P^\vee$ so that $(\om,\al)=-j$ and set $$ \tilde G_F^{[\al,j]}(z_1,...,z_n,\la)\,= \,{}^{t_\om}( G_F^\al)\,=\, {\partial}rod_{k=1}^n z_k^{\om^{(k)}}\,G_F^\al(\la-\kappa\om)\, {\partial}rod_{k=1}^n z_k^{-\om^{(k)}}\,. $$ Let $V=V_1\!\otimes\!\, ...\!\otimes\!\, V_n$ be a tensor product of finite dimensional ${{\mathfrak g}\,}$-modules. Let $F_V$ be the algebra of meromorphic functions of $z_1,...,z_n \in {\mathbb C}$ and $\la\in{{\mathfrak h\,}}$ with values in {{\mathfrak n}}ewline End $(V)$. The closed affine R-matrix $G^a_F$ induces a closed affine R-matrix $G^a_V=\{\tilde G_V^{\tilde\al}\}$ where $$ \tilde G_V^{\tilde\al}(z_1,...,z_n,\la)=\tilde G_F^{\tilde\al}(z_1,...,z_n, \la+{1\over 2}\sum_{k=1}^n h^{(k)})|_V\,. $$ In other words, $$ \tilde G_V^{[\al,j]}(z_1,...,z_n,\la)\,=\, {\partial}rod_{k=1}^n z_k^{\om^{(k)}}\,{\Bbb B}_V^\al(\la-\kappa\om)\, {\partial}rod_{k=1}^n z_k^{-\om^{(k)}}\, $$ where $(\om,\al)=-j$ and the operators ${\Bbb B}^\al_V$ are defined in Section \ref{Bbb-B}. For any $w\in {{{\Bbb B}bb W\,}}^b$ and $\tilde \al\in\Si^a$, we have ${}^w(\tilde G_V^{\tilde\al})=\tilde G_V^{w(\tilde\al)}$. Let $\{\tilde G^V_w\in F_V\,|\,w\in{{{\Bbb B}bb W\,}}^b\}$ be the 1-cocycle associated with the affine R-matrix $G_V^a$. 
Consider the system $$ {\partial}rod_{k=1}^n z_k^{-(\om_i^\vee)^{(k)}}\Phi(z_1,...,z_n,\la+\kappa\om^\vee_i) {\partial}rod_{k=1}^n z_k^{(\om^\vee_i)^{(k)}}\,=\,\tilde G^V_{t_{\om^\vee_i}} (z_1,...,z_n,\la)\Phi(z_1,...,z_n,\la)\,, $$ $i=1,...,r$, of equations {\mathbb R}ef{chered} associated with the affine R-matrix $G^a_V$. By Theorem \ref{Chered} this system is compatible. {\bf Example.} For ${{\mathfrak g}\,}=sl_N$, this system of equations for an element $\Phi\in F_V$ has the form $$ {\partial}rod_{k=1}^n z_k^{-(\om^\vee_i)^{(k)}}\Phi(z_1,...,z_n,\la+\kappa\om^\vee_i) {\partial}rod_{k=1}^n z_k^{(\om^\vee_i)^{(k)}}\,=\, {\Bbb B}_{w_{[i]},V}(\la)\Phi(z_1,...,z_n,\la)\,, $$ $i=1,...,N-1$, cf. {\mathbb R}ef{dyn-sl}. Introduce {\it the dynamical difference equations} on a $V$-valued function $u(z_1,...,z_n,\la)$ as \begin{displaymath}an\label{main-en} {} \\ {\partial}rod_{k=1}^n z_k^{-(\om^\vee_i)^{(k)}}\,u(z_1,...,z_n,\la+\kappa\om^\vee_i) \,=\, \tilde G^V_{t_{\om^\vee_i}}(z_1,...,z_n,\la)\,u(z_1,...,z_n,\la)\,, {{\mathfrak n}}otag \end{displaymath}an $ i=1,...,r$. Notice that the operators $\tilde G^V_{t_{\om^\vee_i}}$ preserve the weight decomposition of $V$. Notice also that the operators $\tilde G^V_{t_{\om^\vee_i}}$ are well defined on the tensor product of any highest weight ${{\mathfrak g}\,}$-modules according to formula {\mathbb R}ef{useful}. An easy corollary of the compatibility of system {\mathbb R}ef{chered} is \begin{displaymath}gin{lemma}\label{our-comp} The dynamical difference equations {\mathbb R}ef{main-en} form a compatible system of equations for a $V$-valued function $u(z_1,...,z_n,\la)$. \end{lemma} In particular, for ${{\mathfrak g}\,}=sl_N$, the Lemma says that the system {\mathbb R}ef{dyn-sl} is compatible. \begin{displaymath}gin{thm}\label{not-thm} Assume that the Lie algebra ${{\mathfrak g}\,}$ has a minuscule dual fundamental weight, i.e. ${{\mathfrak g}\,}$ is not of type $E_8, F_4, G_2$. 
Then the dynamical equations {\mathbb R}ef{main-en} together with the KZ equations {\mathbb R}ef{KZ-equa} form a compatible system of equations. \end{thm} The Theorem is proved in Section \ref{Proof}. We conjecture that the statement of the Theorem holds for any simple Lie algebra. Let ${{\mathfrak g}\,}$ be a simple Lie algebra for which the KZ and dynamical equations are compatible. Let $V=V_1\!\otimes\!\, ...\!\otimes\!\, V_n$ be a tensor product of finite dimensional ${{\mathfrak g}\,}$-modules. Fix a basis $v_1,...,v_d$ in a weight subspace $V[{{\mathfrak n}}u]$. Suppose that $u_i(z_1,...,z_n,\la)= \sum_{j=1}^d u_{i,j}v_j$, $i=1, \ldots,d$, is a set of $V[{{\mathfrak n}}u]$-valued solutions of the combined system of KZ equations {\mathbb R}ef{KZ-equa} and dynamical equations {\mathbb R}ef{main-en}. \begin{displaymath}gin{corollary}\label{determ-general} $$ \text{det}\,(u_{i,j})_{1\leq i,j \leq d}\,=\, C_{V[{{\mathfrak n}}u]}(\la) \,D_{V[{{\mathfrak n}}u]}(z_1,...,z_n,\la) $$ where $C_{V[{{\mathfrak n}}u]}(\la)$ is a function of $\la$ (depending also on $V_1,...,V_n$ and ${{\mathfrak n}}u$) such that $$ C_{V[{{\mathfrak n}}u]}(\la+\kappa\om)=C_{V[{{\mathfrak n}}u]}(\la) $$ for all $\om\in P^\vee$ and $D_{V[{{\mathfrak n}}u]}(z_1,...,z_n,\la)$ is defined in {\mathbb R}ef{Det}. \end{corollary} The Corollary follows from formula {\mathbb R}ef{useful}. \subsection{Proof of Theorem \ref{not-thm}}\label{Proof} Introduce an action of ${{{\Bbb B}bb W\,}}^b$ on the KZ operators ${{\mathfrak n}}abla_j(\la,\kappa),\, j=1,...,n$. 
Namely, for any $w\in{{{\Bbb B}bb W\,}}$, set $$ {}^w{{\mathfrak n}}abla_j(\la,\kappa)=w({{\mathfrak n}}abla_j(w^{-1}(\la),\kappa))= \kappa z_j{{\partial} \over {\partial} z_j}-\sum_{l,\,l{{\mathfrak n}}eq j}w(r(z_j/z_l))^{(j,l)}-\la^{(j)} $$ and for any $\om\in P_t^\vee$ set \begin{displaymath}an {}^{t_\om}{{\mathfrak n}}abla_j(\la,\kappa)&=& {\partial}rod_{k=1}^n z_k^{\om^{(k)}} {{\mathfrak n}}abla_j(\la -\kappa\om,\kappa) {\partial}rod_{k=1}^n z_k^{-\om^{(k)}}= {{\mathfrak n}}otag \\ \kappa z_j{{\partial} \over {\partial} z_j}&-&{\partial}rod_{k=1}^n z_k^{\om_i^{(k)}} \left(\sum_{l,\,l{{\mathfrak n}}eq j}r(z_j/z_l)^{(j,l)}\right) {\partial}rod_{k=1}^n z_k^{-\om_i^{(k)}} -\la^{(j)}\,. {{\mathfrak n}}otag \end{displaymath}an The compatibility conditions of the dynamical and KZ equations take the form $$ \tilde G^V_{t_{\om^\vee_i}}(z_1,...,z_n,\la)\,{{\mathfrak n}}abla_j(\la,\kappa)\, = \,{}^{t_{-\om^\vee_i}}{{\mathfrak n}}abla_j(\la,\kappa)\, \,\tilde G^V_{t_{\om^\vee_i}}(z_1,...,z_n,\la) $$ for $i=1,...,r$, $j=1,...,n$. The compatibility conditions follow from a more general statement. \begin{displaymath}gin{thm}\label{Last} Assume that the Lie algebra ${{\mathfrak g}\,}$ has a minuscle dual fundamental weight, i.e. ${{\mathfrak g}\,}$ is not of type $E_8, F_4, G_2$. Then for any $j=1,...,n$ and any $w\in {{{\Bbb B}bb W\,}}^b$ we have \begin{displaymath}an\label{last} \tilde G^V_{w}(z_1,...,z_n,\la)\,{{\mathfrak n}}abla_j(\la,\kappa)\, = \,{}^{w^{-1}}{{\mathfrak n}}abla_j(\la,\kappa)\, \,\tilde G^V_{w}(z_1,...,z_n,\la). {{\mathfrak n}}otag \end{displaymath}an \end{thm} We conjecture that the statement of the Theorem holds for any simple Lie algebra. The Theorem follows from the next four Lemmas. \begin{displaymath}gin{lemma} Let $j=1,...,n$. 
Assume that $$ \tilde G^V_{s_l}{{\mathfrak n}}abla_j(\la,\kappa) = {}^{s_l}{{\mathfrak n}}abla_j(\la,\kappa) \tilde G^V_{s_l}\,, \qquad {}^{{\partial}i_i}{{\mathfrak n}}abla_j(\la,\kappa) = {{\mathfrak n}}abla_j(\la,\kappa) $$ for $l=0,...,r$ and $i\in O^*$. Then \begin{displaymath}an\label{} \tilde G^V_{w}(z_1,...,z_n,\la)\,{{\mathfrak n}}abla_j(\la,\kappa)\, = \,{}^{w^{-1}}{{\mathfrak n}}abla_j(\la,\kappa)\, \,\tilde G^V_{w}(z_1,...,z_n,\la) {{\mathfrak n}}otag \end{displaymath}an for all $w\in{{{\Bbb B}bb W\,}}^b$. \end{lemma} \begin{displaymath}gin{proof} If $w={\partial}i_i s_{m_l}...s_{m_1}$ is a reduced presentation, then {{\mathfrak n}}ewline $\tilde G^V_w={}^{s_{m_1}...s_{m_{l-1}}}(\tilde G^V_{s_{m_l}})... {}^{s_{m_1}}(\tilde G^V_{s_{m_2}}) \tilde G^V_{s_{m_1}}$ and \begin{displaymath}an \tilde G^V_w{{\mathfrak n}}abla_j(\la,\kappa)= {}^{s_{m_1}...s_{m_{l-1}}}(\tilde G^V_{s_{m_l}})... {}^{s_{m_1}}(\tilde G^V_{s_{m_2}}) \tilde G^V_{s_{m_1}}{{\mathfrak n}}abla_j(\la,\kappa)= {{\mathfrak n}}otag \\ {}^{s_{m_1}...s_{m_{l-1}}}(\tilde G^V_{s_{m_l}})... {}^{s_{m_1}}(\tilde G^V_{s_{m_2}}) {}^{s_{m_1}}{{\mathfrak n}}abla_j(\la,\kappa)\tilde G^V_{s_{m_1}}= {{\mathfrak n}}otag \\ {}^{s_{m_1}...s_{m_{l-1}}}(\tilde G^V_{s_{m_l}})... {}^{s_{m_1}s_{m_2}}{{\mathfrak n}}abla_j(\la,\kappa) {}^{s_{m_1}}(\tilde G^V_{s_{m_2}}) \tilde G^V_{s_{m_1}}= {{\mathfrak n}}otag \\ {}^{s_{m_1}s_{m_2}...s_{m_l}}{{\mathfrak n}}abla_j(\la,\kappa) {}^{s_{m_1}...s_{m_{l-1}}}(\tilde G^V_{s_{m_l}})... {}^{s_{m_1}}(\tilde G^V_{s_{m_2}}) \tilde G^V_{s_{m_1}}= {{\mathfrak n}}otag \\ {}^{w^{-1}}{{\mathfrak n}}abla_j(\la,\kappa) \tilde G^V_w\,. {{\mathfrak n}}otag \end{displaymath}an \end{proof} \begin{displaymath}gin{lemma}\label{very-nice} Let $j=1,...,n$ and $w\in{{{\Bbb B}bb W\,}}$. Then $$ \tilde G^V_{w}{{\mathfrak n}}abla_j(\la,\kappa) = {}^{w^{-1}}{{\mathfrak n}}abla_j(\la,\kappa) \tilde G^V_{w}\,. 
$$ \end{lemma} \begin{proof} For $w\in{{\Bbb W\,}}$ we have $\tilde G^V_w(z_1,...,z_n,\la)={\Bbb B}_{w,V}(\la)$, and Lemma \ref{very-nice} is equivalent to Lemma \ref{nice}. \end{proof} \begin{lemma}\label{very-very-nice} Let $j=1,...,n$ and $i\in O^*$. Then $$ {}^{\pi_i}\nabla_j(\la,\kappa) = \nabla_j(\la,\kappa). $$ \end{lemma} \begin{proof} We have $\pi_i=t_{\om^\vee_i}w^{-1}_{[i]}$. Hence \begin{eqnarray*} {}^{\pi_i}\nabla_j(\la,\kappa)={}^{t_{\om^\vee_i}}({}^{w^{-1}_{[i]}}\nabla_j(\la,\kappa))= {}^{t_{\om^\vee_i}} (\kappa z_j\frac{\partial}{\partial z_j}-\sum_{l,\,l\neq j}w^{-1}_{[i]}(r(z_j/z_l))^{(j,l)}-\la^{(j)})= \\ \kappa z_j\frac{\partial}{\partial z_j}-\prod_{k=1}^n z_k^{(\om^\vee_i)^{(k)}} \left(\sum_{l,\,l\neq j}w^{-1}_{[i]}(r(z_j/z_l))^{(j,l)}\right) \prod_{k=1}^n z_k^{-(\om^\vee_i)^{(k)}} -\la^{(j)}\,=\,\nabla_j(\la,\kappa)\,. \end{eqnarray*} The last equality follows from Lemma \ref{lemma-2}. \end{proof} \begin{lemma}\label{third} Let $j=1,...,n$. Assume that the Lie algebra ${{\mathfrak g}\,}$ has a minuscule dual fundamental weight. Then $$ \tilde G^V_{s_0}\nabla_j(\la,\kappa) = {}^{s_0}\nabla_j(\la,\kappa) \tilde G^V_{s_0}\,. $$ \end{lemma} \begin{proof} Let $\om^\vee_i$ be a minuscule dual fundamental weight. We have $s_0=\pi_i^{-1}s_i\pi_i$ and $\tilde G^V_{s_0}= {}^{\pi_i^{-1}}(\tilde G^V_{s_i})$ according to the 1-cocycle property.
Now \begin{eqnarray*} {}^{s_0}\nabla_j(\la,\kappa)\tilde G^V_{s_0}= {}^{\pi^{-1}_is_i\pi_i}\nabla_j(\la,\kappa){}^{\pi_i^{-1}}(\tilde G^V_{s_i})= {}^{\pi^{-1}_i}({}^{s_i}({}^{\pi_i}\nabla_j(\la,\kappa))\tilde G^V_{s_i})= \\ {}^{\pi^{-1}_i}({}^{s_i}(\nabla_j(\la,\kappa))\tilde G^V_{s_i})= {}^{\pi^{-1}_i}(\tilde G^V_{s_i}\nabla_j(\la,\kappa))= {}^{\pi^{-1}_i}(\tilde G^V_{s_i}){}^{\pi^{-1}_i}(\nabla_j(\la,\kappa))= \tilde G^V_{s_0}\nabla_j(\la,\kappa)\,. \end{eqnarray*} \end{proof} Theorems \ref{not-thm} and \ref{Last} are proved. \begin{thebibliography}{WW} \normalsize \bibitem [ABRR] {ABRR} D.Arnaudon, E.Buffenoir, E.Ragoucy, and Ph.Roche, {\it Universal Solutions of quantum dynamical Yang-Baxter equations}, q-alg/9712037. \bibitem[BGG]{BGG} I.N.Bernshtein, I.M.Gelfand, S.I.Gelfand, {\it Structure of Representations Generated by Vectors of Highest Weight}, Funct. Anal.\ Appl.\ {\bf 5} (1971), 1--8. \bibitem[Ch1]{Ch1} I.Cherednik, {\it Quantum Knizhnik-Zamolodchikov Equations and Affine Root Systems}, Commun.\ Math.\ Phys.\ {\bf 150} (1992), 109--136. \bibitem[Ch2]{Ch2} I.Cherednik, {\it Difference Elliptic Operators and Root Systems}, Int.\ Math.\ Res.\ Notices (1995), no.\;1, 44--59. \bibitem[Ch3]{Ch3} I.Cherednik, {\it Integral Solutions of Trigonometric Knizhnik-Zamolodchikov Equations and Kac-Moody Algebras}, Publ.\ RIMS {\bf 27} (1991), 727--744. \bibitem[EFK]{EFK} P.Etingof, I.Frenkel, A.Kirillov, {\it Lectures on Representation Theory and Knizhnik-Zamolodchikov Equations}, AMS, 1998. \bibitem[ES]{ES} P.Etingof, O.Schiffmann, {\it Lectures on the Dynamical Yang-Baxter Equations}, math.QA/9908064.
\bibitem[EV1]{EV1} P.Etingof, A.Varchenko, {\it Exchange Dynamical Quantum Groups}, Commun.\ Math.\ Phys.\ {\bf 205} (1999), 19--52. \bibitem[EV2]{EV2} P.Etingof, A.Varchenko, {\it Traces of Intertwining Operators for Quantum Groups and Difference Equations, I}, math.QA/9907181. \bibitem[FMTV]{FMTV} G. Felder, Y. Markov, V. Tarasov, A.Varchenko, {\it Differential Equations Compatible with KZ Equations}, math.QA/0001184. \bibitem[GR]{GR} G.Gaspar, M.Rahman, {\it Basic hypergeometric Series}, Cambridge University Press, 1990. \bibitem[SV]{SV} V.Schechtman, A.Varchenko, {\it Arrangements of hyperplanes and Lie algebra homology}, Invent.\ Math.\ {\bf 106} (1991), 139--194. \bibitem[V]{V} A.Varchenko, {\it Multidimensional hypergeometric functions and representation theory of Lie algebras and quantum groups}, Adv.\ Ser.\ Math.\ Phys. {\bf 21}, World Scientific, 1995. \bibitem[Zh1]{Zh1} D.P.Zhelobenko, {\it Extremal cocycles on Weyl groups\/}, Funct.\ Anal.\ Appl.\ {\bf 21} (1987), 183--192. \bibitem[Zh2]{Zh2} D.P.Zhelobenko, {\it Extremal projectors and generalized Mickelsson algebras on reductive Lie algebras\/}, Math.\ USSR, Izv.\ {\bf 33} (1989), 85--100. \end{thebibliography} \end{document} \begin{displaymath}gin{thebibliography}{TV2} {{\mathfrak n}}ormalsize \bibitem [ABRR] {ABRR} D.Arnaudon, E.Buffenoir, E.Ragoucy, and Ph.Roche, {\it Universal Solutions of quantum dynamical Yang-Baxter equations}, q-alg/9712037. \bibitem[BGG]{BGG} I.N.Bernshtein, I.M.Gelfand, S.I.Gelfand, {\it Structure of Representations Generated by Vectors of Highest Weight}, Functional Analysis and Its Applications 5 (1971), 1-8. \bibitem[Ch1]{Ch1} I.Cherednik, {\it Quantum Knizhnik-Zamolodchikov Equations and Affine Root Systems}, Comm. Math. Phys. 150 (1992), 109-136. \bibitem[Ch2]{Ch2} I.Cherednik, {\it Difference Elliptic Operators and Root Systems}, Int. Math. Res. Notices, (1995), No. 1, 44-59. 
\bibitem[Ch3]{Ch3} I.Cherednik, {\it Integral Solutions of Trigonometric Knizhnik-Zamolodchikov Equations and Kac-Moody Algebras}, Publ. of RIMS 27/5 (1991) 727-744. \bibitem[EFK]{EFK} P.Etingof, I.Frenkel, A.Kirillov, {\it Lectures on Representation Theory and Knizhnik-Zamolodchikov Equations}, AMS, 1998. \bibitem[ES]{ES} P.Etingof, O.Schiffmann, {\it Lectures on the Dynamical Yang-Baxter Equations}, math.QA/9908064. \bibitem[EV1]{EV1} P.Etingof, A.Varchenko, {\it Exchange Dynamical Quantum Groups}, Commun. Math. Phys. 205 (1999), 19-52. \bibitem[EV2]{EV2} P.Etingof, A.Varchenko, {\it Traces of Intertwining Operators for Quantum Groups and Difference Equations, I}, math.QA/9907181. \bibitem[FMTV]{FMTV} G. Felder, Y. Markov, V. Tarasov, A.Varchenko, {\it Differential Equations Compatible with KZ Equations}, math.QA/0001184. \bibitem[GR]{GR} G.Gaspar, M.Rahman, {\it Basic hypergeometric Series}, Cambridge University Press, 1990. \bibitem[SV]{SV} V.Schechtman, A.Varchenko, {\it Arrangements of hyperplanes and Lie algebra homology}, Inv. Math. 106 (1991) 139-194. \bibitem[V]{V} A.Varchenko, {\it Multidimensional hypergeometric functions and representation theory of Lie algebras and quantum groups}, Adv. Ser. Math. Phys. { 21}, World Scientific, 1995. \bibitem[Zh1]{Zh1} D.P.Zhelobenko, {\it Extremal cocycles on Weyl groups\/}, Funct.\ Anal.\ Appl.\ 21 (1987), 183--192. \bibitem[Zh2]{Zh2} D.P.Zhelobenko, {\it Extremal projectors and generalized Mickelsson algebras on reductive Lie algebras\/}, Math.\ USSR, Izv.\ 33 (1989), 85--100. \end{thebibliography} \end{document}
\begin{document} \title{Crystal Analysis of type $C$ Stanley Symmetric Functions} \begin{abstract} Combining results of T.K. Lam and J. Stembridge, the type $C$ Stanley symmetric function $F_w^C(\mathbf{x})$, indexed by an element $w$ in the type $C$ Coxeter group, has a nonnegative integer expansion in terms of Schur functions. We provide a crystal theoretic explanation of this fact and give an explicit combinatorial description of the coefficients in the Schur expansion in terms of highest weight crystal elements. \noindent \textbf{Keywords:} Stanley symmetric functions, crystal bases, Kra\'skiewicz insertion, mixed Haiman insertion, unimodal tableaux, primed tableaux \end{abstract} \section{Introduction} Schubert polynomials of type $B$ and type $C$ were independently introduced by Billey and Haiman~\cite{Billey.Haiman.1995} and Fomin and Kirillov~\cite{Fomin.Kirillov.1996}. Stanley symmetric functions~\cite{Stanley.1984} are stable limits of Schubert polynomials, designed to study properties of reduced words of Coxeter group elements. In his Ph.D. thesis, T.K. Lam~\cite{Lam.1995} studied properties of Stanley symmetric functions of types $B$ (and similarly $C$) and $D$. In particular he showed, using Kra\'skiewicz insertion~\cite{Kraskiewicz.1989,Kraskiewicz.1995}, that the type $B$ Stanley symmetric functions have a positive integer expansion in terms of $P$-Schur functions. On the other hand, Stembridge~\cite{Stembridge.1989} proved that the $P$-Schur functions expand positively in terms of Schur functions. Combining these two results, it follows that Stanley symmetric functions of type $B$ (and similarly type $C$) have a positive integer expansion in terms of Schur functions. Schur functions $s_\lambda(\mathbf{x})$, indexed by partitions $\lambda$, are ubiquitous in combinatorics and representation theory. They are the characters of the symmetric group and can also be interpreted as characters of type $A$ crystals.
In~\cite{Morse.Schilling.2016}, this was exploited to provide a combinatorial interpretation in terms of highest weight crystal elements of the coefficients in the Schur expansion of Stanley symmetric functions in type $A$. In this paper, we carry out a crystal analysis of the Stanley symmetric functions $F_w^C(\mathbf{x})$ of type $C$, indexed by a Coxeter group element $w$. In particular, we use Kra\'skiewicz insertion~\cite{Kraskiewicz.1989,Kraskiewicz.1995} and Haiman's mixed insertion~\cite{Haiman.1989} to find a crystal structure on primed tableaux, which in turn implies a crystal structure $\mathcal{B}_w$ on signed unimodal factorizations of $w$ for which $F^C_w(\mathbf{x})$ is a character. Moreover, we present a type $A$ crystal isomorphism $\Phi \colon \mathcal{B}_w \rightarrow \bigoplus_\lambda \mathcal{B}_{\lambda}^{\oplus g_{w\lambda}}$ for some combinatorially defined nonnegative integer coefficients $g_{w\lambda}$; here $\mathcal{B}_\lambda$ is the type $A$ highest weight crystal of highest weight $\lambda$ . This implies the desired decomposition $F^C_w(\mathbf{x}) = \sum_\lambda g_{w\lambda} s_\lambda (\mathbf{x})$ (see Corollary~\ref{corollary.main2}) and similarly for type $B$. The paper is structured as follows. In Section~\ref{section.background}, we review type $C$ Stanley symmetric functions and type $A$ crystals. In Section~\ref{section.isomorphism} we describe our crystal isomorphism by combining a slight generalization of the Kra\'skiewicz insertion~\cite{Kraskiewicz.1989,Kraskiewicz.1995} and Haiman's mixed insertion~\cite{Haiman.1989}. The main result regarding the crystal structure under Haiman's mixed insertion is stated in Theorem~\ref{theorem.main2}. The combinatorial interpretation of the coefficients $g_{w\lambda}$ is given in Corollary~\ref{corollary.main2}. In Section~\ref{section.semistandard}, we provide an alternative interpretation of the coefficients $g_{w\lambda}$ in terms of semistandard unimodal tableaux. 
Appendices~\ref{section.proof main2} and~\ref{section.proof main3} are reserved for the proofs of Theorems~\ref{theorem.main2} and~\ref{theorem.main3}. \subsection*{Acknowledgments} We thank the anonymous referee for pointing out reference~\cite{Liu.2017} and furthermore the connections between our crystal operators and those obtained by intertwining crystal operators on words with Haiman's symmetrization of shifted mixed insertion~\cite[Section 5]{Haiman.1989} and the conversion map~\cite[Proposition~14]{SW.2001} as outlined in Remark~\ref{remark.doubling}. We thank Toya Hiroshima for pointing out that the definition of the reading word of a primed tableau was misleading in a previous version of this paper. \section{Background} \label{section.background} \subsection{Type $C$ Stanley symmetric functions} The \defn{Coxeter group} $W_C$ of type $C_n$ (or type $B_n$), also known as the hyperoctahedral group or the group of signed permutations, is a finite group generated by $\{s_0, s_1, \ldots, s_{n-1}\}$ subject to the quadratic relations $s_i^2 = 1$ for all $i \in I = \{0,1,\ldots,n-1\}$, the commutation relations $s_i s_j = s_j s_i$ provided $|i-j|>1$, and the braid relations $s_i s_{i+1} s_i = s_{i+1} s_i s_{i+1}$ for all $i>0$ and $s_0 s_1 s_0 s_1 = s_1 s_0 s_1 s_0$. It is often convenient to write down an element of a Coxeter group as a sequence of indices of $s_i$ in the product representation of the element. For example, the element $w = s_2 s_1 s_2 s_1 s_0 s_1 s_0 s_1$ is represented by the word ${\bf w} = 2120101$. A word of shortest length $\ell$ is referred to as a \defn{reduced word} and $\ell(w):=\ell$ is referred as the length of $w$. The set of all reduced words of the element $w$ is denoted by $R(w)$. 
\begin{example} The set of reduced words for $w = s_2 s_1 s_2 s_0 s_1 s_0$ is given by $$R(w) = \{ 210210, 212010, 121010, 120101, 102101 \}.$$ \end{example} We say that a reduced word $a_1 a_2 \ldots a_\ell$ is \defn{unimodal} if there exists an index $v$, such that $$a_1 > a_2 > \cdots > a_v < a_{v+1} < \cdots < a_\ell.$$ Consider a reduced word $\textbf{a} = a_1 a_2 \ldots a_{\ell(w)}$ of a Coxeter group element $w$. A \defn{unimodal factorization} of $\textbf{a}$ is a factorization $\mathbf{A} = (a_1 \ldots a_{\ell_1}) (a_{\ell_1+1} \ldots a_{\ell_2}) \cdots (a_{\ell_r + 1} \ldots a_L)$ such that each factor $(a_{\ell_i+1} \ldots a_{\ell_{i+1}})$ is unimodal. Factors can be empty. For a fixed Coxeter group element $w$, consider all reduced words $R(w)$, and denote the set of all unimodal factorizations for reduced words in $R(w)$ as $U(w)$. Given a factorization $\mathbf{A} \in U(w)$, define the \defn{weight} of a factorization $\mathrm{wt}(\mathbf{A})$ to be the vector consisting of the number of elements in each factor. Denote by $\mathrm{nz}(\mathbf{A})$ the number of non-empty factors of $\mathbf{A}$. \begin{example} For the factorization $\mathbf{A} = (2102)()(10) \in U(s_2 s_1 s_2 s_0 s_1 s_0)$, we have $\mathrm{wt}(\mathbf{A}) = (4,0,2)$ and $\mathrm{nz}(\mathbf{A}) = 2$. \end{example} Following~\cite{Billey.Haiman.1995, Fomin.Kirillov.1996, Lam.1995}, the \defn{type $C$ Stanley symmetric function} associated to $w\in W_C$ is defined as \begin{equation} \label{equation.StanleyC} F^C_w(\mathbf{x}) = \sum_{\mathbf{A} \in U(w)} 2^{\mathrm{nz}(\mathbf{A})} \mathbf{x}^{\mathrm{wt}(\mathbf{A})}. \end{equation} Here $\mathbf{x} = (x_1, x_2, x_3, \ldots)$ and $\mathbf{x}^{\mathbf{v}} = x_1^{v_1} x_2^{v_2} x_3^{v_3} \cdots$. It is not obvious from the definition why the above functions are symmetric. We refer reader to~\cite{Billey.2014}, where this fact follows easily from an alternative definition. 
\defn{Type $B$ Stanley symmetric functions} are also labeled by $w\in W_C$ (as the type $B$ and $C$ Coxeter groups coincide) and differ from $F_w^C(\mathbf{x})$ by an overall factor $2^{-o(w)}$ \[ F_w^B(\mathbf{x}) = 2^{-o(w)} F_w^C(\mathbf{x}), \] where $o(w)$ is the number of zeroes in a reduced word for $w$. Loosely speaking, our combinatorial interpretation in the type $C$ case respects this power of 2---that is, we will get a valid combinatorial interpretation in the type $B$ case by dividing by $2^{o(w)}$. \subsection{Type $A$ crystal of words} Crystal bases~\cite{kashiwara.1994} play an important role in many areas of mathematics. For example, they make it possible to analyze representation theoretic questions using combinatorial tools. Here we only review the crystal of words in type $A_n$ and refer the reader for more background on crystals to~\cite{Bump.Schilling.2017}. Consider the set of words $\mathcal{B}_n^h$ of length $h$ in the alphabet $\{1,2,\ldots,n+1\}$. We impose a crystal structure on $\mathcal{B}_n^h$ by defining lowering operators $f_i$ and raising operators $e_i$ for $1\leqslant i \leqslant n$ and a weight function. The weight of $\mathbf{b} \in \mathcal{B}_n^h$ is the tuple $\mathrm{wt}(\mathbf{b}) = (a_1,\ldots, a_{n+1})$, where $a_i$ is the number of letters $i$ in $\mathbf{b}$. The crystal operators $f_i$ and $e_i$ only depend on the letters $i$ and $i+1$ in $\mathbf{b}$. Consider the subword $\mathbf{b}^{\{i,i+1\}}$ of $\mathbf{b}$ consisting only of the letters $i$ and $i+1$. Successively bracket any adjacent pairs $(i+1) i$ and remove these pairs from the word. The resulting word is of the form $i^a (i+1)^b$ with $a,b\geqslant 0$. Then $f_i$ changes this subword within $\mathbf{b}$ to $i^{a-1} (i+1)^{b+1}$ if $a>0$ leaving all other letters unchanged and otherwise annihilates $\mathbf{b}$.
The operator $e_i$ changes this subword within $\mathbf{b}$ to $i^{a+1} (i+1)^{b-1}$ if $b>0$ leaving all other letters unchanged and otherwise annihilates $\mathbf{b}$. We call an element $\mathbf{b}\in \mathcal{B}_n^h$ \defn{highest weight} if $e_i(\mathbf{b})=\mathbf{0}$ for all $1\leqslant i\leqslant n$ (meaning that all $e_i$ annihilate $\mathbf{b}$). \begin{theorem} \cite{Kashiwara.Nakashima.1994} A word $\mathbf{b} = b_1 \ldots b_h \in \mathcal{B}_n^h$ is highest weight if and only if it is a Yamanouchi word. That is, for any index $k$ with $1 \leqslant k \leqslant h$ the weight of a subword $b_k b_{k+1} \ldots b_h$ is a partition. \end{theorem} \begin{example} The word $85744234654333222211111$ is highest weight. \end{example} Two crystals $\mathcal{B}$ and $\mathcal{C}$ are said to be \defn{isomorphic} if there exists a bijective map $\Phi \colon \mathcal{B} \rightarrow \mathcal{C}$ that preserves the weight function and commutes with the crystal operators $e_i$ and $f_i$. A \defn{connected component} $X$ of a crystal is a set of elements where for any two $\mathbf{b},\mathbf{c} \in X$ one can reach $\mathbf{c}$ from $\mathbf{b}$ by applying a sequence of $f_i$ and $e_i$. \begin{theorem} \cite{Kashiwara.Nakashima.1994} Each connected component of $\mathcal{B}_n^h$ has a unique highest weight element. Furthermore, if $\mathbf{b}, \mathbf{c} \in \mathcal{B}_n^h$ are highest weight elements such that $\mathrm{wt}(\mathbf{b}) = \mathrm{wt}(\mathbf{c})$, then the connected components generated by $\mathbf{b}$ and $\mathbf{c}$ are isomorphic. \end{theorem} We denote a connected component with a highest weight element of highest weight $\lambda$ by $\mathcal{B}_\lambda$. 
The \defn{character} of the crystal $\mathcal{B}$ is defined to be a polynomial in the variables $\mathbf{x}=(x_1,x_2,\ldots,x_{n+1})$ $$\chi_{\mathcal{B}} (\mathbf{x}) = \sum_{\mathbf{b} \in \mathcal{B}} \mathbf{x}^{\mathrm{wt}(\mathbf{b})}.$$ \begin{theorem}[\cite{Kashiwara.Nakashima.1994}] The character of $\mathcal{B}_{\lambda}$ is equal to the Schur polynomial $s_\lambda (\mathbf{x})$ (or Schur function in the limit $n\to \infty$). \end{theorem} \section{Crystal isomorphism} \label{section.isomorphism} In this section, we combine a slight generalization of the Kra\'skiewicz insertion, reviewed in Section~\ref{section.kraskiewicz}, and Haiman's mixed insertion, reviewed in Section~\ref{section.implicit}, to provide an isomorphism of crystals between the crystal of words $\mathcal{B}^h$ and certain sets of primed tableaux. Our main result of this section is stated in Theorem~\ref{theorem.main0}, which asserts that the recording tableaux under the mixed insertion is constant on connected components of $\mathcal{B}^h$. \subsection{Kra\'skiewicz insertion} \label{section.kraskiewicz} In this section, we describe the Kra\'skiewicz insertion. To do so, we first need to define the \defn{Edelman--Greene insertion}~\cite{Edelmann.Greene.1987}. It is defined for a word $\mathbf{w} = w_1 \ldots w_\ell$ and a letter $k$ such that the concatenation $w_1 \ldots w_\ell k$ is an $A$-type reduced word. The Edelman--Greene insertion of a letter $k$ into an {\it increasing} word $\mathbf{w} = w_1 \ldots w_\ell$, denoted by $\mathbf{w} \leftsquigarrow k$, is constructed as follows: \begin{enumerate} \item If $w_\ell < k$, then $\mathbf{w} \leftsquigarrow k = \mathbf{w'},$ where $\mathbf{w'} = w_1 w_2 \ldots w_\ell\ k$. \item If $k>0$ and $k\, k+1 = w_i \, w_{i+1}$ for some $1\leqslant i < \ell$, then $\mathbf{w} \leftsquigarrow k = k+1 \leftsquigarrow\mathbf{w}$. \item Else let $w_i$ be the leftmost letter in $\mathbf{w}$ such that $w_i>k$. 
Then $\mathbf{w} \leftsquigarrow k = w_i \leftsquigarrow \mathbf{w'}$, where $\mathbf{w'} = w_1 \ldots w_{i-1}\ k\ w_{i+1} \ldots w_\ell$. \end{enumerate} In the cases above, when $\mathbf{w} \leftsquigarrow k = k' \leftsquigarrow\mathbf{w'}$, the symbol $k' \leftsquigarrow\mathbf{w'}$ indicates a word $\mathbf{w'}$ together with a ``bumped'' letter $k'$. Next we consider a reduced unimodal word $\mathbf{a} = a_1 a_2 \ldots a_\ell$ with $a_1 > a_2 >\cdots > a_v < a_{v+1} < \cdots < a_\ell$. The \defn{Kra\'skiewicz row insertion} \cite{Kraskiewicz.1989,Kraskiewicz.1995} is defined for a unimodal word $\mathbf{a}$ and a letter $k$ such that the concatenation $a_1 a_2 \ldots a_\ell k$ is a $C$-type reduced word. The Kra\'skiewicz row insertion of $k$ into $\mathbf{a}$ (denoted similarly as $\mathbf{a} \leftsquigarrow k$), is performed as follows: \begin{enumerate} \item If $k=0$ and there is a subword $101$ in $\mathbf{a}$, then $\mathbf{a} \leftsquigarrow 0 = 0 \leftsquigarrow \mathbf{a}$. \item If $k \neq 0$ or there is no subword $101$ in $\mathbf{a}$, denote the decreasing part $a_1 \ldots a_v$ as $\mathbf{d}$ and the increasing part $a_{v+1} \ldots a_\ell$ as $\mathbf{g}$. Perform the Edelman-Greene insertion of $k$ into $\mathbf{g}$. \begin{enumerate} \item If $a_\ell < k$, then $\mathbf{g} \leftsquigarrow k = a_{v+1} \ldots a_\ell k =: \mathbf{g'}$ and $\mathbf{a} \leftsquigarrow k = \mathbf{d} \mathbf{g} \leftsquigarrow k = \mathbf{d\ g'} =: \mathbf{a'}$. \item If there is a bumped letter and $\mathbf{g} \leftsquigarrow k = k' \leftsquigarrow \mathbf{g'}$, negate all the letters in $\mathbf{d}$ (call the resulting word $-\mathbf{d}$) and perform the Edelman-Greene insertion $-\mathbf{d} \leftsquigarrow -k'$. Note that there will always be a bumped letter, and so $-\mathbf{d} \leftsquigarrow -k' = -k'' \leftsquigarrow -\mathbf{d'}$ for some decreasing word $\mathbf{d'}$. 
The result of the Kra\'skiewicz insertion is: $\mathbf{a} \leftsquigarrow k = \mathbf{d}[\mathbf{g} \leftsquigarrow k] = \mathbf{d}[k' \leftsquigarrow \mathbf{g'}] = - [\mathbf{-d} \leftsquigarrow -k']\ \mathbf{g'} = [k'' \leftsquigarrow \mathbf{d'}]\mathbf{g'} = k'' \leftsquigarrow \mathbf{a'}$, where $\mathbf{a'} := \mathbf{d'g'}$. \end{enumerate} \end{enumerate} \begin{example} \begin{equation*} 31012 \leftsquigarrow 0 =0 \leftsquigarrow 31012, \quad 3012 \leftsquigarrow 0 = 0 \leftsquigarrow 3102, \end{equation*} \begin{equation*} 31012 \leftsquigarrow 1 = 1 \leftsquigarrow 32012, \quad 31012 \leftsquigarrow 3 = 310123. \end{equation*} \end{example} The insertion is constructed to ``commute'' a unimodal word with a letter: If $\mathbf{a} \leftsquigarrow k = k' \leftsquigarrow \mathbf{a'}$, the two elements of the type $C$ Coxeter group corresponding to concatenated words $\mathbf{a}\ k$ and $k' \mathbf{a'}$ are the same. The type $C$ Stanley symmetric functions~\eqref{equation.StanleyC} are defined in terms of unimodal factorizations. To put the formula on a completely combinatorial footing, we need to treat the powers of $2$ by introducing signed unimodal factorizations. A \defn{signed unimodal factorization} of $w\in W_C$ is a unimodal factorization $\mathbf{A}$ of $w$, in which every non-empty factor is assigned either a $+$ or $-$ sign. Denote the set of all signed unimodal factorizations of $w$ by $U^{\pm} (w)$. For a signed unimodal factorization $\mathbf{A} \in U^{\pm} (w)$, define $\mathrm{wt}(\mathbf{A})$ to be the vector with $i$-th coordinate equal to the number of letters in the $i$-th factor of $\mathbf{A}$. Notice from~\eqref{equation.StanleyC} that \begin{equation} \label{equation.Upm} F^C_{w}(\mathbf{x}) = \sum_{\mathbf{A} \in U^{\pm}(w)} \mathbf{x}^{\mathrm{wt}(\mathbf{A})}. 
\end{equation} We will use the Kra\'skiewicz insertion to construct a map between signed unimodal factorizations of a Coxeter group element $w$ and pairs of certain types of tableaux $(\mathbf{P},\mathbf{T})$. We define these types of tableaux next. A \defn{shifted diagram} $\mathcal{S}(\lambda)$ associated to a partition $\lambda$ with distinct parts is the set of boxes in positions $\{(i,j) \mid \ 1\leqslant i\leqslant \ell(\lambda), \ i\leqslant j\leqslant \lambda_i+i-1\}$. Here, we use English notation, where the box $(1,1)$ is always top-left. Let $X^\circ_n$ be an ordered alphabet of $n$ letters $X^\circ_n = \{0< 1 < 2< \cdots < n-1\}$, and let $X'_n$ be an ordered alphabet of $n$ letters together with their primed counterparts as $X'_n = \{1' < 1 < 2'< 2< \cdots <n' < n\}$. Let $\lambda$ be a partition with distinct parts. A \defn{unimodal tableau} $\mathbf{P}$ of shape $\lambda$ on $n$ letters is a filling of $\mathcal{S}(\lambda)$ with letters from the alphabet $X^\circ_n$ such that the word $P_i$ obtained by reading the $i$th row from the top of $\mathbf{P}$ from left to right, is a unimodal word, and $P_i$ is the longest unimodal subword in the concatenated word $P_{i+1} P_i$ \cite{Billey.2014} (cf. also with decomposition tableaux~\cite{Serrano.2010,Cho.2013}). The \defn{reading word} of a unimodal tableau $\mathbf{P}$ is given by $\pi_{\mathbf{P}} = P_\ell P_{\ell-1} \ldots P_1$. A unimodal tableau is called \textit{reduced} if $\pi_{\mathbf{P}}$ is a type $C$ reduced word corresponding to the Coxeter group element $w_{\mathbf{P}}$. Given a fixed Coxeter group element $w$, denote the set of reduced unimodal tableaux $\mathbf{P}$ of shape $\lambda$ with $w_{\mathbf{P}} = w$ as $\mathcal{UT}_w (\lambda)$. A \defn{signed primed tableau} $\mathbf{T}$ of shape $\lambda$ on $n$ letters (cf. 
semistandard $Q$-tableau~\cite{Lam.1995}) is a filling of $\mathcal{S}(\lambda)$ with letters from the alphabet $X'_n$ such that: \begin{enumerate} \item The entries are weakly increasing along each column and each row of $\mathbf{T}$. \item Each row contains at most one $i'$ for every $i = 1,\ldots,n$. \item Each column contains at most one $i$ for every $i = 1,\ldots,n$. \end{enumerate} The reason for using the word ``signed'' in the name is to distinguish the set of primed tableaux above from the ``unsigned" version described later in the chapter. Denote the set of signed primed tableaux of shape $\lambda$ by $\mathcal{PT^{\pm}} (\lambda)$. Given an element $\mathbf{T} \in \mathcal{PT^{\pm}} (\lambda)$, define the weight of the tableau $\mathrm{wt}(\mathbf{T})$ as the vector with $i$-th coordinate equal to the total number of letters in $\mathbf{T}$ that are either $i$ or $i'$. \begin{example} $\Bigg(\young(43201,:212,::0),\ \young(112'3'3,:2'23',::4)\Bigg)$ is a pair consisting of a unimodal tableau and a signed primed tableau both of shape $(5,3,1)$. \end{example} For a reduced unimodal tableau $\mathbf{P}$ with rows $P_\ell, P_{\ell-1}, \ldots, P_1$, the Kra\'skiewicz insertion of a letter $k$ into tableau $\mathbf{P}$ (denoted again by $\mathbf{P} \leftsquigarrow k$) is performed as follows: \begin{enumerate} \item Perform Kra\'skiewicz insertion of the letter $k$ into the unimodal word $P_1$. If there is no bumped letter and $P_1 \leftsquigarrow k = P'_1$, the algorithm terminates and the new tableau $\mathbf{P'}$ consists of rows $P_\ell, P_{\ell-1}, \ldots, P_2, P'_1$. If there is a bumped letter and $P_1 \leftsquigarrow k = k' \leftsquigarrow P'_1$, continue the algorithm by inserting $k'$ into the unimodal word $P_2$. 
\item Repeat the previous step for the rows of $\mathbf{P}$ until either the algorithm terminates, in which case the new tableau $\mathbf{P}'$ consists of rows $P_\ell, \ldots, P_{s+1}, P'_s, \ldots, P'_1$, or, the insertion continues until we bump a letter $k_e$ from $P_\ell$, in which case we then put $k_e$ on a new row of the shifted shape of $\mathbf{P'}$, so that the resulting tableau $\mathbf{P'}$ consists of rows $k_e, P'_\ell, \ldots, P'_1$. \end{enumerate} \begin{example} $$\young(43201,:212,::0) \leftsquigarrow 0 = \young(43210,:210,::01),$$ since the insertions row by row are given by $43201 \leftsquigarrow 0 =0 \leftsquigarrow 43210$, $212 \leftsquigarrow 0 = 1 \leftsquigarrow 210$, and $0 \leftsquigarrow 1 = 01$. \end{example} \begin{lemma} \cite{Kraskiewicz.1989} Let $\mathbf{P}$ be a reduced unimodal tableau with reading word $\pi_\mathbf{P}$ for an element $w\in W_C$. Let $k$ be a letter such that $\pi_\mathbf{P}k$ is a reduced word. Then the tableau $\mathbf{P'} = \mathbf{P} \leftsquigarrow k$ is a reduced unimodal tableau, for which the reading word $\pi_{\mathbf{P'}}$ is a reduced word for $w s_k$. \end{lemma} \begin{lemma} \cite[Lemma 3.17]{Lam.1995} \label{lemma.ins} Let $\mathbf{P}$ be a unimodal tableau, and $\mathbf{a}$ a unimodal word such that $\pi_{\mathbf{P}}\mathbf{a}$ is reduced. Let $(x_1,y_1), \ldots, (x_r, y_r)$ be the (ordered) list of boxes added when $\mathbf{P} \leftsquigarrow {\mathbf{a}}$ is computed. Then there exists an index $v$, such that $x_1 < \cdots < x_v \geqslant \cdots \geqslant x_r $ and $y_1 \geqslant \cdots \geqslant y_v < \cdots < y_r$. \end{lemma} Let $\mathbf{A} \in U^{\pm} (w)$ be a signed unimodal factorization with unimodal factors $\mathbf{a}_1, \mathbf{a}_2, \ldots, \mathbf{a}_n$. 
We recursively construct a sequence $(\emptyset, \emptyset) = (\mathbf{P}_0, \mathbf{T}_0),\ (\mathbf{P}_1, \mathbf{T}_1), \ldots, (\mathbf{P}_n, \mathbf{T}_n) = (\mathbf{P}, \mathbf{T})$ of tableaux, where $\mathbf{P}_s \in \mathcal{UT}_{(\mathbf{a}_1 \mathbf{a}_2 \ldots \mathbf{a}_s)} (\lambda^{(s)})$ and $\mathbf{T}_s \in \mathcal{PT}^{\pm} (\lambda^{(s)})$ are tableaux of the same shifted shape $\lambda^{(s)}$. To obtain the \defn{insertion tableau} $\mathbf{P}_s$, insert the letters of $\mathbf{a}_s$ one by one from left to right, into $\mathbf{P}_{s-1}$. Denote the shifted shape of $\mathbf{P}_{s}$ by $\lambda^{(s)}$. Enumerate the boxes in the skew shape $\lambda^{(s)} / \lambda^{(s-1)}$ in the order they appear in $\mathbf{P}_s$. Let these boxes be $(x_1,y_1), \ldots, (x_{\ell_s}, y_{\ell_s})$. Let $v$ be the index that is guaranteed to exist by Lemma~\ref{lemma.ins} when we compute $\mathbf{P_{s-1}} \leftsquigarrow {\mathbf{a_s}}$. The \defn{recording tableau} $\mathbf{T}_{s}$ is a primed tableau obtained from $\mathbf{T}_{s-1}$ by adding the boxes $(x_1, y_1), \ldots, (x_{v-1}, y_{v-1})$, each filled with the letter $s'$, and the boxes $(x_{v+1},y_{v+1}), \ldots, (x_{\ell_s}, y_{\ell_s})$, each filled with the letter $s$. The special case is the box $(x_v,y_v)$, which could contain either $s'$ or $s$. The letter is determined by the sign of the factor $\mathbf{a}_s$: If the sign is $-$, the box is filled with the letter $s'$, and if the sign is $+$, the box is filled with the letter $s$. We call the resulting map the \defn{primed Kra\'skiewicz map} $\mathrm{KR}'$. \begin{example} Given a signed unimodal factorization $\mathbf{A} = (-0) (+212) (-43201)$, the sequence of tableaux is $$ (\emptyset,\emptyset), \quad (\ \young(0),\young(1')\ ), \quad \Big( \ \young(212,:0), \young(1'2'2,:2)\ \Big), \quad \Bigg(\ \young(43201,:212,::0),\young(1'2'23'3,:23'3,::3')\ \Bigg). 
$$ \end{example} If the recording tableau is constructed, instead, by simply labeling its boxes with $1,2,3,\ldots$ in the order these boxes appear in the insertion tableau, we recover the original Kra\'skiewicz map \cite{Kraskiewicz.1989,Kraskiewicz.1995}, which is a bijection \begin{equation*} \mathrm{KR}\colon R(w) \rightarrow \bigcup_{\lambda} \big[\mathcal{UT}_w (\lambda) \times \mathcal{ST} (\lambda)\big], \end{equation*} where $\mathcal{ST}(\lambda)$ is the set of \defn{standard shifted tableau} of shape $\lambda$, i.e., the set of fillings of $\mathcal{S} (\lambda)$ with letters $1,2, \ldots,|\lambda|$ such that each letter appears exactly once, each row filling is increasing, and each column filling is increasing. \begin{theorem} \label{theorem.KR} The primed Kra\'skiewicz map is a bijection \begin{equation*} \mathrm{KR}'\colon U^{\pm}(w) \rightarrow \bigcup_{\lambda} \big[\mathcal{UT}_w (\lambda) \times \mathcal{PT}^{\pm} (\lambda)\big]. \end{equation*} \end{theorem} \begin{proof} First we show that the map is well-defined: Let $\mathbf{A} \in U^{\pm}(w)$ such that $\mathrm{KR}'(\mathbf{A}) = (\mathbf{P}, \mathbf{Q})$. The fact that $\mathbf{P}$ is a unimodal tableau follows from the fact that $\mathrm{KR}$ is well-defined. On the other hand, $\mathbf{Q}$ satisfies Condition (1) in the definition of signed primed tableaux since its entries are weakly increasing with respect to the order the associated boxes are added to $\mathbf{P}$. Now fix an $s$ and consider the insertion $\mathbf{P_{s-1}} \leftsquigarrow {\mathbf{a_s}}$. Refer to the set-up in Lemma~\ref{lemma.ins}. Then, $x_1<\cdots<x_v$ implies that the boxes filled with $s'$ lie in distinct rows, so there is at most one $s'$ in each row, and $y_v< \cdots < y_{\ell_s}$ implies that the boxes filled with $s$ lie in distinct columns, so there is at most one $s$ in each column. Hence Conditions (2) and (3) of the definition have been verified, implying that indeed $\mathbf{Q}$ is a signed primed tableau.
Now suppose $(\mathbf{P},\mathbf{Q}) \in \bigcup_{\lambda} \big[\mathcal{UT}_w (\lambda) \times \mathcal{PT}^{\pm} (\lambda)\big]$. The ordering of the alphabet $X'$ induces a partial order on the set of boxes of $\mathbf{Q}$. Refine this ordering as follows: Among boxes containing an $s'$, box $b$ is greater than box $c$ if box $b$ lies below box $c$. Among boxes containing an $s$, box $b$ is greater than box $c$ if box $b$ lies to the right of box $c$. Let the standard shifted tableau induced by the resulting total order be denoted $\mathbf{Q}^*$. Let $\mathbf{w}=\mathrm{KR}^{-1}(\mathbf{P},\mathbf{Q}^*)$ be the corresponding reduced word. Divide $\mathbf{w}$ into factors, where the size of the $s$-th factor is equal to the $s$-th entry in $\mathrm{wt}(\mathbf{Q})$. Let $\textbf{A} =\textbf{a}_1 \ldots \textbf{a}_n$ be the resulting factorization, where the sign of $\mathbf{a}_s$ is determined as follows: Consider the lowest leftmost box in $\mathbf{Q}$ that contains an $s$ or $s'$ (such a box must exist if $\textbf{a}_s \neq \emptyset$). If this box contains an $s$ give $\mathbf{a}_s$ a positive sign, and otherwise a negative sign. Let $b_1,\ldots, b_{|\textbf{a}_s|}$ denote the boxes of $\mathbf{Q}^*$ corresponding to $\textbf{a}_s$ under $\mathrm{KR}^{-1}$. The construction of $\mathbf{Q}^*$ and the fact that $\mathbf{Q}$ is a primed shifted tableau imply that the coordinates of these boxes satisfy the hypothesis of Lemma \ref{lemma.ins}. Since these are exactly the boxes that appear when we compute $\mathbf{P_{s-1}} \leftsquigarrow \mathbf{a}_s$, Lemma \ref{lemma.ins} implies that $\mathbf{a}_s$ is unimodal. It follows that $\mathbf{A}$ is a signed unimodal factorization mapping to $(\mathbf{P},\mathbf{Q})$ under $\mathrm{KR}'$. It is not hard to see $\mathbf{A}$ is unique.
\end{proof} Theorem~\ref{theorem.KR} and Equation~\eqref{equation.Upm} imply the following relation: \begin{equation} \label{equation.PTpm} F^C_{w}(\mathbf{x}) = \sum_{\lambda} \big|\mathcal{UT}_w (\lambda) \big| \sum_{\mathbf{T} \in \mathcal{PT}^{\pm}(\lambda)} \mathbf{x}^{\mathrm{wt}(\mathbf{T})}. \end{equation} \begin{remark} The sum $\sum_{\mathbf{T} \in \mathcal{PT}^{\pm}(\lambda)} \mathbf{x}^{\mathrm{wt}(\mathbf{T})}$ is also known as the $Q$-Schur function. The expansion~\eqref{equation.PTpm}, with a slightly different interpretation of $Q$-Schur function, was shown in~\cite{Billey.Haiman.1995}. \end{remark} At this point, we are halfway there to expand $F^C_{w}(\mathbf{x})$ in terms of Schur functions. In the next section we introduce a crystal structure on the set $\mathcal{PT} (\lambda)$ of unsigned primed tableaux. \subsection{Mixed insertion} \label{section.implicit} Set $\mathcal{B}^h = \mathcal{B}^h_{\infty}$. Similar to the well-known RSK-algorithm, mixed insertion~\cite{Haiman.1989} gives a bijection between $\mathcal{B}^h$ and the set of pairs of tableaux $(\mathbf{T}, \mathbf{Q})$, but in this case $\mathbf{T}$ is an (unsigned) primed tableau of shape $\lambda$ and $\mathbf{Q}$ is a standard shifted tableau of the same shape. An \defn{(unsigned) primed tableau} of shape $\lambda$ (cf. semistandard $P$-tableau~\cite{Lam.1995} or semistandard marked shifted tableau~\cite{Cho.2013}) is a signed primed tableau $\mathbf{T}$ of shape $\lambda$ with only unprimed elements on the main diagonal. Denote the set of primed tableaux of shape $\lambda$ by $\mathcal{PT}(\lambda)$. The weight function $\mathrm{wt}(\mathbf{T})$ of $\mathbf{T} \in \mathcal{PT}(\lambda)$ is inherited from the weight function of signed primed tableaux, that is, it is the vector with $i$-th coordinate equal to the number of letters $i'$ and $i$ in $\mathbf{T}$. 
We can simplify~\eqref{equation.PTpm} as \begin{equation} \label{equation.PT} F^C_{w}(\mathbf{x}) = \sum_{\lambda} 2^{\ell(\lambda)} \big|\mathcal{UT}_w (\lambda) \big| \sum_{\mathbf{T} \in \mathcal{PT}(\lambda)} \mathbf{x}^{\mathrm{wt}(\mathbf{T})}. \end{equation} \begin{remark} The sum $\sum_{\mathbf{T} \in \mathcal{PT}(\lambda)} \mathbf{x}^{\mathrm{wt}(\mathbf{T})}$ is also known as a $P$-Schur function. \end{remark} Given a word $b_1 b_2 \ldots b_h$ in the alphabet $X = \{1<2<3<\cdots\}$, we recursively construct a sequence of tableaux $(\emptyset, \emptyset) = (\mathbf{T}_0, \mathbf{Q}_0),$ $(\mathbf{T}_1, \mathbf{Q}_1), \ldots, (\mathbf{T}_h, \mathbf{Q}_h) = (\mathbf{T}, \mathbf{Q})$, where $\mathbf{T}_s \in \mathcal{PT}(\lambda^{(s)})$ and $\mathbf{Q}_s \in \mathcal{ST}(\lambda^{(s)})$. To obtain the tableau $\mathbf{T}_{s}$, insert the letter $b_s$ into $\mathbf{T}_{s-1}$ as follows. First, insert $b_s$ into the first row of $\mathbf{T}_{s-1}$, bumping out the leftmost element $y$ that is strictly greater than $b_s$ in the alphabet $X' = \{1' < 1 < 2' < 2< \cdots \}$. \begin{enumerate} \item If $y$ is not on the main diagonal and $y$ is not primed, then insert it into the next row, bumping out the leftmost element that is strictly greater than $y$ from that row. \item If $y$ is not on the main diagonal and $y$ is primed, then insert it into the next column to the right, bumping out the topmost element that is strictly greater than $y$ from that column. \item If $y$ is on the main diagonal, then it must be unprimed. Prime $y$ and insert it into the column on the right, bumping out the topmost element that is strictly greater than $y$ from that column. \end{enumerate} If a bumped element exists, treat it as a new $y$ and repeat the steps above -- if the new $y$ is unprimed, row-insert it into the row below its original cell, and if the new $y$ is primed, column-insert it into the column to the right of its original cell.
The insertion process terminates either by placing a letter at the end of a row, bumping no new element, or forming a new row with the last bumped element. \begin{example} Under mixed insertion, $$\young(223'3,:33) \leftarrow 1 = \young(12'3'3,:23',::3).$$ Let us explain each step in detail. The letter $1$ is inserted into the first row bumping out the $2$ from the main diagonal, making it a $2'$, which is then inserted into the second column. The letter $2'$ bumps out $2$, which we insert into the second row. Then $3$ from the main diagonal is bumped from the second row, making it a $3'$, which is then inserted into third column. The letter $3'$ bumps out the 3 on the second row, which is then inserted as the first element in the third row. \end{example} The shapes of $\mathbf{T}_{s-1}$ and $\mathbf{T}_s$ differ by one box. Add that box to $\mathbf{Q}_{s-1}$ with a letter $s$ in it, to obtain the standard shifted tableau $\mathbf{Q}_s$. \begin{example} For a word $332332123$, some of the tableaux in the sequence $(\mathbf{T}_i, \mathbf{Q}_i)$ are $$\Big(\ \young(23',:3),\young(12,:3) \ \Big), \quad \Big(\ \young(223'3,:33),\young(1245,:36) \ \Big), \quad \Bigg(\ \young(12'23'3,:23'3,::3),\young(12459,:368,::7) \ \Bigg).$$ \end{example} \begin{theorem} \cite{Haiman.1989} The construction above gives a bijection \begin{equation*} \mathrm{HM} \colon \mathcal{B}^h \rightarrow \bigcup_{\lambda\vdash h} \big[ \mathcal{PT}(\lambda) \times \mathcal{ST}(\lambda) \big]. \end{equation*} \end{theorem} The bijection $\mathrm{HM}$ is called a \defn{mixed insertion}. If $\mathrm{HM}(\mathbf{b}) = (\mathbf{T},\mathbf{Q})$, denote $P_{\mathrm{HM}} (\mathbf{b}) = \mathbf{T}$ and $R_{\mathrm{HM}}(\mathbf{b}) = \mathbf{Q}$. Just as for the RSK-algorithm, the mixed insertion has the property of preserving the recording tableau within each connected component of the crystal $\mathcal{B}^h$. 
\begin{theorem} \label{theorem.main0} The recording tableau $R_{\mathrm{HM}} (\cdot)$ is constant on each connected component of the crystal $\mathcal{B}^h$. \end{theorem} Before we provide the proof of Theorem~\ref{theorem.main0}, we need to define one more insertion from~\cite{Haiman.1989}, which serves as a dual to the previously discussed mixed insertion. We use the notion of \defn{generalized permutations}. Similar to a regular permutation in two-line notation, a generalized permutation $w$ consists of two lines $\binom{a_1 a_2\cdots a_h}{b_1 b_2 \cdots b_h}$, which gives a correspondence between $a_s$ and $b_s$, but there can be repeated letters now. We order the pairs $(a_s, b_s)$ by making the top line weakly increasing $a_1 \leqslant\cdots \leqslant a_h$, and forcing $b_{s} \leqslant b_{s+1}$ whenever $a_s = a_{s+1}$. The inverse of a generalized permutation $w^{-1}$ consists of pairs $(b_s, a_s)$, ordered appropriately. Given a word $\mathbf{b} = b_1\ldots b_h$, it can be represented as a generalized permutation $w$ by setting the first line of the permutation to be $1\ 2\ \ldots h$ and the second line to be $b_1\ b_2\ \ldots b_h$. Since the inverse of the generalized permutation $w$ exists, this also defines $\mathbf{b}^{-1}$. Now, let $w=\binom{a_1 a_2\cdots a_h}{b_1 b_2 \cdots b_h}$ be a generalized permutation on the alphabet $X$, where the second line consists of distinct letters. We recursively construct a sequence of tableaux $(\emptyset, \emptyset) = (\mathbf{Q}_0, \mathbf{T}_0),$ $(\mathbf{Q}_1, \mathbf{T}_1), \ldots, (\mathbf{Q}_h, \mathbf{T}_h) = (\mathbf{Q}, \mathbf{T})$, where $\mathbf{Q}_s \in \mathcal{ST}(\lambda^{(s)})$ and $\mathbf{T}_s \in \mathcal{PT}(\lambda^{(s)})$.
To obtain the tableau $\mathbf{Q}_{s}$, insert the letter $b_s$ into $\mathbf{Q}_{s-1}$ as follows: \begin{itemize} \item Insert $b_s$ into the first row of $\mathbf{Q}_{s-1}$, and insert each bumped element into the next row until either an element is inserted into an empty cell and the algorithm terminates, or an element $b$ has been bumped from the diagonal. In the latter case, insert $b$ into the column to its right and continue bumping by columns, until an empty cell is filled. \item The shapes of $\mathbf{Q}_{s-1}$ and $\mathbf{Q}_s$ differ by one box. Add that box to $\mathbf{T}_{s-1}$ with a letter $a_s$ in it. Prime that letter if a diagonal element has been bumped in the process of inserting $b_s$ into $\mathbf{Q}_{s-1}$. \end{itemize} The above insertion process is called a \defn{Worley--Sagan insertion algorithm}. The insertion tableau $\mathbf{Q}$ will be denoted by $P_{\mathrm{WS}} (w)$ and the recording tableau $\mathbf{T}$ is denoted by $R_{\mathrm{WS}} (w)$. \begin{theorem} \cite[Theorem 6.10 and Corollary 6.3]{Haiman.1989} \label{theorem.insertion dual} Given $\mathbf{b} \in \mathcal{B}^h$, we have $R_{\mathrm{HM}} (\mathbf{b}) = P_{\mathrm{WS}} (\mathbf{b}^{-1})$. \end{theorem} Next, we want to find out when the Worley--Sagan insertion tableau is preserved. Fortunately, other results from~\cite{Haiman.1989} provide this description. \begin{theorem} \cite[Corollaries 5.8 and 6.3]{Haiman.1989} \label{theorem.haiman WS} If two words with distinct letters $\mathbf{b}$ and $\mathbf{b}'$ are related by a shifted Knuth transformation, then $P_{\mathrm{WS}} (\mathbf{b}) = P_{\mathrm{WS}} (\mathbf{b}')$. 
\end{theorem} Here, a \defn{shifted Knuth transformation} is an exchange of consecutive letters in one of the following forms: \begin{enumerate} \item Knuth transformations: $cab \leftrightarrow acb$ or $bca \leftrightarrow bac$, where $a<b<c$, \item Worley--Sagan transformation: $xy \leftrightarrow yx$, where $x$ and $y$ are the first two letters of the word. \end{enumerate} We are now ready to prove the theorem. \begin{proof}[Proof of Theorem~\ref{theorem.main0}] If $\mathbf{b}$ and $\mathbf{b}'$ are two words in the same connected component of $\mathcal{B}^h$, their RSK-recording tableaux $R_{\mathrm{RSK}} (\mathbf{b})$ and $R_{\mathrm{RSK}} (\mathbf{b}')$ are the same. Thus, $P_{\mathrm{RSK}} (\mathbf{b}^{-1})$ and $P_{\mathrm{RSK}} (\mathbf{b}'^{-1})$ are the same, and the second lines of $\mathbf{b}^{-1}$ and $\mathbf{b}'^{-1}$ are related by a sequence of Knuth transformations. This in turn means that $P_{\mathrm{WS}} (\mathbf{b}^{-1})$ and $P_{\mathrm{WS}} (\mathbf{b}'^{-1})$ are the same by Theorem~\ref{theorem.haiman WS}, and hence $R_{\mathrm{HM}} (\mathbf{b}) = R_{\mathrm{HM}} (\mathbf{b}')$ by Theorem~\ref{theorem.insertion dual}. \end{proof} Let us fix a recording tableau $\mathbf{Q}_{\lambda} \in \mathcal{ST} (\lambda)$. Define a map $\Psi_\lambda \colon \mathcal{PT}(\lambda) \rightarrow \mathcal{B}^{h}$ as $\Psi_\lambda (\mathbf{T}) = \mathrm{HM}^{-1} (\mathbf{T}, \mathbf{Q}_\lambda)$. By Theorem~\ref{theorem.main0}, the set $\mathrm{Im}(\Psi_{\lambda})$ consists of several connected components of $\mathcal{B}^h$. The map $\Psi_{\lambda}$ can thus be taken as a crystal isomorphism, and we can define the crystal operators and weight function on $\mathcal{PT}(\lambda)$ as \begin{equation} \label{equation.ef} e_i(\mathbf{T}) := (\Psi_\lambda^{-1} \circ e_i \circ \Psi_\lambda) (\mathbf{T}), \quad f_i(\mathbf{T}) := (\Psi_\lambda^{-1} \circ f_i \circ \Psi_\lambda) (\mathbf{T}), \quad \mathrm{wt}(\mathbf{T}) := (\mathrm{wt} \circ \Psi_\lambda) (\mathbf{T}).
\end{equation} Although it is not clear that the crystal operators constructed above are independent of the choice of $\mathbf{Q}_\lambda$, in the next section we will construct explicit crystal operators on the set $\mathcal{PT}(\lambda)$ that satisfy the relations above and do not depend on the choice of $\mathbf{Q}_\lambda$. \begin{example} For $\mathbf{T} = \young(12'23'3,:23'3,::3)$, choose $\mathbf{Q}_{\lambda} = \young(12345,:678,::9)$. Then $\Psi_\lambda (\mathbf{T}) = 333332221$ and $e_1 \circ \Psi_\lambda (\mathbf{T}) = 333331221$. Thus, \begin{equation*} \ e_1 (\mathbf{T}) = (\Psi_\lambda^{-1} \circ e_1 \circ \Psi_\lambda) (\mathbf{T}) = \young(1123'3,:23'3,::3), \quad f_1(\mathbf{T}) = f_2(\mathbf{T}) = \mathbf{0}. \end{equation*} \end{example} To summarize, we obtain a crystal isomorphism between the crystal $(\mathcal{PT}(\lambda), e_i, f_i, \mathrm{wt})$, denoted again by $\mathcal{PT}(\lambda)$, and a direct sum $\bigoplus_\mu \mathcal{B}_{\mu}^{\oplus h_{\lambda\mu}}$. We will provide a combinatorial description of the coefficients $h_{\lambda\mu}$ in the next section. This implies the relation on characters of the corresponding crystals $\chi_{\mathcal{PT}(\lambda)} = \sum_\mu h_{\lambda\mu} s_\mu$. Thus we can rewrite~\eqref{equation.PT} one last time \begin{equation*} F^C_{w}(\mathbf{x}) = \sum_{\lambda} 2^{\ell(\lambda)} \big|\mathcal{UT}_w (\lambda) \big| \sum_{\mu} h_{\lambda\mu} s_\mu = \sum_\mu \Big( \sum_\lambda 2^{\ell(\lambda)} \big|\mathcal{UT}_w (\lambda) \big|\ h_{\lambda\mu} \Big) s_\mu. \end{equation*} \section{Explicit crystal operators on shifted primed tableaux} \label{section.explicit} We consider the alphabet $X'=\{1' < 1 < 2' < 2 < 3' < \cdots\}$ of primed and unprimed letters. It is useful to think about the letter $(i+1)'$ as a number $i + 0.5$. Thus, we say that letters $i$ and $(i+1)'$ differ by half a unit and letters $i$ and $(i+1)$ differ by a whole unit. 
Given an (unsigned) primed tableau $\mathbf{T}$, we construct the \defn{reading word} $\mathrm{rw}(\mathbf{T})$ as follows: \begin{enumerate} \item List all primed letters in the tableau, column by column, from top to bottom within each column, moving from the rightmost column to the left, and with all the primes removed (i.e. all letters are increased by half a unit). (Call this part of the word the \defn{primed reading word}.) \item Then list all unprimed elements, row by row, from left to right within each row, moving from the bottommost row to the top. (Call this part of the word the \defn{unprimed reading word}.) \end{enumerate} To find the letter on which the crystal operator $f_i$ acts, apply the bracketing rule for letters $i$ and $i+1$ within the reading word $\mathrm{rw}(\mathbf{T})$. If all letters $i$ are bracketed in $\mathrm{rw}(\mathbf{T})$, then $f_i(\mathbf{T}) = \mathbf{0}$. Otherwise, the rightmost unbracketed letter $i$ in $\mathrm{rw}(\mathbf{T})$ corresponds to an $i$ or an $i'$ in $\mathbf{T}$, which we call \defn{bold unprimed} $i$ or \defn{bold primed} $i$ respectively. If the bold letter $i$ is unprimed, denote the cell it is located in as $x$. If the bold letter $i$ is primed, we \textit{conjugate} the tableau $\mathbf{T}$ first. The \defn{conjugate} of a primed tableau $\mathbf{T}$ is obtained by reflecting the tableau over the main diagonal, changing all primed entries $k'$ to $k$ and changing all unprimed elements $k$ to $(k+1)'$ (i.e. increase the entries of all boxes by half a unit). The main diagonal is now the North-East boundary of the tableau. Denote the resulting tableau as $\mathbf{T}^*$. Under the transformation $\mathbf{T} \to \mathbf{T}^*$, the bold primed $i$ is transformed into bold unprimed $i$. Denote the cell it is located in as $x$. Given any cell $z$ in a shifted primed tableau $\mathbf{T}$ (or conjugated tableau $\mathbf{T}^*$), denote by $c(z)$ the entry contained in cell $z$. 
Denote by $z_E$ the cell to the right of $z$, $z_W$ the cell to its left, $z_S$ the cell below, and $z_N$ the cell above. Denote by $z^*$ the corresponding conjugated cell in $\mathbf{T}^*$ (or in $\mathbf{T}$). Now, consider the box $x_E$ (in $\mathbf{T}$ or in $\mathbf{T}^*$) and notice that $c(x_E) \geqslant (i+1)'$.\\ \noindent \textbf{Crystal operator $f_i$ on primed tableaux:} \begin{enumerate} \item If $c(x_E) = (i+1)'$, the box $x$ must lie outside of the main diagonal and the box immediately below $x_E$ cannot contain $(i+1)'$. Change $c(x)$ to $(i+1)'$ and change $c(x_E)$ to $(i+1)$ (i.e. increase the entry in cell $x$ and $x_E$ by half a unit). \item If $c(x_E) \neq (i+1)'$ or $x_E$ is empty, then there is a maximal connected ribbon (expanding in South and West directions) with the following properties: \begin{enumerate} \item The North-Eastern most box of the ribbon (the tail of the ribbon) is $x$. \item The entries of all boxes within a ribbon besides the tail are either $(i+1)'$ or $(i+1)$. \end{enumerate} Denote the South-Western most box of the ribbon (the head) as $x_H$. \begin{enumerate} \item If $x_H = x$, change $c(x)$ to $(i+1)$ (i.e. increase the entry in cell $x$ by a whole unit). \item If $x_H \neq x$ and $x_H$ is on the main diagonal (in case of a tableau $\mathbf{T}$), change $c(x)$ to $(i+1)'$ (i.e. increase the entry in cell $x$ by half a unit). \item Otherwise, $c(x_H)$ must be $(i+1)'$ due to the bracketing rule. We change $c(x)$ to $(i+1)'$ and change $c(x_H)$ to $(i+1)$ (i.e. increase the entry in cell $x$ and $x_H$ by half a unit). \end{enumerate} \end{enumerate} In the case when the bold $i$ in $\mathbf{T}$ is unprimed, we apply the above crystal operator rules to $\mathbf{T}$ to find $f_i(\mathbf{T})$ \begin{example} We apply operator $f_2$ on the following tableaux. 
The bold letter is marked if it exists: \begin{enumerate} \item $\mathbf{T} = \young(12'23',:23'3)\ $, $\mathrm{rw}(\mathbf{T}) = 3322312$, thus $f_2(\mathbf{T}) = \mathbf{0}$;\\ \item $\mathbf{T} = \young(12'\mathbf{2}3',:23'4)\ $, $\mathrm{rw}(\mathbf{T}) = 3322412$, thus $f_2(\mathbf{T}) = \young(12'3'3,:23'4)$ by Case (1). \\ \item $\mathbf{T} = \young(112\mathbf{2},:34'4)\ $, $\mathrm{rw}(\mathbf{T}) = 4341122$, thus $f_2(\mathbf{T}) = \young(1123,:34'4)$ by Case (2a).\\ \item $\mathbf{T} = \young(112'\mathbf{2}3,:223',::33)$, $\mathrm{rw}(\mathbf{T}) = 3233221123$, thus $f_2(\mathbf{T}) = \young(112'3'3,:223',::33)$ by Case~(2b).\\ \item $\mathbf{T} = \young(111\mathbf{2}3,:223',::34')$, $\mathrm{rw}(\mathbf{T}) = 3432211123$, thus $f_2(\mathbf{T}) = \young(1113'3,:223,::34')$ by Case~(2c). \end{enumerate} \end{example} In the case when the bold $i$ is primed in $\mathbf{T}$, we first conjugate $\mathbf{T}$ and then apply the above crystal operator rules on $\mathbf{T}^*$, before reversing the conjugation. Note that Case~(2b) is impossible for $\mathbf{T}^*$, since the main diagonal is now on the North-East. \begin{example} \begin{equation*} \text{Let} \ \mathbf{T} = \young(1\mathbf{2}'23,:34',::4)\ , \quad \text{then} \ \mathbf{T}^* = \young(2',\mathbf{2}4',3'45',4') \quad \text{and} \ f_2 (\mathbf{T}) = \young(123'3,:34',::4)\ . \end{equation*} \end{example} \begin{theorem} \label{theorem.main2} For any $\mathbf{b} \in \mathcal{B}^h$ with $P_{\mathrm{HM}}(\mathbf{b}) = \mathbf{T}$ and $f_i(\mathbf{b})\neq \mathbf{0}$, the operator $f_i$ defined above satisfies \begin{equation*} P_{\mathrm{HM}}(f_i(\mathbf{b})) = f_i(\mathbf{T}). \end{equation*} Also, $f_i(\mathbf{b}) = \mathbf{0}$ if and only if $f_i(\mathbf{T})=\mathbf{0}$. \end{theorem} The proof of Theorem~\ref{theorem.main2} is quite technical and is relegated to Appendix~\ref{section.proof main2}.
It implies that the explicit operators $f_i$ in this section are indeed equal to those defined in~\eqref{equation.ef} and that they are independent of the choice of $\mathbf{Q}_\lambda$. We also immediately obtain: \begin{proof}[Second proof of Theorem~\ref{theorem.main0}] Given a word $\mathbf{b}=b_1\ldots b_h$, let $\mathbf{b}'= f_i(\mathbf{b}) = b'_1 \ldots b'_h$, so that $b_m \neq b'_m$ for some $m$ and $b_t = b'_t$ for any $t \neq m$. We show that $R_{\mathrm{HM}} (\mathbf{b}) = R_{\mathrm{HM}} (\mathbf{b}')$. Denote $\mathbf{b}^{(s)} = b_1\ldots b_s$ and similarly $\mathbf{b}'^{(s)} = b'_1\ldots b'_s$. Due to the construction of the recording tableau $R_{\mathrm{HM}}$, it suffices to show that $P_{\mathrm{HM}}(\mathbf{b}^{(s)})$ and $P_{\mathrm{HM}}(\mathbf{b}'^{(s)})$ have the same shape for any $1 \leqslant s \leqslant h$. If $s < m$, this is immediate. If $s \geqslant m$, note that $\mathbf{b}'^{(s)}=f_i(\mathbf{b}^{(s)})$. Using Theorem~\ref{theorem.main2}, one can see that $P_{\mathrm{HM}} (\mathbf{b}'^{(s)}) = P_{\mathrm{HM}}(f_i(\mathbf{b}^{(s)})) = f_i(P_{\mathrm{HM}}(\mathbf{b}^{(s)}))$ has the same shape as $P_{\mathrm{HM}}(\mathbf{b}^{(s)})$. \end{proof} The next step is to describe the raising operators $e_i (\mathbf{T})$. Consider the reading word $\mathrm{rw}(\mathbf{T})$ and apply the bracketing rule on the letters $i$ and $i+1$. If all letters $i+1$ are bracketed in $\mathrm{rw}(\mathbf{T})$, then $e_i(\mathbf{T}) = \mathbf{0}$. Otherwise, the leftmost unbracketed letter $i+1$ in $\mathrm{rw}(\mathbf{T})$ corresponds to an $i+1$ or an $(i+1)'$ in $\mathbf{T}$, which we will call bold unprimed $i+1$ or bold primed $i+1$, respectively. If the bold $i+1$ is unprimed, denote the cell it is located in by $y$.
If the bold $i+1$ is primed, conjugate $\mathbf{T}$ and denote the cell with the bold $i+1$ in $\mathbf{T}^*$ by $y$.\\ \noindent \textbf{Crystal operator $e_i$ on primed tableaux:} \begin{enumerate} \item If $c(y_W) = (i+1)'$, then change $c(y)$ to $(i+1)'$ and change $c(y_W)$ to $i$ (i.e. decrease the entry in cell $y$ and $y_W$ by half a unit). \item If $c(y_W) < (i+1)'$ or $y_W$ is empty, then there is a maximal connected ribbon (expanding in North and East directions) with the following properties: \begin{enumerate} \item The South-Western most box of the ribbon (the head of the ribbon) is $y$. \item The entry in all boxes within a ribbon besides the head is either $i$ or $(i+1)'$. \end{enumerate} Denote the North-Eastern most box of the ribbon (the tail) as $y_T$. \begin{enumerate} \item If $y_T = y$, change $c(y)$ to $i$ (i.e. decrease the entry in cell $y$ by a whole unit). \item If $y_T \neq y$ and $y_T$ is on the main diagonal (in case of a conjugate tableau $\mathbf{T}^*$), then change $c(y)$ to $(i+1)'$ (i.e. decrease the entry in cell $y$ by half a unit). \item If $y_T \neq y$ and $y_T$ is not on the diagonal, the entry of cell $y_T$ must be $(i+1)'$ and we change $c(y)$ to $(i+1)'$ and change $c(y_T)$ to $i$ (i.e. decrease the entry of cell $y$ and $y_T$ by half a unit). \end{enumerate} \end{enumerate} When the bold $i+1$ is unprimed, $e_i(\mathbf{T})$ is obtained by applying the rules above to $\mathbf{T}$. When the bold $i+1$ is primed, we first conjugate $\mathbf{T}$, then apply the raising crystal operator rules on $\mathbf{T}^*$, and then reverse the conjugation. \begin{proposition} \begin{equation*} e_i (\mathbf{b}) = \mathbf{0} \quad \text{if and only if} \quad e_i (\mathbf{T}) = \mathbf{0}. \end{equation*} \end{proposition} \begin{proof} According to Lemma~\ref{lemma.main}, the number of unbracketed letters $i$ in $\mathbf{b}$ is equal to the number of unbracketed letters $i$ in $\mathrm{rw}(\mathbf{T})$.
Since the total number of both letters $i$ and $j=i+1$ is the same in $\mathbf{b}$ and in $\mathrm{rw}(\mathbf{T})$, that also means that the number of unbracketed letters $j$ in $\mathbf{b}$ is equal to the number of unbracketed letters $j$ in $\mathrm{rw}(\mathbf{T})$. Thus, there are no unbracketed letters $j$ in $\mathbf{b}$ if and only if there are no unbracketed letters $j$ in $\mathrm{rw}(\mathbf{T})$. \end{proof} \begin{theorem} \label{theorem.main3} Given a primed tableau $\mathbf{T}$ with $f_i(\mathbf{T}) \neq \mathbf{0}$, for the operators $e_i$ defined above we have the following relation: \begin{equation*} e_i(f_i(\mathbf{T})) = \mathbf{T}. \end{equation*} \end{theorem} The proof of Theorem~\ref{theorem.main3} is relegated to Appendix~\ref{section.proof main3}. \begin{corollary} \label{theorem.main4} For any $\mathbf{b} \in \mathcal{B}^h$ with $\mathrm{HM}(\mathbf{b}) = (\mathbf{T},\mathbf{Q})$, the operator $e_i$ defined above satisfies \begin{equation*} \mathrm{HM}(e_i(\mathbf{b})) = (e_i(\mathbf{T}), \mathbf{Q}), \end{equation*} provided that the left-hand side is well-defined. \end{corollary} The consequence of Theorem~\ref{theorem.main2}, as discussed in Section~\ref{section.implicit}, is a crystal isomorphism $\Psi_\lambda \colon \mathcal{PT}(\lambda) \rightarrow \bigoplus_\mu \mathcal{B}_{\mu}^{\oplus h_{\lambda\mu}}$. Now, to determine the nonnegative integer coefficients $h_{\lambda\mu}$, it is enough to count the highest weight elements in $\mathcal{PT}(\lambda)$ of given weight $\mu$. \begin{proposition} \label{proposition.highest} A primed tableau $\mathbf{T} \in \mathcal{PT}(\lambda)$ is a highest weight element if and only if its reading word $\mathrm{rw}(\mathbf{T})$ is a Yamanouchi word. That is, for any suffix of $\mathrm{rw}(\mathbf{T})$, its weight is a partition.
\end{proposition} Thus we define $h_{\lambda\mu}$ to be the number of primed tableaux $\mathbf{T}$ of shifted shape $\mathcal{S}(\lambda)$ and weight $\mu$ such that $\mathrm{rw}(\mathbf{T})$ is Yamanouchi. \begin{example} Let $\lambda = (5,3,2)$ and $\mu = (4,3,2,1)$. There are three primed tableaux of shifted shape $\mathcal{S}((5,3,2))$ and weight $(4,3,2,1)$ with a Yamanouchi reading word, namely \begin{equation*} \young(11112',:223',::34') \ , \quad \young(11113',:222,::34') \quad \text{and} \quad \young(11114',:222,::33)\ . \end{equation*} Therefore $h_{(5,3,2)(4,3,2,1)} = 3$. \end{example} We summarize our results for the type $C$ Stanley symmetric functions as follows. \begin{corollary} \label{corollary.main2} The expansion of $F^C_w(\mathbf{x})$ in terms of Schur symmetric functions is \begin{equation} \label{equation.FC} F^C_w(\mathbf{x}) = \sum_\lambda g_{w\lambda} s_\lambda (\mathbf{x}), \quad \text{where} \quad g_{w\lambda} = \sum_\mu 2^{\ell(\mu)} \big|\mathcal{UT}_w (\mu) \big| \ h_{\mu\lambda}\ . \end{equation} \end{corollary} Replacing $\ell(\mu)$ by $\ell(\mu)-o(w)$ gives the Schur expansion of $F^B_w(\mathbf{x})$. Note that since any row of a unimodal tableau contains at most one zero, $\ell(\mu)-o(w)$ is nonnegative. Thus the given expansion makes sense combinatorially. \begin{example}\label{exa} Consider the word $w=0101=1010$. There is only one unimodal tableau corresponding to $w$, namely $\mathbf{P} = \young(101,:0)$, which belongs to $\mathcal{UT}_{0101} (3,1)$. Thus, $g_{w\lambda} = 4h_{(3,1)\lambda}$. There are only three possible highest weight primed tableaux of shape $(3,1)$, namely $\young(111,:2),\ \young(112',:2)$ and $\young(113',:2)$, which implies that $h_{(3,1)(3,1)}= h_{(3,1)(2,2)} = h_{(3,1)(2,1,1)} = 1$ and $h_{(3,1)\lambda} = 0$ for other weights $\lambda$. The expansion of $F^C_{0101}(\mathbf{x})$ is thus \begin{equation*} F^C_{0101} = 4s_{(3,1)} + 4s_{(2,2)} + 4s_{(2,1,1)}. 
\end{equation*} \end{example} \begin{remark} \label{remark.doubling} In~\cite[Section 5]{Haiman.1989}, Haiman showed that shifted mixed insertion can be understood in terms of nonshifted mixed insertion operators that produce a symmetric tableau, which can subsequently be cut along the diagonal. More precisely, starting with a word $\mathbf{b}$, consider its doubling $\mathrm{double}(\mathbf{b})$ by replacing each letter $\ell$ by $-\ell \;\ell$. By~\cite[Proposition 6.8]{Haiman.1989} the mixed insertion of $\mathrm{double}(\mathbf{b})$ is the symmetrized version of $P_{\mathrm{HM}}(\mathbf{b})$. This symmetrized version can also be obtained by first applying usual insertion to obtain $P(\mathrm{double}(\mathbf{b}))$ and then applying conversion~\cite[Proposition 14]{SW.2001}. Since both doubling (where the operators are also replaced by their doubled versions) and regular insertion commute with crystal operators, it follows that our crystal operators $f_i$ on primed tableaux can be described as follows: To apply $f_i$ to $\mathbf{T}$, first form the symmetrization of $\mathbf{T}$ and then apply inverse conversion (changing primed entries to negatives). Next apply the doubled operator $f_if_{-i}$, and then convert ``forwards" (negatives to primes). This produces a symmetric tableau, which can then be cut along the diagonal to obtain $f_i(\mathbf{T})$. \end{remark} \section{Semistandard unimodal tableaux} \label{section.semistandard} Many of the results of this paper have counterparts which involve the notion of semi\-standard unimodal tableaux in place of primed tableaux. We give a brief overview of these results, mostly without proof. First, let us define semistandard unimodal tableaux. We say that a word $a_1 a_2 \ldots a_h \in \mathcal{B}^h$ is \defn{weakly unimodal} if there exists an index $v$, such that \[ a_1 > a_2 > \cdots > a_v \leqslant a_{v+1} \leqslant \cdots \leqslant a_h. 
\] A \defn{semistandard unimodal tableau} $\mathbf{P}$ of shape $\lambda$ is a filling of $\mathcal{S}(\lambda)$ with letters from the alphabet $X$ such that the $i^{th}$ row of $\mathbf{P}$, denoted by $P_i$, is weakly unimodal, and such that $P_i$ is the longest weakly unimodal subword in the concatenated word $P_{i+1} P_i$. Denote the set of semistandard unimodal tableaux of shape $\lambda$ by $\mathcal{SUT}(\lambda)$. Let $\mathbf{a}=a_1\ldots a_h \in \mathcal{B}^h$. The alphabet $X$ imposes a partial order on the entries of $\mathbf{a}$. We can extend this to a total order by declaring that if $a_i=a_j$ as elements of $X$, and $i<j$, then as entries of $\mathbf{a}$, $a_i<a_j$. For each entry $a_i$, denote its numerical position in the total ordering on the entries of $\mathbf{a}$ by $n_i$ and define the \defn{standardization} of $\mathbf{a}$ to be the word with superscripts, $n_1^{a_1} \ldots n_h^{a_h}$. Since its entries are distinct, $n_1 \ldots n_h$ can be considered as a reduced word. Let $(\mathbf{R},\mathbf{S})$ be the Kra\'skiewicz insertion and recording tableaux of $n_1 \ldots n_h$, and let $\mathbf{R}^*$ be the tableau obtained from $\mathbf{R}$ by replacing each $n_i$ by $a_i$. One checks that setting $\mathrm{SK}(\mathbf{a})=(\mathbf{R}^*,\mathbf{S})$ defines a map, \[ \mathrm{SK} \colon \mathcal{B}=\bigoplus_{h \in \mathbb{N}} \mathcal{B}^h \rightarrow \bigcup_{\lambda} \big[\mathcal{SUT} (\lambda) \times \mathcal{ST} (\lambda)\big]. \] In fact, this map is a bijection \cite{Serrano.2010,Lam.1995}. It follows that the composition $\mathrm{SK} \circ \mathrm{HM}^{-1}$ gives a bijection \[ \bigcup_{\lambda} \big[\mathcal{PT} (\lambda) \times \mathcal{ST} (\lambda)\big] \rightarrow \bigcup_{\lambda} \big[\mathcal{SUT} (\lambda) \times \mathcal{ST} (\lambda)\big]. 
\] The following remarkable fact, which appears as \cite[Proposition 2.23]{Serrano.2010}, can be deduced from \cite[Theorem 3.32]{Lam.1995}, which itself utilizes results of \cite{Haiman.1989}. \begin{theorem} \label{theorem.same} For any word $\mathbf{a}\in \mathcal{B}^h$, $Q_{\mathrm{SK}}(\mathbf{a}) = Q_{\mathrm{HM}}(\mathbf{a})$. \end{theorem} This allows us to define a bijective map $\Phi_{\mathbf{Q}} \colon \mathcal{PT} (\lambda) \rightarrow \mathcal{SUT} (\lambda)$ as follows. Choose a standard shifted tableau $\mathbf{Q}$ of shape $\lambda$. Then, given a primed tableau $\mathbf{P}$ of shape $\lambda$ set $(\mathbf{R}, \mathbf{Q}) = \mathrm{SK}(\mathrm{HM}^{-1}(\mathbf{P},\mathbf{Q}))$, and let $\Phi_{\mathbf{Q}}(\mathbf{P})=\mathbf{R}$. For any filling of a shifted shape $\lambda$ with letters from $X$, associating this filling to its reading word (the element of $\mathcal{B}^{|\lambda|}$ obtained by reading rows left to right, bottom to top) induces crystal operators on the set of all fillings of this shape. In particular, we can apply these induced operators to any element of $\mathcal{SUT} (\lambda)$ (although, a priori, it is not clear that the image will remain in $\mathcal{SUT} (\lambda)$). We now summarize our main results for SK insertion and its relation to this induced crystal structure. \begin{theorem} \label{theorem.main2'} For any $\mathbf{b} \in \mathcal{B}^h$ with $\mathrm{SK}(\mathbf{b}) = (\mathbf{T},\mathbf{Q})$ and $f_i(\mathbf{b})\neq \mathbf{0}$, the induced operator $f_i$ described above satisfies \begin{equation*} \mathrm{SK}(f_i(\mathbf{b})) = (f_i(\mathbf{T}), \mathbf{Q}). \end{equation*} Also, $f_i(\mathbf{b}) = \mathbf{0}$ if and only if $f_i(\mathbf{T})=\mathbf{0}$. \end{theorem} \begin{corollary} $\mathcal{SUT} (\lambda)$ is closed under the induced crystal operators described above. 
\end{corollary} Replacing $\mathrm{HM}$ by $\mathrm{SK}$ in the second proof of Theorem~\ref{theorem.main0}, or by combining Theorem~\ref{theorem.main0} with Theorem~\ref{theorem.same} yields: \begin{theorem} \label{theorem.main0'} The recording tableau under $\mathrm{SK}$ insertion is constant on each connected component of the crystal $\mathcal{B}^h$. \end{theorem} The upshot of all this is the following theorem. \begin{theorem} \label{theorem.upshot} With respect to the crystal operators we have defined on primed tableaux and the induced operators on semistandard unimodal tableaux described above, the map $\Phi_Q$ is a crystal isomorphism. \end{theorem} \begin{proof} This says no more than that $\Phi_Q$ is a bijection (which we have established) and that it commutes with the crystal operations on primed tableaux and semistandard unimodal tableaux. But this is simply combining Theorem~\ref{theorem.main0} with Theorem \ref{theorem.main0'}. \end{proof} Theorem~\ref{theorem.upshot} immediately gives us another combinatorial interpretation of the coefficients $g_{w \lambda}$. Let $k_{\mu \lambda}$ be the number of semistandard unimodal tableaux of shape $\mu$ and weight $\lambda$, whose reading words are Yamanouchi (that is, tableaux that are the highest weight elements of $\mathcal{SUT}(\mu)$). \begin{corollary} \label{corollary.main2'} The expansion of $F^C_w(\mathbf{x})$ in terms of Schur symmetric functions is \begin{equation*} F^C_w(\mathbf{x}) = \sum_\lambda g_{w\lambda} s_\lambda (\mathbf{x}), \quad \text{where} \quad g_{w\lambda} = \sum_\mu 2^{\ell(\mu)} \big|\mathcal{UT}_w (\mu) \big| \ k_{\mu\lambda}\ . \end{equation*} \end{corollary} Again, replacing $\ell(\mu)$ by $\ell(\mu)-o(w)$ gives the Schur expansion of $F^B_w(\mathbf{x})$. \begin{example} According to Example~\ref{exa}, we should find three highest weight semistandard unimodal tableaux of shape $(3,1)$, one for each of the weights $(3,1)$, $(2,2)$, and $(2,1,1)$. 
These are $\young(211,:1),\ \young(211,:2)$ and $\young(321,:1)$. \end{example} \section{Outlook} There are several other generalizations of the results in this paper that one could pursue. First of all, it would be interesting to consider affine Stanley symmetric functions of type $B$ or $C$. As in affine type $A$, this would involve a generalization of crystal bases as the expansion is no longer in terms of Schur functions. Another possible extension is to consider $K$-theoretic analogues of Stanley symmetric functions, such as the (dual) stable Grothendieck polynomials. In type $A$, a crystal theoretic analysis of dual stable Grothendieck polynomials was carried out in~\cite{galashin.2015}. Type $D$ should also be considered from this point of view. Finally, the definition of the reading word $\mathrm{rw}$ of Section~\ref{section.explicit} and the characterization of highest weight elements in Proposition~\ref{proposition.highest} is very similar to the reading words in~\cite[Section 3.2]{Liu.2017} in the analysis of Kronecker coefficients. \appendix \section{Proof of Theorem~\ref{theorem.main2}} \label{section.proof main2} In this appendix, we provide the proof of Theorem~\ref{theorem.main2}. \subsection{Preliminaries} We use the fact from \cite{Haiman.1989} that taking only elements smaller or equal to $i+1$ from the word $\mathbf{b}$ and applying the mixed insertion corresponds to taking only the part of the tableau $\mathbf{T}$ with elements $\leqslant i+1$. Thus, it is enough to prove the theorem for a ``truncated'' word $\mathbf{b}$ without any letters greater than $i+1$. To shorten the notation, we set $j= i+1$ in this appendix. We sometimes also restrict to just the letters $i$ and $j$ in a word $w$. We call this the \defn{$\{i,j\}$-subword} of $w$. First, in Lemma~\ref{lemma.main} we justify the notion of the reading word $\mathrm{rw}(\textbf{T})$ and provide the reason to use a bracketing rule on it. 
After that, in Section~\ref{section.main.proof} we prove that the action of the crystal operator $f_i$ on $\mathbf{b}$ corresponds to the action of $f_i$ on $\mathbf{T}$ after the insertion. Given a word $\mathbf{b}$, we apply the crystal bracketing rule for its $\{i,j\}$-subword and globally declare the rightmost unbracketed $i$ in $\mathbf{b}$ (i.e. the letter the crystal operator $f_i$ acts on) to be a bold $i$. Insert the letters of $\mathbf{b}$ via Haiman insertion to obtain the insertion tableau $\mathbf{T}$. During this process, we keep track of the position of the bold $i$ in the tableau via the following rules. When the bold $i$ from $\mathbf{b}$ is inserted into $\mathbf{T}$, it is inserted as the rightmost $i$ in the first row of $\mathbf{T}$ since by definition it is unbracketed in $\mathbf{b}$ and hence cannot bump a letter $j$. From this point on, the tableau $\mathbf{T}$ has a \defn{special} letter $i$ and we track its position: \begin{enumerate} \item If the special $i$ is unprimed, it is always the rightmost $i$ in its row. When a letter $i$ is bumped from this row, only one of the non-special letters $i$ can be bumped, unless the special $i$ is the only $i$ in the row. When the non-diagonal special $i$ is bumped from its row to the next row, it will be inserted as the rightmost $i$ in the next row. \item When the diagonal special $i$ is bumped from its row to the column to its right, it is inserted as the bottommost $i'$ in the next column. \item If the special $i$ is primed, it is always the bottommost $i'$ in its column. When a letter $i'$ is bumped from this column, only one of the non-special letters $i'$ can be bumped, unless the special $i'$ is the only $i'$ in the column. When the primed special $i$ is bumped from its column to the next column, it is inserted as the bottommost $i'$ in the next column. \item When $i$ is inserted into a row with the special unprimed $i$, the rightmost $i$ becomes special. 
\item When $i'$ is inserted into a column with the special primed $i$, the bottommost primed $i$ becomes special. \end{enumerate} \begin{lemma} \label{lemma.main} Using the rules above, after the insertion process of $\mathbf{b}$, the special $i$ in $\mathbf{T}$ is the same as the rightmost unbracketed $i$ in the reading word $\mathrm{rw}(\mathbf{T})$ (i.e. the definition of the bold $i$ in $\mathbf{T}$). Moreover, the number of unbracketed letters $i$ in $\mathbf{b}$ is equal to the number of unbracketed letters $i$ in $\mathrm{rw}(\mathbf{T})$. \end{lemma} \begin{proof} First, note that since both the number of letters $i$ and the number of letters $j$ are equal in $\mathbf{b}$ and $\mathrm{rw}(\mathbf{T})$, the fact that the number of unbracketed letters $i$ is the same implies that the number of unbracketed letters $j$ must also be the same. We use induction on $1 \leqslant s \leqslant h$, where the letters $b_1 \ldots b_s$ of $\mathbf{b}=b_1 b_2 \ldots b_h$ have been inserted using Haiman mixed insertion with the above rules. That is, we check that at each step of the insertion algorithm the statement of our lemma stays true. The induction step is as follows: Consider the word $b_1 \ldots b_{s-1}$ with a corresponding insertion tableau $\mathbf{T}^{(s-1)}$. If the bold $i$ in $\mathbf{b}$ is not in $b_1\ldots b_{s-1}$, then $\mathbf{T}^{(s-1)}$ does not contain a special letter $i$. Otherwise, by induction hypothesis assume that the bold $i$ in $b_1\ldots b_{s-1}$ by the above rules corresponds to the special $i$ in $\mathbf{T}^{(s-1)}$, that is, it is in the position corresponding to the rightmost unbracketed $i$ in the reading word $\mathrm{rw}(\mathbf{T}^{(s-1)})$. Then we need to prove that for $b_1 \ldots b_s$, the special $i$ in $\mathbf{T}^{(s-1)}$ ends up in the position corresponding to the rightmost unbracketed $i$ in the reading word of $\mathbf{T}^{(s)} = \mathbf{T}^{(s-1)} \leftsquigarrow b_s$. 
We also need to verify that the second part of the lemma remains true for $\mathbf{T}^{(s)}$. Remember that we are only considering ``truncated'' words $\mathbf{b}$ with all letters $\leqslant j$. \noindent \textbf{Case 1.} Suppose $b_s = j$. In this case $j$ is inserted at the end of the first row of $\mathbf{T}^{(s-1)}$, and $\mathrm{rw}(\mathbf{T}^{(s)})$ has $j$ attached at the end. Thus, both statements of the lemma are unaffected. \noindent \textbf{Case 2.} Suppose $b_s = i$ and $b_s$ is unbracketed in $b_1 \ldots b_{s-1} b_s$. Then there is no special $i$ in tableau $\mathbf{T}^{(s-1)}$, and $b_s$ might be the bold $i$ of the word $\mathbf{b}$. Also, there are no unbracketed letters $j$ in $b_1 \ldots b_{s-1}$, and thus all $j$ in $\mathrm{rw}(\mathbf{T}^{(s-1)})$ are bracketed. Thus, there are no letters $j$ in the first row of $\mathbf{T}^{(s-1)}$, and $i$ is inserted in the first row of $\mathbf{T}^{(s-1)}$, possibly bumping the letter $j'$ from column $c$ into an empty column $c+1$ in the process. Note that if $j'$ is bumped, moving it to column $c+1$ of $\mathbf{T}^{(s)}$ does not change the reading word, since column $c$ of $\mathbf{T}^{(s-1)}$ does not contain any primed letters other than $j'$. The reading word of $\mathbf{T}^{(s)}$ is thus the same as $\mathrm{rw}(\mathbf{T}^{(s-1)})$ except for an additional unbracketed $i$ at the end. The number of unbracketed letters $i$ in both $\mathrm{rw}(\mathbf{T}^{(s)})$ and $b_1 \ldots b_{s-1} b_s$ is thus increased by one compared to $\mathrm{rw}(\mathbf{T}^{(s-1)})$ and $b_1 \ldots b_{s-1}$. If $b_s$ is the bold $i$ of the word $\mathbf{b}$, the special $i$ of tableau $\mathbf{T}^{(s)}$ is the rightmost $i$ on the first row and corresponds to the rightmost unbracketed $i$ in $\mathrm{rw}(\mathbf{T}^{(s)})$. \noindent \textbf{Case 3.} Suppose $b_s = i$ and $b_s$ is bracketed with a $j$ in the word $b_1\ldots b_{s-1}$. 
In this case, according to the induction hypothesis, $\mathrm{rw}(\mathbf{T}^{(s-1)})$ has an unbracketed $j$. There are two options. \noindent \textbf{Case 3.1.} If the first row of $\mathbf{T}^{(s-1)}$ does not contain $j$, $b_s$ is inserted at the end of the first row of $\mathbf{T}^{(s-1)}$, possibly bumping $j'$ in the process. Regardless, $\mathrm{rw}(\mathbf{T}^{(s)})$ does not change except for attaching an $i$ at the end (see Case 2). This $i$ is bracketed with one unbracketed $j$ in $\mathrm{rw}(\mathbf{T}^{(s)})$. The special $i$ (if there was one in $\mathbf{T}^{(s-1)}$) does not change its position and the statement of the lemma remains true. \noindent \textbf{Case 3.2.} If the first row of $\mathbf{T}^{(s-1)}$ does contain a $j$, inserting $b_s$ into $\mathbf{T}^{(s-1)}$ bumps $j$ (possibly bumping $j'$ beforehand) into the second row, where $j$ is inserted at the end of the row. So, if the first row contains $n \geqslant 0$ elements $i$ and $m \geqslant 1$ elements $j$, the reading word $\mathrm{rw}(\mathbf{T}^{(s-1)})$ ends with $\ldots i^n j^m$, and $\mathrm{rw}(\mathbf{T}^{(s)})$ ends with $\ldots j i^{n+1} j^{m-1}$. Thus, the number of unbracketed letters $i$ does not change and if there was a special $i$ in the first row, it remains there and it still corresponds to the rightmost unbracketed $i$ in $\mathrm{rw}(\mathbf{T}^{(s)})$. \noindent \textbf{Case 4.} Suppose $b_s < i$. Inserting $b_s$ could change both the primed reading word and unprimed reading word of $\mathbf{T}^{(s-1)}$. As long as neither $i$ nor $j$ is bumped from the diagonal, we can treat primed and unprimed changes separately. \noindent \textbf{Case 4.1.} Suppose neither $i$ nor $j$ is not bumped from the diagonal during the insertion. This means that there are no transitions of letters $i$ or $j$ between the primed and the unprimed parts of the reading word. 
Thus, it is enough to track the bracketing relations in the unprimed reading word; the bracketing relations in the primed reading word can be verified the same way via the transposition. After we make sure that the number of unbracketed letters $i$ and $j$ changes neither in the primed nor unprimed reading word, it is enough to consider the case when the special $i$ is unprimed, since the case when it is primed can again be checked using the transposition. To avoid going back and forth, we combine these two processes together in each subcase to follow. \noindent \textbf{Case 4.1.1.} If there are no letters $i$ and $j$ in the bumping sequence, the unprimed $\{i,j\}$-subword of $\mathrm{rw}(\mathbf{T}^{(s)})$ is the same as in $\mathrm{rw}(\mathbf{T}^{(s-1)})$. The special $i$ (if there is one) remains in its position, and thus the statement of the lemma remains true. \noindent \textbf{Case 4.1.2.} Now consider the case when there is a $j$ in the bumping sequence, but no $i$. Let that $j$ be bumped from the row $r$. Since there is no $i$ bumped, row $r$ does not contain any letters $i$. Thus, bumping $j$ from row $r$ to the end of row $r+1$ does not change the $\{i,j\}$-subword of $\mathrm{rw}(\mathbf{T}^{(s-1)})$, so the statement of the lemma remains true. \noindent \textbf{Case 4.1.3.} Consider the case when there is an $i$ in the bumping sequence. Let that $i$ be bumped from the row $r$. \noindent \textbf{Case 4.1.3.1.} If there is a (non-diagonal) $j$ in row $r+1$, it is bumped into row $r+2$ ($j'$ may have been bumped in the process). Note that in this case the $i$ bumped from row $r$ could not have been a special one. If there are $n \geqslant 0$ elements $i$ and $m \geqslant 1$ elements $j$ in row $r$, the part of the reading word $\mathrm{rw}(\mathbf{T}^{(s-1)})$ with $\ldots i^n j^m i \ldots$ changes to $\ldots j i^{n+1} j^{m-1} \ldots$ in $\mathrm{rw}(\mathbf{T}^{(s)})$. 
The bracketing relations remain the same, and if row $r+1$ contained a special $i$, it would remain there and would correspond to the rightmost $i$ in $\mathrm{rw}(\mathbf{T}^{(s)})$. \noindent \textbf{Case 4.1.3.2.} If there are no letters $j$ in row $r+1$, and $j'$ in row $r+1$ does not bump a $j$, the $\{i,j\}$-subword does not change and the statement of the lemma remains true. \noindent \textbf{Case 4.1.3.3.} Now suppose there are no letters $j$ in row $r+1$ and $j'$ from row $r+1$ bumps a $j$ from another row. This can only happen if, before the $i$ was bumped, there was only one $i$ in row $r$ of $\mathbf{T}^{(s-1)}$, there is a $j'$ immediately below it, and there is a $j$ in the column to the right of $i$ and in row $r' \leqslant r$. If $r'=r$, then after the insertion process, $i$ and $j$ are bumped from row $r$ to row $r+1$. Since there was only one $i$ in row $r$ and there are no letters $j$ in row $r+1$, the $\{i,j\}$-subword of $\mathrm{rw}(\mathbf{T}^{(s-1)})$ does not change and the statement of the lemma remains true. Otherwise $r' < r$. Then there are no letters $i$ in row $r'$ and by assumption there is no letter $j$ in row $r+1$. Thus, moving $i$ to row $r+1$ and moving $j$ to the row $r'+1$ does not change the $\{i,j\}$-subword of $\mathrm{rw}(\mathbf{T}^{(s-1)})$ and the statement of the lemma remains true. \noindent \textbf{Case 4.2.} Suppose $i$ or $j$ (or possibly both) are bumped from the diagonal in the insertion process. \noindent \textbf{Case 4.2.1.} Consider the case when the insertion sequence ends with $\quad\cdots \rightarrow z \rightarrow j [j']$ with $z<i$ and possibly $ \rightarrow j$ right after it. Let the bumped diagonal $j$ be in column $c$. Then columns $1,2, \ldots, c$ of $\mathbf{T}^{(s-1)}$ could only contain elements $\leqslant z$, except for the $j$ on the diagonal. 
Thus, the bumping process just moves $j$ from the unprimed reading word to the primed reading word without changing the overall order of the $\{i,j\}$-subword. \noindent \textbf{Case 4.2.2.} Consider the case when the insertion sequence ends with $\quad \cdots \rightarrow i' \rightarrow i \rightarrow j[j']$ and possibly $\rightarrow j$. Let the bumped diagonal $j$ be in row (and column) $r$. Note that $r$ must be the last row of $\mathbf{T}^{(s-1)}$. Then $i$ has to be bumped from row $r-1$ (and, say, column $c$) and $i'$ also has to be in row $r-1$ (moreover, it has to be the only $i'$ in column $c-1$). Also, since there are no letters $j'$ in column $c$ (otherwise it would be in row $r$, which is impossible), bumping $i'$ to column $c$ does not change the $\{i,j\}$-subword of $\mathrm{rw}(\mathbf{T}^{(s-1)})$. Note that after $i'$ moves to column $c$, there are no $i'$ or $j'$ in columns $1,\ldots, r$, and thus priming $j$ and moving it to column $r+1$ does not change the $\{i,j\}$-subword. If the last row $r$ contains $n$ elements $j$, the $\{i,j\}$-subword of $\mathbf{T}^{(s-1)}$ contains $\ldots j^n i \ldots$ and after the insertion it becomes $\ldots j i j^{n-1} \ldots$, where the left $j$ is from the primed subword. Thus, the number of bracketed letters $i$ does not change. Also, if we moved the special $i$ in the process, it could only have been the bumped $i'$. Its position in the reading word is unaffected. \noindent \textbf{Case 4.2.3.} The case when the insertion sequence does not contain $i'$, does not bump $i$ from the diagonal, but contains $i$ and bumps $j$ from the diagonal is analogous to the previous case. \noindent \textbf{Case 4.2.4.} Suppose both $i$ and $j$ are bumped from the diagonal. That could only be the case with diagonal $i$ bumped from row (and column) $r$, bumping another letter $i$ from the row $r$ and column $r+1$, and bumping $j$ from row (and column) $r+1$ (and possibly bumping $j$ to row $r+2$ at the end). 
Let the number of letters $i'$ in column $r+1$ be $n$ and let the number of letters $j$ in row $r+1$ be $m$. \noindent \textbf{Case 4.2.4.1} Let $m\geqslant 2$. Then the $\{i,j\}$-subword of $\mathrm{rw}(\mathbf{T}^{(s-1)})$ contains $\ldots i^n j^m ii \ldots$ and after the insertion it becomes $\ldots j i^{n+1} j i j^{m-2} \ldots$. The number of unbracketed letters $i$ stays the same. Since $m \geqslant 2$, the special $i$ of $\mathbf{T}^{(s-1)}$ could not have been involved in the bumping procedure. However, the special $i$ might have been the bottommost $i'$ in column $r+1$ of $\mathbf{T}^{(s-1)}$, and after the insertion the special $i$ would still be the bottommost $i'$ in column $r+1$ and would correspond to the rightmost unbracketed $i$ in $\mathrm{rw}(\mathbf{T}^{(s)})$: \begin{equation*} \young(\cdot\cdoti'\cdot,:ii\cdot,::jj) \quad \mapsto \quad \young(\cdot\cdoti'\cdot,:\cdoti'\cdot,::ij',:::j) \end{equation*} \noindent \textbf{Case 4.2.4.2.} Let $m=1$. Then the $\{i,j\}$-subword of $\mathbf{T}^{(s-1)}$ contains $\ldots i^n j ii \ldots$ and after the insertion it becomes $\ldots j i^{n+1} i$. The number of unbracketed letters $i$ stays the same. If the special $i$ was in row $r$ and column $r+1$, then after the insertion it becomes a diagonal one, and it would still correspond to the rightmost unbracketed $i$ in $\mathrm{rw}(\mathbf{T}^{(s)})$. \noindent \textbf{Case 4.2.5.} Suppose only $i$ is bumped from the diagonal (let that $i$ be on row and column $r$). Note that there cannot be an $i'$ in column $r$. \noindent \textbf{Case 4.2.5.1.} Suppose $i$ from the diagonal bumps another $i$ from column $r+1$ and row $r$. In that case there are no letters $j$ in row $r+1$. No letters $j$ or $j'$ are affected and thus the $\{i,j\}$-subword of $\mathbf{T}^{(s)}$ does not change, and the special $i$ in $\mathbf{T}^{(s)}$ (if there is one) still corresponds to the rightmost unbracketed $i$ in $\mathrm{rw}(\mathbf{T}^{(s)})$. 
\noindent \textbf{Case 4.2.5.2.} Suppose $i$ from the diagonal bumps $j'$ from column $r+1$ and row $r$. Note that $j'$ must be the only $j'$ in column $r+1$. Suppose also that there is one $j$ in row $r+1$. Denote the number of letters $i'$ in column $r+1$ of $\mathbf{T}^{(s-1)}$ by $n$. If there is a $j$ in row $r+1$ of $\mathbf{T}^{(s-1)}$, then the $\{i,j\}$-subword of $\mathbf{T}^{(s-1)}$ contains $\ldots i^n jji \ldots$ and after the insertion it becomes $\ldots ji^{n+1}j \ldots$. If there is no $j$ in row $r+1$ of $\mathbf{T}^{(s-1)}$, then the $\{i,j\}$-subword of $\mathbf{T}^{(s-1)}$ contains $\ldots i^n ji \ldots$ and after the insertion it becomes $\ldots ji^{n+1} \ldots$. The number of unbracketed letters $i$ is unaffected. If the special $i$ of $\mathbf{T}^{(s-1)}$ was the bottommost $i'$ in column $r+1$ of $\mathbf{T}^{(s-1)}$, after the insertion the special $i$ is still the bottommost $i'$ in column $r+1$ and corresponds to the rightmost unbracketed $i$ in $\mathrm{rw}(\mathbf{T}^{(s)})$. \end{proof} \begin{corollary} \label{corollary.f annihilate} \begin{equation*} f_i (\mathbf{b}) = \mathbf{0} \quad \text{if and only if} \quad f_i (\mathbf{T}) = \mathbf{0}. \end{equation*} \end{corollary} \subsection{Proof of Theorem~\ref{theorem.main2}} \label{section.main.proof} By Lemma~\ref{lemma.main}, the cell $x$ in the definition of the operator $f_i$ corresponds to the bold $i$ in the tableau $\mathbf{T}$. Furthermore, we know how the bold $i$ moves during the insertion procedure. We assume that the bold $i$ exists in both $\mathbf{b}$ and $\mathbf{T}$, meaning that $f_i(\mathbf{b}) \neq \mathbf{0}$ and $f_i(\mathbf{T}) \neq \mathbf{0}$ by Corollary~\ref{corollary.f annihilate}. We prove Theorem~\ref{theorem.main2} by induction on the length of the word $\mathbf{b}$. \noindent \textbf{Base.} Our base is for words $\mathbf{b}$ with the last letter being a bold $i$ (i.e. rightmost unbracketed $i$). 
Let $\mathbf{b} = b_1 \ldots b_{h-1} b_h$ and $f_i(\mathbf{b}) = b_1 \ldots b_{h-1} b'_h$, where $b_h = i$ and $b'_h = j$. Denote the mixed insertion tableau of $b_1 \ldots b_{h-1}$ as $\mathbf{T}_0$, the insertion tableau of $b_1 \ldots b_{h-1} b_h$ as $\mathbf{T}$, and the insertion tableau of $b_1 \ldots b_{h-1} b'_h$ as $\mathbf{T}'$. Note that $\mathbf{T}_0$ does not have letters $j$ in the first row. If the first row of $\mathbf{T}_0$ ends with $\ldots j'$, then the first row of $\mathbf{T}$ ends with $\ldots \mathbf{i} j'$ and the first row of $\mathbf{T}'$ ends with $\ldots j' j$. If the first row of $\mathbf{T}_0$ does not contain $j'$, the first row of $\mathbf{T}$ ends with $\ldots \mathbf{i}$ and the first row of $\mathbf{T}'$ ends with $\ldots j$, and the cell $x_S$ is empty. In both cases $f_i(\mathbf{T}) = \mathbf{T}'$. \noindent \textbf{Induction step.} Now, let $\mathbf{b} = b_1 \ldots b_h$ with operator $f_i$ acting on the letter $b_s$ in $\mathbf{b}$ with $s < h$. Denote the mixed insertion tableau of $b_1 \ldots b_{h-1}$ as $\mathbf{T}$ and the insertion tableau of $f_i(b_1 \ldots b_{h-1})$ as $\mathbf{T}'$. By induction hypothesis, we know that $f_i(\mathbf{T}) = \mathbf{T}'$. We want to show that $f_i(\mathbf{T} \leftsquigarrow b_h) = \mathbf{T}' \leftsquigarrow b_h$. In Cases 1-3 below, we assume that the bold letter $i$ is unprimed. Since almost all results from the case with unprimed $i$ are transferrable to the case with primed bold $i$ via the transposition of the tableau $\mathbf{T}$, we just need to cover the differences in Case 4. \noindent \textbf{Case 1.} Suppose $\mathbf{T}$ falls under Case (1) of the rules for $f_i$: the bold $i$ is in the non-diagonal cell $x$ in row $r$ and column $c$ and the cell $x_E$ in the same row and column $c+1$ contains the entry $j'$. Consider the insertion path of $b_h$. 
\noindent \textbf{Case 1.1.} If the insertion path of $b_h$ in $\mathbf{T}$ contains neither cell $x$ nor cell $x_E$, the insertion path of $b_h$ in $\mathbf{T}'$ also does not contain cells $x$ and $x_E$. Thus, $f_i(\mathbf{T} \leftsquigarrow b_h) = \mathbf{T}' \leftsquigarrow b_h$. \noindent \textbf{Case 1.2.} Suppose that during the insertion of $b_h$ into $\mathbf{T}$, the bold $i$ is row-bumped by an unprimed element $d < i$ or is column-bumped by a primed element $d' \leqslant i'$. This could only happen if the bold $i$ is the unique $i$ in row $r$ of $\mathbf{T}$. During the insertion process, the bold $i$ is inserted into row $r+1$. Since there are no letters $i$ in row $r$ of $\mathbf{T}'$, inserting $b_h$ into $\mathbf{T}'$ inserts $d$ in cell $x$, bumps $j'$ to cell $x_E$, and bumps $j$ into row $r+1$. Thus we are in a situation similar to the induction base. It is easy to check that row $r+1$ does not contain any letters $j$ in $\mathbf{T}$. If it contains $j'$, this $j'$ is bumped back into row $r+1$. Similar to the induction base, $f_i(\mathbf{T} \leftsquigarrow b_h) = \mathbf{T}' \leftsquigarrow b_h$. \noindent \textbf{Case 1.3.} Suppose that during the insertion of $b_h$ into $\mathbf{T}$, an unprimed $i$ is inserted into row $r$. Note that in this case, row $r$ in $\mathbf{T}$ must contain a $j$ (or else the $i$ from row $r$ would not be the rightmost unbracketed $i$ in $\mathrm{rw}(\mathbf{T})$). Thus inserting $i$ into row $r$ in $\mathbf{T}$ shifts the bold $i$ to column $c+1$, shifts $j'$ to column $c+2$ and bumps $j$ to row $r+1$. Inserting $i$ into row $r$ in $\mathbf{T}'$ shifts $j'$ to column $c+1$ with a $j$ to the right of it, and bumps $j$ into row $r+1$. Thus $f_i(\mathbf{T} \leftsquigarrow b_h) = \mathbf{T}' \leftsquigarrow b_h$. \noindent \textbf{Case 1.4.} Suppose that during the insertion of $b_h$ into $\mathbf{T}$, the $j'$ in cell $x_E$ is column-bumped by a primed element $d'$ and the cell $x$ is unaffected. 
Note that in order for $\mathbf{T} \leftsquigarrow b_h$ to be a valid primed tableau, $i$ must be smaller than $d'$, and thus $d'$ could only be $j'$. On the other hand, $j'$ cannot be inserted into column $c+1$ of $\mathbf{T}'$ in order for $\mathbf{T}' \leftsquigarrow b_h$ to be a valid primed tableau. Thus this case is impossible. \noindent \textbf{Case 2.} Suppose tableau $\mathbf{T}$ falls under Case (2a) of the crystal operator rules for $f_i$. This means that for a bold $i$ in cell $x$ (in row $r$ and column $c$) of tableau $\mathbf{T}$, the cell $x_E$ contains the entry $j$ or is empty and cell $x_S$ is empty. Tableau $\mathbf{T}'$ has all the same elements as $\mathbf{T}$, except for a $j$ in the cell $x$. We are interested in the case when inserting $b_h$ into either $\mathbf{T}$ or $\mathbf{T}'$ bumps the element from cell $x$. \noindent \textbf{Case 2.1.} Suppose that the non-diagonal bold $i$ in $\mathbf{T}$ (in row $r$) is row-bumped by an unprimed element $d < i$ or column-bumped by a primed element $d' < j'$. Element $d$ (or $d'$) bumps the bold $i$ into row $r+1$ of $\mathbf{T}$, while in $\mathbf{T}'$ (since there are no letters $i$ in row $r$ of $\mathbf{T}'$) it bumps $j$ from cell $x$ into row $r+1$. Thus we are in the situation of the induction base and $f_i(\mathbf{T} \leftsquigarrow b_h) = \mathbf{T}' \leftsquigarrow b_h$. \noindent \textbf{Case 2.2.} Suppose $x$ is a non-diagonal cell in row $r$, and during the insertion of $b_h$ into $\mathbf{T}$, an unprimed $i$ is inserted into the row $r$. In this case, row $r$ in $\mathbf{T}$ must contain a letter $j$. The insertion process shifts the bold $i$ one cell to the right in $\mathbf{T}$ and bumps a $j$ into row $r+1$, while in $\mathbf{T}'$ it just bumps $j$ into the row $r+1$. We end up in Case (2a) of the crystal operator rules for $f_i$ with bold $i$ in the cell $x_E$. 
\noindent \textbf{Case 2.3.} Suppose that during the insertion of $b_h$ into $\mathbf{T}'$, the $j$ in the non-diagonal cell $x$ is column-bumped by a $j'$. This means that $j'$ was previously bumped from column $c-1$ and row $\geqslant r$. Thus the cell $x_{SW}$ (cell to the left of an empty $x_{S}$) is non-empty. Moreover, right before inserting $j'$ into the column $c$, the cell $x_{SW}$ contains an entry $< j'$. Inserting $j'$ into column $c$ of $\mathbf{T}$ just places $j'$ into the empty cell $x_S$. Inserting $j'$ into column $c$ of $\mathbf{T}'$ places $j'$ into $x$, and bumps $j$ into the empty cell $x_S$. Thus, we end up in Case (2c) of the crystal operator rules after the insertion of $b_h$ with $y = x_S$. \noindent \textbf{Case 2.4.} Suppose that $x$ in $\mathbf{T}$ is a diagonal cell (in row $r$ and column $r$) and that it is row-bumped by an element $d<i$. Note that in this case there cannot be any letter $j$ in row $r+1$. Also, since $d$ is inserted into cell $x$, there cannot be any letters $i'$ in columns $1,\ldots, r$, and thus there cannot be any letters $j'$ in column $r+1$ (otherwise the $i$ in cell $x$ would not be bold). The bumped bold $i$ in tableau $\mathbf{T}$ is inserted as a primed bold $i'$ into the cell $z$ of column $r+1$. \noindent \textbf{Case 2.4.1.} Suppose that there are no letters $i$ in column $r+1$ of $\mathbf{T}$. In this case, the cell $z$ in $\mathbf{T}$ either contains $j$ (and then that $j$ would be bumped to the next row) or is empty. Inserting $b_h$ into tableau $\mathbf{T}'$ bumps the diagonal $j$ in cell $x$, which is inserted as a $j'$ into cell $z$, possibly bumping $j$ after that. Thus, $\mathbf{T} \leftsquigarrow b_h$ falls under Case (2a) of the ``primed'' crystal rules with the bold $i'$ in cell $z$ (note that there cannot be any $j'$ in cell $(z^*)_E$ of the tableau $(\mathbf{T} \leftsquigarrow b_h)^*$).
Since $\mathbf{T} \leftsquigarrow b_h$ and $\mathbf{T}' \leftsquigarrow b_h$ differ only by the cell $z$, $f_i(\mathbf{T} \leftsquigarrow b_h) = \mathbf{T}' \leftsquigarrow b_h$. \noindent \textbf{Case 2.4.2.} Suppose that there is a letter $i$ in cell $z$ of column $r+1$ of $\mathbf{T}$. Note that cell $z$ can only be in rows $1, \ldots, r-1$ and thus $z_{SW}$ contains an element $< i$. Thus, during the insertion process of $b_h$ into $\mathbf{T}$, diagonal bold $i$ from cell $x$ is inserted as bold $i'$ into cell $z$, bumping the $i$ from cell $z$ into cell $z_S$ (possibly bumping $j$ afterwards). On the other hand, inserting $b_h$ into $\mathbf{T}'$ bumps the diagonal $j$ from cell $x$ into cell $z_S$ as a $j'$ (possibly bumping $j$ afterwards). Thus, $\mathbf{T} \leftsquigarrow b_h$ falls under Case (1) of the ``primed'' crystal rules with the bold $i'$ in cell $z$, and so $f_i(\mathbf{T} \leftsquigarrow b_h) = \mathbf{T}' \leftsquigarrow b_h$. \noindent \textbf{Case 2.5.} Suppose that $x$ is a diagonal cell (in row $r$ and column $r$) and that during the insertion of $b_h$ into $\mathbf{T}$, an unprimed $i$ is inserted into row $r$. In this case, the entry in cell $x_E$ has to be $j$ and the diagonal cell $x_{ES}$ must be empty. Inserting $i$ into row $r$ of $\mathbf{T}$ bumps a $j$ from cell $x_E$ into cell $x_{ES}$. On the other hand, inserting $i$ into row $r$ of $\mathbf{T}'$ bumps a $j$ from the diagonal cell $x$, which in turn is inserted as a $j'$ into cell $x_E$, which bumps $j$ from cell $x_E$ into cell $x_{ES}$. Thus, $\mathbf{T} \leftsquigarrow b_h$ falls under Case (2b) of the crystal rules with bold $i$ in cell $x_E$ and $y= x_{ES}$, and so $f_i(\mathbf{T} \leftsquigarrow b_h) = \mathbf{T}' \leftsquigarrow b_h$. \noindent \textbf{Case 3.} Suppose that $\mathbf{T}$ falls under Case (2b) or (2c) of the crystal operator rules. That means $x_E$ contains the entry $j$ or is empty and $x_S$ contains the entry $j'$ or $j$. 
There is a chain of letters $j'$ and $j$ in $\mathbf{T}$ starting from $x_S$ and ending on a box $y$. According to the induction hypothesis, $y$ is either on the diagonal and contains the entry $j$ or $y$ is not on the diagonal and contains the entry $j'$. The tableau $\mathbf{T}' = f_i (\mathbf{T})$ has $j'$ in cell $x$ and $j$ in cell $y$. We are interested in the case when inserting $b_h$ into $\mathbf{T}$ affects cell $x$ or affects some element of the chain. Let $r_x$ and $c_x$ be the row and the column index of cell $x$, and $r_y$, $c_y$ are defined accordingly. Note that during the insertion process, $j'$ cannot be inserted into columns $c_y,\ldots, c_x$ and $j$ cannot be inserted into rows $r_x +1,\ldots, r_y$, since otherwise $\mathbf{T} \leftsquigarrow b_h$ would not be a primed tableau. \noindent \textbf{Case 3.1.} Suppose the bold $i$ in cell $x$ (of row $r_x$ and column $c_x$) of $\mathbf{T}$ is row-bumped by an unprimed element $d < i$ or column-bumped by a primed element $d' < i$. Note that in this case, bold $i$ in row $r_x$ is the only $i$ in this row, so row $r_x+1$ cannot contain any letter $j$. Therefore the entry in cell $x_S$ must be $j'$. In tableau $\mathbf{T}$, the bumped bold $i$ is inserted into cell $x_S$ and $j'$ is bumped from cell $x_S$ into column $c_x+1$, reducing the chain of letters $j'$ and $j$ by one. Notice that since $x_E$ either contains a $j$ or is empty, $j'$ cannot be bumped into a position to the right of $x_S$, so Case (1) of the crystal rules for $\mathbf{T} \leftsquigarrow b_h$ cannot occur. As for $\mathbf{T}'$, inserting $d$ into row $r_x$ (or inserting $d'$ into column $c_x$) just bumps $j'$ into column $c_x+1$, thus reducing the length of the chain by one in that tableau as well. Note that in the case when the length of the chain is one (i.e. $y=x_S$), we would end up in Case (2a) of the crystal rules after the insertion. Otherwise, we are still in Case (2b) or (2c). 
In both cases, $f_i(\mathbf{T} \leftsquigarrow b_h) = \mathbf{T}' \leftsquigarrow b_h$. \noindent \textbf{Case 3.2.} Suppose a letter $i$ is inserted into the same row as $x$ (in row $r_x$). In this case, $x_E$ must contain a $j$ (otherwise the bold $i$ would not be in cell $x$). After inserting $b_h$ into $\mathbf{T}$, the bold $i$ moves to cell $x_E$ (note that there cannot be a $j'$ to the right of $x_E$) and $j$ from $x_E$ is bumped to cell $x_{ES}$, thus the chain now starts at $x_{ES}$. As for $\mathbf{T}'$, inserting $i$ into the row $r_x$ moves $j'$ from cell $x$ to the cell $x_E$ and moves $j$ from cell $x_E$ to cell $x_{ES}$. Thus, $f_i(\mathbf{T} \leftsquigarrow b_h) = \mathbf{T}' \leftsquigarrow b_h$. \noindent \textbf{Case 3.3.} Consider the chain of letters $j$ and $j'$ in $\mathbf{T}$. Suppose an element of the chain $z \neq x,y$ is row-bumped by an element $d < j$ or is column-bumped by an element $d'<j'$. The bumped element $z$ (of row $r_z$ and column $c_z$) must be a ``corner'' element of the chain, i.e. in $\mathbf{T}$ the entry in the boxes must be $c(z)=j', \ c(z_E) = j$ and $c(z_S)$ must be either $j$ or $j'$. Therefore, inserting $b_h$ into $\mathbf{T}$ bumps $j'$ from box $z$ to box $z_E$ and bumps $j$ from box $z_E$ to box $z_{ES}$, and inserting $b_h$ into $\mathbf{T}'$ has exactly the same effect. Thus, there is still a chain of letters $j$ and $j'$ from $x_S$ to $y$ in $\mathbf{T}$ and $\mathbf{T}'$, and $f_i(\mathbf{T} \leftsquigarrow b_h) = \mathbf{T}' \leftsquigarrow b_h$. \noindent \textbf{Case 3.4.} Suppose $\mathbf{T}$ falls under Case (2c) of the crystal rules (i.e. $y$ is not a diagonal cell) and during the insertion of $b_h$ into $\mathbf{T}$, $j'$ in cell $y$ is row-bumped (resp. column-bumped) by an element $d<j'$ (resp. $d'<j'$). Since $y$ is the end of the chain of letters $j$ and $j'$, $y_S$ must be empty. Also, since it is bumped, the entry in $y_E$ must be $j$. 
Thus, inserting $b_h$ into $\mathbf{T}$ bumps $j'$ from cell $y$ to cell $y_E$ and bumps $j$ from cell $y_E$ into row $r_y+1$ and column $\leqslant c_y$. On the other hand, inserting $b_h$ into $\mathbf{T}'$ bumps $j$ from cell $y$ into row $r_y+1$ and column $\leqslant c_y$. The chain of letters $j$ and $j'$ now ends at $y_E$ and $f_i(\mathbf{T} \leftsquigarrow b_h) = \mathbf{T}' \leftsquigarrow b_h$. \noindent \textbf{Case 3.5.} Suppose $\mathbf{T}$ falls under Case (2b) of the crystal rules (i.e. $y$ with entry $j$ is a diagonal cell) and during the insertion of $b_h$ into $\mathbf{T}$, $j$ in cell $y$ is row-bumped by an element $d < j$. In this case, the cell $y_E$ must contain the entry $j$. Thus, inserting $b_h$ into $\mathbf{T}$ bumps $j$ from cell $y$ (making it $j'$) to cell $y_E$ and bumps $j$ from cell $y_E$ to the diagonal cell $y_{ES}$. On the other hand, inserting $b_h$ into $\mathbf{T}'$ has exactly the same effect. The chain of letters $j$ and $j'$ now ends at the diagonal cell $y_{ES}$, so $\mathbf{T}\leftsquigarrow b_h$ falls under Case (2b) of the crystal rules and $f_i(\mathbf{T} \leftsquigarrow b_h) = \mathbf{T}' \leftsquigarrow b_h$. \noindent \textbf{Case 4.} Suppose the bold $i$ in tableau $\mathbf{T}$ is a primed $i$. We use the transposition operation on $\mathbf{T}$, and the resulting tableau $\mathbf{T}^*$ falls under one of the cases of the crystal operator rules. When $b_h$ is inserted into $\mathbf{T}$, we can easily translate the insertion process to the transposed tableau $\mathbf{T}^*$ so that $[\mathbf{T}^* \leftsquigarrow (b_h+1)'] = [\mathbf{T} \leftsquigarrow b_h]^*$: the letter $(b_h+1)'$ is inserted into the first column of $\mathbf{T}^*$, and all other insertion rules stay exactly the same, with one exception -- when the diagonal element $d'$ is column-bumped from the diagonal cell of $\mathbf{T}^*$, the element $d'$ becomes $(d-1)$ and is inserted into the row below.
Notice that the primed reading word of $\mathbf{T}$ becomes an unprimed reading word of $\mathbf{T}^*$. Thus, the bold $i$ in tableau $\mathbf{T}^*$ corresponds to the rightmost unbracketed $i$ in the \textit{unprimed} reading word of $\mathbf{T}^*$. Therefore, everything we have deduced in Cases 1-3 from the fact that bold $i$ is in the cell $x$ will remain valid here. Given $f_i(\mathbf{T}^*) = \mathbf{T}'^*$, we want to make sure that $f_i(\mathbf{T}^* \leftsquigarrow (b_h+1)') = \mathbf{T}'^* \leftsquigarrow (b_h+1)'$. The insertion process of $(b_h+1)'$ into $\mathbf{T}^*$ falls under one of the cases above and the proof of $f_i(\mathbf{T}^* \leftsquigarrow (b_h+1)') = \mathbf{T}'^* \leftsquigarrow (b_h+1)'$ is exactly the same as the proof in those cases. We only need to check the cases in which the diagonal element might be affected differently in the insertion process of $(b_h+1)'$ into $\mathbf{T}^*$ compared to the insertion process of $(b_h+1)'$ into $\mathbf{T}'^*$. Fortunately, this never happens: in Case 1 neither $x$ nor $x_E$ could be diagonal elements; in Cases 2 and 3 $x$ cannot be on the diagonal, and if $x_E$ is on diagonal, it must be empty. Following the proof of those cases, $f_i(\mathbf{T}^* \leftsquigarrow (b_h+1)') = \mathbf{T}'^* \leftsquigarrow (b_h+1)'$. \section{Proof of Theorem~\ref{theorem.main3}} \label{section.proof main3} This appendix provides the proof of Theorem~\ref{theorem.main3}. In this section we set $j=i+1$. We begin with two preliminary lemmas. \subsection{Preliminaries} \begin{lemma} \label{lemma.chains} Consider a shifted tableau $\mathbf{T}$. \begin{enumerate} \item Suppose tableau $\mathbf{T}$ falls under Case (2c) of the $f_i$ crystal operator rules, that is, there is a chain of letters $j$ and $j'$ starting from the bold $i$ in cell $x$ and ending at $j'$ in cell $x_H$. Then for any cell $z$ of the chain containing $j$, the cell $z_{NW}$ contains $i$. 
\item Suppose tableau $\mathbf{T}$ falls under Case (2b) of the $f_i$ crystal operator rules, that is, there is a chain of letters $j$ and $j'$ starting from the bold $i$ in cell $x$ and ending at $j$ in the diagonal cell $x_H$. Then for any cell $z$ of the chain containing $j$ or $j'$, the cell $z_{NW}$ contains $i$ or $i'$ respectively. \end{enumerate} \end{lemma} \Yboxdim 13pt \begin{equation*} \young(\cdot\cdot\cdot\cdot\cdot\cdot\cdot\boldsymbol{i},:\cdot\cdot\cdot iiij',::\cdot\cdot j' jjj,:::\cdot j') \qquad \young(\cdot\cdot\cdot\cdot i'\boldsymbol{i},:\cdot i' iij',::ij' jj,:::j) \end{equation*} \begin{proof} The proof of the first part is based on the observation that every $j$ in the chain must be bracketed with some $i$ in the reading word $\mathrm{rw}(\mathbf{T})$. Moreover, if the bold $i$ is located in row $r_x$ and rows $r_x, r_x+1,\ldots, r_z$ contain $n$ letters $j$, then rows $r_x, r_x +1,\ldots, r_z-1$ must contain exactly $n$ non-bold letters $i$. To prove that these elements $i$ must be located in the cells to the North-West of the cells containing $j$, we proceed by induction on $n$. When we consider the next cell $z$ containing $j$ in the chain that must be bracketed, notice that the columns $c_z, c_z+1,\ldots, c_x$ already contain an $i$, and thus we must put the next $i$ in column $c_z -1$; there is no other row to put it than $r_z-1$. Thus, $z_{NW}$ must contain an $i$. This line of logic also works for the second part of the lemma. We can show that for any cell $z$ of the chain containing $j$, the cell $z_{NW}$ must contain an $i$. As for cells $z$ containing $j'$, we can again use the fact that the corresponding letters $j$ in the primed reading word of $\mathbf{T}$ must be bracketed. Notice that these letters $j'$ cannot be bracketed with unprimed letters $i$, since all unprimed letters $i$ are already bracketed with unprimed letters $j$. Thus, $j'$ must be bracketed with some $i'$ from a column to its left.
Let columns $1,2, \ldots, c_z$ contain $m$ elements $j'$. Using the same induction argument as in the previous case, we can show that $z_{NW}$ must contain $i'$. \end{proof} Next we need to figure out how $y$ in the raising crystal operator $e_i$ is related to the lowering operator rules for $f_i$. \begin{lemma} \label{lemma.y} Consider a pair of tableaux $\mathbf{T}$ and $\mathbf{T}' = f_i(\mathbf{T})$. \begin{enumerate} \item If tableau $\mathbf{T}$ (in case when bold $i$ in $\mathbf{T}$ is unprimed) or $\mathbf{T}^*$ (if bold $i$ is primed) falls under Case (1) of the $f_i$ crystal operator rules, then cell $y$ of the $e_i$ crystal operator rules is cell $x_E$ of $\mathbf{T}'$ or $(\mathbf{T}')^*$, respectively. \item If tableau $\mathbf{T}$ (in case when bold $i$ in $\mathbf{T}$ is unprimed) or $\mathbf{T}^*$ (if bold $i$ is primed) falls under Case (2a) of the $f_i$ crystal operator rules, then cell $y$ of the $e_i$ crystal operator rules is located in cell $x$ of $\mathbf{T}'$ or $(\mathbf{T}')^*$, respectively. \item If tableau $\mathbf{T}$ falls under Case (2b) of the $f_i$ crystal operator rules, then cell $y$ of the $e_i$ crystal operator rules is cell $x^*$ of $(\mathbf{T}')^*$. \item If tableau $\mathbf{T}$ (in case when bold $i$ in $\mathbf{T}$ is unprimed) or $\mathbf{T}^*$ (if bold $i$ is primed) falls under Case (2c) of the $f_i$ crystal operator rules, then cell $y$ of the $e_i$ crystal operator rules is cell $x_H$ of $\mathbf{T}'$ or $(\mathbf{T}')^*$, respectively. \end{enumerate} \end{lemma} \begin{proof} In all the cases above, we need to compare reading words $\mathrm{rw}(\mathbf{T})$ and $\mathrm{rw}(\mathbf{T}')$. Since $f_i$ affects at most two boxes of $\mathbf{T}$, it is easy to track how the reading word $\mathrm{rw}(\mathbf{T})$ changes after applying $f_i$. 
We want to check where the bold $j$ under $e_i$ ends up in $\mathrm{rw}(\mathbf{T}')$ and in $\mathbf{T}'$, which allows us to determine the cell $y$ of the $e_i$ crystal operator rules. \noindent \textbf{Case 1.1.} Suppose $\mathbf{T}$ falls under Case (1) of the $f_i$ crystal operator rules, that is, the bold $i$ in cell $x$ is to the left of $j'$ in cell $x_E$. Furthermore, $f_i$ acts on $\mathbf{T}$ by changing the entry in $x$ to $j'$ and by changing the entry in $x_E$ to $j$. In the reading word $\mathrm{rw}(\mathbf{T})$, this corresponds to moving the $j$ corresponding to $x_E$ to the left and changing the bold $i$ (the rightmost unbracketed $i$) corresponding to cell $x$ to $j$ (that then corresponds to $x_E$). Moving a bracketed $j$ in $\mathrm{rw}(\mathbf{T})$ to the left does not change the $\{i,j\}$ bracketing, and thus the $j$ corresponding to $x_E$ in $\mathrm{rw}(\mathbf{T}')$ is still the leftmost unbracketed $j$. Therefore, this $j$ is the bold $j$ of $\mathbf{T}'$ and is located in cell $x_E$. \noindent \textbf{Case 1.2.} Suppose the bold $i$ in $\mathbf{T}$ is primed and $\mathbf{T}^*$ falls under Case (1) of the $f_i$ crystal operator rules. After applying lowering crystal operator rules to $\mathbf{T}^*$ and conjugating back, the bold primed $i$ in cell $x^*$ of $\mathbf{T}$ changes to an unprimed $i$, and the unprimed $i$ in cell $(x^*)_S$ of $\mathbf{T}$ changes to $j'$. In terms of the reading word of $\mathbf{T}$, it means moving the bracketed $i$ (in the unprimed reading word) corresponding to $(x^*)_S$ to the left so that it corresponds to $x^*$, and then changing the bold $i$ (in the primed reading word) corresponding to $x^*$ into the letter $j$ corresponding to $(x^*)_S$. The first operation does not change the bracketing relations between $i$ and $j$, and thus the leftmost unbracketed $j$ in $\mathrm{rw}(\mathbf{T}')$ corresponds to $(x^*)_S$. Hence the bold unprimed $j$ is in cell $x_E$ of $(\mathbf{T}')^*$. 
\noindent \textbf{Case 2.1.} If $\mathbf{T}$ falls under Case (2a) of the $f_i$ crystal operator rules, $f_i$ just changes the entry in $x$ from $i$ to $j$. The rightmost unbracketed $i$ in the reading word of $\mathbf{T}$ changes to the leftmost unbracketed $j$ in $\mathrm{rw}(\mathbf{T}')$. Thus, the bold $j$ in $\mathrm{rw}(\mathbf{T}')$ corresponds to cell $x$. \noindent \textbf{Case 2.2.} The case when $\mathbf{T}^*$ falls under Case (2a) of the $f_i$ crystal operator rules is the same as the previous case. \noindent \textbf{Case 3.} Suppose $\mathbf{T}$ falls under Case (2b) of $f_i$ crystal operator rules. Then there is a chain starting from cell $x$ (of row $r_x$ and column $c_x$) and ending at the diagonal cell $z$ (of row and column $r_z$) consisting of elements $j$ and $j'$. Applying $f_i$ to $\mathbf{T}$ changes the entry in $x$ from $i$ to $j'$. In $\mathrm{rw}(\mathbf{T})$ this implies moving the bold $i$ from the unprimed reading word to the left through elements $i$ and $j$ corresponding to rows $r_x, r_x +1,\ldots, r_z$, then through elements $i$ and $j$ in the primed reading word corresponding to columns $c_z-1, \ldots, c_x$, and then changing that $i$ to $j$ which corresponds to cell $x$. But according to Lemma~\ref{lemma.chains}, the letters $i$ and $j$ in these rows and columns are all bracketed with each other, since for every $j$ or $j'$ in the chain there is a corresponding $i$ or $i'$ in the North-Western cell. (Notice that there cannot be any other letter $j$ or $j'$ outside of the chain in rows $r_x +1,\ldots, r_z$ and in columns $c_z-1, \ldots, c_x$.) Thus, moving the bold $i$ to the left in $\mathrm{rw}(\mathbf{T})$ does not change the bracketing relations. Changing it to $j$ makes it the leftmost unbracketed $j$ in $\mathrm{rw}(\mathbf{T}')$. 
Therefore, the bold $j$ in $\mathrm{rw}(\mathbf{T}')$ corresponds to the primed $j$ in cell $x$ of $\mathbf{T}'$, and the cell $y$ of the $e_i$ crystal operator rules is thus cell $x^*$ in $(\mathbf{T}')^*$. \noindent \textbf{Case 4.1.} Suppose $\mathbf{T}$ falls under Case (2c) of the $f_i$ crystal operator rules. There is a chain starting from cell $x$ (in row $r_x$ and column $c_x$) and ending at cell $x_H$ (in row $r_H$ and column $c_H$) consisting of elements $j$ and $j'$. Applying $f_i$ to $\mathbf{T}$ changes the entry in $x$ from $i$ to $j'$ and changes the entry in $x_H$ from $j'$ to $j$. Moving $j'$ from cell $x_H$ to cell $x$ moves the corresponding bracketed $j$ in the reading word $\mathrm{rw}(\mathbf{T})$ to the left, and thus does not change the $\{i,j\}$ bracketing relations in $\mathrm{rw}(\mathbf{T}')$. On the other hand, moving the bold $i$ from cell $x$ to cell $x_H$ and then changing it to $j$ moves the bold $i$ in $\mathrm{rw}(\mathbf{T})$ to the right through elements $i$ and $j$ corresponding to rows $r_x, r_x +1,\ldots, r_H$, and then changes it to $j$. Note that according to Lemma~\ref{lemma.chains}, each $j$ in rows $r_x+1, r_x +2,\ldots, r_H$ has a corresponding $i$ from rows $r_x, r_x +1,\ldots, r_H - 1$ that it is bracketed with, and vice versa. Thus, moving the bold $i$ to the position corresponding to $x_H$ does not change the fact that it is the rightmost unbracketed $i$ in $\mathrm{rw}(\mathbf{T})$. Thus, the bold $j$ in $\mathrm{rw}(\mathbf{T}')$ corresponds to the unprimed $j$ in cell $x_H$ of $\mathbf{T}'$. \noindent \textbf{Case 4.2.} Suppose $\mathbf{T}$ has a primed bold $i$ and $\mathbf{T}^*$ falls under Case (2c) of the $f_i$ crystal operator rules. This means that there is a chain (expanding in North and East directions) in $\mathbf{T}$ starting from $i'$ in cell $x^*$ and ending in cell $x_H^*$ with entry $i$ consisting of elements $i$ and $j'$.
The crystal operator $f_i$ changes the entry in cell $x^*$ from $i'$ to $i$ and changes the entry in $x_H^*$ from $i$ to $j'$. For the reading word $\mathrm{rw}(\mathbf{T})$ this means moving the bracketed $i$ in the unprimed reading word to the right (which does not change the bracketing relations) and moving the bold $i$ in the primed reading word through letters $i$ and $j$ corresponding to columns $c_x, c_x +1 ,\ldots, c_H$, which are bracketed with each other according to Lemma~\ref{lemma.chains}. Thus, changing the bold $i$ to $j$ makes it the leftmost unbracketed $j$ in $\mathrm{rw}(\mathbf{T}')$. Hence the bold primed $j$ in $\mathbf{T}'$ corresponds to cell $x_H^*$. Therefore $y$ from the $e_i$ crystal operator rules is cell $x_H$ of $(\mathbf{T}')^*$. \end{proof} \subsection{Proof of Theorem~\ref{theorem.main3}} Let $\mathbf{T'} = f_i(\mathbf{T})$. \noindent \textbf{Case 1.} If $\mathbf{T}$ (or $\mathbf{T}^*$) falls under Case (1) of the $f_i$ crystal operator rules, then according to Lemma~\ref{lemma.y}, $e_i$ acts on $\mathbf{T}'$ (or on $(\mathbf{T}')^*$) by changing the entry in cell $y_W = x$ back to $i$ and changing the entry in $y = x_E$ back to $j'$. Thus, the statement of the theorem is true. \noindent \textbf{Case 2.} If $\mathbf{T}$ (or $\mathbf{T}^*$) falls under Case (2a) of the $f_i$ crystal operator rules, then according to Lemma~\ref{lemma.y}, $e_i$ acts on $\mathbf{T}'$ (or on $(\mathbf{T}')^*$) by changing the entry in the cell $y = x$ back to $i$. Thus, the statement of the theorem is true. \noindent \textbf{Case 3.} If $\mathbf{T}$ falls under Case (2b) of the $f_i$ crystal operator rules, then according to Lemma~\ref{lemma.y}, $e_i$ acts on cell $y=x^*$ of $(\mathbf{T}')^*$. Note that according to Lemma~\ref{lemma.chains}, there is a maximal chain of letters $i$ and $j'$ in $(\mathbf{T}')^*$ starting at $y$ and ending at a diagonal cell $y_T$.
Thus, $e_i$ changes the entry in cell $y=x^*$ in $(\mathbf{T}')^*$ from $j$ to $j'$, so the entry in cell $x$ in $\mathbf{T}'$ goes back from $j'$ to $i$. Thus, the statement of the theorem is true. \noindent \textbf{Case 4.} If $\mathbf{T}$ (or $\mathbf{T}^*$) falls under Case (2c) of the $f_i$ crystal operator rules, then according to Lemma~\ref{lemma.y}, $e_i$ acts on cell $y=x_H$ of $\mathbf{T}'$ (or of $(\mathbf{T}')^*$). Note that according to Lemma~\ref{lemma.chains}, there is a maximal (since $c(x_E) \neq j'$ and $c(x_E) \neq i$) chain of letters $i$ and $j'$ in $\mathbf{T}'$ (or $(\mathbf{T}')^*$) starting at $y$ and ending at cell $y_T = x$. Thus, $e_i$ changes the entry in cell $y=x_H$ in $(\mathbf{T}')^*$ from $j$ back to $j'$ and changes the entry in $y_T = x$ from $j'$ back to $i$. Thus, the statement of the theorem is true. \end{document}
\begin{document} \title[Unbounded operator valued local positive maps ]{Factorization properties for unbounded local positive maps } \author{Maria Joi\c{t}a} \address{Department of Mathematics, Faculty of Applied Sciences, University Politehnica of Bucharest, 313 Spl. Independentei, 060042, Bucharest, Romania and Simion Stoilow Institute of Mathematics of the Romanian Academy, P.O. Box 1-764, 014700, Bucharest, Romania} \email{[email protected] and [email protected]} \urladdr{http://sites.google.com/a/g.unibuc.ro/maria-joita/} \subjclass[2000]{ 46L05} \keywords{locally $C^{\ast }$-algebras, quantized domain, local completely positive maps, local completely contractive maps, local decomposable maps, local completely copositive maps } \thanks{This work was partially supported by a grant of the Ministry of Research, Innovation and Digitization, CNCS/CCCDI--UEFISCDI, project number PN-III-P4-ID-PCE-2020-0458, within PNCDI III} \begin{abstract} In this paper we present some factorization properties for unbounded local positive maps. We show that an unbounded local positive map $\phi $ on the minimal tensor product of the locally $C^{\ast }$-algebras $\mathcal{A}$ and $C^{\ast }(\mathcal{D}_{\mathcal{E}}),$ where $\mathcal{D}_{\mathcal{E}}$ is a Fr\'{e}chet quantized domain, that is dominated by $\varphi \otimes $id is of the form $\psi \otimes $id, where $\psi $ is an unbounded local positive map dominated by $\varphi $. As an application of this result, we show that given a local positive map $\varphi :$ $\mathcal{A}\rightarrow $ $\mathcal{B} ,$ the local positive map $\varphi \otimes $id$_{M_{n}\left( \mathbb{C} \right) }$ is local decomposable for some $n\geq 2$ if and only if $\varphi $ is a local\ $CP$-map.
Also, we show that an unbounded local $CCP$-map\textit{ \ }$\phi $ on the minimal tensor product of the unital locally $C^{\ast }$ -algebras $\mathcal{A}$ and $\mathcal{B},$ that is dominated by $\varphi \otimes \psi $ is of the form $\varphi \otimes \widetilde{\psi }$, where $ \widetilde{\psi }$ is an unbounded local $CCP$-map dominated by $\psi $, whenever $\varphi $ is pure. \end{abstract} \maketitle \section{Introduction} Locally \ $C^{\ast }$-algebras are generalizations of $C^{\ast }$-algebras, on which the topology instead of being given by a single $C^{\ast }$-norm is defined by an upward directed family of $C^{\ast }$-seminorms. The concrete models for locally $C^{\ast }$-algebras are $\ast $-algebras of unbounded linear operators on a Hilbert space. In the literature, the locally $C^{\ast }$-algebras are studied under different names like pro-$C^{\ast }$-algebras (D. Voiculescu, N.C. Philips), $LMC^{\ast }$-algebras (G. Lassner, K. Schm\"{u}dgen), $b^{\ast }$-algebras (C. Apostol) and multinormed $C^{\ast }$ -algebras (A. Dosiev). The term locally \ $C^{\ast }$-algebra is due to A. Inoue \cite{I}. A locally $C^{\ast }$-algebra is a complete Hausdorff complex topological $ \ast $-algebra $\mathcal{A}$ whose topology is determined by an upward filtered family $\{p_{\lambda }\}_{\lambda \in \Lambda }\ $of $C^{\ast }$ -seminorms defined on $\mathcal{A}$. An element $a\in \mathcal{A}$ is positive if there exists $b\in \mathcal{A}$ such that $a=b^{\ast }b$ and it is local positive if there exist $c,d\in \mathcal{A}$ and $\lambda \in \Lambda $ such that $a=c^{\ast }c+d$ with $p_{\lambda }\left( d\right) =0$. Thus, the notion of (local) completely positive maps appeared naturally while studying linear maps between locally $C^{\ast }$-algebras. The structure of strictly continuous completely positive maps between locally $ C^{\ast }$-algebras is described in \cite{J}.
The same is done for strongly bounded completely positive maps of order zero \cite{MJ2}. Dosiev \cite{D1} proved a Stinespring type theorem for unbounded local completely positive and local completely contractive maps on unital locally $C^{\ast }$ -algebras. A Radon-Nikodym type theorem for such maps was proved by Bhat, Ghatak and Kumar \cite{BGK}. In \cite{MJ1}, we obtained a structure theorem for unbounded local completely positive maps of local order zero. Bhat and Osaka \cite{BO} proved some factorization properties for bounded positive maps on $C^{\ast }$-algebras. In this paper, we extend the results of \cite{BO} to unbounded local positive maps on locally $C^{\ast }$ -algebras. The paper is organized as follows. In Section 2 we gather some basic facts on locally $C^{\ast }$-algebras, concrete models for locally $C^{\ast }$ -algebras and unbounded local completely positive and local completely contractive maps needed for understanding the main results of this paper. In Section 3, we show that a linear map between locally $C^{\ast }$-algebras is local $CP$ (completely positive) if and only if it is continuous and completely positive (Proposition \ref{5}). Therefore, the local $CP$-maps \textit{\ }on unital locally $C^{\ast }$-algebras are exactly strictly continuous completely positive maps on unital locally $C^{\ast }$-algebras \cite[Remark 4.4]{J} and the structure theorem \cite[Theorem 4.6]{J} is valid for local\textit{\ }$CP$-maps\textit{\ }on unital locally $C^{\ast }$ -algebras. As in the case of bounded completely positive maps on $C^{\ast }$ -algebras, we show that two unbounded local $CCP$ (local completely contractive and local completely positive)-maps on unital locally $C^{\ast }$ -algebras determine an unbounded local $CCP$-map on the minimal tensor product and a minimal Stinespring dilation for the tensor product map can be obtained in terms of the minimal Stinespring dilations for each map\ (Proposition \ref{3}). 
In Section 4, we show that if $\mathcal{D}_{\mathcal{E }}$ is a Fr\'{e}chet quantized domain, then an unbounded local positive map $ \phi $ on the minimal tensor product of the locally $C^{\ast }$-algebras $ \mathcal{A}$ and $C^{\ast }(\mathcal{D}_{\mathcal{E}}),$ that is dominated by $\varphi \otimes $id$_{C^{\ast }(\mathcal{D}_{\mathcal{E}})}$ factorizes as $\psi \otimes $id$_{C^{\ast }(\mathcal{D}_{\mathcal{E}})}$, where $\psi $ is an unbounded local positive map dominated by $\varphi \ $(Theorem \ref{6} ). As an application of this result, we show that given a local positive map $\varphi :$ $\mathcal{A}\rightarrow $ $\mathcal{B}$, the local positive map $ \varphi \otimes $id$_{M_{n}\left( \mathbb{C}\right) }$ is local decomposable for $n\geq 2$ if and only if $\varphi $ is a local\ $CP$-map\ (Theorem \ref {7}). Also, we show that given unbounded\textbf{\ }local\textbf{\ }$CCP$ -maps $\phi :\mathcal{A\otimes B}\rightarrow C^{\ast }(\mathcal{D}_{\mathcal{ E\otimes F}}),$ $\varphi :\mathcal{A}\rightarrow C^{\ast }(\mathcal{D}_{ \mathcal{E}})$ and $\psi :\mathcal{B}\rightarrow C^{\ast }(\mathcal{D}_{ \mathcal{F}})$, if $\phi $ is dominated by $\varphi \otimes \psi $ and $ \varphi $ is pure, then $\phi $ factorizes as $\varphi \otimes \widetilde{ \psi }$, where $\widetilde{\psi }$ is an unbounded \ local $CCP$-map dominated by $\psi $ (Theorem \ref{8}). \section{Preliminaries} Let $\mathcal{A}$ be a locally $C^{\ast }$-algebra with the topology defined by the family of $C^{\ast }$-seminorms $\left\{ p_{\lambda }\right\} _{\lambda \in \Lambda }.$ An element $a\in \mathcal{A}$ is \textit{bounded} if $\sup \{p_{\lambda }\left( a\right) ;\lambda \in \Lambda \}<\infty $.
The subset $b\left( \mathcal{A}\right) =\{a\in \mathcal{A};\left\Vert a\right\Vert _{\infty }:=\sup \{p_{\lambda }\left( a\right) ;\lambda \in \Lambda \}<\infty \}\ $is a $C^{\ast }$-algebra with respect to the $C^{\ast }$-norm $\left\Vert \cdot \right\Vert _{\infty }.$ Moreover, $b\left( \mathcal{A}\right) $ is dense in $\mathcal{A}.$ Let us observe that $\mathcal{A}$ can be realized as a projective limit of an inverse family of $C^{\ast }$-algebras as follows: For each $\lambda \in \Lambda $, let $\mathcal{I}_{\lambda }=\{a\in \mathcal{A};p_{\lambda }\left( a\right) =0\}$. Clearly, $\mathcal{I}_{\lambda }$ is a closed two sided $ \ast $-ideal in $\mathcal{A}$ and $\mathcal{A}_{\lambda }=\mathcal{A}/ \mathcal{I}_{\lambda }$ is a $C^{\ast }$-algebra with respect to the norm induced by $p_{\lambda }$. The canonical quotient $\ast $-morphism from $ \mathcal{A\ }$to $\mathcal{A}_{\lambda }$ is denoted by $\pi _{\lambda }^{ \mathcal{A}}$. For each $\lambda _{1},\lambda _{2}\in \Lambda $ with $ \lambda _{1}\leq \lambda _{2}$, there is a canonical surjective $\ast $ -morphism $\pi _{\lambda _{2}\lambda _{1}}^{\mathcal{A}}:$ $\mathcal{A} _{\lambda _{2}}\rightarrow \mathcal{A}_{\lambda _{1}}$ defined by $\pi _{\lambda _{2}\lambda _{1}}^{\mathcal{A}}\left( a+\mathcal{I}_{\lambda _{2}}\right) =a+\mathcal{I}_{\lambda _{1}}$ for $a\in \mathcal{A}$. Then, $\{ \mathcal{A}_{\lambda },\pi _{\lambda _{2}\lambda _{1}}^{\mathcal{A}}\}$\ forms an inverse system of $C^{\ast }$-algebras, since $\pi _{\lambda _{1}}^{ \mathcal{A}}=$ $\pi _{\lambda _{2}\lambda _{1}}^{\mathcal{A}}\circ \pi _{\lambda _{2}}^{\mathcal{A}}$ whenever $\lambda _{1}\leq \lambda _{2}$. 
The projective limit \begin{equation*} \lim\limits_{\underset{\lambda }{\leftarrow }}\mathcal{A}_{\lambda }=\{\left( a_{\lambda }\right) _{\lambda \in \Lambda }\in \tprod\limits_{\lambda \in \Lambda }\mathcal{A}_{\lambda };\pi _{\lambda _{2}\lambda _{1}}^{\mathcal{A}}\left( a_{\lambda _{2}}\right) =a_{\lambda _{1}}\text{ whenever }\lambda _{1}\leq \lambda _{2},\lambda _{1},\lambda _{2}\in \Lambda \} \end{equation*} of the inverse system of $C^{\ast }$-algebras $\{\mathcal{A}_{\lambda },\pi _{\lambda _{2}\lambda _{1}}^{\mathcal{A}}\}$ is a locally $C^{\ast }$ -algebra that can be identified with $\mathcal{A}$ via the map $a\mapsto \left( \pi _{\lambda }^{\mathcal{A}}\left( a\right) \right) _{\lambda \in \Lambda }.$ An element $a\in \mathcal{A}$ is \textit{self-adjoint} if $a^{\ast }=a$ and it is\textit{\ positive} if $a=b^{\ast }b$ for some $b\in \mathcal{A}$. An element $a\in \mathcal{A}$ is called \textit{local self-adjoint} if $ a=a^{\ast }+c$, where $c\in \mathcal{A}$ such that $p_{\lambda }\left( c\right) =0$ for some $\lambda \in \Lambda ,$ and \textit{local positive} if $a=b^{\ast }b+c$ where $b,c\in $ $\mathcal{A}$ such that $p_{\lambda }\left( c\right) =0\ $ for some $\lambda \in \Lambda $. In the first case, we call $ a $ as $\lambda $-self-adjoint, and in the second case, we call $a$ as $ \lambda $-positive and write $a\geq _{\lambda }0$. We write, $a=_{\lambda }0$ whenever $p_{\lambda }\left( a\right) =0$. 
Note that $a\in \mathcal{A}$ is local self-adjoint if and only if there is $\lambda \in \Lambda $ such that $ \pi _{\lambda }^{\mathcal{A}}\left( a\right) $ is self adjoint in $\mathcal{A }_{\lambda }$ and $a\in \mathcal{A}$ is local positive if and only if there is $\lambda \in \Lambda $ such that $\pi _{\lambda }^{\mathcal{A}}\left( a\right) $ is positive in $\mathcal{A}_{\lambda }.$ Let $\mathcal{A}$ and $\mathcal{B}$ be two locally $C^{\ast }$-algebras with the topology defined by the family of $C^{\ast }$-seminorms $\left\{ p_{\lambda }\right\} _{\lambda \in \Lambda }$ and $\left\{ q_{\delta }\right\} _{\delta \in \Delta }$, respectively. For each $n\in \mathbb{N},$ $ M_{n}(\mathcal{A})$ denotes the collection of all matrices of size $n$ with elements in $\mathcal{A}$. Note that $M_{n}(\mathcal{A})$ is a locally $ C^{\ast }$-algebra where the associated family of $C^{\ast }$-seminorms is denoted by $\{p_{\lambda }^{n}\}_{\lambda \in \Lambda }.$ For each $n\in \mathbb{N}$, the $n$-amplification of a linear map $\varphi : \mathcal{A}\rightarrow \mathcal{B}$ is the map $\varphi ^{\left( n\right) }:M_{n}(\mathcal{A})$ $\rightarrow $ $M_{n}(\mathcal{B})$ defined by \begin{equation*} \varphi ^{\left( n\right) }\left( \left[ a_{ij}\right] _{i,j=1}^{n}\right) = \left[ \varphi \left( a_{ij}\right) \right] _{i,j=1}^{n} \end{equation*} for $\left[ a_{ij}\right] _{i,j=1}^{n}\in M_{n}(\mathcal{A})$ . 
A linear map $\varphi :\mathcal{A}\rightarrow \mathcal{B}$ is called : \begin{enumerate} \item \textit{local contractive} if for each $\delta \in \Delta $, there exists $\lambda \in \Lambda $ such that \begin{equation*} q_{\delta }\left( \varphi \left( a\right) \right) \leq p_{\lambda }\left( a\right) \text{ for all }a\in \mathcal{A}; \end{equation*} \item \textit{local positive} if for each $\delta \in \Delta ,$there exists $ \lambda \in \Lambda $\ such that $\varphi \left( a\right) \geq _{\delta }0$ whenever $a\geq _{\lambda }0$ and $\varphi \left( a\right) =_{\delta }0$\ \ \ whenever $a=_{\lambda }0;$ \item \textit{local completely contractive }(\textit{local }$CC$\textit{-map} )\textit{\ }if for each $\delta \in \Delta $, there exists $\lambda \in \Lambda $ such that \begin{equation*} q_{\delta }^{n}\left( \varphi ^{\left( n\right) }\left( \left[ a_{ij}\right] _{i,j=1}^{n}\right) \right) \leq p_{\lambda }^{n}\left( \left[ a_{ij}\right] _{i,j=1}^{n}\right) \text{ } \end{equation*} for all $\left[ a_{ij}\right] _{i,j=1}^{n}\in M_{n}(\mathcal{A})\ $and for all $n\in \mathbb{N};$ \item \textit{local completely positive }(\textit{local }$CP$\textit{-map}) \textit{\ }if for each $\delta \in \Delta $, there exists $\lambda \in \Lambda $ such that $\varphi ^{\left( n\right) }\left( \left[ a_{ij}\right] _{i,j=1}^{n}\right) \geq _{\delta }0\ $whenever $\left[ a_{ij}\right] _{i,j=1}^{n}\geq _{\lambda }0$ and $\varphi ^{\left( n\right) }\left( \left[ a_{ij}\right] _{i,j=1}^{n}\right) =_{\delta }0\ \ $whenever $\left[ a_{ij} \right] _{i,j=1}^{n}=_{\lambda }0,\ $for all $n\in \mathbb{N}.$ \end{enumerate} Throughout the paper, $\mathcal{H}$ is a complex Hilbert space and $B( \mathcal{H})$ is the algebra of all bounded linear operators on $\mathcal{H}$ . Let $(\Lambda ,\leq )$ be a directed poset. 
A \textit{quantized domain }in a Hilbert space $\mathcal{H}$ is a triple $\{\mathcal{H};\mathcal{E};\mathcal{D }_{\mathcal{E}}\}$, where $\mathcal{E}=\{\mathcal{H}_{\lambda };\lambda \in \Lambda \}$ is an upward filtered family of closed subspaces with dense union $\mathcal{D}_{\mathcal{E}}=\tbigcup\limits_{\lambda \in \Lambda } \mathcal{H}_{\lambda }$ in $\mathcal{H\ }$\cite{D1}. A quantized family $\mathcal{E}=\{\mathcal{H}_{\lambda };\lambda \in \Lambda \}$ determines an upward filtered family $\{P_{\lambda };\lambda \in \Lambda \}$ of projections in $B(\mathcal{H})$, where $P_{\lambda }$ is a projection onto $\mathcal{H}_{\lambda }$. We say that a quantized domain $\mathcal{F}=\{\mathcal{K}_{\lambda };\lambda \in \Lambda \}$ \ of $\mathcal{H}$ with its union space $\mathcal{D}_{ \mathcal{F}}$ and $\mathcal{K}$ $=\overline{\tbigcup\limits_{\lambda \in \Lambda }\mathcal{K}_{\lambda }}$ is a \textit{quantized subdomain} of $ \mathcal{E}$, if $\mathcal{K}_{\lambda }\subseteq \mathcal{H}_{\lambda }$ for all $\lambda \in \Lambda $. In this case, we write $\mathcal{F}\subseteq $ $\mathcal{E}$. Let $\mathcal{E}^{i}=\{\mathcal{H}_{\lambda }^{i};\lambda \in \Lambda \}$ be a quantized domain in a Hilbert space $\mathcal{H}^{i}$ for $i=1,2$. Given a linear operator $V:\mathcal{D}_{\mathcal{E}^{1}}\rightarrow \mathcal{H}^{2}$ , we write $V(\mathcal{E}^{1})\subseteq \mathcal{E}^{2}$ if $V(\mathcal{H} _{\lambda }^{1})\subseteq \mathcal{H}_{\lambda }^{2}$\ for all $\lambda \in \Lambda $. Let $\mathcal{H}$ and $\mathcal{K}$ be Hilbert spaces. A linear operator $T$ $:$ dom$(T)$ $\subseteq $ $\mathcal{H}$ $\rightarrow $ $\mathcal{K}$ is said to be densely defined if dom$(T)$ is a dense subspace of $\mathcal{H}$. 
The adjoint of $T$ is a linear map $T^{\bigstar }:$ dom$(T^{\bigstar })$ $ \subseteq $ $\mathcal{K}\rightarrow \mathcal{H},$ where \begin{equation*} \text{dom}(T^{\bigstar })=\{\xi \in \mathcal{K};\eta \rightarrow \left\langle T\eta ,\xi \right\rangle _{\mathcal{K}}\ \text{is continuous for every }\eta \in \text{dom}(T)\} \end{equation*} satisfying $\left\langle T\eta ,\xi \right\rangle _{\mathcal{K} }=\left\langle \eta ,T^{\bigstar }\xi \right\rangle _{\mathcal{H}}$ for all $ \xi \in $dom$(T^{\bigstar })$ and $\eta \in $dom$(T).$ Let $\mathcal{E}=\{\mathcal{H}_{\lambda };\lambda \in \Lambda \}$ be a quantized domain in a Hilbert space $\mathcal{H}$ and \begin{equation*} C(\mathcal{D}_{\mathcal{E}})=\{T\in \mathcal{L}(\mathcal{D}_{\mathcal{E} });TP_{\lambda }=P_{\lambda }TP_{\lambda }\in B(\mathcal{H})\text{ for all } \lambda \in \Lambda \} \end{equation*} where $\mathcal{L}(\mathcal{D}_{\mathcal{E}})$ is the collection of all linear operators on $\mathcal{D}_{\mathcal{E}}$. If $T\in \mathcal{L}( \mathcal{D}_{\mathcal{E}})$, then $T\in C(\mathcal{D}_{\mathcal{E}})$ if and only if $T(\mathcal{H}_{\lambda })\subseteq \mathcal{H}_{\mathcal{\lambda }}$ and $\left. T\right\vert _{\mathcal{H}_{\lambda }}\in B(\mathcal{H}_{\lambda })$ for all $\lambda \in \Lambda $, and so $C(\mathcal{D}_{\mathcal{E}})$ is an algebra. Let \begin{equation*} C^{\ast }(\mathcal{D}_{\mathcal{E}})=\{T\in C(\mathcal{D}_{\mathcal{E} });P_{\lambda }T\subseteq TP_{\lambda }\text{ for all }\lambda \in \Lambda \}. \end{equation*} If $T\in C(\mathcal{D}_{\mathcal{E}})$, then $T\in C^{\ast }(\mathcal{D}_{ \mathcal{E}})$ if and only if $T(\mathcal{H}_{\lambda }^{\bot }\cap \mathcal{ D}_{\mathcal{E}})\subseteq \mathcal{H}_{\lambda }^{\bot }\cap \mathcal{D}_{ \mathcal{E}}$ for all $\lambda \in \Lambda .\ $ If $T\in C^{\ast }(\mathcal{D}_{\mathcal{E}})$, then $\mathcal{D}_{\mathcal{E }}$ $\subseteq $ dom$(T^{\bigstar })$. 
Moreover, $T^{\bigstar }(\mathcal{H} _{\lambda })\subseteq \mathcal{H}_{\lambda }$ for all $\lambda \in \Lambda $ . Now, let $T^{\ast }=\left. T^{\bigstar }\right\vert _{\mathcal{D}_{ \mathcal{E}}}$. It is easy to check that $T^{\ast }\in C^{\ast }(\mathcal{D} _{\mathcal{E}})$, and so $C^{\ast }(\mathcal{D}_{\mathcal{E}})$ is a unital $ \ast $-algebra. For each $\lambda \in \Lambda $, the map $\left\Vert \cdot \right\Vert _{\lambda }:C^{\ast }(\mathcal{D}_{\mathcal{E}})\rightarrow \lbrack 0,\infty )$, \begin{equation*} \left\Vert T\right\Vert _{\lambda }=\left\Vert \left. T\right\vert _{ \mathcal{H}_{\lambda }}\right\Vert =\sup \{\left\Vert T\left( \xi \right) \right\Vert ;\xi \in \mathcal{H}_{\lambda },\left\Vert \xi \right\Vert \leq 1\} \end{equation*} is a $C^{\ast }$-seminorm on $C^{\ast }(\mathcal{D}_{\mathcal{E}})$. Moreover, $C^{\ast }(\mathcal{D}_{\mathcal{E}})$ is a locally $C^{\ast }$ -algebra with respect to the topology determined by the family of $C^{\ast }$ -seminorms $\{\left\Vert \cdot \right\Vert _{\lambda }\}_{\lambda \in \Lambda }$ and $b(C^{\ast }(\mathcal{D}_{\mathcal{E}}))$ is identified with the $C^{\ast }$-algebra $\{T\in B\left( \mathcal{H}\right) ;P_{\lambda }T=TP_{\lambda }\ $for all $\lambda \in \Lambda \}$\ via\ the map $T\mapsto \widetilde{T}$, where $\widetilde{T}$ is the extension of $T$ to $\mathcal{H} $ (see the proof of \cite[Lemma 3.1]{D})\textbf{.} If $\mathcal{E}=\{\mathcal{H}_{\lambda };\lambda \in \Lambda \}$ is a quantized domain in a Hilbert space $\mathcal{H}$, then $\mathcal{D}_{ \mathcal{E}}$ can be regarded as a strict inductive limit of the direct family of Hilbert spaces $\mathcal{E}=\{\mathcal{H}_{\lambda };\lambda \in \Lambda \},$ $\mathcal{D}_{\mathcal{E}}=\lim\limits_{\rightarrow }\mathcal{H} _{\lambda }$, and it is called a locally Hilbert space (see \cite{I}). If $ T\in C^{\ast }(\mathcal{D}_{\mathcal{E}})$, then $T$ is continuous \cite[ Lemma 5.2 ]{I}. 
For every locally $C^{\ast }$-algebra $\mathcal{A}$ there is a quantized domain $\mathcal{E}$ in a Hilbert space $\mathcal{H}$ and a local isometric $ \ast $-homomorphism $\pi :\mathcal{A\rightarrow }C^{\ast }(\mathcal{D}_{ \mathcal{E}})$ \cite[Theorem 7.2]{D1}. This result can be regarded as an unbounded analog of the Gelfand-Naimark theorem. \section{Local completely positive maps} Let $\mathcal{A}$ and $\mathcal{B}$ be two locally $C^{\ast }$-algebras with the topology defined by the family of $C^{\ast }$-seminorms $\left\{ p_{\lambda }\right\} _{\lambda \in \Lambda }$ and $\left\{ q_{\delta }\right\} _{\delta \in \Delta }$, respectively. \begin{proposition} \label{4} Let $\varphi :\mathcal{A}\rightarrow $ $\mathcal{B}$ be a linear map. If $\varphi $ is local positive, then $\varphi $ is continuous. \end{proposition} \begin{proof} Since $\varphi $ is local positive, for each $\delta \in \Delta $, there exists $\lambda \in \Lambda $ such that $\varphi \left( a\right) \geq _{\delta }0\ $whenever $a\geq _{\lambda }0$ and $\varphi \left( a\right) =_{\delta }0\ \ \ $whenever $a=_{\lambda }0$. Define the map $\varphi _{\delta }^{+}:\left( \mathcal{A}_{\lambda }\right) _{+}\rightarrow \mathcal{ B}_{\delta }$ by $\varphi _{\delta }^{+}\left( \pi _{\lambda }^{\mathcal{A} }\left( a\right) \right) =\pi _{\delta }^{\mathcal{B}}\left( \varphi \left( a\right) \right) $, and extend it to a linear map $\varphi _{\delta }: \mathcal{A}_{\lambda }\rightarrow \mathcal{B}_{\delta }$. This map is positive, and so continuous. 
Therefore, there is $C_{\delta }>0\ $such that \begin{equation*} q_{\delta }\left( \varphi \left( a\right) \right) =\left\Vert \pi _{\delta }^{\mathcal{B}}\left( \varphi \left( a\right) \right) \right\Vert _{\mathcal{ B}_{\delta }}=\left\Vert \varphi _{\delta }\left( \pi _{\lambda }^{\mathcal{A }}\left( a\right) \right) \right\Vert \leq C_{\delta }\left\Vert \pi _{\lambda }^{\mathcal{A}}\left( a\right) \right\Vert _{\mathcal{A}_{\lambda }}=C_{\delta }p_{\lambda }\left( a\right) \end{equation*} for all $a\in \mathcal{A}$. \end{proof} Proposition \ref{4} is a particular case of \cite[Lemma 4.4]{D1}. \begin{remark} \label{1}Let $\varphi :\mathcal{A}\rightarrow $ $\mathcal{B}$ be a local positive linear map. Then, for each $\delta \in \Delta $, there exist $ \lambda \in \Lambda $ and a positive map $\varphi _{\delta }:\mathcal{A} _{\lambda }\rightarrow \mathcal{B}_{\delta }$ such that $\varphi _{\delta }\left( \pi _{\lambda }^{\mathcal{A}}\left( a\right) \right) =\pi _{\delta }^{\mathcal{B}}\left( \varphi \left( a\right) \right) $ for all $a\in \mathcal{A}$. \end{remark} \begin{proposition} \label{5}Let $\mathcal{A}$ and $\mathcal{B}$\ be two locally $C^{\ast }$ -algebras and $\varphi :\mathcal{A}\rightarrow $ $\mathcal{B}$ be a linear map. Then $\varphi $ is local completely positive if and only if $\varphi $ is continuous and completely positive. \end{proposition} \begin{proof} If $\varphi $ is local completely positive, then, by Proposition \ref{4}, for each $n\in \mathbb{N}$, the map $\varphi ^{\left( n\right) }$ is continuous and by \cite[Proposition 2.1]{MJ1}, it is positive. Therefore, $ \varphi $ is continuous and completely positive. Conversely, suppose that $\varphi $ is continuous and completely positive. 
Since $\varphi $ is continuous, for each $\delta \in \Delta $, there exist $ \lambda \in \Lambda $ and a positive map $\varphi _{\delta }:\mathcal{A} _{\lambda }\rightarrow \mathcal{B}_{\delta }$ such that $\varphi _{\delta }\left( \pi _{\lambda }^{\mathcal{A}}\left( a\right) \right) =\pi _{\delta }^{\mathcal{B}}\left( \varphi \left( a\right) \right) $ for all $a\in \mathcal{A}$. Let $\left[ a_{ij}\right] _{i,j=1}^{n}\in M_{n}\left( \mathcal{ A}\right) $ such that $\left[ a_{ij}\right] _{i,j=1}^{n}\geq _{\lambda }0$. This means that there exist $\left[ b_{ij}\right] _{i,j=1}^{n},$ $\left[ c_{ij}\right] _{i,j=1}^{n}\in M_{n}\left( \mathcal{A}\right) $ such that $ \left[ a_{ij}\right] _{i,j=1}^{n}=\left( \left[ b_{ij}\right] _{i,j=1}^{n}\right) ^{\ast }\left[ b_{ij}\right] _{i,j=1}^{n}+$ $\left[ c_{ij}\right] _{i,j=1}^{n}\ $and $p_{\lambda }^{n}\left( \left[ c_{ij}\right] _{i,j=1}^{n}\right) =0$. Then \begin{eqnarray*} \pi _{\delta }^{M_{n}(\mathcal{B)}}\left( \varphi ^{\left( n\right) }\left( \left[ c_{ij}\right] _{i,j=1}^{n}\right) \right) &=&\left[ \pi _{\delta }^{ \mathcal{B}}\left( \varphi \left( c_{ij}\right) \right) \right] _{i,j=1}^{n}= \left[ \varphi _{\delta }\left( \pi _{\lambda }^{\mathcal{A}}\left( c_{ij}\right) \right) \right] _{i,j=1}^{n} \\ &=&\varphi _{\delta }^{\left( n\right) }\left( \pi _{\lambda }^{M_{n}\left( \mathcal{A}\right) }\left( \left[ c_{ij}\right] _{i,j=1}^{n}\right) \right) = \left[ 0\right] _{i,j=1}^{n} \end{eqnarray*} and, since $\varphi \ $is completely positive, \begin{equation*} \pi _{\delta }^{M_{n}(\mathcal{B)}}\left( \varphi ^{\left( n\right) }\left( \left[ a_{ij}\right] _{i,j}^{n}\right) \right) =\pi _{\delta }^{M_{n}( \mathcal{B)}}\left( \varphi ^{\left( n\right) }\left( \left( \left[ b_{ij} \right] _{i,j=1}^{n}\right) ^{\ast }\left[ b_{ij}\right] _{i,j=1}^{n}\right) \right) \geq 0. 
\end{equation*} If $\left[ a_{ij}\right] _{i,j}^{n}=_{\lambda }\left[ 0\right] _{i,j=1}^{n}$, $\ $then $p_{\lambda }^{n}\left( \left[ a_{ij}\right] _{i,j}^{n}\right) =0\ $ and \begin{equation*} \pi _{\delta }^{M_{n}(\mathcal{B)}}\left( \varphi ^{\left( n\right) }\left( \left[ a_{ij}\right] _{i,j=1}^{n}\right) \right) =\varphi _{\delta }^{\left( n\right) }\left( \pi _{\lambda }^{M_{n}\left( \mathcal{A}\right) }\left( \left[ a_{ij}\right] _{i,j=1}^{n}\right) \right) =\left[ 0\right] _{i,j=1}^{n}. \end{equation*} Therefore, \begin{equation*} \varphi ^{\left( n\right) }\left( \left[ a_{ij}\right] _{i,j}^{n}\right) \geq _{\delta }0 \end{equation*} for all $\left[ a_{ij}\right] _{i,j=1}^{n}\in M_{n}\left( \mathcal{A}\right) $ such that $\left[ a_{ij}\right] _{i,j=1}^{n}\geq _{\lambda }0,\ $and $ \varphi ^{\left( n\right) }\left( \left[ a_{ij}\right] _{i,j}^{n}\right) =_{\delta }\left[ 0\right] _{i,j=1}^{n}$ for all $\left[ a_{ij}\right] _{i,j=1}^{n}\in M_{n}\left( \mathcal{A}\right) $ such that $\left[ a_{ij} \right] _{i,j=1}^{n}=_{\lambda }\left[ 0\right] _{i,j=1}^{n}$ and for all $ n, $ and so $\varphi $ is local completely positive. \end{proof} \begin{corollary} \label{localpositive}Let $\varphi :\mathcal{A}\rightarrow $ $\mathcal{B}$ be a linear map. Then $\varphi $ is local positive if and only if $\varphi $ is continuous and positive. \end{corollary} \begin{remark} \label{London} Let $\varphi :\mathcal{A}\rightarrow $ $\mathcal{B}$ be a local completely positive map. If the locally $C^{\ast }$-algebra $\mathcal{A }$ is unital, then $\varphi $ is a strictly continuous completely positive\ map \cite[Remark 4.4]{J}. In particular, the structure theorem \cite[Theorem 4.6]{J} is valid for local completely positive maps on unital locally $ C^{\ast }$-algebras. 
\end{remark} Let $\mathcal{CPCC}_{\text{loc}}(\mathcal{A},C^{\ast }(\mathcal{D}_{\mathcal{ E}}))$ denote the set of all maps $\varphi :\mathcal{A}\rightarrow C^{\ast }( \mathcal{D}_{\mathcal{E}})$ which are local completely positive and local completely contractive. If $\varphi \in \mathcal{CPCC}_{\text{loc}}(\mathcal{ A},C^{\ast }(\mathcal{D}_{\mathcal{E}})),$ then $\varphi \left( b(\mathcal{A} )\right) \subseteq b(C^{\ast }(\mathcal{D}_{\mathcal{E}}))$. Moreover, there is a completely contractive and completely positive map $\left. \varphi \right\vert _{b(\mathcal{A})}:b(\mathcal{A})\rightarrow B(\mathcal{H})$ such that $\left. \left. \varphi \right\vert _{b(\mathcal{A})}\left( a\right) \right\vert _{\mathcal{D}_{\mathcal{E}}}=\varphi \left( a\right) \ $for all $ a\in b(\mathcal{A}).$ The following result is a version of the Stinespring theorem for unbounded local completely positive and local completely contractive maps. \begin{theorem} \label{s} \cite[Theorem 5.1]{D1} Let $\mathcal{A}$ be a unital locally $ C^{\ast }$-algebra and $\varphi \in \mathcal{CPCC}_{\text{loc}}(\mathcal{A} ,C^{\ast }(\mathcal{D}_{\mathcal{E}}))$. 
Then there exist a quantized domain $\{\mathcal{H}^{\varphi },\mathcal{E}^{\varphi },\mathcal{D}_{\mathcal{E} ^{\varphi }}\}$, where $\mathcal{E}^{\varphi }=\{\mathcal{H}_{\lambda }^{\varphi };\lambda \in \Lambda \}$ is an upward filtered family of closed subspaces of $\mathcal{H}^{\varphi }$, a contraction $V_{\varphi }:\mathcal{H }\rightarrow \mathcal{H}^{\varphi }$ and a unital local contractive $\ast $ -homomorphism $\pi _{\varphi }:\mathcal{A\rightarrow }C^{\ast }(\mathcal{D}_{ \mathcal{E}^{\varphi }})$ such that \begin{enumerate} \item $V_{\varphi }\left( \mathcal{E}\right) \subseteq \mathcal{E}^{\varphi };$ \item $\varphi \left( a\right) \subseteq V_{\varphi }^{\ast }\pi _{\varphi }\left( a\right) V_{\varphi };$ \item $\mathcal{H}_{\lambda }^{\varphi }=\left[ \pi _{\varphi }\left( \mathcal{A}\right) V_{\varphi }\mathcal{H}_{\lambda }\right] $ for all $ \lambda \in \Lambda .$ Moreover, if $\varphi \left( 1_{\mathcal{A}}\right) =$id$_{\mathcal{D}_{ \mathcal{E}}}$, then $V_{\varphi }$ is an isometry. \end{enumerate} \end{theorem} \ \ \ \ \ \ The triple $\left( \pi _{\varphi },V_{\varphi },\{\mathcal{H} ^{\varphi },\mathcal{E}^{\varphi },\mathcal{D}_{\mathcal{E}^{\varphi }}\}\right) $ constructed in Theorem \ref{s} is called a minimal Stinespring dilation associated to $\varphi $. 
Moreover, the minimal Stinespring dilation associated to $\varphi $ is unique up to unitary equivalence in the following sense, if $(\ \pi _{\varphi },V_{\varphi },$\ $\{\mathcal{H} ^{\varphi },\mathcal{E}^{\varphi },\mathcal{D}_{\mathcal{E}^{\varphi }}\})$ and$\left( \widetilde{\pi }_{\varphi },\widetilde{V}_{\varphi },\{\widetilde{ \mathcal{H}}^{\varphi },\widetilde{\mathcal{E}}^{\varphi },\widetilde{ \mathcal{D}}_{\widetilde{\mathcal{E}}^{\varphi }}\}\right) \ $are two minimal Stinespring dilations associated to $\varphi $, then there is a unitary operator $U_{\varphi }:\mathcal{H}^{\varphi }\rightarrow \widetilde{ \mathcal{H}}^{\varphi }$ such that $U_{\varphi }V_{\varphi }=\widetilde{V} _{\varphi }$ and $U_{\varphi }\pi _{\varphi }\left( a\right) \subseteq \widetilde{\pi }_{\varphi }\left( a\right) U_{\varphi }$ for all $a\in \mathcal{A\ }$\cite[Theorem 3.4]{BGK}. If $\left( \pi _{\varphi },V_{\varphi },\{\mathcal{H}^{\varphi },\mathcal{E} ^{\varphi },\mathcal{D}_{\mathcal{E}^{\varphi }}\}\right) $ is a minimal Stinespring dilation associated to $\varphi \in \mathcal{CPCC}_{\text{loc}}( \mathcal{A},C^{\ast }(\mathcal{D}_{\mathcal{E}}))$, then $\left( \left. \pi _{\varphi }\right\vert _{b(\mathcal{A})},V_{\varphi },\mathcal{H}^{\varphi }\right) $, where $\left. \left. \pi _{\varphi }\right\vert _{b(\mathcal{A} )}\left( a\right) \right\vert _{\mathcal{D}_{\widetilde{\mathcal{E}} ^{\varphi }}}=\pi _{\varphi }\left( a\right) \ $for all $a\in b(\mathcal{A} ), $ is a minimal Stinespring dilation associated with $\left. \varphi \right\vert _{b(\mathcal{A})}.$ Let $\mathcal{E}=\{\mathcal{H}_{\iota };\iota \in \Upsilon \}$ and $\mathcal{ F}=\{\mathcal{K}_{\gamma };\gamma \in \Gamma \}\ $be quantized domains in Hilbert spaces $\mathcal{H}$ and $\mathcal{K}$, respectively. 
Then \begin{equation*} \mathcal{E}\otimes \mathcal{F}=\left\{ \mathcal{H}_{\iota }\otimes \mathcal{K }_{\gamma };\left( \iota ,\gamma \right) \in \Upsilon \times \Gamma \right\} \end{equation*} is a quantized domain in the Hilbert space $\mathcal{H}\otimes \mathcal{K}$, with the union space $\mathcal{D}_{\mathcal{E}\otimes \mathcal{F} }=\tbigcup\limits_{\left( \iota ,\gamma \right) \in \Upsilon \times \Gamma } \mathcal{H}_{\iota }\otimes \mathcal{K}_{\gamma }.$\ If $\mathcal{E}=\{ \mathcal{H}\}$, then $\mathcal{H}\otimes \mathcal{F}=\left\{ \mathcal{H} \otimes \mathcal{K}_{\gamma };\gamma \in \Gamma \right\} $ is a quantized domain in the Hilbert space $\mathcal{H}\otimes \mathcal{K}$ with the union space $\mathcal{D}_{\mathcal{H}\otimes \mathcal{F}}=\tbigcup\limits_{\gamma \in \Gamma }\mathcal{H}\otimes \mathcal{K}_{\gamma }$. The map $\Phi :C^{\ast }(\mathcal{D}_{\mathcal{E}})\otimes _{\text{alg} }C^{\ast }(\mathcal{D}_{\mathcal{F}})\rightarrow C^{\ast }(\mathcal{D}_{ \mathcal{E}\otimes \mathcal{F}})$ given by \begin{equation*} \Phi \left( T\otimes S\right) \left( \xi \otimes \eta \right) =T\xi \otimes S\eta ,T\in C^{\ast }(\mathcal{D}_{\mathcal{E}}),S\in C^{\ast }(\mathcal{D}_{ \mathcal{F}}),\xi \in \mathcal{D}_{\mathcal{E}},\eta \in \mathcal{D}_{ \mathcal{F}} \end{equation*} identifies $C^{\ast }(\mathcal{D}_{\mathcal{E}})\otimes _{\text{alg}}C^{\ast }(\mathcal{D}_{\mathcal{F}})\ $with a $\ast $-subalgebra of $C^{\ast }( \mathcal{D}_{\mathcal{E}\otimes \mathcal{F}})$. 
The minimal tensor product of the locally $C^{\ast }$-algebras $C^{\ast }(\mathcal{D}_{\mathcal{E}})$ and $C^{\ast }(\mathcal{D}_{\mathcal{F}})$ is the locally $C^{\ast }$ -algebra $C^{\ast }(\mathcal{D}_{\mathcal{E}})\otimes C^{\ast }(\mathcal{D}_{ \mathcal{F}})$ obtained by the completion of the $\ast $-subalgebra $C^{\ast }(\mathcal{D}_{\mathcal{E}})\otimes _{\text{alg}}C^{\ast }(\mathcal{D}_{ \mathcal{F}})$ in $C^{\ast }(\mathcal{D}_{\mathcal{E}\otimes \mathcal{F}})$ (see for example \cite{G}). Let $\mathcal{A}$ and $\mathcal{B}$ be two locally $C^{\ast }$-algebras. Recall that $\mathcal{A}$ and $\mathcal{B}$ can be identified with a locally $C^{\ast }$-subalgebra in $C^{\ast }(\mathcal{D}_{\mathcal{E}})$ and $ C^{\ast }(\mathcal{D}_{\mathcal{F}})$, respectively for some quantized domains $\mathcal{D}_{\mathcal{E}}$ and $\mathcal{D}_{\mathcal{F}}$. The minimal or spatial tensor product of the locally $C^{\ast }$-algebras $ \mathcal{A}$ and $\mathcal{B}$ is the locally $C^{\ast }$-algebra $\mathcal{A }\otimes \mathcal{B}$ obtained by the completion of the $\ast $-subalgebra $ \mathcal{A}\otimes _{\text{alg}}\mathcal{B}$\ in $C^{\ast }(\mathcal{D}_{ \mathcal{E}\otimes \mathcal{F}})$. In fact, $\mathcal{A}\otimes \mathcal{B}$ can be identified with the projective limit of the projective system of $ C^{\ast }$-algebras $\{\mathcal{A}_{\lambda }\otimes \mathcal{B}_{\delta };\pi _{\lambda _{1}\lambda _{2}}^{\mathcal{A}}\otimes \pi _{\delta _{1}\delta _{2}}^{\mathcal{B}},\lambda _{1}\geq \lambda _{2},\delta _{1}\geq \delta _{2}\}_{\left( \lambda ,\delta \right) \in \Lambda \times \Delta }$ (see \cite{Ph,F}). 
If $\mathcal{A}$ is a $C^{\ast }$-algebra acting nondegenerately on a Hilbert space $\mathcal{H}$ and $\mathcal{B}$ is a locally $C^{\ast }$ -algebra that is identified with a locally $C^{\ast }$-subalgebra in $ C^{\ast }(\mathcal{D}_{\mathcal{F}})$, then the minimal tensor product $ \mathcal{A}\otimes \mathcal{B}$ of $\mathcal{A}$ and $\mathcal{B}$ is the completion of the $\ast $-subalgebra $\mathcal{A}\otimes _{\text{alg}} \mathcal{B}$\ in $C^{\ast }(\mathcal{D}_{\mathcal{H}\otimes \mathcal{F}})$. Let $\mathcal{A}$ and $\mathcal{B}$ be two locally $C^{\ast }$-algebras, $ \pi _{1}:$ $\mathcal{A\rightarrow }C^{\ast }(\mathcal{D}_{\mathcal{E}})$ and $\pi _{2}:$ $\mathcal{B\rightarrow }C^{\ast }(\mathcal{D}_{\mathcal{F}})$ be two local contractive $\ast $-morphisms. By the functorial property of the minimal tensor product of $C^{\ast }$-algebras and taking into account the above discussion we conclude that there is a unique local $\ast $-morphism $ \pi _{1}\otimes \pi _{2}:\mathcal{A\otimes B\rightarrow }C^{\ast }(\mathcal{D }_{\mathcal{E}\otimes \mathcal{F}})$ such that \begin{equation*} \left( \pi _{1}\otimes \pi _{2}\right) \left( a\otimes b\right) =\pi _{1}\left( a\right) \otimes \pi _{2}\left( b\right) \end{equation*} where $\left( \pi _{1}\left( a\right) \otimes \pi _{2}\left( b\right) \right) \left( \xi \otimes \eta \right) =\pi _{1}\left( a\right) \xi \otimes \pi _{2}\left( b\right) \eta $ for all $\xi \in \mathcal{D}_{\mathcal{E} },\eta \in \mathcal{D}_{\mathcal{F}}$. \begin{proposition} \label{3} Let $\mathcal{A}$ and $\mathcal{B}$ be two unital locally $C^{\ast }$-algebras and let $\varphi \in \mathcal{CPCC}_{\text{loc}}(\mathcal{A} ,C^{\ast }(\mathcal{D}_{\mathcal{E}}))$ and $\psi \in \mathcal{CPCC}_{\text{ loc}}(\mathcal{B},C^{\ast }(\mathcal{D}_{\mathcal{F}}))$. 
Then there is a unique map $\varphi \otimes \psi \in $ $\mathcal{CPCC}_{\text{loc}}(\mathcal{ A\otimes B},C^{\ast }(\mathcal{D}_{\mathcal{E}\otimes \mathcal{F}}))$ such that \begin{equation*} \left( \varphi \otimes \psi \right) \left( a\otimes b\right) =\varphi \left( a\right) \otimes \psi \left( b\right) \ \text{for all }a\in \mathcal{A\ } \text{and }b\in \mathcal{B}. \end{equation*} Moreover, if $\left( \pi _{\varphi },V_{\varphi },\{\mathcal{H}^{\varphi }, \mathcal{E}^{\varphi },\mathcal{D}_{\mathcal{E}^{\varphi }}\}\right) $ is a minimal Stinespring dilation associated to $\varphi $ and $\left( \pi _{\psi },V_{\psi },\{\mathcal{H}^{\psi },\mathcal{E}^{\psi },\mathcal{D}_{\mathcal{E }^{\psi }}\}\right) $ is a minimal Stinespring dilation associated to $\psi $ , then $\left( \pi _{\varphi }\otimes \pi _{\psi },V_{\varphi }\otimes V_{\psi },\{\mathcal{H}^{\varphi }\otimes \mathcal{H}^{\psi },\mathcal{E} ^{\varphi }\otimes \mathcal{E}^{\psi },\mathcal{D}_{\mathcal{E}^{\varphi }\otimes \mathcal{E}^{\psi }}\}\right) \ $is a minimal Stinespring dilation associated to $\varphi \otimes \psi .$ \end{proposition} \begin{proof} Since $\pi _{\varphi }:\mathcal{A\rightarrow }C^{\ast }(\mathcal{D}_{ \mathcal{E}^{\varphi }})$ and $\pi _{\psi }:\mathcal{B\rightarrow }C^{\ast }( \mathcal{D}_{\mathcal{E}^{\psi }})$ are local contractive $\ast $-morphisms, there is a local contractive $\ast $-morphism $\pi _{\varphi }\otimes \pi _{\psi }:\mathcal{A\otimes B\rightarrow }$ $C^{\ast }(\mathcal{D}_{\mathcal{E }^{\varphi }\otimes \mathcal{E}^{\psi }})$ such that \begin{equation*} \left( \pi _{\varphi }\otimes \pi _{\psi }\right) \left( a\otimes b\right) =\pi _{\varphi }\left( a\right) \otimes \pi _{\psi }\left( b\right) , \end{equation*} where $\left( \pi _{\varphi }\left( a\right) \otimes \pi _{\psi }\left( b\right) \right) \left( \xi \otimes \eta \right) =\pi _{\varphi }\left( a\right) \xi \otimes \pi _{\psi }\left( b\right) \eta $ for all $\xi \in \mathcal{D}_{\mathcal{E}^{\varphi 
}},\eta \in \mathcal{D}_{\mathcal{E}^{\psi }}$. Since $V_{\varphi }:\mathcal{H}\rightarrow \mathcal{H}^{\varphi }$ and $ V_{\psi }:\mathcal{K}\rightarrow \mathcal{H}^{\psi }$ are contractions, we get a contraction $V_{\varphi }\otimes V_{\psi }:\mathcal{H}\otimes \mathcal{ K}\rightarrow \mathcal{H}^{\varphi }\otimes \mathcal{H}^{\psi }$.$\ $Since $ V_{\varphi }\left( \mathcal{E}\right) \subseteq \mathcal{E}^{\varphi }$ and $ V_{\psi }\left( \mathcal{F}\right) \subseteq \mathcal{E}^{\psi },$ $\left( V_{\varphi }\otimes V_{\psi }\right) \left( \mathcal{H}_{\iota }\otimes \mathcal{K}_{\mathcal{\gamma }}\right) \subseteq \mathcal{H}_{\iota }^{\varphi }\otimes \mathcal{H}_{\mathcal{\gamma }}^{\psi }\ $for all $ \left( \iota ,\mathcal{\gamma }\right) \in \Upsilon \times \Gamma $, thus \begin{equation*} \left( V_{\varphi }\otimes V_{\psi }\right) \left( \mathcal{E}\otimes \mathcal{F}\right) \subseteq \mathcal{E}^{\varphi }\otimes \mathcal{E}^{\psi }. \end{equation*} Therefore, we may consider the map $\varphi \otimes \psi :$ $\mathcal{ A\otimes B\rightarrow }C^{\ast }(\mathcal{D}_{\mathcal{E}\otimes \mathcal{F} })$ given by \begin{equation*} \left( \varphi \otimes \psi \right) \left( c\right) =\left. \left( V_{\varphi }\otimes V_{\psi }\right) ^{\ast }\left( \pi _{\varphi }\otimes \pi _{\psi }\right) \left( c\right) \left( V_{\varphi }\otimes V_{\psi }\right) \right\vert _{\mathcal{D}_{\mathcal{E}\otimes \mathcal{F}}}\text{.} \end{equation*} Since $\pi _{\varphi }\otimes \pi _{\psi }$ is a local contractive $\ast $ -morphism and $V_{\varphi }\otimes V_{\psi }$ is a contraction, $\varphi \otimes \psi $ is a local completely positive and local completely contractive map from $\mathcal{A\otimes B}$ to $C^{\ast }(\mathcal{D}_{ \mathcal{E}\otimes \mathcal{F}})$. Clearly, $\left( \varphi \otimes \psi \right) \left( a\otimes b\right) =\varphi \left( a\right) \otimes \psi \left( b\right) \ $for all $a\in \mathcal{A\ }$and $b\in \mathcal{B}$. 
To show that $\left( \pi _{\varphi }\otimes \pi _{\psi },V_{\varphi }\otimes V_{\psi },\{\mathcal{H}^{\varphi }\otimes \mathcal{H}^{\psi },\mathcal{E} ^{\varphi }\otimes \mathcal{E}^{\psi },\mathcal{D}_{\mathcal{E}^{\varphi }\otimes \mathcal{E}^{\psi }}\}\right) \ $is a minimal Stinespring dilation associated with $\varphi \otimes \psi $ it remains to show that $\mathcal{H} _{\iota }^{\varphi }\otimes \mathcal{H}_{\gamma }^{\psi }=\left[ \left( \pi _{\varphi }\otimes \pi _{\psi }\right) \left( \mathcal{A\otimes B}\right) \left( V_{\varphi }\otimes V_{\psi }\right) \left( \mathcal{H}_{\iota }\otimes \mathcal{K}_{\gamma }\right) \right] $ for all $\left( \iota , \mathcal{\gamma }\right) \in \Upsilon \times \Gamma $. Let $\left( \iota ,\mathcal{\gamma }\right) \in \Upsilon \times \Gamma $. Then \begin{eqnarray*} \left[ \left( \pi _{\varphi }\otimes \pi _{\psi }\right) \left( \mathcal{ A\otimes B}\right) \left( V_{\varphi }\otimes V_{\psi }\right) \left( \mathcal{H}_{\iota }\otimes \mathcal{K}_{\gamma }\right) \right] &=&\left[ \pi _{\varphi }\left( \mathcal{A}\right) V_{\varphi }\left( \mathcal{H} _{\iota }\right) \otimes \pi _{\psi }\left( \mathcal{B}\right) V_{\psi }\left( \mathcal{K}_{\gamma }\right) \right] \\ &=&\mathcal{H}_{\iota }^{\varphi }\otimes \mathcal{H}_{\gamma }^{\psi }, \end{eqnarray*} as required. To show the uniqueness of the map $\varphi \otimes \psi $, let $\phi \in \mathcal{CPCC}_{\text{loc}}(\mathcal{A\otimes B},C^{\ast }(\mathcal{D}_{ \mathcal{E}\otimes \mathcal{F}}))$ such that $\phi \left( a\otimes b\right) =\varphi \left( a\right) \otimes \psi \left( b\right) \ $for all $a\in \mathcal{A\ }$and $b\in \mathcal{B}$, $\phi \neq \varphi \otimes \psi $. 
Since $\varphi \otimes \psi $ and $\phi $ are continuous and $\mathcal{ A\otimes }_{\text{alg}}\mathcal{B}$ is dense, from $\phi \left( a\otimes b\right) =\varphi \left( a\right) \otimes \psi \left( b\right) =\left( \varphi \otimes \psi \right) \left( a\otimes b\right) $ for all $a\in \mathcal{A\ }$and $b\in \mathcal{B}$ it follows that $\phi =\varphi \otimes \psi $, a contradiction, and the uniqueness of the map $\varphi \otimes \psi $ is proved. \end{proof} \section{Main results} \ Suppose that $\{\mathcal{H};\mathcal{E};\mathcal{D}_{\mathcal{E}}\}$ is a Fr\'{e}chet quantized domain in a Hilbert space $\mathcal{H}\ $(that is, $ \mathcal{E=\{H}_{n}\mathcal{\}}_{n\in \mathbb{N}}$). For each $n\in \mathbb{N }^{\ast },$ let $\mathcal{H}_{n}^{c}$ be the orthogonal complement of the closed subspace\ $\mathcal{H}_{n-1}$ in $\mathcal{H}_{n}$, and put $\mathcal{ H}_{0}^{c}=\mathcal{H}_{0}$. Then $\mathcal{H}_{n}=\tbigoplus\limits_{k\leq n}\mathcal{H}_{k}^{c}$. For each $n\in \mathbb{N}$ and for each $\xi \in \mathcal{H}_{k}^{c},k\leq n$, the rank one operator $\theta _{\xi ,\xi }: \mathcal{D}_{\mathcal{E}}\rightarrow \mathcal{D}_{\mathcal{E}},$ $\theta _{\xi ,\xi }(\eta )=\xi \left\langle \xi ,\eta \right\rangle \ $is an element in $C^{\ast }(\mathcal{D}_{\mathcal{E}})$. The closed two sided $ \ast $-ideal of $C^{\ast }(\mathcal{D}_{\mathcal{E}})$ generated by these rank one operators is denoted by $K(\mathcal{D}_{\mathcal{E}})$ and it is called the locally $C^{\ast }$-algebra of all compact operators on $\mathcal{ D}_{\mathcal{E}}$. 
Clearly, for each $n\in \mathbb{N},K(\mathcal{D}_{ \mathcal{E}})_{n}$ $\subseteq K\left( \mathcal{H}_{n}\right) $, which is a closed two-sided $\ast $-ideal of $C^{\ast }(\mathcal{D}_{\mathcal{E} })_{n}.$ Let $\mathcal{A}$ be a unital locally $C^{\ast }$-algebra, $\{\mathcal{H}; \mathcal{E};\mathcal{D}_{\mathcal{E}}\}$ be a Fr\'{e}chet quantized domain in a Hilbert space $\mathcal{H}$ with $\mathcal{E=\{H}_{n}\mathcal{\}}_{n\in \mathbb{N}}$, $a\in \mathcal{A}$ and $c\in \mathcal{A\otimes }C^{\ast }( \mathcal{D}_{\mathcal{E}})$. If $\xi \in \mathcal{H}_{n}^{c}$ and $a\otimes \theta _{\xi ,\xi }\geq _{\left( \lambda ,n\right) }c$ $\geq _{\left( \lambda ,n\right) }0$, then there is $b\in \mathcal{A}$ such that $ c=_{\left( \lambda ,n\right) }b\otimes \theta _{\xi ,\xi }\ $. Indeed, from $ a\otimes \theta _{\xi ,\xi }\geq _{\left( \lambda ,n\right) }c$ $\geq _{\left( \lambda ,n\right) }0$ we deduce that \begin{equation*} \pi _{\lambda }^{\mathcal{A}}\left( a\right) \otimes \left. \theta _{\xi ,\xi }\right\vert _{\mathcal{H}_{n}}=\pi _{\left( \lambda ,n\right) }^{ \mathcal{A\otimes }C^{\ast }(\mathcal{D}_{\mathcal{E}})}\left( a\otimes \theta _{\xi ,\xi }\right) \geq \pi _{\left( \lambda ,n\right) }^{\mathcal{ A\otimes }C^{\ast }(\mathcal{D}_{\mathcal{E}})}\left( c\right) \geq 0 \end{equation*} and by \cite[Lemma 2.1]{BO}, there is $b_{\lambda }\in \mathcal{A}_{\lambda } $ such that $\pi _{\left( \lambda ,n\right) }^{\mathcal{A\otimes }C^{\ast }(\mathcal{D}_{\mathcal{E}})}\left( c\right) =b_{\lambda }\otimes \left. \theta _{\xi ,\xi }\right\vert _{\mathcal{H}_{n}}.$ Therefore, there is $ b\in \mathcal{A}$ such that \begin{equation*} \pi _{\left( \lambda ,n\right) }^{\mathcal{A\otimes }C^{\ast }(\mathcal{D}_{ \mathcal{E}})}\left( c\right) =b_{\lambda }\otimes \left. \theta _{\xi ,\xi }\right\vert _{\mathcal{H}_{n}}=\pi _{\lambda }^{\mathcal{A}}\left( b\right) \otimes \left. 
\theta _{\xi ,\xi }\right\vert _{\mathcal{H}_{n}}=\pi _{\left( \lambda ,n\right) }^{\mathcal{A\otimes }C^{\ast }(\mathcal{D}_{ \mathcal{E}})}\left( b\otimes \theta _{\xi ,\xi }\right) . \end{equation*} \begin{theorem} \label{6} Let $\mathcal{A}$ and $\mathcal{B}$\ be two unital locally $ C^{\ast }$-algebras, $\{\mathcal{H};\mathcal{E};\mathcal{D}_{\mathcal{E}}\}$ be a Fr\'{e}chet quantized domain in a Hilbert space $\mathcal{H}$ with $ \mathcal{E=\{H}_{n}\mathcal{\}}_{n\in \mathbb{N}},$ $\varphi :$ $\mathcal{A}$ $\rightarrow $ $\mathcal{B\ }$and $\phi :\mathcal{A\otimes }C^{\ast }( \mathcal{D}_{\mathcal{E}})\rightarrow \mathcal{B\otimes }C^{\ast }(\mathcal{D }_{\mathcal{E}})$ be two linear maps. Then the following conditions are equivalent: \begin{enumerate} \item $\phi $ and $\varphi \otimes $id$_{C^{\ast }(\mathcal{D}_{\mathcal{E} })}-\phi $ are local positive. \item There exists a local positive map $\psi :\mathcal{A\rightarrow B\ }$ such that $\varphi -\psi $ is local positive and $\phi =\psi \otimes $ id$ _{C^{\ast }(\mathcal{D}_{\mathcal{E}})}$. \end{enumerate} \end{theorem} \begin{proof} $(1)\Rightarrow (2)$ Since the maps $\phi $ and $\varphi \otimes $id$ _{C^{\ast }(\mathcal{D}_{\mathcal{E}})}-\phi $ are local positive, the map $ \varphi \otimes $id$_{C^{\ast }(\mathcal{D}_{\mathcal{E}})}$ is local positive. 
Then, $\varphi $ is local positive, and by Corollary \ref {localpositive} \begin{equation*} 0\leq \phi \left( a\otimes T\right) \leq \varphi \left( a\right) \otimes T \end{equation*} for all $a\in \mathcal{A}\ $with $a\geq 0$ and for all $T\in C^{\ast }( \mathcal{D}_{\mathcal{E}})$ with $T\geq 0.$ Therefore, for each $\left( \delta ,n\right) \in \Delta \times \mathbb{N},$ there are $\lambda _{0}\in \Lambda $ and $C_{0}>0$ such that \begin{equation*} \left\Vert \phi \left( a\otimes T\right) \right\Vert _{\left( \delta ,n\right) }\leq \left\Vert \varphi \left( a\right) \otimes T\right\Vert _{\left( \delta ,n\right) }=\left\Vert \varphi \left( a\right) \right\Vert _{\delta }\left\Vert T\right\Vert _{n}\leq C_{0}p_{\lambda _{0}}\left( a\right) \left\Vert T\right\Vert _{n} \end{equation*} for all $a\in \mathcal{A}\ $with $a\geq 0$ and for all $T\in C^{\ast }( \mathcal{D}_{\mathcal{E}})$ with $T\geq 0$. Therefore, there exist maps $ \phi _{\left( \delta ,n\right) }:$ $\mathcal{A}_{\lambda _{0}}\mathcal{ \otimes }C^{\ast }(\mathcal{D}_{\mathcal{E}})_{n}\rightarrow \mathcal{B} _{\delta }\mathcal{\otimes }C^{\ast }(\mathcal{D}_{\mathcal{E}})_{n}$ and $ \varphi _{\delta }:\mathcal{A}_{\lambda _{0}}\rightarrow \mathcal{B}_{\delta }$ such that \begin{equation*} \pi _{\left( \delta ,n\right) }^{\mathcal{B\otimes }C^{\ast }(\mathcal{D}_{ \mathcal{E}})}\circ \phi =\phi _{\left( \delta ,n\right) }\circ \pi _{\left( \lambda _{0},n\right) }^{\mathcal{A\otimes }C^{\ast }(\mathcal{D}_{\mathcal{E }})}\text{ and }\pi _{\delta }^{\mathcal{B}}\circ \varphi =\varphi _{\delta }\circ \pi _{\lambda _{0}}^{\mathcal{A}}. \end{equation*} Moreover, $\phi _{\left( \delta ,n\right) }$ and $\varphi _{\delta }\otimes $ id$_{C^{\ast }(\mathcal{D}_{\mathcal{E}})_{n}}-\phi _{\left( \delta ,n\right) }$ are positive. 
Thus, by the proof of \cite[Theorem 2]{BO}, there is a positive map $\widetilde{\psi }_{\delta }:\mathcal{A}_{\lambda _{0}}\rightarrow \mathcal{B}_{\delta }$ such that \begin{equation*} \phi _{\left( \delta ,n\right) }\left( \pi _{\lambda _{0}}^{\mathcal{A} }\left( a\right) \otimes \left. \theta _{\xi ,\xi }\right\vert _{\mathcal{H} _{n}}\right) =\widetilde{\psi }_{\delta }\left( \pi _{\lambda _{0}}^{ \mathcal{A}}\left( a\right) \right) \otimes \left. \theta _{\xi ,\xi }\right\vert _{\mathcal{H}_{n}} \end{equation*} for all $a\in \mathcal{A}$ such that $a\geq _{\lambda _{0}}0$ and $\xi \in \mathcal{H}_{k}^{c},k\leq n$, and $\varphi _{\delta }-\widetilde{\psi } _{\delta }$ is positive. Since $K(\mathcal{D}_{\mathcal{E}})_{n}\ $is generated by rank one operators $\theta _{\xi ,\xi },\xi \in \mathcal{H} _{k}^{c},$ $k\leq n,$ \begin{equation*} \phi _{\left( \delta ,n\right) }\left( \pi _{\lambda _{0}}^{\mathcal{A} }\left( a\right) \otimes \left. T\right\vert _{\mathcal{H}_{n}}\right) = \widetilde{\psi }_{\delta }\left( \pi _{\lambda _{0}}^{\mathcal{A}}\left( a\right) \right) \otimes \left. T\right\vert _{\mathcal{H}_{n}} \end{equation*} for all $a\in \mathcal{A}$ and for all $T\in K(\mathcal{D}_{\mathcal{E}}).$ Suppose that $\mathcal{B}_{\delta }$ acts nondegenerately on a Hilbert space $\mathcal{K}$. Let $\{u_{i}\}_{i\in I}$ be an approximate unit for $K( \mathcal{D}_{\mathcal{E}})_{n}$, $\eta \in \mathcal{K}$ and $\xi \in \mathcal{H}_{k}^{c},k\leq n,\left\Vert \xi \right\Vert \neq 0.$ Since $ \mathcal{H}_{n}=\tbigoplus\limits_{k\leq n}\mathcal{H}_{k}^{c}$ and $K( \mathcal{D}_{\mathcal{E}})_{n}$ is a closed two sided $\ast $-ideal of $ C^{\ast }(\mathcal{D}_{\mathcal{E}})_{n}$, we have \begin{eqnarray*} &&\left\langle \phi _{\left( \delta ,n\right) }\left( \pi _{\lambda _{0}}^{ \mathcal{A}}\left( a\right) \otimes \left. 
T\right\vert _{\mathcal{H} _{n}}\right) \left( \eta \otimes \xi \right) ,\left( \eta \otimes \xi \right) \right\rangle \\ &=&\lim\limits_{i}\left\langle \phi _{\left( \delta ,n\right) }\left( \pi _{\lambda _{0}}^{\mathcal{A}}\left( a\right) \otimes u_{i}\left. T\right\vert _{\mathcal{H}_{n}}u_{i}\right) \left( \eta \otimes \xi \right) ,\eta \otimes \xi \right\rangle \\ &=&\lim\limits_{i}\left\langle \left( \widetilde{\psi }_{\delta }\left( \pi _{\lambda _{0}}^{\mathcal{A}}\left( a\right) \right) \otimes u_{i}\left. T\right\vert _{\mathcal{H}_{n}}u_{i}\right) \left( \eta \otimes \xi \right) ,\eta \otimes \xi \right\rangle \\ &=&\lim\limits_{i}\left( \left\langle \widetilde{\psi }_{\delta }\left( \pi _{\lambda _{0}}^{\mathcal{A}}\left( a\right) \right) \eta ,\eta \right\rangle \otimes \left\langle u_{i}\left. T\right\vert _{\mathcal{H} _{n}}u_{i}\left. \theta _{\frac{1}{\left\Vert \xi \right\Vert }\xi ,\frac{1}{ \left\Vert \xi \right\Vert }\xi }\right\vert _{\mathcal{H}_{n}}\left( \xi \right) ,\xi \right\rangle \right) \\ &=&\left\langle \widetilde{\psi }_{\delta }\left( \pi _{\lambda _{0}}^{ \mathcal{A}}\left( a\right) \right) \eta ,\eta \right\rangle \otimes \left\langle \left. T\right\vert _{\mathcal{H}_{n}}\left. \theta _{\frac{1}{ \left\Vert \xi \right\Vert }\xi ,\frac{1}{\left\Vert \xi \right\Vert }\xi }\right\vert _{\mathcal{H}_{n}}\left( \xi \right) ,\xi \right\rangle \\ &=&\left\langle \widetilde{\psi }_{\delta }\left( \pi _{\lambda _{0}}^{ \mathcal{A}}\left( a\right) \right) \eta ,\eta \right\rangle \otimes \left\langle \left. T\right\vert _{\mathcal{H}_{n}}\left( \xi \right) ,\xi \right\rangle \\ &=&\left\langle \left( \widetilde{\psi }_{\delta }\left( \pi _{\lambda _{0}}^{\mathcal{A}}\left( a\right) \right) \otimes \left. T\right\vert _{ \mathcal{H}_{n}}\right) \left( \eta \otimes \xi \right) ,\eta \otimes \xi \right\rangle \end{eqnarray*} for all $a\in \mathcal{A}$ and for all $T\in C^{\ast }(\mathcal{D}_{\mathcal{ E}})$. 
Therefore, \begin{equation*} \phi _{\left( \delta ,n\right) }\left( \pi _{\lambda _{0}}^{\mathcal{A} }\left( a\right) \otimes \left. T\right\vert _{\mathcal{H}_{n}}\right) = \widetilde{\psi }_{\delta }\left( \pi _{\lambda _{0}}^{\mathcal{A}}\left( a\right) \right) \otimes \left. T\right\vert _{\mathcal{H}_{n}} \end{equation*} for all $a\in \mathcal{A}$ and for all $T\in C^{\ast }(\mathcal{D}_{\mathcal{ E}}).$ Let $\psi _{\delta }:\mathcal{A}\rightarrow \mathcal{B}_{\delta },\psi _{\delta }=\widetilde{\psi }_{\delta }\circ \pi _{\lambda _{0}}^{\mathcal{A} } $. Clearly, $\psi _{\delta }$ is a local positive map, and \begin{equation*} \pi _{\left( \delta ,n\right) }^{\mathcal{B\otimes }C^{\ast }(\mathcal{D}_{ \mathcal{E}})}\left( \phi \left( a\otimes T\right) \right) =\psi _{\delta }\left( a\right) \otimes \left. T\right\vert _{\mathcal{H}_{n}} \end{equation*} for all $a\in \mathcal{A}$ and $T\in $ $C^{\ast }(\mathcal{D}_{\mathcal{E}})$ . Let $\delta _{1},\delta _{2}\in \Delta $ with $\delta _{1}\geq \delta _{2},$ $n\in \mathbb{N}$ and $a\in \mathcal{A}$. Since \begin{eqnarray*} \pi _{\delta _{1}\delta _{2}}^{\mathcal{B}}\left( \psi _{\delta _{1}}\left( a\right) \right) \otimes \left. T\right\vert _{\mathcal{H}_{n}} &=&\pi _{\left( \delta _{1},n\right) \left( \delta _{2},n\right) }^{\mathcal{ B\otimes }C^{\ast }(\mathcal{D}_{\mathcal{E}})}\left( \psi _{\delta _{1}}\left( a\right) \otimes \left. T\right\vert _{\mathcal{H}_{n}}\right) \\ &=&\pi _{\left( \delta _{1},n\right) \left( \delta _{2},n\right) }^{\mathcal{ B\otimes }C^{\ast }(\mathcal{D}_{\mathcal{E}})}\left( \pi _{\left( \delta _{1},n\right) }^{\mathcal{B\otimes }C^{\ast }(\mathcal{D}_{\mathcal{E} })}\left( \phi \left( a\otimes T\right) \right) \right) \\ &=&\pi _{\left( \delta _{2},n\right) }^{\mathcal{B\otimes }C^{\ast }( \mathcal{D}_{\mathcal{E}})}\left( \phi \left( a\otimes T\right) \right) =\psi _{\delta _{2}}\left( a\right) \otimes \left. 
T\right\vert _{\mathcal{H} _{n}} \end{eqnarray*} for all $T\in $ $C^{\ast }(\mathcal{D}_{\mathcal{E}})$, it follows that $\pi _{\delta _{1}\delta _{2}}^{\mathcal{B}}\left( \psi _{\delta _{1}}\left( a\right) \right) =\psi _{\delta _{2}}\left( a\right) $. Therefore, there is a linear map $\psi :\mathcal{A\rightarrow B}$ such that \begin{equation*} \psi \left( a\right) =\left( \psi _{\delta }\left( a\right) \right) _{\delta \in \Delta }\text{.} \end{equation*} Moreover, since for each $\delta \in \Delta $, $\psi _{\delta }$ is local positive, $\psi $ is local positive and \begin{eqnarray*} \pi _{\left( \delta ,n\right) }^{\mathcal{B\otimes }C^{\ast }(\mathcal{D}_{ \mathcal{E}})}\left( \left( \psi \otimes \text{id}_{C^{\ast }(\mathcal{D}_{ \mathcal{E}})}\right) \left( a\otimes T\right) \right) &=&\psi _{\delta }\left( a\right) \otimes \left. T\right\vert _{\mathcal{H}_{n}} \\ &=&\pi _{\left( \delta ,n\right) }^{\mathcal{B\otimes }C^{\ast }(\mathcal{D} _{\mathcal{E}})}\left( \phi \left( a\otimes T\right) \right) \end{eqnarray*} for all $a\in \mathcal{A}$ and $T\in $ $C^{\ast }(\mathcal{D}_{\mathcal{E}})$ , and for all $\left( \delta ,n\right) \in \Delta \times \mathbb{N}$. Therefore, \begin{equation*} \phi =\psi \otimes \text{id}_{C^{\ast }(\mathcal{D}_{\mathcal{E}})}\text{.} \end{equation*} To show that $\varphi -\psi $ is local positive, let $\delta \in \Delta $. We have seen that there exists $\lambda _{0}\in \Lambda $ such that $\varphi _{\delta }-\widetilde{\psi }_{\delta }$ is positive, where $\pi _{\delta }^{ \mathcal{B}}\circ \varphi =\varphi _{\delta }\circ \pi _{\lambda _{0}}^{ \mathcal{A}}$ and $\pi _{\delta }^{\mathcal{B}}\circ \psi =\psi _{\delta }= \widetilde{\psi }_{\delta }\circ \pi _{\lambda _{0}}^{\mathcal{A}}$. Clearly, $\left( \varphi -\psi \right) \left( a\right) \geq _{\delta }0$ whenever $a\geq _{\lambda _{0}}0$ and $\left( \varphi -\psi \right) \left( a\right) =_{\delta }0$ whenever $a=_{\lambda _{0}}0$. Therefore, $\varphi -\psi $ is local positive. 
$\left( 2\right) \Rightarrow \left( 1\right) $ By Proposition \ref{3}, $\phi $ is local positive, and since \begin{equation*} \varphi \otimes \text{id}_{C^{\ast }(\mathcal{D}_{\mathcal{E}})}-\phi =\left( \varphi -\psi \right) \otimes \text{id}_{C^{\ast }(\mathcal{D}_{ \mathcal{E}})} \end{equation*} and $\varphi -\psi $ are local positive, $\varphi \otimes $id$_{C^{\ast }( \mathcal{D}_{\mathcal{E}})}-\phi $ is local positive. \end{proof} \begin{corollary} \label{positive} Let $\mathcal{A}$ and $\mathcal{B}$\ be two unital locally $ C^{\ast }$-algebras, $\mathcal{H}$ be a Hilbert space,$ $ $ \varphi :$ $\mathcal{A}$ $\rightarrow $ $\mathcal{B\ }$and $\phi :\mathcal{ A\otimes }B(\mathcal{H})\rightarrow \mathcal{B\otimes }B(\mathcal{H})$ be two linear maps. Then $\phi $ and $\varphi \otimes $id$_{B(\mathcal{H} )}-\phi $ are local positive if and only if there is a local positive map $ \psi :\mathcal{A\rightarrow B\ }$such that $\varphi -\psi $ is local positive and $\phi =\psi \otimes $ id$_{B(\mathcal{H})}$. \end{corollary} As an application of Theorem \ref{6}, we show that given a local positive map $\varphi :$ $\mathcal{A}\rightarrow $ $\mathcal{B}$, the local positive map $\varphi \otimes $id$_{M_{n}\left( \mathbb{C}\right) }$ is local decomposable for some $n\geq 2$ if and only if $\varphi $ is a local\ $CP$ -map. \begin{definition} \cite{MJ3} A linear map $\varphi :\mathcal{A}\rightarrow \mathcal{B}$ is called \textit{local }$\mathit{n}$\textit{-copositive if }the map $\varphi \otimes t:\mathcal{A\otimes }M_{n}\left( \mathbb{C}\right) \rightarrow \mathcal{B\otimes }M_{n}\left( \mathbb{C}\right) $ defined by \begin{equation*} \left( \varphi \otimes t\right) \left( \left[ a_{ij}\right] _{i,j=1}^{n}\right) =\left[ \varphi \left( a_{ji}\right) \right] _{i,j=1}^{n} \end{equation*} is local positive, where $t$ denotes the transpose map on $M_{n}\left( \mathbb{C}\right) $. 
We say that $\varphi $ is local completely copositive if for each $\delta \in \Delta $, there exists $\lambda \in \Lambda $ such that $\left( \varphi \otimes t\right) \left( \left[ a_{ij}\right] _{i,j=1}^{n}\right) \geq _{\delta }0\ $whenever $\left[ a_{ij}\right] _{i,j=1}^{n}\geq _{\lambda }0$ and $\left( \varphi \otimes t\right) \left( \left[ a_{ij}\right] _{i,j=1}^{n}\right) =_{\delta }0\ \ $whenever $\left[ a_{ij}\right] _{i,j=1}^{n}=_{\lambda }0$,$\ $for all $n\in \mathbb{N}.$ \end{definition} \begin{remark} \cite{MJ3}\label{copositive}Let $\mathcal{A}$ and $\mathcal{B}$\ be two locally $C^{\ast }$-algebras and $\varphi :\mathcal{A}\rightarrow \mathcal{B} $ be a \textit{local }$\mathit{n}$\textit{-copositive} map. Then: \begin{enumerate} \item $\varphi $ is \textit{\ }local positive and so it is continuous and positive. \item \textit{for each }$\delta \in \Delta $, there exist $\lambda \in \Lambda $ and an $n$-copositive map $\varphi _{\delta }:\mathcal{A}_{\lambda }\rightarrow \mathcal{B}_{\delta }$ such that $\pi _{\delta }^{\mathcal{B} }\circ \varphi =\varphi _{\delta }\circ \pi _{\lambda }^{\mathcal{A}}.$ \end{enumerate} \end{remark} \begin{definition} \cite{MJ3} A linear map $\varphi :\mathcal{A}\rightarrow \mathcal{B}$ is local decomposable if it is sum of a local completely positive map and a \textit{local completely copositive }map. \end{definition} If $\varphi :\mathcal{A}\rightarrow \mathcal{B}$ is local decomposable, then for each\textit{\ }$\delta \in \Delta $, there exist $\lambda \in \Lambda $ and a decomposable positive map $\varphi _{\delta }:\mathcal{A}_{\lambda }\rightarrow \mathcal{B}_{\delta }$ such that $\pi _{\delta }^{\mathcal{B} }\circ \varphi =\varphi _{\delta }\circ \pi _{\lambda }^{\mathcal{A}}.$ \begin{theorem} \label{7}Let $\mathcal{A}$ and $\mathcal{B}$\ be two unital locally $C^{\ast }$-algebras and $\varphi :\mathcal{A\rightarrow B}$ be a linear map. 
If for some $n\geq 2$, $\varphi \otimes $id$_{M_{n}\left( \mathbb{C}\right) }: \mathcal{A\otimes }M_{n}\left( \mathbb{C}\right) \rightarrow \mathcal{ B\otimes }M_{n}\left( \mathbb{C}\right) $ is local decomposable, then $ \varphi $ is local completely positive. \end{theorem} \begin{proof} We adapt the proof of \cite[Theorem 3.1 ]{BO}. If $\varphi \otimes $id$ _{M_{n}\left( \mathbb{C}\right) }$ is decomposable, there are a local completely positive map $\phi $ and a local completely copositive map $\psi $ such that $\varphi \otimes $id$_{M_{n}\left( \mathbb{C}\right) }=\phi +\psi $ .$\ $By Corollary \ref{positive}, there exist two local positive maps $\phi _{1}:$ $\mathcal{A\rightarrow B}$ and $\psi _{1}:$ $\mathcal{A\rightarrow B\ }$such that $\phi =\phi _{1}\otimes $id$_{M_{n}\left( \mathbb{C}\right) }$ and $\psi =\psi _{1}\otimes $id$_{M_{n}\left( \mathbb{C}\right) }$. Since $\phi =\phi _{1}\otimes $id$_{M_{n}\left( \mathbb{C}\right) }$ and $ \phi $ is local completely positive, $\phi _{1}$ is local completely positive. Since $\psi $ and $\psi _{1}$ are continuous (as $\psi $ is local completely copositive and $\psi _{1}$ is local positive) and $\psi =\psi _{1}\otimes $id $_{M_{n}\left( \mathbb{C}\right) }$,\ for each $\delta \in \Delta ,$ there exist $\lambda \in \Lambda $, and completely positive maps $\psi _{\delta }: \mathcal{A}_{\lambda }\rightarrow \mathcal{B}_{\delta }$ and $\psi _{1\delta }:$ $\mathcal{A}_{\lambda }\rightarrow \mathcal{B}_{\delta }$\ such that $ \pi _{\delta }^{\mathcal{B}}\circ \psi =\psi _{\delta }\circ \pi _{\lambda }^{\mathcal{A}},$ $\pi _{\delta }^{\mathcal{B}}\circ \psi _{1}=\psi _{1\delta }\circ \pi _{\lambda }^{\mathcal{A}}\ $and $\psi _{\delta }=\psi _{1\delta }\otimes $id$_{M_{n}\left( \mathbb{C}\right) }$. Then, since $ n\geq 2$, by \cite[Lemma 3.2]{BO}, for each $\delta \in \Delta ,$ $\psi _{\delta }=0$. 
Consequently, $\psi =0$, and so $\varphi \otimes $id$ _{M_{n}\left( \mathbb{C}\right) }=\phi =\phi _{1}\otimes $id$_{M_{n}\left( \mathbb{C}\right) }$, whence $\varphi =\phi _{1}.$ \end{proof} Let $\{\mathcal{H};\mathcal{E};\mathcal{D}_{\mathcal{E}}\}$ be a quantized domain in the Hilbert space $\mathcal{H\ }$with $\mathcal{E=\{H}_{\iota } \mathcal{\}}_{\iota \in \Upsilon }$. For a local contractive $\ast $-morphism $\pi :\mathcal{A\rightarrow } C^{\ast }(\mathcal{D}_{\mathcal{E}})$ \begin{equation*} \pi \left( \mathcal{A}\right) ^{^{\prime }}=\{T\in B\left( \mathcal{H} \right) ;T\pi \left( a\right) \subseteq \pi \left( a\right) T\text{ for all } a\in \mathcal{A}\}. \end{equation*} \begin{remark} \begin{enumerate} \item $\pi \left( \mathcal{A}\right) ^{^{\prime }}$is a\textbf{\ }von Neumann algebra. Indeed, \begin{eqnarray*} \pi \left( \mathcal{A}\right) ^{^{\prime }} &=&\{T\in B\left( \mathcal{H} \right) ;T\pi \left( a\right) \subseteq \pi \left( a\right) T\text{ for all } a\in \mathcal{A}\} \\ &=&\{T\in B\left( \mathcal{H}\right) ;T\pi \left( a\right) \subseteq \pi \left( a\right) T\text{ for all }a\in b(\mathcal{A)}\} \\ &=&\{T\in B\left( \mathcal{H}\right) ;T\left. \pi \right\vert _{b(\mathcal{A) }}\left( a\right) =\left. \pi \right\vert _{b(\mathcal{A)}}\left( a\right) T \text{ for all }a\in b(\mathcal{A)}\} \\ &=&\left. \pi \right\vert _{b(\mathcal{A)}}\left( b(\mathcal{A})\right) ^{\prime } \end{eqnarray*} where $\left. \pi \right\vert _{b(\mathcal{A)}}:b(\mathcal{A)\rightarrow } B\left( \mathcal{H}\right) $ is the $\ast $-representation of the $C^{\ast }$ -algebra $b(\mathcal{A)}$ of all bounded elements of $\mathcal{A},$ $\left. \left. \pi \right\vert _{b(\mathcal{A)}}\left( a\right) \right\vert _{ \mathcal{D}_{\mathcal{E}}}=\pi \left( a\right) .$ \item $\pi \left( \mathcal{A}\right) ^{^{\prime }}\cap C^{\ast }(\mathcal{D} _{\mathcal{E}})$ \ is identified with a\textbf{\ }von Neumann algebra on $ \mathcal{H}$. 
Indeed, \begin{eqnarray*} \pi \left( \mathcal{A}\right) ^{^{\prime }}\cap C^{\ast }(\mathcal{D}_{ \mathcal{E}}) &=&\{T\in B\left( \mathcal{H}\right) \cap C^{\ast }(\mathcal{D} _{\mathcal{E}});T\pi \left( a\right) \subseteq \pi \left( a\right) T\text{ for all }a\in \mathcal{A}\} \\ &=&\{S\in b(C^{\ast }(\mathcal{D}_{\mathcal{E}}));S\pi \left( a\right) =\pi \left( a\right) S\ \text{ for all }a\in \mathcal{A}\} \\ &=&b(\pi \left( \mathcal{A}\right) ^{c}) \end{eqnarray*} where $\pi \left( \mathcal{A}\right) ^{c}=\{S\in C^{\ast }(\mathcal{D}_{ \mathcal{E}});S\pi \left( a\right) =\pi \left( a\right) S$ for all $a\in \mathcal{A}\}$. On the other hand, $\pi \left( \mathcal{A}\right) ^{c}$\ is a locally von Neumann algebra \cite[p.4198]{D}, and then, $b(\pi \left( \mathcal{A}\right) ^{c})$ is identified with a von Neumann algebra on $\mathcal{H}$ \cite[ Proposition 3.2]{D}. \item By \cite[Proposition 3.2]{D}, $b(C^{\ast }(\mathcal{D}_{\mathcal{E}}))$ is identified with a von Neumann algebra on $\mathcal{H}$, which is spatially isomorphic to the von Neumann algebra $\{T\in B\left( \mathcal{H} \right) ;P_{\iota }T=TP_{\iota },\forall \iota \in \Upsilon \}.$ \item Von Neumann algebras $\pi \left( \mathcal{A}\right) ^{^{\prime }}\cap C^{\ast }(\mathcal{D}_{\mathcal{E}})$ and $\left. \pi \right\vert _{b( \mathcal{A)}}\left( b(\mathcal{A})\right) ^{\prime }\cap b(C^{\ast }( \mathcal{D}_{\mathcal{E}}))$ are isomorphic. \end{enumerate} \end{remark} \begin{definition} \cite[Definition 4.1]{BGK} Let $\varphi ,\psi \in \mathcal{CPCC}_{\text{loc} }(\mathcal{A},C^{\ast }(\mathcal{D}_{\mathcal{E}}))$. We say that $\psi $ is dominated by $\varphi $, and we write $\varphi \geq \psi $, if $\varphi -\psi \in \mathcal{CPCC}_{\text{loc}}(\mathcal{A},C^{\ast }(\mathcal{D}_{\mathcal{E }})).$ \end{definition} \ \ \ Let $\varphi ,\psi \in \mathcal{CPCC}_{\text{loc}}(\mathcal{A},C^{\ast }(\mathcal{D}_{\mathcal{E}}))$ such that $\psi $ is dominated by $\varphi $. 
If $(\pi _{\varphi },V_{\varphi },$\ \ \ $\{\mathcal{H}^{\varphi },\mathcal{E }^{\varphi },\mathcal{D}_{\mathcal{E}^{\varphi }}\})$ is a minimal Stinespring dilation associated to $\varphi $, then, by Radon Nikodym type theorem \cite[Theorem 4.5]{BGK}, there is a unique element $T\in \pi _{\varphi }\left( \mathcal{A}\right) ^{^{\prime }}\cap C^{\ast }(\mathcal{D} _{\mathcal{E}^{\varphi }})$ such that \begin{equation*} \psi \left( a\right) =\varphi _{T}\left( a\right) =\left. V_{\varphi }^{\ast }T\pi _{\varphi }\left( a\right) V_{\varphi }\right\vert _{\mathcal{D}_{ \mathcal{E}}} \end{equation*} for all $a\in \mathcal{A}.$ \begin{definition} Let $\varphi \in \mathcal{CPCC}_{\text{loc}}(\mathcal{A},C^{\ast }(\mathcal{D }_{\mathcal{E}}))$. We say that $\varphi $ is pure if whenever $\psi \in \mathcal{CPCC}_{\text{loc}}(\mathcal{A},C^{\ast }(\mathcal{D}_{\mathcal{E} })) $ and $\varphi \geq \psi ,$ there is a positive number $\alpha $ such that $\psi =\alpha \varphi .$ \end{definition} Let $\varphi \in \mathcal{CPCC}_{\text{loc}}(\mathcal{A},C^{\ast }(\mathcal{D }_{\mathcal{E}}))$ and $\left( \pi _{\varphi },V_{\varphi },\{\mathcal{H} ^{\varphi },\mathcal{E}^{\varphi },\mathcal{D}_{\mathcal{E}^{\varphi }}\}\right) $ be a minimal Stinespring dilation associated to $\varphi $. Then $\varphi $ is pure if and only if $\pi _{\varphi }\left( \mathcal{A} \right) ^{^{\prime }}\cap C^{\ast }(\mathcal{D}_{\mathcal{E}^{\varphi }})$ $ =\{\alpha $id$_{\mathcal{D}_{\mathcal{E}^{\varphi }}};\alpha \in \mathbb{C} \}.$ \begin{proposition} Let $\varphi \in \mathcal{CPCC}_{\text{loc}}(\mathcal{A},C^{\ast }(\mathcal{D }_{\mathcal{E}}))$. If $\varphi $ is pure, then it is a bounded operator valued completely positive map. \end{proposition} \begin{proof} Let $\left( \pi _{\varphi },V_{\varphi },\{\mathcal{H}^{\varphi },\mathcal{E} ^{\varphi },\mathcal{D}_{\mathcal{E}^{\varphi }}\}\right) $ be a minimal Stinespring dilation associated to $\varphi $. 
Since $\varphi $ is pure, $ \pi _{\varphi }\left( \mathcal{A}\right) ^{^{\prime }}\cap C^{\ast }( \mathcal{D}_{\mathcal{E}^{\varphi }})=\{\alpha $id$_{\mathcal{D}_{\mathcal{E}^{\varphi }}};\alpha \in \mathbb{C}\}$. On the other hand, for each $\iota \in \Upsilon ,$ $\left. P_{\iota }\right\vert _{\mathcal{D}_{\mathcal{E}^{\varphi }}}\in \pi _{\varphi }\left( \mathcal{A}\right) ^{^{\prime }}\cap C^{\ast }( \mathcal{D}_{\mathcal{E}^{\varphi }})$, and so $P_{\iota }=$id$_{\mathcal{H} ^{\varphi }}$. Therefore, for each $\iota \in \Upsilon ,$ $\mathcal{H} _{\iota }^{\varphi }=\mathcal{H}^{\varphi }$, $\mathcal{D}_{\mathcal{E} ^{\varphi }}=\mathcal{H}^{\varphi }$ and $C^{\ast }(\mathcal{D}_{\mathcal{E} ^{\varphi }})=B\left( \mathcal{H}^{\varphi }\right) $. Consequently, $\pi _{\varphi }\left( a\right) \in B\left( \mathcal{H}^{\varphi }\right) $ for all $a\in \mathcal{A}.\ $Therefore, $V_{\varphi }^{\ast }\pi _{\varphi }\left( a\right) V_{\varphi }\in B\left( \mathcal{H}\right) $ for all $a\in \mathcal{A}$, and \begin{equation*} \varphi \left( a\right) =\left. V_{\varphi }^{\ast }\pi _{\varphi }\left( a\right) V_{\varphi }\right\vert _{\mathcal{D}_{\mathcal{E}}}\in b\left( C^{\ast }(\mathcal{D}_{\mathcal{E}})\right) \end{equation*} for all $a\in \mathcal{A}$. \end{proof} \begin{theorem} \label{8} Let $\mathcal{A}$ and $\mathcal{B}$ be two unital locally $C^{\ast }$-algebras, $\varphi \in \mathcal{CPCC}_{\text{loc}}(\mathcal{A},C^{\ast }(\mathcal{D} _{\mathcal{E}})),$ $\psi \in \mathcal{CPCC}_{\text{loc}}(\mathcal{B},C^{\ast }(\mathcal{D}_{\mathcal{F}}))$ and $\phi \in \mathcal{CPCC}_{\text{loc}}( \mathcal{A\otimes B},C^{\ast }(\mathcal{D}_{\mathcal{E\otimes F}}))$. 
If $ \phi $ is dominated by $\varphi \otimes \psi $ and $\varphi $ is pure, then there exists $\widetilde{\psi }\in \mathcal{CPCC}_{\text{loc}}(\mathcal{B} ,C^{\ast }(\mathcal{D}_{\mathcal{F}}))$ dominated by $\psi $ such that $\phi =\varphi \otimes \widetilde{\psi }.$ \end{theorem} \begin{proof} Let $\left( \pi _{\varphi },V_{\varphi },\{\mathcal{H}^{\varphi },\mathcal{E} ^{\varphi },\mathcal{D}_{\mathcal{E}^{\varphi }}\}\right) $ and $\left( \pi _{\psi },V_{\psi },\{\mathcal{H}^{\psi },\mathcal{E}^{\psi },\mathcal{D}_{ \mathcal{E}^{\psi }}\}\right) $ be the minimal Stinespring dilations associated to $\varphi $ and $\psi $. Since $\varphi $ is pure, $\mathcal{D} _{\mathcal{E}^{\varphi }}=\mathcal{H}^{\varphi }$ and $C^{\ast }(\mathcal{D} _{\mathcal{E}^{\varphi }})=B\left( \mathcal{H}^{\varphi }\right) $. By Proposition \ref{3}, $\ (\pi _{\varphi }\otimes \pi _{\psi },V_{\varphi }\otimes V_{\psi },\{\mathcal{H}^{\varphi }\otimes \mathcal{H}^{\psi }, \mathcal{H}^{\varphi }\otimes \mathcal{E}^{\psi },$ $\mathcal{D}_{\mathcal{H} ^{\varphi }\otimes \mathcal{E}^{\psi }}\})$ is a minimal Stinespring dilation associated to $\varphi \otimes \psi $. We have: \begin{eqnarray*} \left( \left( \pi _{\varphi }\otimes \pi _{\psi }\right) \left( \mathcal{ A\otimes B}\right) \right) ^{\prime } &=&\left( \left( \left. \pi _{\varphi }\right\vert _{b\mathcal{(A)}}\otimes \left. \pi _{\psi }\right\vert _{b \mathcal{(B)}}\right) \left( b(\mathcal{A)\otimes }_{\text{alg}}b\mathcal{(B) }\right) \right) ^{\prime } \\ &=&\left( \left. \pi _{\varphi }\right\vert _{b\mathcal{(A)}}\left( b( \mathcal{A)}\right) \right) ^{\prime }\overline{\otimes }\left( \left. 
\pi _{\psi }\right\vert _{b\mathcal{(B)}}\left( b\mathcal{(B)}\right) \right) ^{\prime } \end{eqnarray*} where "$\overline{\otimes }$ " denotes the tensor product of von Neumann algebras, and \begin{eqnarray*} b(C^{\ast }(\mathcal{D}_{\mathcal{H}^{\varphi }\otimes \mathcal{E}^{\psi }})) &=&\{R\in B(\mathcal{H}^{\varphi }\otimes \mathcal{H}^{\psi });R(\text{ id}_{\mathcal{H}^{\varphi }}\otimes P_{\iota })=(\text{id}_{\mathcal{H} ^{\varphi }}\otimes P_{\iota })R,\forall \iota \in \Upsilon \} \\ &=&B(\mathcal{H}^{\varphi })\overline{\otimes }\{S\in B(\mathcal{H}^{\psi });SP_{\iota }=P_{\iota }S,\forall \iota \in \Upsilon \} \\ &=&B(\mathcal{H}^{\varphi })\overline{\otimes }b(C^{\ast }(\mathcal{D}_{ \mathcal{E}^{\psi }})). \end{eqnarray*} Thus, \begin{eqnarray*} &&\left( \left( \pi _{\varphi }\otimes \pi _{\psi }\right) \left( \mathcal{ A\otimes B}\right) \right) ^{\prime }\cap b(C^{\ast }(\mathcal{D}_{\mathcal{H }^{\varphi }\otimes \mathcal{E}^{\psi }})) \\ &=&\left( \left( \left. \pi _{\varphi }\right\vert _{b\mathcal{(A)}}\left( b( \mathcal{A)}\right) \right) ^{\prime }\overline{\otimes }\left( \left. \pi _{\psi }\right\vert _{b\mathcal{(B)}}\left( b\mathcal{(B)}\right) \right) ^{\prime }\right) \cap \left( B(\mathcal{H}^{\varphi })\overline{\otimes } b(C^{\ast }(\mathcal{D}_{\mathcal{E}^{\psi }}))\right) \\ &=&\left( \pi _{\varphi }\left( \mathcal{A}\right) ^{\prime }\cap B(\mathcal{ H}^{\varphi })\right) \overline{\otimes }\left( \left( \left. \pi _{\psi }\right\vert _{b\mathcal{(B)}}\left( b\mathcal{(B)}\right) \right) ^{\prime }\cap b(C^{\ast }(\mathcal{D}_{\mathcal{E}^{\psi }}))\right) \\ &&\text{(since }\varphi \text{ is pure) } \\ &=&\{\alpha \text{id}_{\mathcal{H}^{\varphi }};\alpha \in \mathbb{C} \}\otimes \left( \pi _{\psi }\left( \mathcal{B}\right) ^{\prime }\cap C^{\ast }(\mathcal{D}_{\mathcal{E}^{\psi }})\right) . 
\end{eqnarray*} Therefore, \begin{equation*} \left( \left( \pi _{\varphi }\otimes \pi _{\psi }\right) \left( \mathcal{ A\otimes B}\right) \right) ^{\prime }\cap C^{\ast }(\mathcal{D}_{\mathcal{H} ^{\varphi }\otimes \mathcal{E}^{\psi }})=\{\alpha \text{id}_{\mathcal{H} ^{\varphi }};\alpha \in \mathbb{C}\}\otimes \left( \pi _{\psi }\left( \mathcal{B}\right) ^{\prime }\cap C^{\ast }(\mathcal{D}_{\mathcal{E}^{\psi }})\right) . \end{equation*} Since $\phi \ $and $\varphi \otimes \psi $ are local completely contractive and local completely positive and $\phi $ is dominated by $\varphi \otimes \psi $, by Radon Nikodym theorem \cite[Theorem 4.5]{BGK}, there is a unique positive contractive linear operator $R\in \left( \left( \pi _{\varphi }\otimes \pi _{\psi }\right) \left( \mathcal{A\otimes B}\right) \right) ^{\prime }\cap C^{\ast }(\mathcal{D}_{\mathcal{H}^{\varphi }\otimes \mathcal{ E}^{\psi }})$ such that $\phi =\left( \varphi \otimes \psi \right) _{R}$. Therefore, there is $T\in \pi _{\psi }\left( \mathcal{B}\right) ^{\prime }\cap C^{\ast }(\mathcal{D}_{\mathcal{E}^{\psi }})$ such that $R=$id$_{ \mathcal{H}^{\varphi }}\otimes T$ and \begin{eqnarray*} \phi \left( a\otimes b\right) &=&\left( \varphi \otimes \psi \right) _{\text{ id}_{\mathcal{H}^{\varphi }}\otimes T}\left( a\otimes b\right) \\ &=&\left( V_{\varphi }^{\ast }\otimes V_{\psi }^{\ast }\right) \left( \text{ id}_{\mathcal{H}^{\varphi }}\otimes T\right) \left( \pi _{\varphi }\otimes \pi _{\psi }\right) \left( a\otimes b\right) \left( V_{\varphi }\otimes V_{\psi }\right) \\ &=&V_{\varphi }^{\ast }\pi _{\varphi }\left( a\right) V_{\varphi }\otimes V_{\psi }^{\ast }T\pi _{\psi }\left( b\right) V_{\psi }=\varphi \left( a\right) \otimes \psi _{T}\left( b\right) \end{eqnarray*} for all $a\in \mathcal{A}$ and $b\in \mathcal{B}$.$\ $Hence, there is $ \widetilde{\psi }=\psi _{T}\in \mathcal{CPCC}_{\text{loc}}(\mathcal{B} ,C^{\ast }(\mathcal{D}_{\mathcal{F}}))$ such that $\phi =\varphi \otimes \widetilde{\psi }$.\ 
Moreover, $\widetilde{\psi }$ is dominated by $\psi $. \end{proof} \begin{corollary} Let $\mathcal{A}$ be a unital $C^{\ast }$-algebra, $\mathcal{B}$ be a unital locally $C^{\ast }$-algebra, $\varphi \in \mathcal{CP}(\mathcal{A},B( \mathcal{H})),$ $\psi \in \mathcal{CPCC}_{\text{loc}}(\mathcal{B},C^{\ast }( \mathcal{D}_{\mathcal{F}}))$ and $\phi \in \mathcal{CPCC}_{\text{loc}}( \mathcal{A\otimes B},C^{\ast }(\mathcal{D}_{\mathcal{H\otimes F}}))$. If $ \phi $ is dominated by $\varphi \otimes \psi $ and $\varphi $ is pure, then there is $\widetilde{\psi }\in \mathcal{CPCC}_{\text{loc}}(\mathcal{B} ,C^{\ast }(\mathcal{D}_{\mathcal{F}}))$ which is dominated by $\psi $ and such that $\phi =\varphi \otimes \widetilde{\psi }$. \end{corollary} \end{document}
\begin{document} \title[On $\opn{BP}^*(BPU_n)$ in lower dimensions and the Thom map]{On the Brown-Peterson cohomology of $BPU_n$ in lower dimensions and the Thom map} \author{Xing Gu} \address{Max Planck Institute for Mathematics, Vivatsgasse 7, 53111 Bonn, Germany} \email{[email protected]} \thanks{The author would like to thank the Max Planck Institute for Mathematics for their hospitality and financial support.} \subjclass[2010]{55N35, 55R35} \date{} \dedicatory{} \keywords{the Brown-Peterson cohomology, the classifying spaces of the projective unitary groups} \begin{abstract} For an odd prime $p$, we study the image of the Thom map from Brown-Peterson cohomology of $BPU_n$ to the ordinary cohomology in dimensions $0\leq i\leq 2p+2$, where $BPU_n$ is the classifying space of the projective unitary group $PU_n$. Also we show that a family of well understood $p$-torsion cohomology classes $y_{p,k}\in H^{2p^{k+1}+2}(BPU_n;\mathbb{Z}_{(p)})$ are in the image of the Thom map. \end{abstract} \maketitle \section{Introduction}\label{sec:intro} Let $p$ be an odd prime number, and let $\opn{BP}$ be the corresponding Brown-Peterson spectrum. The Brown-Peterson cohomology $\opn{BP}^*(BG)$ of the classifying space of a compact Lie group or a finite group $G$ is the subject of various works such as Kameko and Yagita \cite{kameko2008brown}, Kono and Yagita \cite{kono1993brown}, Leary and Yagita \cite{leary1992some}, and Yan \cite{YAN1995221}. One case that $\opn{BP}^*(BG)$ is particularly interesting is when $G$ is homotopy equivalent to a complex algebraic group via a group homomorphism. In this case the Chow ring of $BG$, $\opn{CH}^*(BG)$ is defined by Totaro \cite{totaro1999chow}, and one has the cycle class map \begin{equation}\label{eq:cl} \operatorname{cl}:\opn{CH}^*(BG)\rightarrow H^{\textrm{even}}(BG;\mathbb{Z}) \end{equation} which is a ring homomorphism from the Chow ring to the subring of $H^*(BG)$ of even dimensional classes. 
Although for complex algebraic varieties, Chow rings are in general much more complicated than ordinary cohomology, it is shown in many cases that $\opn{CH}^*(BG)$ is simpler than $H^*(BG;\mathbb{Z})$. On the other hand, Totaro \cite{totaro1999chow} shows that the cycle class map \eqref{eq:cl} factors as \begin{equation}\label{eq:cl refined} \opn{CH}^*(BG)\xrightarrow{\tilde{\operatorname{cl}}}\operatorname{MU}^{\textrm{even}}(BG)\otimes_{\operatorname{MU}^*}\mathbb{Z}\xrightarrow{T} H^{\textrm{even}}(BG;\mathbb{Z}) \end{equation} where $\operatorname{MU}$ denotes the complex cobordism theory, and the second map $T$ is the Thom map. The first map $\tilde{\operatorname{cl}}$ is called the refined cycle class map. Therefore, the $\opn{BP}$ theory, being a $p$-local approximation of the $\operatorname{MU}$ theory, acts as a bridge between the Chow ring and the ordinary cohomology of $BG$. Indeed, it is an interesting problem to find out for which $G$ is the refined cycle class map \[\tilde{\operatorname{cl}}: \opn{CH}^*(BG)\xrightarrow{\tilde{\operatorname{cl}}}\operatorname{MU}^*(BG)\otimes_{\operatorname{MU}^*}\mathbb{Z}\] an isomorphism. For this to hold, it is necessary that $\opn{BP}^*(BG)$ concentrates in even dimensions. This property is studied for various $G$ by Landweber \cite{landweber1970coherence} and \cite{landweber1972elements}, and by Kono and Yagita \cite{kono1993brown}. In this paper we focus on the case $G=PU_n$, where $PU_n$ is the $n$th projective unitary group, i.e., the quotient of the unitary group $U_n$ by its center $S^1$, the group of the unit circle, or equivalently, the quotient of the special unitary group $SU_n$ by its center, the group of $n$th complex roots of unity. The algebraic invariants of $BPU_n$ are much less known compared to $BG$ for most of the other compact Lie groups $G$. The Chow ring of $BPU_3$ is determined, up to one relation, by Vezzosi \cite{vezzosi1999chow}. 
The additive structure of $\opn{CH}^*(BPU_3)$ is independently determined by Kameko and Yagita \cite{kameko2008brown}. Vistoli \cite{vistoli2007cohomology} improves Vezzosi's method and determines the additive structures as well as much of the ring structures of the Chow ring and ordinary cohomology with integral coefficients of $BPU_p$ for an odd prime $p$. In particular, he completes Vezzosi's study of the Chow ring of $BPU_3$. The ordinary mod $p$ cohomology and Brown-Peterson cohomology of $BPU_p$ are studied by Kameko and Yagita \cite{kameko2008brown}, Kono and Yagita \cite{kono1993brown}, and Vavpeti{\v{c}} and Viruel \cite{vavpetivc2005mod}. The mod $2$ ordinary cohomology ring of $BPU_n$ for $n\equiv 2\pmod{4}$ is determined by Kono and Mimura \cite{kono1975cohomology} and Toda \cite{toda1987cohomology}. For a general positive integer $n$, the cohomology groups $H^k(BPU_n;\mathbb{Z})$ for $k\leq 3$ are easily determined by the universal $n$-cover $SU_n\to PU_n$. The group $H^4(BPU_n;\mathbb{Z})$ is determined by Woodward \cite{woodward1982classification} and $H^5(BPU_n;\mathbb{Z})$ by Antieau and Williams \cite{antieau2014topological}. The ring structure of $H^*(BPU_n;\mathbb{Z})$ in dimensions less than or equal to $10$ is determined by the author \cite{gu2019cohomology}. The author \cite{gu2019some} also studies some $p$-torsion classes of $\opn{CH}^*(BPU_n)$ for $n$ with $p$-adic valuation $1$, i.e., $p|n$ but $p^2\nmid n$. To the author's best knowledge, $\opn{BP}^*(BPU_n)$ for $n$ not a prime number has not been studied in any earlier published work. Before stating the main conclusions of this paper, we fix some notations. For a spectrum $A$, we denote by $A_*$ its homotopy groups, or the group of coefficients of the homology theory $A$, considered as a graded abelian group. Denote by $A^*$ the group of coefficients of the cohomology theory associated to $A$. Then $A^*$ and $A_*$ are isomorphic, but the gradings are opposite to each other. 
For instance, we have $\opn{BP}_*\cong\mathbb{Z}_{(p)}[v_1,v_2,\cdots]$ where $\operatorname{dim}v_k=2p^k-2$ and $\opn{BP}^*\cong\mathbb{Z}_{(p)}[v_1,v_2,\cdots]$ where $\operatorname{dim}v_k=-(2p^k-2)$. Let $H\mathbb{Z}_{(p)}$ be the Eilenberg-Mac Lane spectrum for the ring $\mathbb{Z}_{(p)}$, and $T:\opn{BP}\to H\mathbb{Z}_{(p)}$ be the Thom map. Then we have the augmentation map $T:\opn{BP}^*\to\mathbb{Z}_{(p)}$ induced by the Thom map, and the ring $\mathbb{Z}_{(p)}$ has a structure of graded $\opn{BP}^*$-algebra given by $T$. More generally, for any space $X$, we have the induced homomorphism $T:\opn{BP}^*(X)\to H^*(X;\mathbb{Z}_{(p)})$, whose image is canonically isomorphic to the graded $\mathbb{Z}_{(p)}$-algebra $\opn{BP}^*(X)\otimes_{BP^*}\mathbb{Z}_{(p)}$. \begin{theorem}\label{thm:main} Let $p$ be an odd prime. In dimensions $0\leq k \leq 2(p+1)$, the graded ring $\opn{BP}^*(BPU_n)\otimes_{BP^*}\mathbb{Z}_{(p)}$, i.e., the image of the Thom map \[T:\opn{BP}^*(BPU_n)\to H^*(BPU_n;\mathbb{Z}_{(p)})\] concentrates in even dimensions. \end{theorem} \begin{remarkthm} We have the ``canonical Brauer class'' denoted by $x_1$ generating the group \[H^3(BPU_n;\mathbb{Z}_{(p)})\cong\mathbb{Z}_{p^r},\] where $r$ is the $p$-adic valuation of $n$. In other words, we have $n=p^rm$ with $p\nmid m$. This class plays an important role in the calculation of $H^*(BPU_n;\mathbb{Z})$ in \cite{gu2019cohomology}. However, Theorem \ref{thm:main} indicates that any $kx_1$ for $p^r\nmid k$ is not in the image of the Thom map. \end{remarkthm} For the next theorem, we note that there are $p$-torsion classes \[y_{p,k}\in H^{2p^{k+1}+2}(BPU_n;\mathbb{Z}_{(p)}),\ k\geq0,\] which are studied in \cite{gu2019cohomology} and discussed in more detail in Section \ref{sec:ordinary coh}. \begin{theorem}\label{thm:eta p,k} Let $p$ be an odd prime. 
For $k\geq 0$ and $p|n$, there are classes \begin{equation*} \eta_{p,k}\in\opn{BP}^{2p^{k+1}+2}(BPU_n) \end{equation*} satisfying \[T(\eta_{p,k})=y_{p,k}\in H^{2p^{k+1}+2}(BPU_n;\mathbb{Z}_{(p)})\] where $T$ is the Thom map. \end{theorem} \begin{remarkthm} Localizing at $p$ the homotopy fiber sequence $B\mathbb{Z}_n\rightarrow BSU_n\rightarrow BPU_n$, we obtain a $p$-local homotopy equivalence $BSU_n\xrightarrow{\simeq_{(p)}} BPU_n$ in the case $p\nmid n$. Therefore this case is not very interesting. \end{remarkthm} \begin{remarkthm} In \cite{gu2019some}, the author constructs $p$-torsion classes $\rho_{p,k}$, $k\geq 0$ in the Chow ring of $BPU_n$ satisfying $\operatorname{cl}(\rho_{p,k})=y_{p,k}$, when the $p$-adic valuation of $n$ is $1$. Theorem \ref{thm:eta p,k} and the existence of the refined cycle class map as in \eqref{eq:cl refined} support the conjecture that the classes $\rho_{p,k}$ exist for $n$ with $p$-adic valuation greater than $1$. \end{remarkthm} \begin{remarkthm} The classes $\eta_{p,k}$ are not $p$-torsion classes in general. As pointed out by N. Yagita, it follows from Theorem 1.4 of Kameko and Yagita \cite{kameko2008brown}, for $n=p$, that the class $\eta_{p,0}$ satisfies \[p\eta_{p,0}=v_2\eta_{p,0}^p+\cdots\neq 0,\] where the class on the right side is trivial modulo the ideal $(v_2,v_3,\cdots,v_n,\cdots)$ of the ring $\opn{BP}^*$. \end{remarkthm} This paper is organized as follows. In Section \ref{sec:ordinary coh} we review some results on the ordinary cohomology of $BPU_n$, most of which are proved in \cite{gu2019cohomology} and \cite{gu2019some}. In Section \ref{sec:p-primary} we prove Theorem \ref{thm:main} by studying an $n$-torsion abelian subgroup of $PU_n$, an idea inspired by Vistoli \cite{vistoli2007cohomology}. Finally, in Section \ref{sec:eta}, we construct the classes $\eta_{p,k}$ and prove Theorem \ref{thm:eta p,k}. 
\section{On the ordinary cohomology of $BPU_n$}\label{sec:ordinary coh} In this section we consider the ordinary cohomology of $BPU_n$. For the most part of this section, we reformulate results in \cite{gu2019cohomology}. By definition we have a short exact sequence of Lie groups \[1\rightarrow\mathbb{Z}_n\rightarrow SU_n\rightarrow PU_n\rightarrow 1\] which gives a universal cover of $PU_n$ and shows $\pi_1(PU_n)\cong\mathbb{Z}_n$. It follows from the Hurewicz theorem and the universal coefficient theorem that we have \begin{equation}\label{eq:H123} H^k(BPU_n;\mathbb{Z})\cong \begin{cases} 0,\hspace{2 mm}k=1,2,\\ \mathbb{Z}_n,\hspace{2 mm}k=3. \end{cases} \end{equation} Consider the short exact sequence of Lie groups \[1\rightarrow S^1\rightarrow U_n\rightarrow PU_n\rightarrow 1\] which defines the Lie group $PU_n$. Taking classifying spaces, we obtain a homotopy fiber sequence \[BS^1\rightarrow BU_n\rightarrow BPU_n.\] Notice that $BS^1$ is of the homotopy type of the Eilenberg-Mac Lane space $K(\mathbb{Z},2)$. Delooping $BS^1$, we obtain another homotopy fiber sequence \begin{equation}\label{eq:BPUn fib seq} BU_n\rightarrow BPU_n\xrightarrow{\chi}K(\mathbb{Z},3). \end{equation} Here the map \begin{equation}\label{eq:chi} \chi: BPU_n\rightarrow K(\mathbb{Z},3) \end{equation} defines a generator $x_1$ of $H^3(BPU_n;\mathbb{Z})\cong\mathbb{Z}_n$, or $H^3(BPU_n;\mathbb{Z}_{(p)})\cong\mathbb{Z}_{p^r}$, where $r$ is the $p$-adic valuation of $n$, i.e., we have $n=p^rm$ with $p\nmid m$. We call $x_1$ the canonical Brauer class of $BPU_n$. In principle, the homology of $K(\pi,n)$ for any finitely generated abelian group $\pi$ and any $n>0$ is determined in \cite{Ca}. Tamanoi \cite{tamanoi1999subalgebras} offers a description of the mod $p$ cohomology of $K(\pi,n)$ in terms of the Milnor basis (\cite{milnor1958steenrod}) of the mod $p$ Steenrod algebra. 
In \cite{gu2019cohomology}, the author gives a description of the cohomology of $K(\mathbb{Z},3)$, which is consistent with the notations in this paper. Throughout the rest of this paper, we denote by $\mathscr{P}^k$ the $k$th Steenrod reduced power operation, and $\delta$ the Bockstein homomorphism $H^*(-;\mathbb{Z}_p)\rightarrow H^{*+1}(-;\mathbb{Z})$. The following lemma is well known and can be easily deduced from Section 2 of \cite{gu2019cohomology}. \begin{lemma}\label{lem:K(Z,3)} In dimensions $0<i\leq 2p+4$, we have \begin{equation*} H^i(K(\mathbb{Z},3);\mathbb{Z}_{(p)})\cong \begin{cases} \mathbb{Z}_{(p)},\ i=3,\\ \mathbb{Z}_p,\ i=2p+2,\\ 0,\ 0<i\leq 2p+4,\ i\neq3,\ 2p+2. \end{cases} \end{equation*} Let $x_1\in H^3(K(\mathbb{Z},3);\mathbb{Z}_{(p)})$ be the canonical Brauer class. Then the group \[H^{2p+2}(K(\mathbb{Z},3);\mathbb{Z}_{(p)})\cong\mathbb{Z}_p\] is generated by the class $y_{p,0}=\delta\mathscr{P}^1(x_1)$. The mod $p$ reduction of $y_{p,0}$, denoted by $\bar{y}_{p,0}$, is equal to $Q_1(x_1)$, where $Q_1$ is one of Milnor's operations defined in \cite{milnor1958steenrod}. \end{lemma} We also note the following \begin{lemma}\label{lem:K(Z,3)p-tor} All torsion classes in the graded abelian group $H^*(K(\mathbb{Z},3);\mathbb{Z}_{(p)})$ are $p$-torsion classes. In other words, the abelian groups $H^k(K(\mathbb{Z},3);\mathbb{Z}_{(p)})$ are $p$-torsion groups for $k>3$. \end{lemma} \begin{proof} This follows immediately from Proposition 2.14 of \cite{gu2019cohomology}, which gives a complete description of the graded $\mathbb{Z}_{(p)}$-algebra $H^*(K(\mathbb{Z},3);\mathbb{Z}_{(p)})$. \end{proof} \begin{corollary}\label{cor:K(Z,3)p-tor} For $k>3$, the mod $p$ reduction \[H^k(K(\mathbb{Z},3);\mathbb{Z}_{(p)})\rightarrow H^k(K(\mathbb{Z},3);\mathbb{Z}_{p})\] is injective. 
\end{corollary} \begin{proof} This follows immediately from Lemma \ref{lem:K(Z,3)p-tor} and the long exact sequence induced by the short exact sequence \[0\rightarrow\mathbb{Z}_{(p)}\xrightarrow{\times p}\mathbb{Z}_{(p)}\rightarrow\mathbb{Z}_p\rightarrow 0.\] \end{proof} In the cohomology ring $H^*(K(\mathbb{Z},3);\mathbb{Z}_{(p)})$ we have $p$-torsion classes \[y_{p,k}=\delta\mathscr{P}^{p^k}\mathscr{P}^{p^{k-1}}\cdots\mathscr{P}^1(x_1),\hspace{2 mm}k\geq 0\] of dimension $2p^{k+1}+2$, where $x_1\in H^3(K(\mathbb{Z},3);\mathbb{Z}_{(p)})$ is the canonical Brauer class. Let $\bar{y}_{p,k}\in H^*(K(\mathbb{Z},3);\mathbb{Z}_p)$ be the mod $p$ reduction of $y_{p,k}$, and we have $\bar{y}_{p,k}=Q_{k+1}(x_1)$ where $Q_{k+1}$ is the Milnor operation considered in \cite{milnor1958steenrod}. In \cite{gu2019cohomology} and \cite{gu2019some}, the author studies the images of the classes $y_{p,k}$ under \[\chi^*: H^*(K(\mathbb{Z},3);\mathbb{Z}_{(p)})\rightarrow H^*(BPU_n;\mathbb{Z}_{(p)}).\] In the following theorems, we abuse notations and denote $\chi^*(y_{p,k})$ simply by $y_{p,k}$. \begin{theorem}[Theorem 1.2, \cite{gu2019cohomology}]\label{thm:2p+2} Let $p$ be a prime. In $H^{2p+2}(BPU_{n};\mathbb{Z})$, we have $y_{p,0}\neq 0$ of order $p$ when $p|n$, and $y_{p,0}=0$ otherwise. Furthermore, the $p$-torsion subgroup of $H^k(BPU_n;\mathbb{Z})$ is $0$ for $3<k<2p+2$. \end{theorem} \begin{theorem}[(1) of Theorem 1.1, \cite{gu2019some}]\label{thm:2p^k+2} In $H^{2p^{k+1}+2}(BPU_n;\mathbb{Z}_{(p)})$, we have $p$-torsion classes $y_{p,k}\neq 0$ for all odd prime divisors $p$ of $n$ and $k\geq 0$. \end{theorem} \section{An $n$-torsion abelian subgroup of $PU_n$}\label{sec:p-primary} Let $p$ be an odd prime. In \cite{vistoli2007cohomology}, Vistoli considers an abelian $p$-subgroup of $PU_p$, which plays an important role in the study of the Chow ring and cohomology of $BPU_p$. In this section we slightly generalize his construction and prove Theorem \ref{thm:main}. Let $n>1$ be an integer. 
Consider the following matrices in $U_n$: \begin{equation*} \alpha'= \begin{bmatrix} 0 & 1\\ I_{n-1} & 0 \end{bmatrix}\quad \textrm{ and }\quad \beta'= \begin{bmatrix} \zeta & & & & \\ & \zeta^2& & &\\ & & \ddots & &\\ & & & \zeta^{n-1} & \\ & & & &1 \end{bmatrix} \end{equation*} where $\zeta=\exp(2\pi i/n)$. Let $V_n'$ be the subgroup of $U_n$ generated by $\alpha'$ and $\beta'$. A direct calculation shows \begin{equation}\label{eq:alpha'beta'} \beta'\alpha'=\zeta\alpha'\beta'. \end{equation} Let $W_n$ be the subgroup of $U_n$ generated by the matrix $\beta'$ and the scalar $\zeta$. Then we have $W_n\cong\mathbb{Z}_n\times\mathbb{Z}_n$, which is a normal subgroup of $V_n'$, and we have the quotient group $V_n'/W_n\cong\mathbb{Z}_n$ generated by the matrix $\alpha'$. Indeed, $V_n'$ is a semidirect product \begin{equation}\label{eq:V'SES} V_n'=(\mathbb{Z}_n\times\mathbb{Z}_n)\rtimes_{\phi} C_n \end{equation} where $C_n$ is the cyclic group of order $n$. Here we use a different notation than $\mathbb{Z}_n$ since it looks less confusing as we introduce the action $\phi$. \begin{lemma}\label{lem:phi} In the definition \eqref{eq:V'SES} of $V_n'$, the action of $\phi$ is as follows: Identify the canonical generator of $C_n$ with the matrix $\alpha'$. Then $\alpha'$ acts on $\mathbb{Z}_n\times\mathbb{Z}_n$ as the matrix \begin{equation*} \begin{bmatrix} 1 & -1\\ 0 & 1 \end{bmatrix}. \end{equation*} \end{lemma} \begin{proof} The action $\phi$ of $\alpha'$ is given by the conjugation action of $\alpha'$ on $W_n$, and a direct computation shows \begin{equation*} \alpha'[\zeta^i(\beta')^j](\alpha')^{-1}=\zeta^{i-j}(\beta')^j, \end{equation*} and we conclude. 
\end{proof} \begin{lemma}\label{lem:V'} There is a short exact sequence \[0\rightarrow\mathbb{Z}_n\rightarrow H^2(BV_n';\mathbb{Z})\rightarrow\mathbb{Z}_n\rightarrow 0.\] \end{lemma} \begin{remarkthm} Later in the proof of Lemma \ref{lem:H3 restriction}, we may show \[H^2(BV_n';\mathbb{Z})\cong\mathbb{Z}_n\oplus\mathbb{Z}_n.\] But this does not seem important in the rest of this paper. \end{remarkthm} \begin{proof} By \eqref{eq:V'SES} we have a short exact sequence \[1\rightarrow\mathbb{Z}_n\times\mathbb{Z}_n\rightarrow V_n'\rightarrow C_n\rightarrow1.\] Consider the Lyndon-Hochschild-Serre spectral sequence \[E_2^{s,t}\cong H^s(BC_n; H^t(B\mathbb{Z}_n\times B\mathbb{Z}_n;\mathbb{Z})_{\phi})\Rightarrow H^{s+t}(BV_n';\mathbb{Z}),\] where $H^t(B\mathbb{Z}_n\times B\mathbb{Z}_n;\mathbb{Z})_{\phi}$ means the local coefficient system induced by $\phi$. The only nontrivial groups $E_2^{s,t}$ with $s+t=2$ are $E_2^{2,0}$ and $E_2^{0,2}$. For obvious degree reasons there is no nontrivial differential into or out of either of them. Therefore we have $E_2^{2,0}=E_{\infty}^{2,0}$ and $E_2^{0,2}=E_{\infty}^{0,2}$, and a short exact sequence \begin{equation}\label{eq:H2SES} 0\rightarrow E_2^{2,0}\rightarrow H^2(BV_n';\mathbb{Z})\rightarrow E_2^{0,2}\rightarrow 0. \end{equation} The local coefficient system on the bottom row of the spectral sequence is the constant one, and we have \[E_2^{2,0}\cong H^2(BC_n; H^0(B\mathbb{Z}_n\times B\mathbb{Z}_n;\mathbb{Z}))=H^2(BC_n;\mathbb{Z})\cong\mathbb{Z}_n.\] For $E_2^{0,2}$, we have \[E_2^{0,2}=H^0(BC_n; H^2(B\mathbb{Z}_n\times B\mathbb{Z}_n;\mathbb{Z})_{\phi})=H^2(B\mathbb{Z}_n\times B\mathbb{Z}_n;\mathbb{Z})^{\phi}\cong (\mathbb{Z}_n\times\mathbb{Z}_n)^{\phi},\] i.e., the invariants of $\mathbb{Z}_n\times\mathbb{Z}_n$ under the action $\phi$. By Lemma \ref{lem:phi}, we have \[E_2^{0,2}\cong (\mathbb{Z}_n\times\mathbb{Z}_n)^{\phi}\cong\mathbb{Z}_n.\] By \eqref{eq:H2SES}, we conclude. 
\end{proof} Let $\alpha$ and $\beta$ be the images in $PU_n$ of $\alpha'$ and $\beta'$, respectively. By \eqref{eq:alpha'beta'} we have $\alpha\beta=\beta\alpha$. Let $V_n$ be the subgroup of $PU_n$ generated by $\alpha$ and $\beta$, and we have $V_n\cong\mathbb{Z}_n\times\mathbb{Z}_n$. \begin{remark} In the case $n=p$, the subgroup $V_p\subset PU_p$ plays an important role in Vistoli \cite{vistoli2007cohomology}, where the Chow ring and integral cohomology of $BPU_p$ are thoroughly studied. \end{remark} \begin{lemma}\label{lem:H3} $H^2(BV_n;\mathbb{Z})\cong\mathbb{Z}_n\oplus\mathbb{Z}_n$, $H^3(BV_n;\mathbb{Z})\cong\mathbb{Z}_n.$ \end{lemma} \begin{proof} This follows from the isomorphism $V_n\cong\mathbb{Z}_n\times\mathbb{Z}_n$ and the K{\"u}nneth formula. \end{proof} Recall that by \eqref{eq:H123} we have $H^3(BPU_n;\mathbb{Z})\cong\mathbb{Z}_n$. \begin{lemma}\label{lem:H3 restriction} The inclusion $V_n\subset PU_n$ induces an isomorphism \[H^3(BPU_n;\mathbb{Z})\xrightarrow{\cong} H^3(BV_n;\mathbb{Z}).\] \end{lemma} \begin{proof} By construction we have a short exact sequence of groups \[1\rightarrow\mathbb{Z}_n\rightarrow V'_n\rightarrow V_n\rightarrow 1,\] which induces a homotopy fiber sequence \[B\mathbb{Z}_n\rightarrow BV'_n\rightarrow BV_n.\] Delooping the first term, we obtain another homotopy fiber sequence \begin{equation*} BV'_n\rightarrow BV_n\rightarrow K(\mathbb{Z}_n,2). \end{equation*} On the other hand, recall the homotopy fiber sequence \eqref{eq:BPUn fib seq}: \begin{equation*} BU_n\rightarrow BPU_n\xrightarrow{\chi}K(\mathbb{Z},3). \end{equation*} We compare the two homotopy sequences above by the following commutative (up to homotopy) diagram: \begin{equation}\label{eq:compare} \begin{tikzcd} BV'_n\arrow[r]\arrow[d]&BV_n\arrow[r]\arrow[d]&K(\mathbb{Z}_n,2)\arrow[d,"\delta"]\\ BU_n\arrow[r]&BPU_n\arrow[r,"\chi"]&K(\mathbb{Z},3). 
\end{tikzcd} \end{equation} where the first two vertical arrows are induced by the inclusions of groups, and the third one $\delta$ is the Bockstein homomorphism. Let $^VE_*^{*,*}$ and $^UE_*^{*,*}$ be the Serre spectral sequences for the upper and lower homotopy fiber sequences in \eqref{eq:compare}, respectively: \begin{equation*} \begin{split} &^VE_2^{s,t}=H^s(K(\mathbb{Z}_n,2);H^t(BV'_n;\mathbb{Z}))\Rightarrow H^{s+t}(BV_n;\mathbb{Z}),\\ &^UE_2^{s,t}=H^s(K(\mathbb{Z},3);H^t(BU_n;\mathbb{Z}))\Rightarrow H^{s+t}(BPU_n;\mathbb{Z}). \end{split} \end{equation*} The only nontrivial group $^VE_2^{s,t}$ with $s+t=2$ is \[^VE_2^{0,2}\cong H^2(BV'_n;\mathbb{Z}).\] Therefore, by Lemma \ref{lem:H3}, we have \begin{equation}\label{eq:VEinfty} ^VE_{\infty}^{0,2}\cong H^2(BV_n;\mathbb{Z})\cong\mathbb{Z}_n\oplus\mathbb{Z}_n. \end{equation} By Lemma \ref{lem:V'}, we have a short exact sequence \begin{equation}\label{eq:VE2} 0\to\mathbb{Z}_n\to {^VE}_2^{0,2}\to\mathbb{Z}_n\to 0. \end{equation} Comparing \eqref{eq:VEinfty} and \eqref{eq:VE2}, we have \[^VE_{\infty}^{0,2}\cong{^VE}_2^{0,2}\cong\mathbb{Z}_n\oplus\mathbb{Z}_n\cong H^2(BV_n;\mathbb{Z}).\] Hence, there is no nontrivial differential landing in ${^VE}_2^{3,0}\cong\mathbb{Z}_n$. By Lemma \ref{lem:H3}, we have \[{^VE}_{\infty}^{3,0}={^VE}_2^{3,0}\cong H^3(BV_n;\mathbb{Z})\cong\mathbb{Z}_n.\] Then the diagram \eqref{eq:compare} induces an isomorphism \[H^3(BPU_n;\mathbb{Z})={^UE}_{\infty}^{3,0}\rightarrow {^VE}_{\infty}^{3,0}={^VE}_2^{3,0}=H^3(BV_n;\mathbb{Z}),\] and we conclude. \end{proof} Now we are ready to prove Theorem \ref{thm:main}: \begin{theorem}[Theorem \ref{thm:main}] Let $p$ be an odd prime. In dimensions $0\leq k \leq 2(p+1)$, the graded ring $\opn{BP}^*(BPU_n)\otimes_{BP^*}\mathbb{Z}_{(p)}$, or equivalently, the image of the Thom map $\opn{BP}^*(BPU_n)\to H^*(BPU_n;\mathbb{Z}_{(p)})$ concentrates in even dimensions. 
\end{theorem} \begin{proof} It follows from Theorem \ref{thm:2p+2} that the $p$-torsion subgroup of $H^k(BPU_n;\mathbb{Z})$ is $0$ for $0<k<2p+2$, $k\neq3$. On the other hand, all the non-torsion classes in $H^*(BPU_n;\mathbb{Z}_{(p)})$ are of even dimensions. Therefore, it suffices to show that the image of the Thom map \[T:\opn{BP}^3(BPU_n)\rightarrow H^3(BPU_n;\mathbb{Z}_{(p)})\] is trivial. Consider the commutative diagram \begin{equation}\label{eq:Thom3} \begin{tikzcd} \opn{BP}^3(BPU_n)\arrow[r]\arrow[d,"T"]&\opn{BP}^3(BV_n)\arrow[d,"T"]\\ H^3(BPU_n;\mathbb{Z}_{(p)})\arrow[r,"\cong"]&H^3(BV_n;\mathbb{Z}_{(p)}) \end{tikzcd} \end{equation} where the vertical arrows are the Thom maps, and the horizontal ones are the restrictions. By Lemma \ref{lem:H3 restriction}, the bottom arrow is an isomorphism. We complete the proof by contradiction. If the image of the vertical arrow to the left is nontrivial, then so is the image of the composition \begin{equation}\label{eq:composition} \opn{BP}^3(BPU_n)\xrightarrow{T}H^3(BPU_n;\mathbb{Z}_{(p)})\xrightarrow{\cong} H^3(BV_n;\mathbb{Z}_{(p)}). \end{equation} However, by Landweber \cite{landweber1970coherence}, $\opn{BP}^*(BG)$ concentrates in even dimensions if $G$ is abelian, from which we deduce $\opn{BP}^3(BV_n)=0$. Therefore, by \eqref{eq:Thom3}, the composition \eqref{eq:composition} factors through $0$, leading to a contradiction. \end{proof} \section{The classes $\eta_{p,k}$}\label{sec:eta} In this section we prove Theorem \ref{thm:eta p,k} by studying the Thom map for the Eilenberg-Mac Lane space $K(\mathbb{Z},3)$. Indeed, the Thom maps for Eilenberg-Mac Lane spaces are studied in Tamanoi \cite{tamanoi1997image}, and we make use of one of his main conclusions. In what follows, we denote by $\mathscr{A}^*$ the mod $p$ Steenrod algebra for an odd prime $p$, and we use the notations for the stable cohomology operations in Milnor \cite{milnor1958steenrod}. 
\begin{theorem}[Tamanoi, (I) of Theorem A, \cite{tamanoi1997image}]\label{thm:Tamanoi} Let $p$ be a prime, and let $n\geq 1$. The image of the Thom map \[T':\opn{BP}^*(K(\mathbb{Z},n+2))\rightarrow H^*(K(\mathbb{Z},n+2);\mathbb{Z}_{p})\] is an $\mathscr{A}^*$-invariant polynomial subalgebra with infinitely many generators: \[\operatorname{Im}T'=\mathbb{Z}_p[Q_{s_n}Q_{s_{n-1}}\cdots Q_{s_1}(\tau_{n+2})|0<s_1<\cdots<s_n],\] where $\tau_{n+2}\in H^{n+2}(K(\mathbb{Z},n+2);\mathbb{Z}_{p})$ is the fundamental class. \end{theorem} \begin{corollary}\label{cor:Tamonoi} The classes $y_{p,k}\in H^*(K(\mathbb{Z},3);\mathbb{Z}_{(p)})$ for $k\geq 0$ are in the image of the Thom map \[T:\opn{BP}^*(K(\mathbb{Z},3))\rightarrow H^*(K(\mathbb{Z},3);\mathbb{Z}_{(p)}).\] \end{corollary} \begin{proof} By definition, the Thom map $T'$ factors as \begin{equation*} \begin{tikzcd} \opn{BP}^k(K(\mathbb{Z},3))\arrow[dr,"T"]\arrow[rr,"T'"]& &H^k(K(\mathbb{Z},3);\mathbb{Z}_p)\\ &H^k(K(\mathbb{Z},3);\mathbb{Z}_{(p)})\arrow[ur,hook] \end{tikzcd} \end{equation*} where the hooked arrow is monic for $k>3$, by Corollary \ref{cor:K(Z,3)p-tor}. The desired result then follows from Theorem \ref{thm:Tamanoi}. \end{proof} We proceed to prove Theorem \ref{thm:eta p,k}: \begin{theorem}[Theorem \ref{thm:eta p,k}] Let $p$ be an odd prime. For $k\geq 0$ and $p|n$, there are classes \begin{equation*} \eta_{p,k}\in\opn{BP}^{2p^{k+1}+2}(BPU_n) \end{equation*} satisfying \[T(\eta_{p,k})=y_{p,k}\in H^{2p^{k+1}+2}(BPU_n;\mathbb{Z}_{(p)})\] where $T$ is the Thom map. 
\end{theorem} \begin{proof} It follows from Corollary \ref{cor:Tamonoi} and the commutative diagram \begin{equation*} \begin{tikzcd} \opn{BP}^*(K(\mathbb{Z},3))\arrow[r,"\chi^*"]\arrow[d,"T"]&\opn{BP}^*(BPU_n)\arrow[d,"T"]\\ H^*(K(\mathbb{Z},3);\mathbb{Z}_{(p)})\arrow[r,"\chi^*"]&H^*(BPU_n;\mathbb{Z}_{(p)}) \end{tikzcd} \end{equation*} that the classes $y_{p,k}\in H^*(BPU_n;\mathbb{Z}_{(p)})$ are in the image of \[T: \opn{BP}^*(BPU_n)\rightarrow H^*(BPU_n;\mathbb{Z}_{(p)}).\] Let $\eta_{p,k}\in\opn{BP}^*(BPU_n)$ satisfy $T(\eta_{p,k})=y_{p,k}$, and we conclude. \end{proof} \end{document}
\begin{document} \title{Theoretical framework for physical unclonable functions, including quantum readout} \author{Giulio Gianfelici} \email{[email protected]} \author{Hermann Kampermann} \author{Dagmar Bru\ss} \affiliation{ Institut f\"{u}r Theoretische Physik III, Heinrich-Heine-Universit\"{a}t D\"{u}sseldorf, D-40225 D\"{u}sseldorf, Germany } \date{\today} \begin{abstract} \noindent We propose a theoretical framework to quantitatively describe Physical Unclonable Functions (PUFs), including extensions to quantum protocols, so-called Quantum Readout PUFs (QR-PUFs). $\text{(QR-)}$ PUFs are physical systems with challenge-response behavior intended to be hard to clone or simulate. Their use has been proposed in several cryptographic protocols, with particular emphasis on authentication. Here, we provide theoretical assumptions and definitions behind the intuitive ideas of $\text{(QR-)}$ PUFs. This allows us to quantitatively characterize the security of such devices in cryptographic protocols. First, by generalizing previous ideas, we design a general authentication scheme, which is applicable to different physical implementations of both classical PUFs and $\text{(QR-)}$ PUFs. Then, we define the \emph{robustness} and the \emph{unclonability}, which allows us to derive security thresholds for $\text{(QR-)}$ PUF authentication and paves the way to develop further new authentication protocols. \end{abstract} \maketitle \section{Introduction} \label{sec:Intro} \emph{Authentication} is a major task of both classical and quantum cryptography. To achieve secure communication between two parties Alice and Bob, it is necessary to ensure that no intruder may participate in the communication, pretending to be one of the legitimate parties, e.g. by a so-called \emph{Man-in-the-middle attack} \cite{KM}. Authentication is ultimately classical, even in quantum protocols like QKD \cite{SBCDLP}. 
The main ingredient of an authentication protocol is a shared secret between the legitimate parties: during any authenticated communication Alice and Bob must prove the possession of this secret to confirm their identity. One has to distinguish two types of authentication \cite{KM}. \emph{Message authentication} is the assurance that a given entity was the original source of the received data. This type of authentication can be achieved by unconditionally secure protocols \cite{WC}. \emph{Entity authentication} is the assurance that a given entity can prove its identity and its involvement in the communication session to another entity. Entity authentication is particularly important if there is an asymmetry between the parties, e.g. when one party, namely Alice, is a trusted institution and the other one, namely Bob, is an untrusted user. The communication between Alice and Bob may happen on an authenticated channel owned by Alice, where Bob interacts through a remote terminal. In that case, a one-way entity authentication protocol will be used by Alice to authenticate Bob and to allow him to use her channel. Such protocols are usually based on a \emph{challenge-response authentication}, a type of authentication where Alice presents a \emph{challenge} and Bob provides a valid \emph{response}, based on the common secret, to be authenticated. For instance, Alice can ask for a password (challenge) and Bob will provide the correct one (response). In the case of asymmetric communication, it is useful to design authentication protocols based on something the parties possess. The trusted Alice can still be required to have secret knowledge since she is able to conceal information from an adversary, but Bob is required only to protect a given token from theft. A crucial condition of this approach is that the object has to be unique and an adversary, namely Eve, should not be able to copy it easily. 
A \emph{Physical Unclonable Function} (PUF) \cite{RP} is a physical system which can interact in a very complex way with an external signal (which can serve as a challenge) to give an unpredictable output (which can serve as a response). Its internal disorder is exploited to make it unique, hard to clone or simulate. PUFs are particularly suited for entity authentication because their internal structure plays the role of the shared secret. They can also be used in other protocols, like oblivious transfer \cite{UR10}, bit commitment \cite{RD} or classical key distribution \cite{BFSK}. There is a large variety of PUFs, such as the \emph{Optical PUF} \cite{PRTG}, the \emph{Arbiter PUF} \cite{LLGSDD}, the \emph{SRAM PUF} \cite{GKST}, the \emph{Coating PUF} \cite{TSSGVW}, the \emph{Magnetic PUF} \cite{IM}, the \emph{Ring Oscillator PUF} \cite{BNCF} and so on. A more detailed description of the whole family of PUFs is given in \cite{MBWRY} and in \cite{MV}. To ensure reliability and security it is required to post-process the PUFs' outputs \cite{DGSV, PMBHS}. The most common way to do it is by using the so-called \emph{fuzzy extractor} \cite{DORS}, a tool which combines error correction and privacy amplification. Error correction is necessary because the PUF's output can be different each time the PUF interacts with the same challenge, even when the authentication involves the real Bob with the original PUF. This can be due to an erroneous implementation of the challenge or to noise in the physical process. Privacy amplification is important since the outcomes of a PUF are generally non-uniform, i.e. there exist correlations between different responses that can be used by an adversary to undermine the PUF's security. Furthermore, the response, once it is mapped into a uniform key, can, in principle, be used in different protocols other than entity authentication. 
However, even when dealing with noise and non-uniformity, there are some issues with PUFs, because it has been shown that many of them can be actually cloned or simulated \cite{HBNS, RSSDDS, R-etal}, compromising their use in secure authentication schemes. To solve these problems, an extension of PUFs to quantum protocols was suggested, the so-called \emph{Quantum Readout PUFs} (QR-PUFs) \cite{BS}. Such PUFs encode challenges and responses in quantum states, thus they are expected to be more secure and reliable than classical PUFs, as they add a layer of complexity given by the unclonability of the involved quantum states \cite{WZ}. Moreover, if such quantum states are non-orthogonal, an adversary cannot perfectly distinguish them, and an attempt to do it would introduce disturbances, thus exposing the presence of an intruder to the legitimate parties. It is desirable to establish a theoretical framework in which one can perform a rigorous, quantitative, analysis of the security properties of $\text{(QR-)}$ PUFs. Several efforts have been made to formalize the intuitive ideas of PUF \cite{RSS, AMSSW, PK, PM, JD}, and they all capture some aspects of them, but a well-defined agreement about theoretical assumptions and definitions is still lacking. Moreover, the previous approaches are devoted to classical PUFs only. In this article we propose a common theoretical framework by quantitatively characterizing the $\text{(QR-)}$ PUF properties, particularly the \emph{robustness} \cite{AMSSW} against noise and the \emph{unclonability}. This is done by generalizing ideas from previous approaches (in particular from \cite{AMSSW}) to encompass both classical and QR-PUFs. Moreover, we introduce a generic scheme for authentication protocols with $\text{(QR-)}$ PUFs, for which security thresholds can be calculated once an experimental implementation is specified. 
This scheme provides an abstract formalization of existing protocols, together with new ideas such as the difference between a \textit{physical layer} and a \textit{mathematical layer} (see Sec. \ref{sec:Auth}) or the concept of the \textit{shifter} (see Secs. \ref{subsec:cenr} and \ref{subsec:qenr}). This framework is designed to be independent of the specific experimental implementation, such that a comparison of different types of PUFs and QR-PUFs becomes possible. In particular, all implementations use a fuzzy extractor for post-processing. We expect that this analysis supports both theoretical and experimental research on $\text{(QR-)}$ PUFs, by promoting the implementation of such devices in existing and new secure authentication schemes. The paper is organized as follows. In Sec. \ref{sec:Auth} we give an introduction on entity authentication protocols with $\text{(QR-)}$ PUFs. Sec. \ref{sec:not} contains the notation we will use in the paper, in Sec. \ref{sec:class} we describe a protocol with a generic classical PUF, and in Sec. \ref{sec:quant} we generalize this to a generic QR-PUF. The shared formalization of the theoretical properties of $\text{(QR-)}$ PUFs is stated in Sec. \ref{sec:prop} and the formalism is applied in some examples in Sec. \ref{sec:ex}. Some final remarks and the outlook of the work are given in the Conclusion. \section{Authentication protocols} \label{sec:Auth} In the following, we will always call Alice the party that has to authenticate Bob. Mutual authentication can be achieved by repeating the protocol swapping the roles of Alice and Bob. Moreover, we stated in the Introduction that the raw output of a $\text{(QR-)}$ PUF has to be post-processed to be used in secure cryptographic protocols. Therefore, for the sake of clarity, we call \emph{outcome} the raw output while we mean with \emph{response} only the post-processed uniform key. 
Entity authentication protocols with $\text{(QR-)}$ PUFs consist of two phases \cite{STO}, the \emph{enrollment stage} and the \emph{verification stage} (see fig. \ref{fig:enver}). \begin{figure} \caption{ A schematic description of the authentication scheme (colour online). \\ \textbf{Top:} \label{fig:enver} \end{figure} The enrollment stage is a part of the protocol which happens only once at the beginning, after the manufacture of the $\text{(QR-)}$ PUF and before any communications between Alice and Bob. An entity, or a group of entities, called the \emph{$\text{(QR-)}$ PUF Certifier} (which may be the $\text{(QR-)}$ PUF manufacturer, Alice itself, a third trusted party or a combination of all of them) studies the $\text{(QR-)}$ PUF's properties, evaluates the parameters needed for the implementation and the post-processing. In particular, the Certifier selects a certain number $N$ of challenges and records the corresponding responses. Challenges and responses form the so-called \emph{Challenge-Response pairs} (CRPs) and they are stored as a \emph{Challenge-Response Table} (CRT), together with additional information needed in the remaining part of the protocol. After the end of this stage, the Certifier gives the CRT to Alice (which then \emph{knows} the secret) and the $\text{(QR-)}$ PUF to Bob (which then \emph{has} the secret). The verification stage is the part of the protocol where communication between Alice and Bob is necessary. In this stage, Bob declares his identity to Alice with his $\text{(QR-)}$ PUF, remotely interacting with her through her terminal. To authenticate Bob, Alice sends randomly one challenge from the CRT to the $\text{(QR-)}$ PUF and collects the outcome, which is then post-processed. The calculated response is compared with the one in the CRT, i.e. the one obtained in the enrollment stage. If they match, Alice authenticates Bob. This stage can be repeated every time Alice needs to authenticate Bob. 
After every round, however, the used challenge-response pair has to be eliminated from the CRT and cannot be used again \footnote{It was argued \cite{BS} that in the QR-PUF case, challenge-response pairs could be used again, because an adversary is not able to gain full information about their state. Such claims need to be quantitatively proven, here we continue as if any reused CRP is insecure.}. Depending on the different types of $\text{(QR-)}$ PUFs, the challenges could be different types of physical quantities. For instance, optical PUFs are transparent materials filled with light scattering particles: a laser that interacts with one of them is turned into a unique speckle pattern. For a classical optical PUF, the challenge is the laser orientation and the outcome is the intensity of some points in the speckle pattern \cite{PRTG}. For a QR-PUF, the challenges and the outcomes are quantum states \cite{BS}. In both cases, however, challenges, outcomes and responses are stored in the CRT as digital binary strings, and the responses are used as authentication keys. There are two different layers involved in this protocol: a physical one, where the actual $\text{(QR-)}$ PUF acts as a physical evolution from input systems to output systems, and a mathematical one, where a binary challenge string (which should represent the information on how to implement the input system) is mapped into an outcome string which is post-processed into a response string. To deal with the two different layers, we denote as \emph{challenges} (\emph{outcomes}, \emph{responses}) the strings in the mathematical layer, and as \emph{challenge states} \footnote{This term clearly comes from quantum physics, where it is used to describe a vector in a Hilbert space. We will use the term \emph{classical state} in this article meaning a classical physical quantity, either scalar or vectorial.} (\emph{outcome states}, \emph{response states}) the implementations in the physical layer. 
This configuration is schematized in fig. \ref{fig:sch}. \begin{figure*} \caption{A scheme of the two layers, the mathematical one (where the cryptographic protocol takes place) and the physical one (where the $\text{(QR-)} \label{fig:sch} \end{figure*} \section{Notation} \label{sec:not} In the article we will use the following conventions: \begin{itemize} \item Digital strings, like the challenges and the responses, are denoted by lowercase bold letters, for instance, $\mathbf{x_i}$ and $\mathbf{r_j}$ for the i-th challenge and the j-th response, respectively; \item Sets of digital strings are denoted by the calligraphic uppercase letters, e.g. $\mathcal{X}$ and $\mathcal{R}$ for the set of challenges and responses, respectively; \item Random variables which take values from given sets are denoted by uppercase italic letters, e.g. $X$ and $R$ for challenges and responses, respectively; \item The physical classical states are denoted by the vector symbol (right arrow), for instance, $\vec{x}_i$ and $\vec{r}_j$ for the i-th challenge state and the j-th response state, respectively; \item The physical quantum states are denoted by the usual ket notation, for instance, $\Ket{x_i}$ and $\Ket{r_j}$ for the i-th challenge state and the j-th response state, respectively; \item Maps are denoted by uppercase letters with a circumflex accent, e.g. $\hat{P}$ or $\hat{\Pi}$. In particular, the Latin letters are used for maps between strings and the Greek ones for maps between states. \end{itemize} \section{Classical PUF} \label{sec:class} The realization of a challenge state may involve several different steps, each of them with different experimental complexity. Each step involves devices with a limited, even though possibly large, number of different configurations and such configurations can be used to parametrize the experimental system, resulting in our ability to formalize the challenges through discrete variables. 
A challenge is therefore defined as the binary string $\bf x_i$ of length $n$ representing the configuration which realizes a given challenge state $\vec{x}_i$. \subsection{Enrollment} \label{subsec:cenr} At the start of the enrollment stage, the PUF Certifier selects $N\leq 2^n$ different challenges ${\bf x_i}\in \mathcal{X} \subseteq \{0,1\}^n$, where $\mathcal{X}\subseteq\{0,1\}^n$ is the set of all chosen challenges and $|\mathcal{X}|=N$. In fact, if a challenge consists of $n$ bits, the total possible number of challenges is $2^n$. However, in practice, certain challenges could represent states which are impossible or hard to implement or they do not lead to a set of distinguishable responses. For security purposes, the set of challenges $\mathcal{X}$ has to be \textit{uniform}, i.e. $\hat{S}(X)=\log_2|\mathcal{X}|$, where $X$ is the random variable defined on the set $\mathcal{X}$ and $\hat{S}(X)$ is the Shannon entropy of $X$ (the uniform distribution is the unique distribution attaining this maximal entropy). An adversary should not be able to characterize the set of challenges by studying some of them. The Certifier is free to discard some challenges from $\mathcal{X}$ if he finds correlations in them. This affects the number $N$ of challenges and has to be quantified for given experimental implementations. Each $\bf x_i\in\mathcal{X}$ represents a challenge state $\vec{x}_i$ which can be experimentally realized and sent to the PUF, that acts as a deterministic function $\hat{\Pi}$. Due to its complex structure, any attempt to give a full description of it should be unfeasible, even for the Certifier itself. For a given challenge state $\vec{x}_i$, $\hat{\Pi}(\vec{x}_i)= \vec{y}_i$, where $\vec{y}_i$ is denoted as \emph{outcome state}. The Certifier needs to map the outcome state into an outcome string, taking into account both the distribution of the outcome states and any error which may have occurred due to noise or wrong implementation of the experimental system. To do this, we introduce the notion of a \emph{shifter}. 
For each outcome state $\vec{y}_i$, let $\hat{\Omega}_i$ be a state-dependent operation, which maps $\vec{y}_i$ into a \textit{reference state}, denoted by $\vec{0}$, equal for all outcome states. For $N$ outcome states $\vec{y}_i$, we obtain a set of $N$ shifters $\hat{\Omega}_i$. The importance of using the shifters will be more clear when we discuss QR-PUFs. The shifters simplify the error verification process, as each expected outcome is identical. Some devices ascribable to shifters have been used in some PUF implementations: consider, for instance, the optical PUF \cite{PRTG}, where a laser beam (challenge state) is transformed into a complex speckle pattern (outcome state). In this scenario, it has been proposed \cite{GHMSP} to use spatial light modulators to transform every speckle pattern into a plane wave, which then is focused into a single point (the reference state). Only if the pattern is the expected one this happens, otherwise, the outcome state is mapped into another speckle pattern. Shifters can be designed also for other PUFs, depending on which physical quantities are implied in the outcome states. If the outcome state is already a binary value (like in the \emph{SRAM PUF} \cite{GKST}) the reference state can be the bit $0$ and the shifters can be realized by a gate implementing either the identity or a bit flip operation, depending on the expected outcome state. Whenever an outcome is determined by the frequency of a signal (like in a \emph{ring oscillator PUF} \cite{BNCF}), the shifters can be passband filters. The Certifier can implement the corresponding shifter for every outcome state, since he can characterize $\hat{\Pi}(\vec{x}_i)$, possibly repeating the PUF evaluation for the same challenge state $\vec{x}_i$, to find a $\hat{\Omega}_i$ such that $\hat{\Omega}_i\big(\hat{\Pi}(\vec{x}_i)\big)=\vec{0}$. We define $\vec{o}_i:=\hat{\Omega}_i\big(\hat{\Pi}(\vec{x}_i)\big)$. 
While in the enrollment stage, or in a noiseless verification stage, $\vec{o}_i=\vec{0}$ by definition, in reality $\vec{o}_i$ will contain errors. This error is mapped into the Hamming weight, i.e. the number of bits that are different from $0$, of a classical string $\mathbf{o_i}$, i.e. $\mathbf{o_i}=\mathbf{0}_{l_o}=00\dots0$ if and only if $\vec{o}_i=\vec{0}$. The string has a length $ l_o$, dependent on the experimental implementation of the shifter. In the aforementioned example of an optical PUF, the plane wave is focused onto an analyzer plane with a pinhole. If $\vec{o}_i=\vec{0}$ the light passes through this pinhole, and a detector will click. Therefore the intensity of the light on the analyzer plane outside the pinhole can be used to find $\mathbf{o_i}$, and the resolution of the analyzer plane determines the length $l_o$. The shifters convey information about the distribution of the outcome states (as they are designed on them) and therefore indirectly about the PUF. We can represent this information in terms of binary strings in the mathematical layer, just as we did for challenge states. The shifters are implemented by an experimental device (or a collection of them) with a limited number of configurations, each one of them implementing a different $\hat{\Omega}_i$. Parametrizing such configurations, we map each shifter $\hat{\Omega}_i$ in a string ${\bf w_i}\in\mathcal{W}\subseteq\{0,1\}^{l_w}$. This string is exact, because it represents only the correct implementation of the shifter, without taking into account any noise. The length $l_w$ depends on the entropy of the shifters and, consequently, on the outcome states (for some implementations, methods to analyze such an entropy have been derived \cite{TSSAO, RSGD}). The entropy of $\mathcal{W}$ has to be studied also to verify the presence of non-uniformity, i.e. correlations between different outcomes or between challenges and corresponding outcomes. 
This entropy affects the \emph{unclonability} of the PUF (see Sec. \ref{sec:prop}). The two strings ${\bf o_i}$ and ${\bf w_i}$ convey two different aspects of the outcome state. In fact, ${\bf o_i}$ gives information about the error only, without distinguishing different outcomes. Instead, ${\bf w_i}$ gives information about the distribution of the outcome states, but not about errors (even a single bit flip of ${\bf w_i}$ changes it into ${\bf w_{j\neq i}}$). We combine ${\bf o_i}$ and ${\bf w_i}$ by defining as \emph{outcome} a string $\mathbf{y_i}$ of length $l= l_w+l_o$, such that \begin{equation} \mathbf{y_i}=\bf w_i\,\|\,o_i\,, \end{equation} where $\|$ is the concatenation of strings. We designate $\mathcal{Y}\subseteq\{0,1\}^l$ as the set of all outcomes, including all possible noisy versions. Explicitly, \begin{equation} \label{setY} \mathcal{Y}=\left\{\mathbf{y_i}=\mathbf{w_i}\,\|\,\mathbf{o_i},\, \mathbf{w_i}\in\mathcal{W},\, \mathbf{o_i}\in\{0,1\}^{l_o} \right\}\, , \end{equation} and $|\mathcal{Y}|=2^{l_o}\,N$ (see fig. \ref{fig:setY} for a graphic representation of the set $\mathcal{Y}$). Moreover we define a function $\hat{P}:\mathcal{X}\rightarrow\mathcal{Y}$, associating each challenge with the corresponding outcome, i.e. $\hat{P}(\bf x_i)=y_i$. \begin{figure} \caption{Graphic representation of the set $\mathcal{Y} \label{fig:setY} \end{figure} The outcome string, being noisy and not uniformly distributed, cannot be used directly as a response. The most common way to post-process it is through a \emph{fuzzy extractor} \cite{DORS}, which is a combined error correction and privacy amplification scheme: \begin{definition} Let $\{0,1\}^\star$ be the \textit{star closure} of $\{0,1\}$, i.e. the set of strings of arbitrary length: \begin{equation} \{0,1\}^\star=\bigcup_{i \ge 0 }\{0,1\}^i\, , \end{equation} where $\{0,1\}^0=\emptyset$ is the empty set. Let $\hat{H}({\bf y_i,y'_i})$ be the \textit{Hamming distance} between $\bf y_i$ and $\bf y'_i$, i.e. 
the Hamming weight of $\bf y_i+y'_i$ and $ s:= -\log \left(\max_k p_k\right)$ be the \emph{min-entropy} of a probability distribution $p=\Set{p_k}$. Furthermore, given two probability distributions $p_A$, $p_B$, associated to discrete random variables $A,B$ with the same domain $\mathcal{C}$, let $\hat{D}_S(p_A, p_B)$ be the \emph{statistical distance} between $p_A$ and $p_B$, i.e. \begin{equation} \label{statdist} \hat{D}_S(p_A,p_B):=\frac{1}{2}\,\sum_{c\in\mathcal{C}}\,\left|Pr(A=c)-Pr(B=c)\right|\, . \end{equation} A $(\mathcal{Y},s,m,t,\epsilon)$-\emph{fuzzy extractor} is a pair of random functions, the \emph{generation function} $\hat{G}$ and the \emph{reproduction function} $\hat{R}$, with the following properties: \begin{itemize} \item $\hat{G}:\mathcal{Y}\rightarrow\{0,1\}^m\times\{0,1\}^\star$ on input $\bf y_i\in\mathcal{Y}$ outputs an extracted string ${\bf r_i}\in\mathcal{R}\subseteq\{0,1\}^m$ and a \emph{helper data} ${\bf h_i}\in\mathcal{H}\subseteq\{0,1\}^\star$. While $\bf r_i$ has to be kept secret, $\bf h_i$ can be made public (it can even be physically attached to the PUF); \item $\hat{R}:\mathcal{Y}\times\mathcal{H}\rightarrow\{0,1\}^m$ takes an element $\bf y'_i\in\mathcal{Y}$ and a helper string $\bf h_i\in\mathcal{H}$ as inputs. The \emph{correctness property} of a fuzzy extractor guarantees that if $\hat{H}({\bf y_i,y'_i})\leq t$ and $({\bf r_i,h_i})=\hat{G}(\bf y_i)$, then $\hat{R}({\bf y'_i,h_i})={\bf r_i}$; \item The \emph{security property} guarantees that for any probability distribution on $\mathcal{Y}$ of min-entropy $s$, the string $\bf r_i$ is nearly uniform even for those who observe $\bf h_i$: i.e. if $({\bf r_i,h_i})=\hat{G}\bf(y_i)$, then \begin{equation} \hat{D}_S(p_{RH}, p_{UH})\leq\epsilon\, , \end{equation} where $p_{RH}$ ($p_{UH}$) is a joint probability distribution for $\bf r_i\in\mathcal{R}$ (for a uniformly distributed variable on $m$-bit binary strings) and $\bf h_i\in\mathcal{H}$. 
\end{itemize} \end{definition} The generation function of a fuzzy extractor is used, in the enrollment stage, to transform the outcome $\bf y_i$ into the uniformly distributed $\bf r_i$, that is the final \emph{response}. We will see later that, in the verification stage, the reproduction function is used on a noisy version of the outcome to generate the same response. The Certifier selects a fuzzy extractor by knowing $\mathcal{Y}$ and its min-entropy $s$, and choosing $t$ such that the fuzzy extractor uniquely maps a given outcome into a response, without collisions: due to noise or an erroneous experimental setup, a challenge state $\vec{x}_i$ can be implemented as a state which is closer to $\vec{x}_j$, for $i\neq j$. The error ${\bf o}_{\bf i}^{(j)}$ associated to $\hat{\Omega}_i\big(\hat{\Pi}(\vec{x}_j)\big)$ for $i\neq j$, must be uncorrectable: the Certifier has to choose a maximum allowed error $t<l_o$ smaller than the minimum Hamming weight of ${\bf o}_{\bf i}^{(j)}$, over all $i\neq j$ (see Fig. \ref{fig:overlap}). \begin{figure} \caption{Graphic representation of the choice of $t$ for $N=2$ challenge-response pairs. The circle represents both $\bf o_1$ and $\bf o_2$, independently from $\bf w_1$ and $\bf w_2$. The center of the circle represents the noiseless cases ${\bf o_1} \label{fig:overlap} \end{figure} There is a trade-off between $t$ and the entropy of the shifters: a high entropy, associated to a longer length $l_w$ of $\bf w_i$, is equivalent to similar states with a small error in case of a wrong implementation, and $t$ has to be chosen low. The Certifier may decide to delete challenge-response pairs from the Challenge-Response Table, in order to choose a higher $t$ and increase the resistance of the PUF against the noise. 
For practical purposes we define two functions $\hat{G}_R$ and $\hat{G}_H$ such that \begin{equation} \hat{G}(\cdot)=(\hat{G}_R(\cdot),\hat{G}_H(\cdot))\, , \end{equation} and therefore ${\bf r_i}=\hat{G}_R(\bf y_i)$ and ${\bf h_i}=\hat{G}_H\bf(y_i)$ for $\bf y_i\in\mathcal{Y}$. Moreover, we define the function $\hat{F}_E$ to be the function mapping each challenge to the respective response in the enrollment stage, i.e. \begin{equation} \label{eq:fe} \hat{F}_E(\cdot):=\hat{G}_R(\hat{P}(\cdot))\, , \end{equation} for $\bf x_i\in\mathcal{X}$ and therefore ${\bf r_i}=\hat{F}_E ({\bf x_i})$. Summarising, during the enrollment stage the Certifier creates a set of $N$ challenges $\mathcal{X}\subseteq\{0,1\}^n$ and a set of $N$ responses $\mathcal{R}\subseteq\{0,1\}^m$ \begin{equation} \mathcal{R}= \Set{{\bf r_i}\in\{0,1\}^m\,|\,{\bf r_i}=\hat{F}_E({\bf x_i});\quad {\bf x_i}\in\mathcal{X}}\, . \end{equation} They are stored into the Challenge-Response Table (CRT) together with \begin{itemize} \item the set of $N$ strings $\bf w_i$ representing how to set the shifter operator to get the correct outcome; \item the parameters of the fuzzy extractor; \item the (possibly public) set of helper data $\mathcal{H}\subseteq\{0,1\}^\star$, i.e. \begin{equation} \mathcal{H}= \Set{{\bf h_i}\in\{0,1\}^\star\,|\,{\bf h_i}=\hat{G}_H(\hat{P}({\bf x_i}));\quad {\bf x_i}\in\mathcal{X}} \, . \end{equation} \end{itemize} The Challenge-Response Table is given to Alice and the PUF to Bob, concluding the enrollment stage. \subsection{Verification} \label{subsec:cver} In the verification stage, Bob declares his identity and allows Alice to (remotely) interact with his PUF. Alice, equipped with the CRT, retraces the steps made by the Certifier in the enrollment stage. She picks up a randomly selected challenge ${\bf x_j}\in\mathcal{X}$ (for which she knows the response ${\bf r_j}=\hat{F}_E({\bf x_j})$) and prepares the challenge state $\vec{x}_j$. 
The PUF transforms $\vec{x}_j$ into the outcome state $\hat{\Pi}(\vec{x}_j)$. At this point, Alice tunes the shifter $\hat{\Omega}_j$, according to the CRT and evaluates $\hat{\Omega}_j\big(\hat{\Pi}(\vec{x}_j)\big)$. After the use of the PUF and the shifter, she may obtain a noisy version of $\vec{y}_j$, because of noise or a wrong preparation of the challenge state. Moreover, the noise could come from the PUF not being the original one, if an adversary Eve is impersonating Bob. We call this noisy version $\vec{y'}_j= \hat{\Pi}^{(e)}(\vec{x}_j)$. In that case $\hat{\Omega}_j(\vec{y'}_j)\neq\vec{0}$, which leads to $\mathbf{o'_j}\neq \mathbf{0}_{l_o}$ such that ${\bf y'_j}={\bf w_j\,\|\,o'_j}=\hat{P}^{(e)}({\bf x_j})$ is different from the ${\bf y_j}$ obtained by the Certifier in the enrollment stage. The outcome is then post-processed by the reproduction function of the fuzzy extractor that was used in the enrollment stage, so Alice collects $\mathbf{z_j}:= \hat{F}_V(\bf x_j)$, where the function $\hat{F}_V$ represents the map between the challenges and the corresponding responses in the verification stage, i.e. \begin{equation} \label{eq:fv} \hat{F}_V:=\hat{R}\big(\hat{P}^{(e)}(\mathbf{\cdot}), \hat{G}_H(\hat{P}(\mathbf{\cdot}))\big) \, , \end{equation} for $\bf x_j\in\mathcal{X}$. The claimed response $\bf z_j$ is compared with the one in the CRT: if $\bf z_j=r_j$, Bob is authenticated, otherwise the protocol fails. \section{QR-PUF} \label{sec:quant} The authentication scheme for Quantum Readout PUFs follows the structure of the classical scheme (see Sect. \ref{sec:class}) and still uses classical challenges, responses and fuzzy extractors in the mathematical layer. However, the implementation of the challenge states and outcome states in the physical layer is done via quantum states. 
At the moment, the only classical PUF which was extended to a QR-PUF is an optical PUF \cite{BS, GHMSP}, for which there are some studies on side-channel attacks \cite{SMP, BS13, YGLZ}. In this work, we study discrete qubit states, but our approach could also be generalized to continuous-variable $\text{(QR-)}$ PUFs \cite{ND, GN}. Let us assume to work with $\lambda$ qubits, so challenge states are elements of the Hilbert space $\mathbb{C}^{2^\lambda}$. We also assume that each qubit can be in a finite number of states. Like in the classical case, we can parametrize the configurations of the experimental system that implements the challenge states, to obtain a set $\mathcal{X}$ of classical challenges. Let us denote the length of such strings by $n$, to match the case of classical PUFs. Here the challenge states are quantum, therefore challenge states will be represented by $\ket{x_i}$. Our QR-PUF will be described in an idealized way, as a unitary operation acting on a pure state to produce another pure state. In reality, this process will introduce noise: in our framework, this will be taken into account in the transition from the outcome state to the outcome string. \subsection{Enrollment} \label{subsec:qenr} Since not all states are implementable, or they do not lead to distinguishable responses, the Certifier selects $N\leq 2^{n}$ challenges ${\bf x_i}\in\mathcal{X}\subseteq\{0,1\}^n$, where $\mathcal{X}$ is implemented by a set of nonorthogonal states $\Set{\Ket{x_1},\dots,\Ket{x_N}}\subset\mathbb{C}^{2^\lambda}$. The nonorthogonality is expected to be a crucial condition, since, as a consequence of the no-cloning theorem \cite{WZ}, there does not exist a measurement which perfectly distinguishes nonorthogonal states. We expect that this enhances the security of QR-PUFs compared to classical PUFs since an adversary could gain only a limited amount of information about the challenge and the outcome states. 
In this work we consider separable challenge states $\Ket{x_i}$, so $\ket{x_i}=\bigotimes_{k=1}^\lambda\ket{x_{ik}}$ and we can deal with single qubit states $\ket{x_{ik}}$. The procedure can be generalized to other challenge states. The qubit states can be written in terms of some complete orthonormal basis, which we denote as $\Set{\Ket{0},\,\Ket{1}}$: \begin{equation} \label{chalstat} \ket{x_{ik}}= \cos \theta_{ik} \, \ket{0} + e^{i \varphi_{ik}} \sin \theta_{ik} \,\ket{1}\, , \end{equation} where $\theta_{ik}\in[0,\pi]$ and $\varphi_{ik}\in[0,2\pi]$. The Certifier sends all states to the QR-PUF, collecting the outcome states. The QR-PUF is formalized as a $\lambda$-fold tensor product of single-qubit unitary gates $\hat{\Phi}=\bigotimes_{k=1}^\lambda \hat{\Phi}_k$. Despite its form being unknown, it can be parametrized by \cite{ZK}: \begin{equation} \label{unitmat} \hat{\Phi}_k(\omega_k,\psi_k,\chi_k)= \begin{pmatrix} e^{i \psi_k}\cos \omega_k & e^{i \chi_k}\sin \omega_k \\ -e^{-i \chi_k}\sin \omega_k & e^{-i \psi_k}\cos \omega_k \end{pmatrix}\, , \end{equation} with random parameters $\psi_k, \chi_k \in [0, 2\pi]$ and $ \omega_k \in \left[0, \frac{\pi}{2}\right]$. The outcome state is then $\ket{y_i}=\bigotimes_{k=1}^\lambda\ket{y_{ik}}$, where \begin{equation} \label{outstat} \begin{split} &\ket{y_{ik}}=\,\hat{\Phi}_k \ket{x_{ik}}\\ &=\begin{pmatrix} e^{i \psi_k}\cos \omega_k \cos \theta_{ik} + e^{i (\chi_k+\varphi_{ik}) }\sin \omega_k \sin \theta_{ik} \\ -e^{-i \chi_k}\sin \omega_k \cos \theta_{ik} + e^{i(\varphi_{ik}- \psi_k)}\cos \omega_k \sin \theta_{ik} \end{pmatrix}\, . \end{split} \end{equation} \begin{figure*} \caption{A scheme for the verification stage for QR-PUFs, as described in Sec. 
\ref{subsec:qver} \label{fig:qPUF} \end{figure*} Like in the classical case, the Certifier can design a state-dependent shifter, that performs a tensor product of unitary transformations, $\hat{\Omega}_{i}=\bigotimes_{k=1}^\lambda\hat{\Omega}_{ik}$, each one of them mapping a specific qubit state to the reference state $\ket{0}=(1,0)^T$. This operation is indeed unitary, because for $\ket{y_{ik}}= \cos\alpha_{ik}\Ket{0}+e^{i\beta_{ik}}\sin\alpha_{ik}\ket{1}$, it holds that $\hat{\Omega}_{ik} \ket{y_{ik}} =\Ket{0}$ for \begin{equation} \label{shifdef} \hat{\Omega}_{ik} = \begin{pmatrix} \cos\alpha_{ik} & e^{-i\beta_{ik}}\sin\alpha_{ik}\\ e^{i\beta_{ik}}\sin\alpha_{ik} & -\cos\alpha_{ik} \end{pmatrix}\, , \end{equation} which verifies $\hat{\Omega}_{ik}\,\hat{\Omega}^\dagger_{ik}=\hat{\Omega}_{ik}^\dagger\,\hat{\Omega}_{ik}=\mathbb{I}$, where $\mathbb{I}$ is the identity operator. The Certifier can implement $\hat{\Omega}_{i}$ for each $\hat{\Phi}\Ket{x_i}$ because he can repeat the experiment and characterize each outcome state by performing quantum state tomography or, as we work with pure states, compressed sensing \cite{GLFBE}. Instead of having to change the single-qubit measurement basis for each qubit and each challenge, by applying the suitable shifter it is now possible to use the basis $\set{\Ket{0}, \Ket{1}}$ for all qubits of all challenges. By definition of $\hat{\Omega}_{ik}$, if there is no error, we will measure for every qubit the state $\ket{0}$, and the results of the measurement form a string of length $\lambda$ made by all zeros, ${\bf o_i=0}=00\dots0$. If there is some error, which in the quantum case is introduced by either the environment or an adversary, the Hamming weight of $\bf o_i$ will give us an estimate of it. Like in the classical case, we can parametrize the experimental system that implements the shifters in terms of the (discrete) configuration it must assume to implement a specific $\hat{\Omega}_i$. 
Therefore, a given $\hat{\Omega}_i$ is represented by a classical string $\bf w_i\in\mathcal{W}$ of length $ l_w$. We again define as \emph{outcome} a classical string $\mathbf{y_i}$ of length $l=l_w+\lambda$, given by: \begin{equation} \mathbf{y_i}=\bf w_i\,\|\,o_i\, , \end{equation} where $\|$ is the concatenation of strings. We also define a set $\mathcal{Y}$ like in Eq. \eqref{setY} and a function $\hat{P}:\mathcal{X}\rightarrow\mathcal{Y}$ mapping every challenge to the corresponding outcome. At this point, like for classical PUFs, the Certifier fixes the correctable amount of noise $t<l_o$ and selects a fuzzy extractor $(\hat{G}, \hat{R})$, able to correct $t$ errors and to generate a uniformly distributed response, according to the distribution of the outcome states and the entropy of the set of outcomes. The non-orthogonality of the challenge states affects $t$: when a wrong challenge state is implemented, its \textit{fidelity} with the correct one is preserved by the QR-PUF and the shifter, since they are unitary maps, and influences the results of the measurement. The maximum correctable error $t$ has to be chosen lower than the error produced by wrong implementations, which becomes small for highly non-orthogonal challenges. The Certifier may decide to delete challenge-response pairs from the Challenge-Response Table, in order to choose a higher $t$ and increase the resistance of the QR-PUF against the noise. However, this reduces the overall non-orthogonality of the quantum states, thus improving Eve's ability to distinguish them. Such a trade-off will be discussed again in the following sections. The generation function of a fuzzy extractor generates a uniformly distributed response $\bf r_i \in\mathcal{R}$, together with a public helper data $\bf h_i\in\mathcal{H}$. 
Again we have: \begin{equation} \hat{G}(\cdot)=(\hat{G}_R(\cdot),\hat{G}_H(\cdot))\, , \end{equation} and \begin{equation} {\bf r_i}=\hat{G}_R({\bf y_i}), \quad\forall \,{\bf y_i}\in\mathcal{Y}\, . \end{equation} We define a function $\hat{F}_E(\cdot):=\hat{G}_R(\hat{P}(\cdot)):\mathcal{X}\rightarrow\mathcal{R}$ mapping each challenge to the corresponding response, representing the action of the QR-PUF in the enrollment stage. Like for classical PUFs, challenges, responses and other information are stored in the Challenge-Response Table, which is given to Alice, while the QR-PUF is given to Bob. \subsection{Verification} \label{subsec:qver} In the verification stage Bob allows Alice to (remotely) interact with his $\text{QR-}$PUF. She selects randomly a challenge $\bf x_j\in\mathcal{X}$ and prepares $\Ket{x_j}$. Using the QR-PUF with the challenge state $\Ket{x_j}$, Alice may obtain $\ket{y'_j}$, different from the expected $\ket{y_j}$, because of noise or an erroneous implementation of the system or the action of a malicious intruder. Then Alice applies $\hat{\Omega}_{j}$ and measures each qubit state in the basis $\Set{\Ket{0},\Ket{1}}$, obtaining $\bf o'_j$ and hence the outcome $\mathbf{y'_j}=\bf w_j\,\|\,o'_j$. While in the ideal noiseless case $\mathbf{o'_j}={\bf 0}_{l_o}$, here we may measure some state $\ket{1}$ for some qubits, therefore $\mathbf{y'_j}$ could be different from the $\bf y_j$ obtained by the Certifier in the enrollment stage. The outcome is then post-processed by the reproduction function of the fuzzy extractor that was used in the enrollment stage, so Alice collects $\mathbf{z_j}:= \hat{F}_V(\bf x_j)$, where the function $\hat{F}_V$ is defined like in the classical case, $\hat{F}_V(\mathbf{\cdot}):=\hat{R}\big(\hat{P}^{(e)}(\mathbf{\cdot}), \hat{G}_H(\hat{P}(\mathbf{\cdot}))\big)$. Authentication succeeds if $\hat{F}_E({\bf x_j})=\hat{F}_V({\bf x_j})$. The verification stage is schematized in Fig.~\ref{fig:qPUF}. 
\section{Properties and formalization} \label{sec:prop} In this section, we will analyze the properties of $\text{(QR-)}$ PUFs. As we have seen, both PUFs and QR-PUFs can be represented by a classical pair of functions $\hat{F}=(\hat{F}_E,\hat{F}_V)$ that describe the map between challenges and responses in the enrollment ($\hat{F}_E$, see Eq. \eqref{eq:fe}) or verification ($\hat{F}_V$, see Eq. \eqref{eq:fv}) stage. We will keep the same formalism for both PUFs and QR-PUFs, to allow our framework to compare them, but we will also specify the practical differences. We have seen that the noise can be a problem which can lead to false rejection in the protocols. Therefore it is important to characterize and quantify the amount of noise of a $\text{(QR-)}$ PUF, which is connected to the \emph{robustness} of a $\text{(QR-)}$ PUF. We take the definition of this concept from \cite{AMSSW}, adapting it to our framework and our formalism. \begin{definition} Let us consider a $\text{(QR-)}$ PUF $\hat{F}$ with a set of challenges $\mathcal{X}$, where $|\mathcal{X}|=N$. $\hat{F}$ is $\rho$-\emph{robust} with respect to $\mathcal{X}$ if $\rho\in[0,1]$ is the greatest number for which: \begin{equation} \frac{1}{N}\sum_{i=1}^{N}\, Pr\{\hat{F}_V({\bf x_i})=\hat{F}_E({\bf x_i})\} \geq \rho\, . \end{equation} $\rho$ is called the \emph{robustness} of the $\text{(QR-)}$ PUF with respect to $\mathcal{X}$. \end{definition} The robustness represents the average probability that the $\text{(QR-)}$ PUF in the verification stage outputs the correct response, such that the authentication succeeds. So it represents the $\text{(QR-)}$ PUF's ability to avoid false rejections and depends on many factors, e.g. on the average noise of the specific implementation and the parameters of the fuzzy extractor. Regarding the robustness, we do not expect a significant advantage of QR-PUFs compared to classical PUFs. 
Actually, there is the possibility to have a disadvantage, because of the fragility of quantum states and of the necessity of having a low error threshold $t$, as the noise can originate from a possible interaction of an adversary. Any implementation with QR-PUFs has to pay special care to this issue. Now we will discuss unclonability, which is the main parameter involved in attacks from an adversary Eve. This concept is also mildly inspired by \cite{AMSSW}, but with marked differences, mainly caused by the need of taking into account QR-PUFs. In the context of entity authentication with $\text{(QR-)}$ PUF, the purpose of an adversary Eve is to create a clone of a $\text{(QR-)}$ PUF, such that Alice can verify with it a challenge-response pair, falsely authenticating her as Bob. When we say \emph{clone}, we need to specify if we are talking of a physical or a mathematical one. A \emph{physical clone} is an experimental reproduction of the $\text{(QR-)}$ PUF. It will have the same physical properties as the original one, even in contexts not involved with the authentication protocol. The requirement of \emph{physical unclonability} means that a physical clone is technologically or financially unfeasible at the current state of technology. A mathematical clone, instead, is an object that \emph{simulates} the challenge-response behavior of a $\text{(QR-)}$ PUF. In this case, we cannot just state that a mathematical clone is unfeasible, because if there are some correlations between the outcome states, in principle they can be exploited to predict new challenge-response pairs. As mentioned in the introduction, several PUFs have been successfully mathematically cloned. We need to formalize this notion, in order to quantify it for different $\text{(QR-)}$ PUFs. We assume that Eve cannot directly access the internal structure of the $\text{(QR-)}$ PUF \cite{RSS, RBK}, but only interact with the challenge and the outcome states. 
An attack consists of two phases, both carried out during the verification stage of the protocol. We require that the enrollment stage is inaccessible to Eve since this part is performed in the Certifier's lab and it involves the study of the inner structure of the $\text{(QR-)}$ PUF. During the \emph{passive phase}, Eve observes a certain number of successful authentications with the real $\text{(QR-)}$ PUF, collecting as much information as she can. Then, during the \emph{active phase} she designs a clone and gives it to Alice, claiming to be Bob. The attack succeeds if she is authenticated as Bob. Each interaction affects one challenge-response pair. In this context, there is a crucial difference between PUFs and QR-PUFs. Classical states can be measured without introducing disturbances and can be copied perfectly. Therefore for $q\leq N$ interactions, we can assume that Eve would know exactly $q$ challenge and outcome states, possibly using this information to create a mathematical clone of the PUF. Instead, a quantum state cannot be copied. Moreover, a quantum measurement cannot perfectly distinguish the states (since they are non-orthogonal) and any measurement can in principle introduce errors, thus potentially making passive eavesdropping a detectable action. After $q$ interactions, Eve would know less than $q$ challenge and outcome states. This is the main reason for which QR-PUFs have been introduced: we expect that, concerning unclonability, they can be superior, even far superior, to classical PUFs \footnote{As we mentioned in Sec. \ref{subsec:qenr}, highly non-orthogonal challenge states require a fuzzy extractor with a low correctable error, undermining the robustness of the QR-PUF. Therefore this feature of QR-PUFs must be used carefully, balancing robustness and unclonability.}. \begin{definition} Let $\hat F$ be a $\text{(QR-)}$ PUF with a set of challenges $\mathcal{X}$, where $|\mathcal{X}|=N$. 
Let us suppose that an adversary Eve has $q$ interactions with a $\text{(QR-)}$ PUF in the passive stage of an attack, by observing an authentication protocol between Alice and Bob. With the information she can extract, she prepares a clone $\hat{E}_q$, defined as (see Eq. \eqref{eq:fv} for a comparison) \begin{equation} \label{eq:Eq} \hat{E}_q(\cdot):=\hat{R}\big(\hat{P}_E (\cdot), \hat{G}_H(\hat{P}(\cdot))\big)\:, \end{equation} and gives it to Alice, who selects a challenge $\bf x_i\in\mathcal{X}$ and evaluates $\hat{E}_q({\bf x_i})$. Then $\hat{E}_q$ is a $ (\gamma,q)$-\emph{(mathematical) clone} of $\hat{F}$ if $\gamma\in[0,1]$ is the greatest number for which \begin{equation} \frac{1}{N}\sum_{i=1}^{N} Pr(\hat{E}_q({\bf x_i})=\hat{F}_E({\bf x_i}))\geq \gamma\, . \end{equation} \end{definition} \begin{definition} A $\text{(QR-)}$ PUF $\hat{F}$ is called $(\gamma,q)$-\emph{(mathematical) clonable} if $\gamma\in[0, 1]$ is the smallest number for which it is not possible to generate a $(\bar{\gamma},q)$ clone of the $\text{(QR-)}$ PUF for any $\bar{\gamma}>\gamma$. Conversely, a $\text{(QR-)}$ PUF $\hat{F}$ is denoted as $(\delta,q)$-\emph{(mathematical) unclonable} if it is $(1-\delta,q)$-clonable. \end{definition} The unclonability of a $\text{(QR-)}$ PUF is therefore related to the average probability of false acceptance. We could expect to find a relation between the number of interactions $q$ and the unclonability: with a higher knowledge of CRP, it could be expected that Eve will be able to build a more and more sophisticated reproduction of the $\text{(QR-)}$ PUF. Increasing $q$ increases the know-how for making $(1-\delta,q)$-clones with a lower $\delta$. Therefore, fixing the maximum number of uses $q=q^* $ we fix the minimum $\delta=\delta^* $. So we ensure that for $q<q^* $, the $\text{(QR-)}$ PUF is at least $(\delta^* , q)$-unclonable. 
\begin{definition} A $(\rho,\delta^* ,q^* )$-\emph{secure} $\text{(QR-)}$ PUF $\hat{F}$ is $\rho$-robust, physically unclonable and at least $(\delta^* ,q)$-mathematically unclonable up to $q^* $ uses. \end{definition} When manufacturing $\text{(QR-)}$ PUFs several properties, that are typically implementation-dependent, are important \cite{MV}. We believe that the above theoretical definitions of robustness and unclonability are, from a theoretical point of view, the main and most general properties involved in a $\text{(QR-)}$ PUF. They are directly related to the probabilities of false rejection and false acceptance, hence describing the efficiency and the security of the entity authentication protocol. They also describe all $\text{(QR-)}$ PUFs independently from their implementation. \section{Examples} \label{sec:ex} Explicit calculation of the robustness and the unclonability for a particular $\text{(QR-)}$ PUFs strongly depends on its implementation. In this section, we illustrate the analysis for simplified examples, starting from idealized, extreme, cases. \begin{itemize} \item Consider a physically unclonable device implementing a true random number generator. An example of that is a QR-PUF based on the shot noise of an integrated circuit. This device is extremely difficult to simulate (Eve has to try a random guess), but also not robust at all (since it will not generate the same number in the enrollment and the verification). For this device, it holds \begin{equation} \begin{split} &\frac{1}{N}\sum_{i=1}^{N}\,Pr\{\hat{F}_V({\bf x_i})=\hat{F}_E({\bf x_i})\}= \frac{1}{N}\, ; \\ &\frac{1}{N}\sum_{i=1}^{N} Pr(\hat{E}_{q^*}({\bf x_i})=\hat{F}_E({\bf x_i}))= \frac{1}{N}\, . \end{split} \end{equation} Therefore it is a $(1/N,1-1/N,q^*)$ $\text{(QR-)}$ PUF, for any $q^*$. \item Consider a physically unclonable device that outputs a fixed signal ($\vec{0}$ for classical PUFs or $\Ket{0}$ for QR-PUFs) for any input. 
An example is an optical QR-PUF based on the polarization of light for which a fixed polarizer is used as a shifter: for all outcome states only light waves of a specific polarization would pass through. This device is perfectly robust, but also clonable. It holds \begin{equation} \begin{split} &\frac{1}{N}\sum_{i=1}^{N}\,Pr\{\hat{F}_V({\bf x_i})=\hat{F}_E({\bf x_i})\}= 1\, ; \\ &\frac{1}{N}\sum_{i=1}^{N} Pr(\hat{E}_{q^*}({\bf x_i})=\hat{F}_E({\bf x_i}))= 1\, . \end{split} \end{equation} Therefore the $\text{(QR-)}$ PUF is a $(1,0,q^*)$ $\text{(QR-)}$ PUF, for any $q^*$. \end{itemize} These examples are extreme cases, while all $\text{(QR-)}$ PUFs will be somewhere in between. We now focus on an example of QR-PUF, to point out some features of QR-PUFs and some open points. Let $\hat{F}$ be a QR-PUF implemented by a unitary transformation $\hat{\Phi}$, acting on $\lambda$ qubits, parametrized according to Eq. \eqref{unitmat}, with $\psi_k=\chi_k=0$, i.e. \begin{equation} \hat{\Phi}=\bigotimes_{k=1}^\lambda \hat{\Phi}_k=\bigotimes_{k=1}^\lambda \begin{pmatrix} \cos\omega_k & \sin\omega_k \\ -\sin\omega_k & \cos\omega_k \end{pmatrix}\, . \end{equation} Consider a scenario in which each challenge state is a separable state of $\lambda$ qubits, $\Ket{x_i}=\bigotimes_{k=1}^\lambda \Ket{x_{ik}}$, and each qubit is in one of four possible states: \begin{equation} \label{exqub} \ket{x_{ik}}=\ket{x_{ik}^{(\ell)}}:=\cos \left(\frac{\phi^{(\ell)}}{ 2}\right)\Ket{0}+\sin \left(\frac{\phi^{(\ell)}}{2}\right)\Ket{1}\, , \end{equation} where \begin{equation} \label{eq:phi} \begin{split} &\phi^{(1)}=\phi\, ,\qquad\qquad \phi^{(2)}=-\phi\, ,\\ &\phi^{(3)}=\phi-\pi\, ,\qquad\: \phi^{(4)}=\pi-\phi\, , \end{split} \end{equation} for a fixed angle $\phi$. Such challenge states can be parametrized by challenge strings of length $n=2\lambda$: for each qubit, the four possibilities are represented by two bits. For simplicity of notation, from now on, we drop the indices $i$ and $k$, e.g. 
we write $\big|x^{(\ell)}\big\rangle:=\big| x_{ik}^{(\ell)}\big\rangle$. The pairs $\{\Ket{x^{(1)}},\Ket{x^{(3)}}\}$ and $\{\Ket{x^{(2)}},\Ket{x^{(4)}}\}$ are orthogonal, but the overall set is non-orthogonal. We assume that the noise can be parametrized as a depolarizing channel, associated to a probability of error $\tilde{p}$ and equal for all qubits. The noisy challenge state reads: \begin{equation} \begin{split} \tilde{\rho}_x&:=(1-\tilde{p})\Ket{x}\Bra{x}+\tilde{p}\,\frac{\hat{I}}{2} \\ &=\left[(1-\tilde{p})\cos^2\left(\frac{\phi^{(\ell)}}{2}\right)+ \frac{\tilde{p}}{2}\right] \Ket{0}\Bra{0}\\ &+\left[ (1-\tilde{p}) \sin\left(\frac{\phi^{(\ell)}}{2}\right) \cos\left(\frac{\phi^{(\ell)}}{2}\right) \right] \left(\Ket{0}\Bra{1}+ \Ket{1}\Bra{0}\right) \\ &+\left[(1-\tilde{p}) \sin^2\left(\frac{\phi^{(\ell)}}{2}\right) +\frac{\tilde{p}}{2}\right]\Ket{1}\Bra{1}\, . \end{split} \end{equation} The shifter needs to map the noiseless outcome state to $\Ket{0}\dots\Ket{0}$. According to Eq.\eqref{shifdef} it can be chosen to be a $\lambda$-fold tensor product of single qubit gates \begin{equation} \begin{split} \hat{\Omega}= &\cos\left(\frac{\phi^{(\ell)}}{2}-\omega\right)\proj{0}{0}+ \sin\left(\frac{\phi^{(\ell)}}{2}-\omega\right)\proj{0}{1}\\ +&\sin\left(\frac{\phi^{(\ell)}}{2}-\omega\right)\proj{1}{0}-\cos\left(\frac{\phi^{(\ell)}}{2}-\omega\right)\proj{1}{1}\, , \end{split} \end{equation} and it follows: \begin{equation} \tilde{\rho}_o:= \hat{\Omega}\,\tilde{\rho}_{y}\,\hat{\Omega}^\dagger= \left(1-\frac{\tilde{p}}{2}\right)\proj{0}{0}+ \left(\frac{\tilde{p}}{2}\right)\proj{1}{1} \, . \end{equation} For a single qubit, therefore, the probability of measuring $\Ket{1}$ is $\tilde{p}/2$. For a challenge state of $\lambda$ qubits, the average Hamming weight of the string $\bf o_i$ is $\lambda\,\tilde{p}/2$. Any fuzzy extractor is defined in terms of the maximum number of errors $t$ it can correct. 
With our error model, we can choose to correct the average error of the system, i.e. $t=\lceil \lambda\,\tilde{p}/2 \rceil$, where $\lceil \lambda\,\tilde{p}/2 \rceil$ is the least integer greater than or equal to $\lambda\,\tilde{p}/2$. However, $t$ and the number $N$ of challenge-response pairs are related since the fuzzy extractor has to uniquely map a given outcome into a unique response, without collisions. Consider $\big| x^{(\ell)}\big\rangle$ and $\big| x^{(\ell')}\big\rangle$ ($\ell, \ell'\in \{1,2,3,4\}$ and $\ell\neq \ell'$) and estimate the error if $\big| x^{(\ell)}\big\rangle$ is implemented as the state $\big| x^{(\ell')}\big\rangle$, by evaluating $\hat{\Omega}_\ell \,\hat{\Phi}\big|x^{(\ell')}\big\rangle$. From \begin{equation} \begin{split} &\Ket{x^{(\ell)}}= \cos \left(\frac{\phi^{(\ell)}}{2}\right)\Ket{0}+\sin \left(\frac{\phi^{(\ell)}}{2}\right)\Ket{1}\, , \\ &\Ket{x^{(\ell')}}= \cos \left(\frac{\phi^{(\ell')}}{2}\right)\Ket{0}+\sin \left(\frac{\phi^{(\ell')}}{2}\right)\Ket{1}\, , \end{split} \end{equation} it follows \begin{equation} \begin{split} &\hat{\Omega}_\ell\, \hat{\Phi}\Ket{x^{(\ell')}} \\ &=\cos\left(\frac{\phi^{(\ell)}-\phi^{(\ell')}}{2}\right)\Ket{0}+ \sin\left(\frac{\phi^{(\ell)}-\phi^{(\ell')}}{2}\right)\Ket{1}\, . \end{split} \end{equation} Therefore, for this case, the probability of measuring $\ket{1}$ is $\sin^2\big[\big(\phi^{(\ell)}-\phi^{(\ell')}\big)/2\big]$. In table \ref{table1}, the explicit values for all the combinations of the 4 qubit states are listed. In case of wrong implementation, challenges with a large overlap lead to small error weights, while orthogonal challenges lead to big ones. Thus there is a trade-off between the robustness of the QR-PUF and the quantum advantage of using indistinguishable non-orthogonal states. 
\begin{table}[ht] \centering \begin{tabular}{ c| c c c c } & $\Ket{x^{(1)}}$ & $\Ket{x^{(2)}}$ & $\Ket{x^{(3)}}$ & $\Ket{x^{(4)}}$ \\ \hline $\Ket{x^{(1)}}$ & 0 & $\sin^2\phi$ & 1 & $\cos^2\phi$ \\ $\Ket{x^{(2)}}$ & $\sin^2\phi$ & 0 & $\cos^2\phi$ & 1 \\ $\Ket{x^{(3)}}$ & 1 & $\cos^2\phi$ & 0 & $\sin^2\phi$ \\ $\Ket{x^{(4)}}$ & $\cos^2\phi$ & 1 & $\sin^2\phi$ & 0 \\ \hline \end{tabular} \caption{Error induced by implementing the wrong challenge state: the entry in row $\ell$ and column $\ell'$ of the table is the probability of error when applying shifter $\ell$ to state $\ell'$. The parameter $\phi$ is defined in Eq. \eqref{eq:phi}.} \label{table1} \end{table} For any pair of possible challenge states $\Ket{x_i}=\bigotimes_{k=1}^\lambda \ket{x_{ik}}$ and $\Ket{x_j}=\bigotimes_{k=1}^\lambda \ket{x_{jk}}$, the average Hamming weight of the error string $\bf o_i$, obtained by the aforementioned process, is \begin{equation} \begin{split} \operatorname{err}_{i,j}&:=(n_{12}+n_{34})\sin^2\phi+ (n_{13}+n_{24}) \\ &+(n_{14}+n_{23})\cos^2\phi\, , \end{split} \end{equation} where $n_{ab}$ counts how many times $\ket{x_{ik}}=\Ket{x^{(\ell)}}$ when $\ket{x_{jk}}=\Ket{x^{(\ell')}}$ (or viceversa). If $\operatorname{err}_{i,j}<\lceil \lambda\,\tilde{p}/2 \rceil$, then the Certifier should discard one of the two challenges, either $\bf x_i$ or $\bf x_j$, thus reducing the number $N$ of possible challenge-response pairs. After this selection is repeated for all pairs of challenges, the Certifier studies the entropy of the set of shifters, determining the strings $\bf w_i$ and the outcomes $\mathbf{y_i}=\bf w_i\,\|\,o_i$. The \textit{Canetti's reusable fuzzy extractor} \cite{CFPRS} is able to correct up to $t=(l\ln l/m)$ bits, where $l$ is the length of the outcomes and $m$ the length of the responses. As $l=\lambda+l_w$ is fixed, $m$ has to be adapted to the noise level $\lceil \lambda\,\tilde{p}/2 \rceil$. 
The correctness property of this fuzzy extractor guarantees that an error smaller than $t$ is corrected with probability $1-\tilde{\varrho}$, where \begin{equation} \tilde{\varrho} = \left(1-\left(1-\frac{t}{l}\right)^m\right)^{\xi_1}+\xi_1\xi_2 \,, \end{equation} with $\xi_1$ and $\xi_2$ being computational parameters of the fuzzy extractor (in \cite{CFPRS}, to which we refer for a precise explanation, they are called $\ell$ and $\gamma$, respectively). Then the robustness of this QR-PUF is $1-\tilde{\varrho}$. Concerning the unclonability, one should relate the amount of information Eve obtains from the (possibly correlated) challenge-response pairs to her ability to create a mathematical clone of the QR-PUF. Unfortunately, there is no general method known to provide this relation. We expect that, for some QR-PUFs, quantum unitary gate discrimination methods \cite{CH} could be used, but this line of research goes beyond the purposes of our work. Here, we can show that QR-PUFs prevent Eve from gaining too much information about challenges and responses, thus strongly hindering her ability to learn the CRT. As the optimal global attack on the challenge states is unknown without knowledge of all challenge states, here we consider an attack that acts individually on qubits. In particular, we consider the case for which, on each qubit, Eve can apply a $1\rightarrow 2$ cloning operator, i.e. she can intercept each qubit of a challenge state during an authentication round to produce two (imperfect) copies, one of which is given back to the legitimate parties and the other is kept for herself. For such a set of states, the optimal cloning transformation, i.e. 
the transformation that preserves the highest possible fidelity between the copies and the original states, has been derived \cite{BM01} and for any challenge state $\Ket{x_i}$ and its optimal copy $\varrho^E_{i}$ it holds: \begin{equation} \begin{split} &F(\Ket{x_{i}}\Bra{x_{i}},\varrho^E_{i} ):=\prod_{k=1}^\lambda\Braket{x_{ik}|\varrho^E_{ik}|x_{ik}}\\ &=\left(\frac{1}{2}\,\left(1+\sqrt{\sin^4\phi+\cos^4\phi}\right)\right)^\lambda\,. \end{split} \end{equation} For fixed $\lambda$, the minimum value of the fidelity is reached for $\phi=\pi/4$, for which, considering a single qubit, $F\approx 0.85$. Already for $10$ qubits the fidelity drops to $F\approx 0.20$, and for $20$ qubits, $F\approx 0.04$. Thus, Eve is not able to successfully simulate the challenge-response behavior, as she cannot even reconstruct the challenge and outcome states. Moreover, as the fidelity is preserved by unitary matrices, this result holds also for the expected outcome state $\Ket{y_i}$ and the actual outcome state Alice obtains after challenging the QR-PUF with her (unwittingly altered by the cloning process) challenge state. The noise is too high to be corrected by the fuzzy extractor, thus aborting the authentication protocol and exposing the presence of an intruder. For classical PUFs, instead, Eve could perfectly read the challenge and outcome states, without being noticed. This provides an advantage of QR-PUFs compared to classical PUFs in terms of unclonability. However, we also noticed that a high non-orthogonality of the challenges can, in principle, undermine the robustness. The trade-off between the advantages and disadvantages of QR-PUFs (Table \ref{table2}) has to be studied to find secure applications of them. \pagebreak \begin{table}[htbp] \centering \begin{tabular}{|c|c|} \hline \multicolumn{2}{|c|}{QR-PUFs compared to PUFs} \\ \hline \textbf{ Advantages} & \textbf{ Disadvantages} \\ \hline & \\ \begin{tabular}[c]{@{}c@{}} An adversary cannot \\ copy or distinguish \\ non-orthogonal states. 
\\ \end{tabular} & \begin{tabular}[c]{@{}c@{}}Highly non-orthogonal \\ states reduce the \\ robustness. \\ \end{tabular} \\ & \\ \begin{tabular}[c]{@{}c@{}}Adversarial measurements\\ on the states introduce \\ detectable disturbances.\end{tabular} & \begin{tabular}[c]{@{}c@{}}Quantum states are \\ more fragile than \\ classical states.\end{tabular} \\ & \\ \hline \end{tabular} \caption{Advantages and disadvantages of QR-PUFs compared to classical PUFs.} \label{table2} \end{table} \section{Conclusion} In this article, we proposed a theoretical framework for the quantitative characterisation of both PUFs and QR-PUFs. After developing an authentication protocol common to both typologies, with the same error correction and privacy amplification scheme, we formalized the $\text{(QR-)}$ PUFs in terms of two main properties, the \emph{robustness} (connected to false rejection) and the \emph{unclonability} (connected to false acceptance). Finally, we studied some examples, motivating the possible advantages and disadvantages of QR-PUFs compared to classical PUFs. Our framework is useful to study and to compare different implementations of $\text{(QR-)}$ PUFs and to develop new authentication schemes. An important application would be to strictly prove the superiority of QR-PUFs over classical PUFs. The next step towards that goal would be the development of new methods to estimate the unclonability of (QR-) PUFs for different implementations. This could open an interesting line of theoretical and experimental research about $\text{(QR-)}$ PUFs. Furthermore, our framework can be employed to determine the level of security of using $\text{(QR-)}$ PUFs in other cryptographic protocols, like QKD, where a quantitatively secure $\text{(QR-)}$ PUF can be used for authentication and reduces the number of necessary preshared key bits. \emph{Note added:} During the finalisation of this work, we became aware of a preprint on a related topic \cite{ADDK}. 
\begin{thebibliography}{42} \makeatletter \providecommand \@ifxundefined [1]{ \@ifx{#1\undefined} } \providecommand \@ifnum [1]{ \ifnum #1\expandafter \@firstoftwo \else \expandafter \@secondoftwo \fi } \providecommand \@ifx [1]{ \ifx #1\expandafter \@firstoftwo \else \expandafter \@secondoftwo \fi } \providecommand \natexlab [1]{#1} \providecommand \enquote [1]{``#1''} \providecommand \bibnamefont [1]{#1} \providecommand \bibfnamefont [1]{#1} \providecommand \citenamefont [1]{#1} \providecommand \href@noop [0]{\@secondoftwo} \providecommand \href [0]{\begingroup \@sanitize@url \@href} \providecommand \@href[1]{\@@startlink{#1}\@@href} \providecommand \@@href[1]{\endgroup#1\@@endlink} \providecommand \@sanitize@url [0]{\catcode `\\12\catcode `\$12\catcode `\&12\catcode `\#12\catcode `\^12\catcode `\_12\catcode `\%12\relax} \providecommand \@@startlink[1]{} \providecommand \@@endlink[0]{} \providecommand \url [0]{\begingroup\@sanitize@url \@url } \providecommand \@url [1]{\endgroup\@href {#1}{\urlprefix }} \providecommand \urlprefix [0]{URL } \providecommand \Eprint [0]{\href } \providecommand \doibase [0]{https://doi.org/} \providecommand \selectlanguage [0]{\@gobble} \providecommand \bibinfo [0]{\@secondoftwo} \providecommand \bibfield [0]{\@secondoftwo} \providecommand \translation [1]{[#1]} \providecommand \BibitemOpen [0]{} \providecommand \bibitemStop [0]{} \providecommand \bibitemNoStop [0]{.\EOS\space} \providecommand \EOS [0]{\spacefactor3000\relax} \providecommand \BibitemShut [1]{\csname bibitem#1\endcsname} \let\auto@bib@innerbib\@empty \bibitem [{\citenamefont {Martin}(2012)}]{KM} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {K.~M.}\ \bibnamefont {Martin}},\ }\href@noop {} {\emph {\bibinfo {title} {Everyday Cryptography: Fundamental Principles and Applications}}}\ (\bibinfo {publisher} {OUP Oxford},\ \bibinfo {year} {2012})\BibitemShut {NoStop} \bibitem [{\citenamefont {Scarani}\ \emph {et~al.}(2009)\citenamefont 
{Scarani}, \citenamefont {Bechmann-Pasquinucci}, \citenamefont {Cerf}, \citenamefont {Du{\v{s}}ek}, \citenamefont {L{\"u}tkenhaus},\ and\ \citenamefont {Peev}}]{SBCDLP} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {V.}~\bibnamefont {Scarani}}, \bibinfo {author} {\bibfnamefont {H.}~\bibnamefont {Bechmann-Pasquinucci}}, \bibinfo {author} {\bibfnamefont {N.~J.}\ \bibnamefont {Cerf}}, \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Du{\v{s}}ek}}, \bibinfo {author} {\bibfnamefont {N.}~\bibnamefont {L{\"u}tkenhaus}},\ and\ \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Peev}},\ }\bibfield {title} {\bibinfo {title} {The security of practical quantum key distribution},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {Rev. Mod. Phys.}\ }\textbf {\bibinfo {volume} {81}},\ \bibinfo {pages} {1301} (\bibinfo {year} {2009})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Wegman}\ and\ \citenamefont {Carter}(1981)}]{WC} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {M.~N.}\ \bibnamefont {Wegman}}\ and\ \bibinfo {author} {\bibfnamefont {J.~L.}\ \bibnamefont {Carter}},\ }\bibfield {title} {\bibinfo {title} {New hash functions and their use in authentication and set equality},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {J. Comput. Syst. Sci.}\ }\textbf {\bibinfo {volume} {22}},\ \bibinfo {pages} {265} (\bibinfo {year} {1981})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Pappu}(2001)}]{RP} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {R.}~\bibnamefont {Pappu}},\ }\emph {\bibinfo {title} {Physical one-way functions}},\ \href@noop {} {Ph.D. 
thesis},\ \bibinfo {school} {Massachusetts Institute of Technology, USA} (\bibinfo {year} {2001})\BibitemShut {NoStop} \bibitem [{\citenamefont {R{\"u}hrmair}(2010)}]{UR10} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {U.}~\bibnamefont {R{\"u}hrmair}},\ }\bibfield {title} {\bibinfo {title} {Oblivious transfer based on physical unclonable functions},\ }in\ \href@noop {} {\emph {\bibinfo {booktitle} {International Conference on Trust and Trustworthy Computing}}}\ (\bibinfo {organization} {Springer},\ \bibinfo {year} {2010})\ pp.\ \bibinfo {pages} {430--440}\BibitemShut {NoStop} \bibitem [{\citenamefont {R{\"u}hrmair}\ and\ \citenamefont {van Dijk}(2013)}]{RD} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {U.}~\bibnamefont {R{\"u}hrmair}}\ and\ \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {van Dijk}},\ }\bibfield {title} {\bibinfo {title} {On the practical use of physical unclonable functions in oblivious transfer and bit commitment protocols},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {J. Cryptogr. 
Eng.}\ }\textbf {\bibinfo {volume} {3}},\ \bibinfo {pages} {17} (\bibinfo {year} {2013})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Brzuska}\ \emph {et~al.}(2011)\citenamefont {Brzuska}, \citenamefont {Fischlin}, \citenamefont {Schr{\"o}der},\ and\ \citenamefont {Katzenbeisser}}]{BFSK} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {C.}~\bibnamefont {Brzuska}}, \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Fischlin}}, \bibinfo {author} {\bibfnamefont {H.}~\bibnamefont {Schr{\"o}der}},\ and\ \bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {Katzenbeisser}},\ }\bibfield {title} {\bibinfo {title} {Physically uncloneable functions in the universal composition framework},\ }in\ \href@noop {} {\emph {\bibinfo {booktitle} {Annual Cryptology Conference}}}\ (\bibinfo {organization} {Springer},\ \bibinfo {year} {2011})\ pp.\ \bibinfo {pages} {51--70}\BibitemShut {NoStop} \bibitem [{\citenamefont {Pappu}\ \emph {et~al.}(2002)\citenamefont {Pappu}, \citenamefont {Recht}, \citenamefont {Taylor},\ and\ \citenamefont {Gershenfeld}}]{PRTG} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {R.}~\bibnamefont {Pappu}}, \bibinfo {author} {\bibfnamefont {B.}~\bibnamefont {Recht}}, \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Taylor}},\ and\ \bibinfo {author} {\bibfnamefont {N.}~\bibnamefont {Gershenfeld}},\ }\bibfield {title} {\bibinfo {title} {Physical one-way functions},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {Science}\ }\textbf {\bibinfo {volume} {297}},\ \bibinfo {pages} {2026} (\bibinfo {year} {2002})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Lee}\ \emph {et~al.}(2004)\citenamefont {Lee}, \citenamefont {Lim}, \citenamefont {Gassend}, \citenamefont {Suh}, \citenamefont {Van~Dijk},\ and\ \citenamefont {Devadas}}]{LLGSDD} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {J.~W.}\ \bibnamefont {Lee}}, \bibinfo {author} {\bibfnamefont {D.}~\bibnamefont {Lim}}, \bibinfo {author} {\bibfnamefont 
{B.}~\bibnamefont {Gassend}}, \bibinfo {author} {\bibfnamefont {G.~E.}\ \bibnamefont {Suh}}, \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Van~Dijk}},\ and\ \bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {Devadas}},\ }\bibfield {title} {\bibinfo {title} {A technique to build a secret key in integrated circuits for identification and authentication applications},\ }in\ \href@noop {} {\emph {\bibinfo {booktitle} {2004 Symposium on VLSI Circuits. Digest of Technical Papers (IEEE Cat. No. 04CH37525)}}}\ (\bibinfo {organization} {IEEE},\ \bibinfo {year} {2004})\ pp.\ \bibinfo {pages} {176--179}\BibitemShut {NoStop} \bibitem [{\citenamefont {Guajardo}\ \emph {et~al.}(2007)\citenamefont {Guajardo}, \citenamefont {Kumar}, \citenamefont {Schrijen},\ and\ \citenamefont {Tuyls}}]{GKST} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Guajardo}}, \bibinfo {author} {\bibfnamefont {S.~S.}\ \bibnamefont {Kumar}}, \bibinfo {author} {\bibfnamefont {G.-J.}\ \bibnamefont {Schrijen}},\ and\ \bibinfo {author} {\bibfnamefont {P.}~\bibnamefont {Tuyls}},\ }\bibfield {title} {\bibinfo {title} {$\text{FPGA}$ intrinsic $\text{PUFs}$ and their use for $\text{IP}$ protection},\ }in\ \href@noop {} {\emph {\bibinfo {booktitle} {International Workshop on Cryptographic Hardware and Embedded Systems}}}\ (\bibinfo {organization} {Springer},\ \bibinfo {year} {2007})\ pp.\ \bibinfo {pages} {63--80}\BibitemShut {NoStop} \bibitem [{\citenamefont {Tuyls}\ \emph {et~al.}(2006)\citenamefont {Tuyls}, \citenamefont {Schrijen}, \citenamefont {{\v{S}}kori{\'c}}, \citenamefont {Van~Geloven}, \citenamefont {Verhaegh},\ and\ \citenamefont {Wolters}}]{TSSGVW} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {P.}~\bibnamefont {Tuyls}}, \bibinfo {author} {\bibfnamefont {G.-J.}\ \bibnamefont {Schrijen}}, \bibinfo {author} {\bibfnamefont {B.}~\bibnamefont {{\v{S}}kori{\'c}}}, \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Van~Geloven}}, \bibinfo 
{author} {\bibfnamefont {N.}~\bibnamefont {Verhaegh}},\ and\ \bibinfo {author} {\bibfnamefont {R.}~\bibnamefont {Wolters}},\ }\bibfield {title} {\bibinfo {title} {Read-proof hardware from protective coatings},\ }in\ \href@noop {} {\emph {\bibinfo {booktitle} {International Workshop on Cryptographic Hardware and Embedded Systems}}}\ (\bibinfo {organization} {Springer},\ \bibinfo {year} {2006})\ pp.\ \bibinfo {pages} {369--383}\BibitemShut {NoStop} \bibitem [{\citenamefont {Indeck}\ and\ \citenamefont {Muller}(1994)}]{IM} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {R.~S.}\ \bibnamefont {Indeck}}\ and\ \bibinfo {author} {\bibfnamefont {M.~W.}\ \bibnamefont {Muller}},\ }\href@noop {} {\bibinfo {title} {Method and apparatus for fingerprinting magnetic media}} (\bibinfo {year} {1994}),\ \bibinfo {note} {$\text{US}$ Patent 5, 365, 586}\BibitemShut {NoStop} \bibitem [{\citenamefont {Bossuet}\ \emph {et~al.}(2013)\citenamefont {Bossuet}, \citenamefont {Ngo}, \citenamefont {Cherif},\ and\ \citenamefont {Fischer}}]{BNCF} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {L.}~\bibnamefont {Bossuet}}, \bibinfo {author} {\bibfnamefont {X.~T.}\ \bibnamefont {Ngo}}, \bibinfo {author} {\bibfnamefont {Z.}~\bibnamefont {Cherif}},\ and\ \bibinfo {author} {\bibfnamefont {V.}~\bibnamefont {Fischer}},\ }\bibfield {title} {\bibinfo {title} {A $\text{PUF}$ based on a transient effect ring oscillator and insensitive to locking phenomenon},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {IEEE Trans. Emerg. 
Topics Comput.}\ }\textbf {\bibinfo {volume} {2}},\ \bibinfo {pages} {30} (\bibinfo {year} {2013})}\BibitemShut {NoStop} \bibitem [{\citenamefont {McGrath}\ \emph {et~al.}(2019)\citenamefont {McGrath}, \citenamefont {Bagci}, \citenamefont {Wang}, \citenamefont {Roedig},\ and\ \citenamefont {Young}}]{MBWRY} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {T.}~\bibnamefont {McGrath}}, \bibinfo {author} {\bibfnamefont {I.~E.}\ \bibnamefont {Bagci}}, \bibinfo {author} {\bibfnamefont {Z.~M.}\ \bibnamefont {Wang}}, \bibinfo {author} {\bibfnamefont {U.}~\bibnamefont {Roedig}},\ and\ \bibinfo {author} {\bibfnamefont {R.~J.}\ \bibnamefont {Young}},\ }\bibfield {title} {\bibinfo {title} {A $\text{PUF}$ taxonomy},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {Appl. Phys. Rev.}\ }\textbf {\bibinfo {volume} {6}},\ \bibinfo {pages} {011303} (\bibinfo {year} {2019})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Maes}\ and\ \citenamefont {Verbauwhede}(2010)}]{MV} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {R.}~\bibnamefont {Maes}}\ and\ \bibinfo {author} {\bibfnamefont {I.}~\bibnamefont {Verbauwhede}},\ }\bibfield {title} {\bibinfo {title} {Physically unclonable functions: A study on the state of the art and future research directions},\ }in\ \href@noop {} {\emph {\bibinfo {booktitle} {Towards Hardware-Intrinsic Security}}}\ (\bibinfo {publisher} {Springer},\ \bibinfo {year} {2010})\ pp.\ \bibinfo {pages} {3--37}\BibitemShut {NoStop} \bibitem [{\citenamefont {Delvaux}\ \emph {et~al.}(2014)\citenamefont {Delvaux}, \citenamefont {Gu}, \citenamefont {Schellekens},\ and\ \citenamefont {Verbauwhede}}]{DGSV} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Delvaux}}, \bibinfo {author} {\bibfnamefont {D.}~\bibnamefont {Gu}}, \bibinfo {author} {\bibfnamefont {D.}~\bibnamefont {Schellekens}},\ and\ \bibinfo {author} {\bibfnamefont {I.}~\bibnamefont {Verbauwhede}},\ }\bibfield {title} {\bibinfo 
{title} {Helper data algorithms for $\text{PUF}$-based key generation: Overview and analysis},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {IEEE Trans. Comput.-Aided Design Integr. Circuits Syst.}\ }\textbf {\bibinfo {volume} {34}},\ \bibinfo {pages} {889} (\bibinfo {year} {2014})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Puchinger}\ \emph {et~al.}(2015)\citenamefont {Puchinger}, \citenamefont {M{\"u}elich}, \citenamefont {Bossert}, \citenamefont {Hiller},\ and\ \citenamefont {Sigl}}]{PMBHS} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {Puchinger}}, \bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {M{\"u}elich}}, \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Bossert}}, \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Hiller}},\ and\ \bibinfo {author} {\bibfnamefont {G.}~\bibnamefont {Sigl}},\ }\bibfield {title} {\bibinfo {title} {On error correction for physical unclonable functions},\ }in\ \href@noop {} {\emph {\bibinfo {booktitle} {SCC 2015; 10th International ITG Conference on Systems, Communications and Coding}}}\ (\bibinfo {organization} {VDE},\ \bibinfo {year} {2015})\ pp.\ \bibinfo {pages} {1--6}\BibitemShut {NoStop} \bibitem [{\citenamefont {Dodis}\ \emph {et~al.}(2008)\citenamefont {Dodis}, \citenamefont {Ostrovsky}, \citenamefont {Reyzin},\ and\ \citenamefont {Smith}}]{DORS} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {Y.}~\bibnamefont {Dodis}}, \bibinfo {author} {\bibfnamefont {R.}~\bibnamefont {Ostrovsky}}, \bibinfo {author} {\bibfnamefont {L.}~\bibnamefont {Reyzin}},\ and\ \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Smith}},\ }\bibfield {title} {\bibinfo {title} {Fuzzy extractors: How to generate strong keys from biometrics and other noisy data},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {SIAM J. 
Comput.}\ }\textbf {\bibinfo {volume} {38}},\ \bibinfo {pages} {97} (\bibinfo {year} {2008})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Helfmeier}\ \emph {et~al.}(2013)\citenamefont {Helfmeier}, \citenamefont {Boit}, \citenamefont {Nedospasov},\ and\ \citenamefont {Seifert}}]{HBNS} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {C.}~\bibnamefont {Helfmeier}}, \bibinfo {author} {\bibfnamefont {C.}~\bibnamefont {Boit}}, \bibinfo {author} {\bibfnamefont {D.}~\bibnamefont {Nedospasov}},\ and\ \bibinfo {author} {\bibfnamefont {J.-P.}\ \bibnamefont {Seifert}},\ }\bibfield {title} {\bibinfo {title} {Cloning physically unclonable functions},\ }in\ \href@noop {} {\emph {\bibinfo {booktitle} {2013 IEEE International Symposium on Hardware-Oriented Security and Trust (HOST)}}}\ (\bibinfo {organization} {IEEE},\ \bibinfo {year} {2013})\ pp.\ \bibinfo {pages} {1--6}\BibitemShut {NoStop} \bibitem [{\citenamefont {R{\"u}hrmair}\ \emph {et~al.}(2010{\natexlab{a}})\citenamefont {R{\"u}hrmair}, \citenamefont {Sehnke}, \citenamefont {S{\"o}lter}, \citenamefont {Dror}, \citenamefont {Devadas},\ and\ \citenamefont {Schmidhuber}}]{RSSDDS} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {U.}~\bibnamefont {R{\"u}hrmair}}, \bibinfo {author} {\bibfnamefont {F.}~\bibnamefont {Sehnke}}, \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {S{\"o}lter}}, \bibinfo {author} {\bibfnamefont {G.}~\bibnamefont {Dror}}, \bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {Devadas}},\ and\ \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Schmidhuber}},\ }\bibfield {title} {\bibinfo {title} {Modeling attacks on physical unclonable functions},\ }in\ \href@noop {} {\emph {\bibinfo {booktitle} {Proceedings of the 17th ACM conference on Computer and communications security}}}\ (\bibinfo {organization} {ACM},\ \bibinfo {year} {2010})\ pp.\ \bibinfo {pages} {237--249}\BibitemShut {NoStop} \bibitem [{\citenamefont {R{\"u}hrmair}\ \emph {et~al.}(2013)\citenamefont 
{R{\"u}hrmair}, \citenamefont {S{\"o}lter}, \citenamefont {Sehnke}, \citenamefont {Xu}, \citenamefont {Mahmoud}, \citenamefont {Stoyanova}, \citenamefont {Dror}, \citenamefont {Schmidhuber}, \citenamefont {Burleson},\ and\ \citenamefont {Devadas}}]{R-etal} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {U.}~\bibnamefont {R{\"u}hrmair}}, \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {S{\"o}lter}}, \bibinfo {author} {\bibfnamefont {F.}~\bibnamefont {Sehnke}}, \bibinfo {author} {\bibfnamefont {X.}~\bibnamefont {Xu}}, \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Mahmoud}}, \bibinfo {author} {\bibfnamefont {V.}~\bibnamefont {Stoyanova}}, \bibinfo {author} {\bibfnamefont {G.}~\bibnamefont {Dror}}, \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Schmidhuber}}, \bibinfo {author} {\bibfnamefont {W.}~\bibnamefont {Burleson}},\ and\ \bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {Devadas}},\ }\bibfield {title} {\bibinfo {title} {$\text{PUF}$ modeling attacks on simulated and silicon data},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {IEEE Trans. Inf. Forensics Security}\ }\textbf {\bibinfo {volume} {8}},\ \bibinfo {pages} {1876} (\bibinfo {year} {2013})}\BibitemShut {NoStop} \bibitem [{\citenamefont {{\v{S}}kori{\'c}}(2012)}]{BS} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {B.}~\bibnamefont {{\v{S}}kori{\'c}}},\ }\bibfield {title} {\bibinfo {title} {Quantum readout of physical unclonable functions},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {Int. J. 
Quantum Inf.}\ }\textbf {\bibinfo {volume} {10}},\ \bibinfo {pages} {1250001} (\bibinfo {year} {2012})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Wootters}\ and\ \citenamefont {Zurek}(1982)}]{WZ} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {W.~K.}\ \bibnamefont {Wootters}}\ and\ \bibinfo {author} {\bibfnamefont {W.~H.}\ \bibnamefont {Zurek}},\ }\bibfield {title} {\bibinfo {title} {A single quantum cannot be cloned},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {Nature}\ }\textbf {\bibinfo {volume} {299}},\ \bibinfo {pages} {802} (\bibinfo {year} {1982})}\BibitemShut {NoStop} \bibitem [{\citenamefont {R{\"u}hrmair}\ \emph {et~al.}(2009)\citenamefont {R{\"u}hrmair}, \citenamefont {S{\"o}lter},\ and\ \citenamefont {Sehnke}}]{RSS} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {U.}~\bibnamefont {R{\"u}hrmair}}, \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {S{\"o}lter}},\ and\ \bibinfo {author} {\bibfnamefont {F.}~\bibnamefont {Sehnke}},\ }\bibfield {title} {\bibinfo {title} {On the foundations of physical unclonable functions},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {IACR Cryptology ePrint Archive}\ }\textbf {\bibinfo {volume} {2009}},\ \bibinfo {pages} {277} (\bibinfo {year} {2009})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Armknecht}\ \emph {et~al.}(2011)\citenamefont {Armknecht}, \citenamefont {Maes}, \citenamefont {Sadeghi}, \citenamefont {Standaert},\ and\ \citenamefont {Wachsmann}}]{AMSSW} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {F.}~\bibnamefont {Armknecht}}, \bibinfo {author} {\bibfnamefont {R.}~\bibnamefont {Maes}}, \bibinfo {author} {\bibfnamefont {A.-R.}\ \bibnamefont {Sadeghi}}, \bibinfo {author} {\bibfnamefont {F.-X.}\ \bibnamefont {Standaert}},\ and\ \bibinfo {author} {\bibfnamefont {C.}~\bibnamefont {Wachsmann}},\ }\bibfield {title} {\bibinfo {title} {A formalization of the security features of physical functions},\ }in\ \href@noop {} {\emph 
{\bibinfo {booktitle} {2011 IEEE Symposium on Security and Privacy}}}\ (\bibinfo {organization} {IEEE},\ \bibinfo {year} {2011})\ pp.\ \bibinfo {pages} {397--412}\BibitemShut {NoStop} \bibitem [{\citenamefont {Plaga}\ and\ \citenamefont {Koob}(2012)}]{PK} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {R.}~\bibnamefont {Plaga}}\ and\ \bibinfo {author} {\bibfnamefont {F.}~\bibnamefont {Koob}},\ }\bibfield {title} {\bibinfo {title} {A formal definition and a new security mechanism of physical unclonable functions},\ }in\ \href@noop {} {\emph {\bibinfo {booktitle} {International GI/ITG Conference on Measurement, Modelling, and Evaluation of Computing Systems and Dependability and Fault Tolerance}}}\ (\bibinfo {organization} {Springer},\ \bibinfo {year} {2012})\ pp.\ \bibinfo {pages} {288--301}\BibitemShut {NoStop} \bibitem [{\citenamefont {Plaga}\ and\ \citenamefont {Merli}(2015)}]{PM} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {R.}~\bibnamefont {Plaga}}\ and\ \bibinfo {author} {\bibfnamefont {D.}~\bibnamefont {Merli}},\ }\bibfield {title} {\bibinfo {title} {A new definition and classification of physical unclonable functions},\ }in\ \href@noop {} {\emph {\bibinfo {booktitle} {Proceedings of the Second Workshop on Cryptography and Security in Computing Systems}}}\ (\bibinfo {organization} {ACM},\ \bibinfo {year} {2015})\ p.~\bibinfo {pages} {7}\BibitemShut {NoStop} \bibitem [{\citenamefont {Delvaux}(2017)}]{JD} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Delvaux}},\ }\emph {\bibinfo {title} {Security analysis of $\text{PUF}$-based key generation and entity authentication}},\ \href@noop {} {Ph.D. 
thesis},\ \bibinfo {school} {Katholieke Universiteit Leuven, Belgium} (\bibinfo {year} {2017})\BibitemShut {NoStop} \bibitem [{\citenamefont {{\v{S}}kori{\'c}}\ \emph {et~al.}(2005)\citenamefont {{\v{S}}kori{\'c}}, \citenamefont {Tuyls},\ and\ \citenamefont {Ophey}}]{STO} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {B.}~\bibnamefont {{\v{S}}kori{\'c}}}, \bibinfo {author} {\bibfnamefont {P.}~\bibnamefont {Tuyls}},\ and\ \bibinfo {author} {\bibfnamefont {W.}~\bibnamefont {Ophey}},\ }\bibfield {title} {\bibinfo {title} {Robust key extraction from physical uncloneable functions},\ }in\ \href@noop {} {\emph {\bibinfo {booktitle} {International Conference on Applied Cryptography and Network Security}}}\ (\bibinfo {organization} {Springer},\ \bibinfo {year} {2005})\ pp.\ \bibinfo {pages} {407--422}\BibitemShut {NoStop} \bibitem [{\citenamefont {Goorden}\ \emph {et~al.}(2014)\citenamefont {Goorden}, \citenamefont {Horstmann}, \citenamefont {Mosk}, \citenamefont {{\v{S}}kori{\'c}},\ and\ \citenamefont {Pinkse}}]{GHMSP} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {S.~A.}\ \bibnamefont {Goorden}}, \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Horstmann}}, \bibinfo {author} {\bibfnamefont {A.~P.}\ \bibnamefont {Mosk}}, \bibinfo {author} {\bibfnamefont {B.}~\bibnamefont {{\v{S}}kori{\'c}}},\ and\ \bibinfo {author} {\bibfnamefont {P.~W.}\ \bibnamefont {Pinkse}},\ }\bibfield {title} {\bibinfo {title} {Quantum-secure authentication of a physical unclonable key},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {Optica}\ }\textbf {\bibinfo {volume} {1}},\ \bibinfo {pages} {421} (\bibinfo {year} {2014})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Tuyls}\ \emph {et~al.}(2005)\citenamefont {Tuyls}, \citenamefont {{\v{S}}kori{\'c}}, \citenamefont {Stallinga}, \citenamefont {Akkermans},\ and\ \citenamefont {Ophey}}]{TSSAO} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {P.}~\bibnamefont {Tuyls}}, \bibinfo 
{author} {\bibfnamefont {B.}~\bibnamefont {{\v{S}}kori{\'c}}}, \bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {Stallinga}}, \bibinfo {author} {\bibfnamefont {A.~H.}\ \bibnamefont {Akkermans}},\ and\ \bibinfo {author} {\bibfnamefont {W.}~\bibnamefont {Ophey}},\ }\bibfield {title} {\bibinfo {title} {Information-theoretic security analysis of physical uncloneable functions},\ }in\ \href@noop {} {\emph {\bibinfo {booktitle} {International Conference on Financial Cryptography and Data Security}}}\ (\bibinfo {organization} {Springer},\ \bibinfo {year} {2005})\ pp.\ \bibinfo {pages} {141--155}\BibitemShut {NoStop} \bibitem [{\citenamefont {Rioul}\ \emph {et~al.}(2016)\citenamefont {Rioul}, \citenamefont {Sol{\'e}}, \citenamefont {Guilley},\ and\ \citenamefont {Danger}}]{RSGD} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {O.}~\bibnamefont {Rioul}}, \bibinfo {author} {\bibfnamefont {P.}~\bibnamefont {Sol{\'e}}}, \bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {Guilley}},\ and\ \bibinfo {author} {\bibfnamefont {J.-L.}\ \bibnamefont {Danger}},\ }\bibfield {title} {\bibinfo {title} {On the entropy of physically unclonable functions},\ }in\ \href@noop {} {\emph {\bibinfo {booktitle} {2016 IEEE International Symposium on Information Theory (ISIT)}}}\ (\bibinfo {organization} {IEEE},\ \bibinfo {year} {2016})\ pp.\ \bibinfo {pages} {2928--2932}\BibitemShut {NoStop} \bibitem [{\citenamefont {{\v{S}}kori{\'c}}\ \emph {et~al.}(2013)\citenamefont {{\v{S}}kori{\'c}}, \citenamefont {Mosk},\ and\ \citenamefont {Pinkse}}]{SMP} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {B.}~\bibnamefont {{\v{S}}kori{\'c}}}, \bibinfo {author} {\bibfnamefont {A.~P.}\ \bibnamefont {Mosk}},\ and\ \bibinfo {author} {\bibfnamefont {P.~W.}\ \bibnamefont {Pinkse}},\ }\bibfield {title} {\bibinfo {title} {Security of quantum-readout $\text{PUFs}$ against quadrature-based challenge-estimation attacks},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {Int. 
J. Quantum Inf.}\ }\textbf {\bibinfo {volume} {11}},\ \bibinfo {pages} {1350041} (\bibinfo {year} {2013})}\BibitemShut {NoStop} \bibitem [{\citenamefont {{\v{S}}koric}(2016)}]{BS13} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {B.}~\bibnamefont {{\v{S}}koric}},\ }\bibfield {title} {\bibinfo {title} {Security analysis of quantum-readout $\text{PUFs}$ in the case of challenge-estimation attacks},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {Quantum Inf. Comput.}\ }\textbf {\bibinfo {volume} {16}},\ \bibinfo {pages} {0050} (\bibinfo {year} {2016})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Yao}\ \emph {et~al.}(2016)\citenamefont {Yao}, \citenamefont {Gao}, \citenamefont {Li},\ and\ \citenamefont {Zhang}}]{YGLZ} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {Y.}~\bibnamefont {Yao}}, \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Gao}}, \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Li}},\ and\ \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Zhang}},\ }\bibfield {title} {\bibinfo {title} {Quantum cloning attacks against $\text{PUF}$-based quantum authentication systems},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {Quantum Inf. Process.}\ }\textbf {\bibinfo {volume} {15}},\ \bibinfo {pages} {3311} (\bibinfo {year} {2016})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Nikolopoulos}\ and\ \citenamefont {Diamanti}(2017)}]{ND} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {G.~M.}\ \bibnamefont {Nikolopoulos}}\ and\ \bibinfo {author} {\bibfnamefont {E.}~\bibnamefont {Diamanti}},\ }\bibfield {title} {\bibinfo {title} {Continuous-variable quantum authentication of physical unclonable keys},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {Sci. 
Rep.}\ }\textbf {\bibinfo {volume} {7}},\ \bibinfo {pages} {46047} (\bibinfo {year} {2017})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Nikolopoulos}(2018)}]{GN} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {G.~M.}\ \bibnamefont {Nikolopoulos}},\ }\bibfield {title} {\bibinfo {title} {Continuous-variable quantum authentication of physical unclonable keys: Security against an emulation attack},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. A}\ }\textbf {\bibinfo {volume} {97}},\ \bibinfo {pages} {012324} (\bibinfo {year} {2018})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Zyczkowski}\ and\ \citenamefont {Kus}(1994)}]{ZK} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {K.}~\bibnamefont {Zyczkowski}}\ and\ \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Kus}},\ }\bibfield {title} {\bibinfo {title} {Random unitary matrices},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {J. Phys. A: Math. Gen.}\ }\textbf {\bibinfo {volume} {27}},\ \bibinfo {pages} {4235} (\bibinfo {year} {1994})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Gross}\ \emph {et~al.}(2010)\citenamefont {Gross}, \citenamefont {Liu}, \citenamefont {Flammia}, \citenamefont {Becker},\ and\ \citenamefont {Eisert}}]{GLFBE} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {D.}~\bibnamefont {Gross}}, \bibinfo {author} {\bibfnamefont {Y.-K.}\ \bibnamefont {Liu}}, \bibinfo {author} {\bibfnamefont {S.~T.}\ \bibnamefont {Flammia}}, \bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {Becker}},\ and\ \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Eisert}},\ }\bibfield {title} {\bibinfo {title} {Quantum state tomography via compressed sensing},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. 
Lett.}\ }\textbf {\bibinfo {volume} {105}},\ \bibinfo {pages} {150401} (\bibinfo {year} {2010})}\BibitemShut {NoStop} \bibitem [{\citenamefont {R{\"u}hrmair}\ \emph {et~al.}(2010{\natexlab{b}})\citenamefont {R{\"u}hrmair}, \citenamefont {Busch},\ and\ \citenamefont {Katzenbeisser}}]{RBK} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {U.}~\bibnamefont {R{\"u}hrmair}}, \bibinfo {author} {\bibfnamefont {H.}~\bibnamefont {Busch}},\ and\ \bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {Katzenbeisser}},\ }\bibfield {title} {\bibinfo {title} {Strong $\text{PUFs}$: models, constructions, and security proofs},\ }in\ \href@noop {} {\emph {\bibinfo {booktitle} {Towards Hardware-Intrinsic Security}}}\ (\bibinfo {publisher} {Springer},\ \bibinfo {year} {2010})\ pp.\ \bibinfo {pages} {79--96}\BibitemShut {NoStop} \bibitem [{\citenamefont {Canetti}\ \emph {et~al.}(2016)\citenamefont {Canetti}, \citenamefont {Fuller}, \citenamefont {Paneth}, \citenamefont {Reyzin},\ and\ \citenamefont {Smith}}]{CFPRS} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {R.}~\bibnamefont {Canetti}}, \bibinfo {author} {\bibfnamefont {B.}~\bibnamefont {Fuller}}, \bibinfo {author} {\bibfnamefont {O.}~\bibnamefont {Paneth}}, \bibinfo {author} {\bibfnamefont {L.}~\bibnamefont {Reyzin}},\ and\ \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Smith}},\ }\bibfield {title} {\bibinfo {title} {Reusable fuzzy extractors for low-entropy distributions},\ }in\ \href@noop {} {\emph {\bibinfo {booktitle} {Annual International Conference on the Theory and Applications of Cryptographic Techniques}}}\ (\bibinfo {organization} {Springer},\ \bibinfo {year} {2016})\ pp.\ \bibinfo {pages} {117--146}\BibitemShut {NoStop} \bibitem [{\citenamefont {Helstrom}(1969)}]{CH} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {C. 
W.}~\bibnamefont {Helstrom}},\ }\bibfield {title} {\bibinfo {title} {Quantum detection and estimation theory},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {J. Stat. Phys.}\ }\textbf {\bibinfo {volume} {1}},\ \bibinfo {pages} {231--252} (\bibinfo {year} {1969})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Bru{\ss}}\ and\ \citenamefont {Macchiavello}(2001)}]{BM01} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {D.}~\bibnamefont {Bru{\ss}}}\ and\ \bibinfo {author} {\bibfnamefont {C.}~\bibnamefont {Macchiavello}},\ }\bibfield {title} {\bibinfo {title} {Optimal cloning for two pairs of orthogonal states},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {J. Phys. A: Math. Gen.}\ }\textbf {\bibinfo {volume} {34}},\ \bibinfo {pages} {6815} (\bibinfo {year} {2001})}\BibitemShut {NoStop} \bibitem{ADDK} M. Arapinis, M. Delavar, M. Doosti and E. Kashefi, Quantum Physical Unclonable Functions: Possibilities and Impossibilities, arXiv:1910.02126v1 (2019). \end{thebibliography} \end{document}
\begin{document} \thispagestyle{empty} \title{\Large Constrained approximation of rational triangular B\'ezier surfaces\\ by polynomial triangular B\'ezier surfaces} \author{Stanis{\l}aw Lewanowicz${}^{a,\!}$ \footnote{Corresponding author\newline \hspace*{1.5em}\textit{Email addresses}: [email protected] (Stanis{\l}aw Lewanowicz),\newline [email protected] (Pawe{\l} Keller), [email protected] (Pawe{\l} Wo\'zny)} \,\:, Pawe{\l} Keller${}^{b}$, Pawe{\l} Wo\'zny${}^{a}$ } \date{\small\it ${}^{a}\!\!\!$ Institute of Computer Science, University of Wroc{\l}aw, ul.~Joliot-Curie 15, 50-383 Wroc{\l}aw, Poland\\ \noindent ${}^{b}\!\!\!$ Faculty of Mathematics and Information Science, Warsaw University of Technology,\\ ul. Koszykowa 75, 00-662 Warszawa, Poland\\[2ex]} \maketitle \noindent {\small\textbf{Abstract}. We propose a novel approach to the problem of polynomial approximation of rational B\'ezier triangular patches with prescribed boundary control points. The method is very efficient thanks to using recursive properties of the bivariate dual Bernstein polynomials and applying a~smart algorithm for evaluating a collection of two-dimensional integrals. Some illustrative examples are given.}\\ \noindent {\small \textit{Key words}: Rational triangular B\'ezier surface; Polynomial approximation; Bivariate dual Bernstein basis; Two-dimensional integral; Adaptive quadrature.} \section{Introduction and preliminaries} \label{S:intro} Rational triangular B\'ezier surfaces are an important tool in computer graphics. However, they may be sometimes inconvenient in practical applications. The reason is that evaluation of integrals or derivatives of rational expressions is cumbersome. Also, it happens that a rational surface produced in one CAD system is to be imported into another system which can handle only polynomial surfaces. 
In order to solve the two problems above, different algorithms for approximating the rational surface by a polynomial surface have been proposed \cite{CW11,Hu13,Sha13,XW09,XW10,ZW06}. The spectrum of methods contains a hybrid algorithm \cite{ZW06}, progressive iteration approximation \cite{CW11,Hu13}, least squares approximation and linear programming \cite{Sha13}, and approximation by B\'ezier surfaces with control points obtained by successive degree elevation of the rational B\'ezier surface \cite{XW09,XW10}. As a rule, no geometric constraints are imposed, which is a serious drawback: if we start with a patchwork of smoothly connected rational B\'ezier triangles and approximate each patch separately, we do not obtain a smooth composite surface. In this paper, we propose a method for solving the problem of the constrained least squares approximation of a rational triangular B\'ezier patch by a polynomial triangular B\'ezier patch; see Problem~\ref{P:main} below. The method is based on the idea of using constrained dual bivariate Bernstein polynomials. Using a fast recursive scheme of evaluation of B\'ezier form coefficients of the bivariate dual Bernstein polynomials, and applying a swift adaptive scheme of numerical computation of a collection of double integrals involving rational functions resulted in high efficiency of the method. The outline of the paper is as follows. Section~\ref{S:main} brings a complete solution to the approximation problem. Some comments on the algorithmic implementation of the method are given in Section~\ref{S:impl}; some technical details of the implementation are presented in Appendix A. In Section~\ref{S:exmp}, several examples are given to show the efficiency of the method. In Appendix~B, some basic information on the Hahn orthogonal polynomials is recalled. We end this section by introducing some notation. 
For $\bl y:=(y_1,y_2,\ldots,y_d)\in\mathbb R^d$, we denote $|\bl y|:=y_1+y_2+\ldots+y_d$, and $\|\bl y\|:=\left(y^2_1+y^2_2+\ldots+y^2_d\right)^{\frac12}$. For $n\in\mathbb N$ and $\bl c:=(c_1,\,c_2,\,c_3)\in\mathbb N^3$ such that $\mbox{$\left|\blc\right|$}<n$, we define the following sets (cf. Figure~\ref{fig:Fig-bc}): \begin{equation}\label{E:TOG} \left.\begin{array}{l} \Theta_n:=\{\bl k=(k_1,k_2)\in\mathbb N^2:\: 0\le|\bl k|\le n\}, \\[1ex] \Omega^{\sbl c}_n:=\{\bl k=(k_1,k_2)\in\mathbb N^2:\:k_1\ge c_1,\,k_2\ge c_2,\,|\bl k|\le n-c_3\},\\[1ex] \Gamma^{\sbl c}_n:=\Theta_n\setminus\Omega^{\sbl c}_n. \end{array}\;\right\} \end{equation} \begin{figure} \caption{\small Examples of sets \eqref{E:TOG}.} \label{fig:Fig-bc} \end{figure} Throughout this paper, the symbol $\Pi^2_n$ denotes the space of all polynomials of two variables, of total degree at most $n$. Let $T$ be the standard triangle in $\mathbb R^2$, \begin{equation} \label{E:T} T:=\{(x_1,\,x_2)\,:\,x_1,\,x_2\ge0,\: x_1+x_2\le1\}. \end{equation} For $n\in\mathbb N$, and $\bl k:=(k_1,k_2)\in\Theta_n$, we denote, \[ \binom{n}{\bl k}:=\frac{n!}{k_1!k_2!(n-|\bl k|)!}. \] The \textit{shifted factorial} is defined for any $a\in\mathbb C$ by \[ (a)_0:=1;\qquad (a)_k:=a(a+1)\cdots(a+k-1), \qquad k\ge1. \] The \textit{Bernstein polynomial basis} in $\Pi^2_n$, $n\in\mathbb N$, is given by (see, e.g., \cite{Far86}, or \cite[\S 17.3]{Far02}), \begin{equation}\label{E:Ber2} B^n_{\sbl k}(\bl x):=\binom{n}{\bl k}x_1^{k_1}x_2^{k_2}(1-|\bl x|)^{n-|\sbl k|}, \qquad \bl k:=(k_1,k_2)\in\Theta_n,\quad\bl x:=(x_1,x_2). \end{equation} The (unconstrained) \textit{bivariate dual Bernstein basis polynomials} \cite{LW06}, \begin{equation}\label{E:dBer2} D^n_{\sbl k}(\bl\cdot;\mbox{$\bm{\alpha}$})\in\Pi^2_n, \qquad \bl k\in\Theta_n, \end{equation} are defined so that \[ \left\langle D^{n}_{\sbl k},\,B^n_{\sbl l} \right\rangle_{\mbox{\scriptsize$\bla$}}=\delta_{\sbl k, \sbl l}, \qquad \bl k,\bl l\in\Theta_n. 
\] Here $\delta_{\sbl k, \sbl l}$ equals 1 if $\bl k= \bl l$, and 0 otherwise, while the inner product is defined by \begin{equation}\label{E:Jinprod} \langle f, g \rangle_{\mbox{\scriptsize$\bla$}} :=\mbox{$\displaystyle\int\!\!\!\!\int\limits_{\!\!\!\!T}$} w_{\mbox{\scriptsize$\bla$}}(\bl x)f(\bl x)\,g(\bl x)\,\mbox{{\rm d}$\bl x$}, \end{equation} where the weight function $w_{\mbox{\scriptsize$\bla$}}$ is given by \begin{equation} \label{E:w} w_{\mbox{\scriptsize$\bla$}}(\bl x):=A_{\mbox{\scriptsize$\bla$}}x_1^{\alpha_1}x_2^{\alpha_2}(1-|\bl x|)^{\alpha_3}, \qquad\mbox{$\bm{\alpha}$}:=(\alpha_1,\,\alpha_2,\,\alpha_3), \quad\alpha_i>-1, \end{equation} with \( A_{\mbox{\scriptsize$\bla$}}:=\Gamma(\mbox{$\left|\bla\right|$}+3)/[\Gamma(\alpha_1+1)\Gamma(\alpha_2+1)\Gamma(\alpha_3+1)]. \) For $n\in\mathbb N$ and $\bl c:=(c_1,\,c_2,\,c_3)\in\mathbb N^3$ such that $\mbox{$\left|\blc\right|$}<n$, define the constrained bivariate polynomial space \[ \Pi^{\sbl c,\,2}_n :=\left\{P\in\Pi^2_n\::\: P(\bl x)= x_1^{c_1}x_2^{c_2}(1-|\bl x|)^{c_3}\cdot\,Q(\bl x), \;\mbox{where}\;Q\in\Pi^2_{n-|\sbl c|}\right\}. \] It can be easily seen that the constrained set $\{B^n_{\sbl k}\}_{\sbl k\in\Omega^{\sbl c}_n}$ of degree $n$ bivariate Bernstein polynomials forms a basis in this space. We define \textit{constrained dual bivariate Bernstein basis polynomials}, \begin{equation}\label{E:constrdBer2} D^{(n,\sbl c)}_{\sbl k}(\bl{\cdot};\mbox{$\bm{\alpha}$})\in\Pi^{\sbl c,\,2}_n, \qquad \bl k\in\ \Omega^{\sbl c}_n, \end{equation} so that \begin{equation} \label{E:dualprop} \left\langle D^{(n,\sbl c)}_{\sbl k},\,B^n_{\sbl l} \right\rangle_{\mbox{\scriptsize$\bla$}}=\delta_{\sbl k, \sbl l} \quad \mbox{for} \quad \bl k,\:\bl l\in\ \Omega^{\mbox{$\sbl c$}}_n, \end{equation} where the notation of \eqref{E:Jinprod} is used. For $\bl c=(0,0,0)$, basis \eqref{E:constrdBer2} reduces to the unconstrained basis \eqref{E:dBer2} in $\Pi^2_n$. 
Notice that the solution of the least squares approximation problem in the space $\Pi^{(\sbl c,2)}_n$ can be given in terms of the polynomials $D^{(n,\sbl c)}_{\sbl k}$. Namely, we have the following result. \begin{lem}\label{L:bestpol} Let $F$ be a function defined on the standard triangle $T$ (cf. \eqref{E:T}). The polynomial $S_n\in\Pi^{(\sbl c,2)}_n$, which gives the minimum value of the norm \[ \|F-S_n\|_{L_2}:=\left\langle F-S_n,F-S_n\right\rangle_{\mbox{\scriptsize$\bla$}}^{\frac12}, \] is given by \begin{equation} \label{E:bestpol} S_n= \sum_{\sbl k\in\Omega^{\tbl c}_n}\left\langle F,D^{(n,\sbl c)}_{\sbl k}\right\rangle_{\!\mbox{\scriptsize$\bla$}}\,B^n_{\sbl k}. \end{equation} \end{lem} \begin{pf} Obviously, $S_n$ has the following representation in the Bernstein basis of the space $\Pi_n^{(\sbl c,2)}$: \[ S_n=\sum_{\sbl k\in\Omega^{\tbl c}_n} \left\langle S_n,D^{(n,\sbl c)}_{\sbl k}\right\rangle_{\!\mbox{\scriptsize$\bla$}}\,B^n_{\sbl k}. \] On the other hand, a classical characterization of the best approximation polynomial $S_n$ is that $\langle {F-S_n}, Q\rangle_{\mbox{\scriptsize$\bla$}}=0$ holds for any polynomial $Q\in\Pi_n^{(\sbl c,2)}$ (see, e.g. \cite[Thm 4.5.22]{DB08}). In particular, for $Q=D^{(n,\sbl c)}_{\sbl k}$, we obtain \[ \left\langle {F},{D^{(n,\sbl c)}_{\sbl k}}\right\rangle_{\!\mbox{\scriptsize$\bla$}} =\left\langle S_n,D^{(n,\sbl c)}_{\sbl k}\right\rangle_{\!\mbox{\scriptsize$\bla$}} ,\qquad \bl k\in\Omega^{\sbl c}_n. \] Hence, the formula \eqref{E:bestpol} follows. \end{pf} The coefficients $E^{\sbl k}_{\sbl l}(\mbox{$\bm{\alpha}$},\mbox{$\bl c$},n) $ in the B\'ezier form of the dual Bernstein polynomials, \begin{equation} \label{E:Dc2inB2} D^{(n,\mbox{$\sbl c$})}_{\sbl k}(\bl x;\mbox{$\bm{\alpha}$})=\, \sum_{\sbl l\in\Omega^{\mbox{$\sbl c$}}_n}\,E^{\sbl k}_{\sbl l}(\mbox{$\bm{\alpha}$},\mbox{$\bl c$},n) \, B^n_{\sbl l}(\bl x),\qquad \bl k\in\Omega^{\mbox{$\sbl c$}}_n, \end{equation} play an important role in the proposed method. 
Using the duality property \eqref{E:dualprop}, we obtain the following expression for the coefficients of the above expansion: \begin{equation} \label{E:Ek} E^{\sbl k}_{\sbl l}(\mbox{$\bm{\alpha}$},\mbox{$\bl c$},n) =\left\langle D^{(n,\sbl c)}_{\sbl k}, D^{(n,\sbl c)}_{\sbl l}\right\rangle_{\mbox{\scriptsize$\bla$}}. \end{equation} In a recent paper \cite{LKW15}, an efficient algorithm was obtained for evaluating all these coefficients for $\mbox{$\bm{k}$},\,\mbox{$\bm{\bm l}$}\in\Omega^{\mbox{$\sbl c$}}_n$, with the computational complexity $O(n^4)$, i.e., proportional to the total number of these coefficients. See Section~\ref{SS:impl-E} for details. \section{Polynomial approximation of B\'ezier triangular surfaces with constraints} \label{S:main} In this paper, we consider the following approximation problem. \begin{prob} \label{P:main} Let $\mbox{$\mathsf R$}_n$ be a rational triangular B\'ezier surface of degree $n$, \[ \mbox{$\mathsf R$}_n(\bl x) :=\frac{\mbox{$\mathsf Q$}_n(\bl x)}{\omega(\bl x)} =\frac{\displaystyle \sum_{\sbl k\in\Theta_n}\omega_{\sbl k}r_{\sbl k}B^n_{\sbl k}(\bl x)} {\displaystyle \sum_{\sbl k\in\Theta_n}\omega_{\sbl k}B^n_{\sbl k}(\bl x)},\qquad \bl x\in T, \] with the control points $r_{\sbl k}\in\mathbb R^3$ and positive weights $\omega_{\sbl k}\in\mathbb R$, $\bl k\in\Theta_n$. 
Find a~B\'ezier triangular surface of degree $m$, \[ \mbox{$\mathsf P$}_m(\bl x) := \sum_{\sbl k\in\Theta_m}p_{\sbl k}B^m_{\sbl k}(\bl x),\qquad \bl x\in T, \] with the control points $p_{\sbl k}\in\mathbb R^3$, satisfying the conditions \begin{equation}\label{E:gcond} p_{\sbl k}=g_{\sbl k}\quad \mbox{for}\quad \bl k\in\Gamma^{\sbl c}_m, \end{equation} $g_{\sbl k}\in\mathbb R^3$ being prescribed control points, and $\bl c:=(c_1,c_2,c_3)\in\mathbb N^3$ being a given parameter vector with $|\bl c|<m$, such that the distance between the surfaces $\mbox{$\mathsf R$}_n$ and $\mbox{$\mathsf P$}_m$, \begin{equation} \label{E:dist} d(\mbox{$\mathsf R$}_n,\mbox{$\mathsf P$}_m):=\mbox{$\displaystyle\int\!\!\!\!\int\limits_{\!\!\!\!T}$} w_{\mbox{\scriptsize$\bla$}}(\bl x)\|\mbox{$\mathsf R$}_n(\bl x)-\mbox{$\mathsf P$}_m(\bl x)\|^2\,\mbox{{\rm d}$\bl x$}, \end{equation} reaches the minimum. \end{prob} \begin{rem}\label{R:gcond} Remember that continuity conditions for any two adjacent triangular B\'ezier patches are given in terms of several rows of the control net "parallel" to the control polygon of their common boundary (see, e.g., \cite[Section 17]{Far02}). Therefore, constraints \eqref{E:gcond} are natural, in a sense (cf. Fig.~\ref{fig:Fig-bc}). In Section~\ref{S:exmp}, we give several examples of practical usage of this approach. \end{rem} Clearly, the B\'ezier triangular patch being the solution of Problem~\ref{P:main} can be obtained in a componentwise way. Hence it is sufficient to give a method for solving the above problem in the case where $\mbox{$\mathsf R$}_n$ and $\mbox{$\mathsf P$}_m$ are scalar functions, and $g_{\sbl k}$ are numbers. All the details of the proposed method are given in the following theorem. 
\begin{thm} \label{T:main} Given the coefficients $r_{\sbl k}$ and positive weights $\omega_{\sbl k}$, $\bl k\in\Theta_n$, of the rational function \begin{equation} \label{E:Rsc} \mbox{$\mathsf R$}_n(\bl x):=\frac{\mbox{$\mathsf Q$}_n(\bl x)}{\omega(\bl x)} =\frac{\displaystyle \sum_{\sbl k\in\Theta_n}\omega_{\sbl k}r_{\sbl k}B^n_{\sbl k}(\bl x)} {\displaystyle \sum_{\sbl k\in\Theta_n}\omega_{\sbl k}B^n_{\sbl k}(\bl x)}, \end{equation} the coefficients $p_{\sbl k}$ of the degree $m$ polynomial \begin{equation}\label{E:Psc} \mbox{$\mathsf P$}_m(\bl x):= \sum_{\sbl k\in\Theta_m}p_{\sbl k}B^m_{\sbl k}(\bl x), \end{equation} minimising the error \begin{equation}\label{E:er-sc} \|\mbox{$\mathsf R$}_n-\mbox{$\mathsf P$}_m\|^2_{L_2}:=\langle \mbox{$\mathsf R$}_n-\mbox{$\mathsf P$}_m,\mbox{$\mathsf R$}_n-\mbox{$\mathsf P$}_m\rangle_{\sbl \alpha}, \end{equation} with the constraints \begin{equation}\label{E:gsc} p_{\sbl k}=g_{\sbl k}\quad \mbox{for}\quad \bl k\in\Gamma^{\sbl c}_m, \end{equation} are given by \begin{equation}\label{E:psc} p_{\sbl k}=\sum_{\sbl l\in\Omega^{\sbl c}_m}\binom{m}{\bl l}\,E^{\sbl k}_{\sbl l}(\mbox{$\bm{\alpha}$},\mbox{$\bl c$},m) \big(u_{\sbl l}-v_{\sbl l}\big), \qquad \bl k\in\Omega^{\sbl c}_m, \end{equation} where \begin{align*} u_{\sbl l}:=& \sum_{\sbl h\in\Theta_n} \binom{n}{\bl h} \rbinom{n+m}{\bl h+\bl l}\,\omega_{\sbl h}r_{\sbl h}\,I_{\sbl h+\sbl l},\\ \label{E:vl} v_{\sbl l}:=& \frac{1}{(|\mbox{$\bm{\alpha}$}|+3)_{2m}}\sum_{\sbl h\in\Gamma^{\sbl c}_m}\binom{m}{\bl h} \left(\prod_{i=1}^{3}(\alpha_i+1)_{h_i+l_i}\right)g_{\sbl h}, \end{align*} with $h_3:=m-|\bl h|$, $l_3:=m-|\bl l|$, and \begin{equation} \label{E:I} I_{\sbl j}:= \mbox{$\displaystyle\int\!\!\!\!\int\limits_{\!\!\!\!T}$} w_{\mbox{\scriptsize$\bla$}}(\bl x)\frac{B^{n+m}_{\sbl j}(\bl x)}{\omega(\bl x)}\,\mbox{{\rm d}$\bl x$}, \qquad\bl j\in\Omega^{\mbox{$\sbl c$}}_{n+m}. 
\end{equation} The symbol $E^{\sbl k}_{\sbl l}(\mbox{$\bm{\alpha}$},\mbox{$\bl c$},m)$ has the meaning given in \eqref{E:Dc2inB2}. \end{thm} \begin{pf} Observe that \[ \|\mbox{$\mathsf R$}_n-\mbox{$\mathsf P$}_m\|^2_{L_2}=\|\mbox{$\mathsf W$}-\mbox{$\mathsf S$}_m\|^2_{L_2} \] where \[ \mbox{$\mathsf W$}:=\mbox{$\mathsf R$}_n-\mbox{$\mathsf T$}_m,\quad \mbox{$\mathsf T$}_m:=\sum_{\sbl k\in\Gamma^{\sbl c}_m}g_{\sbl k}B^m_{\sbl k}, \quad \mbox{$\mathsf S$}_m:=\sum_{\sbl k\in\Omega^{\sbl c}_m}p_{\sbl k}B^m_{\sbl k}, \] the notation being that of \eqref{E:TOG}. Thus, we want $\mbox{$\mathsf S$}_m$ to be the best approximation polynomial for the function $\mbox{$\mathsf W$}$ in the space $\Pi^{\sbl c,2}_m$. Its B\'ezier coefficients are given by \[ p_{\sbl k}=\left\langle \mbox{$\mathsf W$}, D^{(m,\sbl c)}_{\sbl k} \right\rangle_{\mbox{\scriptsize$\bla$}} =\sum_{\sbl l\in\Omega^{\sbl c}_m}\,E^{\sbl k}_{\sbl l}(\mbox{$\bm{\alpha}$},\mbox{$\bl c$},m) \Big(\left\langle \mbox{$\mathsf R$}_n, B^m_{\sbl l} \right\rangle_{\mbox{\scriptsize$\bla$}} -\left\langle \mbox{$\mathsf T$}_m, B^m_{\sbl l} \right\rangle_{\mbox{\scriptsize$\bla$}}\Big),\qquad \bl k\in\Omega^{\sbl c}_m, \] where we have used Lemma~\ref{L:bestpol}. We obtain \begin{align*} \left\langle \mbox{$\mathsf R$}_n, \,B^m_{\sbl l}\right\rangle_{\mbox{\scriptsize$\bla$}} =& \sum_{\sbl h\in\Theta_n}\omega_{\sbl h}r_{\sbl h} \left\langle \frac{B^n_{\sbl h}}{\omega},\,B^m_{\sbl l}\right\rangle_{\mbox{\scriptsize$\bla$}}\\ =& \sum_{\sbl h\in\Theta_n}\omega_{\sbl h}r_{\sbl h} \binom{n}{\bl h}\binom{m}{\bl l}\rbinom{n+m}{\bl h+\bl l} \left\langle \frac{1}{\omega},\,B^{n+m}_{\sbl h+\sbl l}\right\rangle_{\mbox{\scriptsize$\bla$}}\\ =& \sum_{\sbl h\in\Theta_n}\omega_{\sbl h}r_{\sbl h} \binom{n}{\bl h}\binom{m}{\bl l}\rbinom{n+m}{\bl h+\bl l}\,I_{\sbl h+\sbl l}, \end{align*} where we use the notation \eqref{E:I}. 
Further, using equations \eqref{E:Ber2} and \eqref{E:Jinprod}, we obtain \begin{align*} \left\langle \mbox{$\mathsf T$}_m, B^{m}_{\sbl l} \right\rangle_{\mbox{\scriptsize$\bla$}}= & \sum_{\sbl h\in\Gamma^{\sbl c}_m}g_{\sbl h} \left\langle B^m_{\sbl h}, B^{m}_{\sbl l} \right\rangle_{\mbox{\scriptsize$\bla$}}\\ = &\sum_{\sbl h\in\Gamma^{\sbl c}_m}g_{\sbl h} \binom{m}{\bl h}\binom{m}{\bl l} \frac{(\alpha_1+1)_{h_1+l_1}(\alpha_2+1)_{h_2+l_2}(\alpha_3+1)_{2m-|\sbl h|-|\sbl l|}} {(|\mbox{$\bm{\alpha}$}|+3)_{2m}}. \end{align*} Hence, the formula \eqref{E:psc} follows. \end{pf} \begin{rem}\label{R:integr} In general, the integrals \eqref{E:I} cannot be evaluated exactly. In Section~\ref{SS:impl-I}, we show that they can be efficiently computed numerically up to high precision using an extension of the method of \cite{Kel07}. In the special case where all the weights $\omega_{\sbl i}$, $\bl i\in\Theta_n$, are equal, the rational function \eqref{E:Rsc} reduces to a polynomial of degree $n$, so that the problem is actually the constrained polynomial degree reduction problem (see, e.g., \cite{WL10}). Evaluation of the integrals is then a~simple task. \end{rem} \section{Implementation of the method} \label{S:impl} In this section, we discuss some computational details of the polynomial approximation of the rational B\'ezier function, described in Section~\ref{S:main} (see Theorem~\ref{T:main}). \subsection{Computing the coefficients $E^{\sbl k}_{\sbl l}(\mbox{$\bm{\alpha}$},\mbox{$\bl c$},m)$} \label{SS:impl-E} We have to compute all the coefficients $E^{\sbl k}_{\sbl l}(\mbox{$\bm{\alpha}$},\mbox{$\bl c$},m)$ with ${\bl k},\,\mbox{$\bm{\bm l}$}\in\Omega^{\sbl c}_m$. 
It has been shown \cite{LKW15} that they can be given in terms of \[ e^{\sbl k}_{\sbl l}(\bl\mu,M):=\langle D^M_{\mbox{\scriptsize$\blk$}},D^M_{\mbox{\scriptsize$\bll$}}\rangle_{\sbl\mu}, \qquad\mbox{$\bm{k}$},\bl l\in\Theta_M, \] with $M:=m-|\mbox{$\bl c$}|$ and $\mbox{$\bm{\mu}$}:=\mbox{$\bm{\alpha}$}+2\mbox{$\bl c$}$, where $D^M_{\mbox{\scriptsize$\blk$}}\equiv D^M_{\sbl k}(\bl\cdot;\bl\mu)$ are the unconstrained dual Bernstein polynomials of total degree $M$ (cf. \eqref{E:dBer2}). See Eq. \eqref{E:E-e} for details. Obviously, \( e^{\sbl k}_{\sbl l}(\bl\mu,M)=e^{\mbox{\scriptsize$\bll$}}_{\mbox{\scriptsize$\blk$}}(\bl\mu,M). \) The following algorithm is based on the recurrence relations satisfied by $e^{\mbox{\scriptsize$\blk$}}_{\mbox{\scriptsize$\bll$}}\equiv e^{\sbl k}_{\sbl l}(\bl\mu,M)$, obtained in the paper cited above. \begin{alg}[Computing the coefficients $E^{\sbl k}_{\sbl l} (\mbox{$\bm{\alpha}$},\mbox{$\bl c$},m)$] \label{A:Ecomp} \ \begin{description} \itemsep4pt \item[{\sc Step 1}] Let $M:=m-|\mbox{$\bl c$}|$, $\mbox{$\bm{\mu}$}:=\mbox{$\bm{\alpha}$}+2\mbox{$\bl c$}$. \item[{\sc Step 2}] For $l_1=0,1,\ldots, M-1$,\\ \hspace*{4.0em}$l_2=0,1,\ldots,M-l_1$,\\ \hspace*{2.2em}compute \begin{equation}\label{E:E00} e^{\sbl 0}_{\sbl l} :=\frac{(-1)^{l_1}(|\mbox{$\bm{\mu}$}|+3)_ {M}}{M!(\mu_1+2)_{l_1}} \sum_{i=0}^{M-l_1}C^\ast_i\,h_i(l_2;\mu_2,\mu_3,M- l_1), \end{equation} \hspace*{2.2em}where $\bl 0=(0,0)$, $\bl l=(l_1,l_2)$, $h_i(t;a,b,N)$ are the Hahn polynomials (cf.~\eqref {E:Hahn1}), and \[ C^\ast_i:=\left\{\begin{array}{ll} \dfrac{(\mu_1+2)_{M}}{(|\mbox{$\bm{\mu}$}|-\mu_1+2)_{M-l_1}},&\quad i=0, \\[2.5ex] \displaystyle (-1)^{i} \dfrac{(2i+|\mbox{$\bm{\mu}$}|-\mu_1+1)(\mu_1+2)_{M-i}(| \mbox{$\bm{\mu}$}|+M+3)_{i}} {i!(\mu_3+1)_i(|\mbox{$\bm{\mu}$}|-\mu_1+i+1)_{M- l_1+1}},&\quad i\ge1; \end{array}\right. \] \hspace*{2.2em}next put $e^{\sbl l}_{\sbl 0}:=e^{\sbl 0}_ {\sbl l}$. 
\item[{\sc Step 3}] For $k_1=0,1,\ldots,M-1$, \begin{description} \itemsep4pt \item[$1^o$] for $k_2=0,1,\ldots,M-k_1-1$,\\ \hspace*{1.55em}$l_1=k_1,k_1+1,\ldots,M$, \\ \hspace*{1.55em}$l_2=0,1,\ldots,M-l_1$, \\ compute \[ e^{\sbl k+\sbl v_2}_{\sbl l}:=\left([\sigma_1(\bl k)-\sigma_1(\bl l)]e^{\sbl k}_{\sbl l} -\sigma_2(\bl k) e^{\sbl k-\sbl v_2}_{\sbl l} +\sigma_0(\bl l) e^{\sbl k}_{\sbl l +\sbl v_2} +\sigma_2(\bl l) e^{\sbl k}_{\sbl l-\sbl v_2}\right)/\sigma_0(\bl k), \] where $\bl k=(k_1,k_2)$, $\bl l=(l_1,l_2)$, $\bl v_2:= (0,1)$, and where for $\bl t:=(t_1,t_2)$ we define \[ \;\;\sigma_0(\bl t):=(|\bl t|-M)(t_2+\mu_2+1),\quad \! \sigma_2(\bl t):=t_2(|\bl t|-\mu_3-M-1), \quad\! \sigma_1(\bl t):=\sigma_0(\bl t)+\sigma_2(\bl t), \] next put $e^{\sbl l}_{\sbl k+\sbl v_2}:=e^{\sbl k+\sbl v_2} _{\sbl l}$; \item[$2^o$]for $l_1=k_1+1,k_1+2,\ldots,M$, \\ \hspace*{1.55em}$l_2=0,1,\ldots,M-l_1$, \\ compute \[ e^{\sbl k+\sbl v_1}_{\sbl l} :=\left([\tau_1(\bl k)- \tau_1(\bl l)]e^{\sbl k}_ {\sbl l} -\tau_2(\bl k)\,e^{\sbl k-\sbl v_1}_{\sbl l} + \tau_0(\bl l)\,e^{\sbl k}_{\sbl l+\sbl v_1} + \tau_2(\bl l)\,e^{\sbl k}_{\sbl l-\sbl v_1}\right)/\tau_0(\bl k), \] where $\bl k=(k_1,0)$, $\bl l=(l_1,l_2)$, $\bl v_1:=(1,0)$, and for $\bl t:=(t_1,t_2)$ the coefficients $\tau_{j}(\bl t)$ are given by \[ \tau_0(\bl t):=(|\bl t|-M)(t_1+\mu_1+1), \quad\! \tau_2(\bl t):=t_1(|\bl t|-\mu_3-M-1), \quad\! \tau_1(\bl t):=\tau_0(\bl t)+\tau_2(\bl t); \] next put $e^{\sbl l}_{\sbl k+\sbl v_1}:=e^{\sbl k+\sbl v_1} _{\sbl l}$. \end{description} \item[{\sc Step 4}] For $ \mbox{$\bm{k}$},\,\mbox{$\bm{\bm l}$}\in\Omega^{\sbl c}_m$, compute \begin{equation} \label{E:E-e} E^{\sbl k}_{\sbl l}(\mbox{$\bm{\alpha}$}, c,m):=U \,V_{\sbl k}\,V_{\sbl l}\, \,e^{\sbl k-\sbl c'}_{\sbl l-\sbl c'}, \end{equation} \hspace*{2.2em}where $\bl c':=(c_1,c_2)$, and \[ U:= (|\mbox{$\bm{\alpha}$}|+3)_{2|\sbl c|}\prod_{i=1}^{3} (\alpha_i+1)_{2c_i}^{-1},\qquad V_{\sbl h}:= \binom{m-|\bl c|}{\bl h-\bl c'}\rbinom {m}{\bl h}. 
\] \end{description} \end{alg} As noticed in Remark~\ref{R:Clenshaw}, the sum in \eqref{E:E00} can be evaluated efficiently using Clenshaw's algorithm, at the cost of $O(M-l_1)$ operations. \subsection{Computing the integrals $I_{\sbl j}$} \label{SS:impl-I} The most computationally expensive part of the proposed method is the evaluation of the collection of integrals \eqref{E:I}. For example, for $n+m=22$, if $\bl c=(0,0,0)$, there are $276$ two-dimensional integrals to be computed. It is obvious that using any standard quadrature would completely ruin the efficiency of the algorithm. Moreover, if any of the parameters $\alpha_i$ ($i=1,2,3$) in (\ref{E:w}) is smaller than $0$ and the corresponding constraint parameter $c_i$ equals zero, then the integrands in (\ref{E:I}) are singular functions, and standard quadratures may fail to deliver any approximations to the integrals. Therefore, for evaluating the complete set of integrals (\ref{E:I}), we introduce a special scheme which is based on the general method \cite{Kel07} for approximating singular integrals. The proposed numerical quadrature is of the automatic type, which means that the required number of nodes is adaptively selected, depending on the complexity of the rational B\'ezier function, so that the requested accuracy of the approximation is always achieved. Most importantly, the algorithm is extremely effective in the considered application. In the example given at the beginning of this subsection ($n+m=22$), the time required to compute the whole collection of $276$ integrals is only twice\footnote{Based on the Maple implementation of the algorithm. If the collection consists of 990 integrals ($n+m=42$), the computation time increases by only 50\% (compared to the case of 276 integrals). The detailed report from the efficiency test can be found at the end of Appendix B.} longer than the time needed to approximate a single separate integral of a similar type. 
First, we shall write the integral (\ref{E:I}) in a different form which is better suited for fast numerical evaluation. Observe that bivariate Bernstein polynomials \eqref{E:Ber2} can be expressed in terms of univariate Bernstein polynomials. Namely, we have \[ B^N_{\sbl j}(\bl x)= B^N_{j_1}(x_1) B^{N-j_1}_{j_2}({x_2}/{(1-x_1)}), \qquad \bl j:=(j_1,j_2),\; \bl x:=(x_1,x_2), \] where $B^M_i(t):=\binom{M}{i}t^i(1-t)^{M-i}$, $0\le i\le M$, are univariate Bernstein polynomials. Further, the bivariate weight function $w_{\mbox{\scriptsize$\bla$}}$ (see \eqref{E:w}) can be expressed as \[ w_{\mbox{\scriptsize$\bla$}}(\bl x) = A_{\mbox{\scriptsize$\bla$}}\,v_{\alpha_2+\alpha_3,\alpha_1}(x_1) \,v_{\alpha_3,\alpha_2}({x_2}/{(1-x_1)}), \] where $v_{\alpha,\beta}(t):=(1-t)^\alpha t^\beta$ is the univariate Jacobi weight function. Hence, the integral \eqref{E:I} can be written as \begin{align} \nonumber I_{\sbl j}&=\int_{0}^{1}\int_{0}^{1-x_1} w_{\mbox{\scriptsize$\bla$}}(\bl x)\frac{B^{N}_{\sbl j}(\bl x)}{\omega(\bl x)}\,{\rm d}x_2\,{\rm d}x_1 \\ \nonumber &=A_{\mbox{\scriptsize$\bla$}}\int_{0}^{1}v_{\alpha_2+\alpha_3+1,\alpha_1}(s)B^{N}_{j_1}(s) \left(\int_{0}^{1}v_{\alpha_3,\alpha_2}(t)\frac{B^{N-j_1}_{j_2}(t)}{\omega^\ast(s,t)}\,{\rm d}t\right){\rm d}s \\[0.5ex] \label{E:I2} &=A_{\mbox{\scriptsize$\bla$}}\binom{N}{\bl j} \int_{0}^{1}v_{a,b}(t) \left(\int_{0}^{1}v_{c,d}(s)\frac{1}{\omega^\ast(s,t)}\,{\rm d}s\right){\rm d}t, \end{align} where we denoted $N:=n+m$, \begin{equation}\label{E:abcd} \left.\begin{array}{ll} a\equiv a(\bl j):=\alpha_3+N-|\bl j|,&\quad b\equiv b(j_2):=\alpha_2+j_2,\\[1ex] c\equiv c(j_1):=\alpha_2+\alpha_3+N-j_1+1, &\quad d\equiv d(j_1):=\alpha_1+j_1, \end{array}\,\right\} \end{equation} and \begin{equation} \label{E:omegastar} \omega^\ast(s,t):=\omega(s,(1-s)t) =\sum_{i=0}^n w_i(t) B^n_{i}(s), \qquad w_i(t) = \sum_{j=0}^{n-i}\omega_{i,j}\,B^{n-i}_{j}(t). 
\end{equation} Note that the computation of values of the integrand is now much more effective, because the coefficients $w_i$ of the function $\omega^\ast$ ($0\leq i\leq n$) in (\ref{E:omegastar}) do not depend on the inner integration variable $s$. The main idea is, however, to compute the values of $\omega^\ast$ only once (at a properly selected set of quadrature nodes), and obtain a tool for fast computation of the integrals (\ref{E:I2}) for different values of $a$, $b$, $c$, and $d$, i.e.\ for different values of $\bl j$. For arbitrary fixed $t\in[0,1]$, define the function \begin{equation}\label{E:psi} \psi_t(s):=\omega^\ast(s,t)^{-1}. \end{equation} It is easy to see that we can write \[ I_{\sbl j}=A_{\mbox{\scriptsize$\bla$}}\binom{N}{\bl j} J(a,b,\Phi), \] with \begin{equation}\label{E:Phi} \Phi(t):=J(c,d,\psi_t), \end{equation} where we use the notation \[ J(\alpha,\beta,f):=\int_{0}^{1}(1-x)^\alpha x^\beta f(x){\rm d}x. \] The functions $\psi_t$ and $\Phi$ are analytic in a closed complex region containing the interval $[0,1]$ (it is proved in Appendix B). This implies that (cf.\ \cite[Chapter 3]{Riv90}) they can be accurately and efficiently approximated by polynomials given in terms of the (shifted) Chebyshev polynomials of the first kind, \begin{equation}\label{E:TchExpan} \begin{array}{l} \displaystyle \psi_t(s)\approx S_{M_t}(s) := \sum_{i=0}^{M_t}{'\,}\gamma^{[t]}_{i} T_i(2s-1),\\[3ex] \displaystyle \Phi(t) \approx \hat{S}_{M}(t) := \sum_{l=0}^{M}{'\,}\hat{\gamma}_l T_l(2t-1), \end{array}\qquad 0\leq s,t\leq 1, \end{equation} where $M$ may depend on $j_1$, and the prime denotes a sum with the first term halved. Once the above expansions are computed (this can be done in a time proportional to $M_{t}\log(M_{t})$ and $M\log(M)$), the integrals $J(\cdot,\cdot,\cdot)$ can be easily evaluated using the following algorithm that was proved in \cite{LWK12}. 
\begin{alg}[Computing the integral $ J(\alpha,\beta;S)$, $S$ being a polynomial] \label{A:PK_1d} \ \\ Given numbers $\alpha,\,\beta>-1$, let $r := \beta-\alpha$, $u := \alpha+\beta+1$. Let $S_{\mathcal{M}}$ be a polynomial defined by \[ S_{\mathcal{M}}(x) = \sum\limits_{i=0}^{\mathcal{M}}{'} \gamma_i T_i(2x-1). \] Compute the sequence $d_i$, $0\leq i \leq {\mathcal{M}}+1$, by \begin{align*} &d_{{\mathcal{M}}+1} = d_{\mathcal{M}} := 0, \\[1.0ex] &d_{i-1} := \frac{2 r d_{i} + (i-u) d_{i+1} - \gamma_{i}} {i+u}, \qquad i={\mathcal{M}},{\mathcal{M}}-1,\dots,1. \end{align*} \noindent \textbf{Output}: $ J(\alpha,\beta;S_{\mathcal{M}}) = \mathcal{B}\cdot \left( \frac12\gamma_0 - r d_0 + u d_1 \right)$, where $\hspace*{0.1ex}\mathcal{B} := \Gamma(\alpha+1)\Gamma(\beta+1) / \Gamma(\alpha+\beta+2)$. \end{alg} By the repeated use of the above very fast scheme, we may efficiently approximate the whole set of integrals $I_{\sbl j}$ for $\bl j\in\Omega_{n+m}^{\sbl c}$. The remaining technical details of the adaptive implementation of the proposed quadrature and the complete formulation of the integration algorithm are presented in Appendix A. \subsection{ Main algorithm} \label{SSS:MainAlg} The method presented in this paper is summarized in the following algorithm. \begin{alg}[Polynomial approximation of the rational B\'ezier triangular surface] \label{A:RTBSappr} Given the coefficients $r_{\sbl k}$ and positive weights $\omega_{\sbl k}$, $\bl k\in\Theta_n$, of the rational function \eqref{E:Rsc}, the coefficients $p_{\sbl k}$ of the degree $m$ polynomial \eqref{E:Psc}, minimising the error \eqref{E:er-sc}, with the constraints \eqref{E:gsc}, can be computed in the following way.\\[-3ex] \begin{description} \itemsep0.25pt \item[{\sc Step 1}] Compute the table $\{E^{\sbl k}_{\sbl l}(\mbox{$\bm{\alpha}$}, c,m)\}_{\sbl k,\sbl l\in\Omega_m^{\sbl c}}$ by Algorithm~\ref{A:Ecomp}. 
\item[{\sc Step 2}] Compute the table $\{I_{\sbl j}\}_{\sbl j\in\Omega^{\sbl c}_{n+m}}$ by Algorithm~\ref{A:I-comp}. \item[{\sc Step 3}] For $\bl k\in\Gamma^{\mbox{$\sbl c$}}_m$, put $p_{\sbl k}:=g_{\sbl k}$. \item[{\sc Step 4}] For $\bl k\in\Omega^{\mbox{$\sbl c$}}_m$, compute $p_{\sbl k}$ by \eqref{E:psc}. \end{description} \noindent \textbf{Output}: Set of the coefficients $p_{\sbl k}$, $\bl k\in\Theta_m$. \end{alg} \section{Examples} \label{S:exmp} In this section, we present some examples of approximation of rational triangular B\'ezier patches by triangular B\'ezier patches. No theoretical justification is known for the best choice of the vector parameter $\mbox{$\bm{\alpha}$}$ in the distance functional \eqref{E:dist} if we use the \textit{error function} \begin{equation} \label{E:err} \Delta(\bm x):=\|\mbox{$\mathsf R$}_n(\bm x)-\mbox{$\mathsf P$}_m(\bm x)\| \end{equation} to measure the quality of the approximation. On the basis of numerical experiments, we claim that $\mbox{$\bm{\alpha}$}=(-\frac12,-\frac12,-\frac12)$ usually leads to slightly better results than the ones obtained for other ``natural'' choices of parameters, including the usually preferred $\mbox{$\bm{\alpha}$}=(0,0,0)$ (meaning $w_{\mbox{\scriptsize$\bla$}}(\bl x)=1$). The computations were performed in 16-decimal-digit arithmetic. In the implementation of Algorithm~\ref{A:I-comp}, we have assumed $\varepsilon=5\times10^{-16}$ in \eqref{E:stop}, and used the initial values $M^{*}=M_{k}^{*}=32$. 
\subsection{Example 1}\label{SS:example1} Let $\mbox{$\mathsf R$}_6$ be the degree 6 rational triangular B\'ezier patch \cite[Example 2]{Hu13}, \begin{equation} \label{E:R6} \mbox{$\mathsf R$}_6(\bl x):= \dfrac{\displaystyle \sum_{\sbl k\in\Theta_6}\omega_{\sbl k}r_{\sbl k}B^6_{\sbl k}(\bl x)} {\displaystyle \sum_{\sbl k\in\Theta_6}\omega_{\sbl k}B^6_{\sbl k}(\bl x)}, \qquad \bl x\in T, \end{equation} $T$ being the standard triangle \eqref{E:T}, and the control points $r_{\sbl k}$ and the associated weights $\omega_{\sbl k}$ being listed in Table~\ref{tab:R6cp+w}. \begin{table}[phtb] \caption{Control points $r_{\sbl k}$ (\textit{upper entries}) and the associated weights $\omega_{\sbl k}$ (\textit{lower entries}) of the surface \eqref{E:R6}, with $\bl k=(k_1,k_2)\in\Theta_6$.\label{tab:R6cp+w}} {\footnotesize \[ \begin{array}{c|ccccccc} k_1\setminus k_2 & 0 & 1 & 2 & 3 & 4 &5 &6 \\[1ex]\hline &&&&&&&\\ 0 & (6,0,2) & (5,0,3) & (4,-0.5,3.5) & (3,-0.2,4) & (1.5,0.5,2) & (0.4,0.4,1)&(0,0,0)\\ &0.8 & 0.3 & 1.8 & 1.2 &0.8 & 0.2&1.6\\ 1 & (5.2,1,3)& (4.5,1,3) & (3,0.6,4) & (2,0.9,3) & (1.2,1,2) & (0.4,0.8,0.6)&\\ &1& 0.4 &0.8&2.4 &1.3 &0.9& \\ 2 & (4.5,2,5) & (4,2.2,4) & (3,2,3) & (2,1.2,2) &(0.8,1.5,1.5)&& \\ & 0.5& 1 & 1 & 1.8 &0.8&& \\ 3 & (4,3,6) & (2.5,2.5,5) & (1.5,2.8,4) &(1,2,3)&& &\\ & 0.3 & 2 & 1 &0.9&&& \\ 4 & (3.5,3.5,4) & (2.5,3,5) &(1.5,3.5,3)&&&&\\ & 1.5& 0.6 &1.2&&& &\\ 5 & (3,4.2,2)&(2,4,2) & & & & &\\ & 0.8 & 0.5 & & & & &\\ 6 & (2,5,1)& & & & & &\\ & 1 & & & & & & \end{array} \] } \end{table} We let $\mbox{$\bm{\alpha}$}=(-\frac12,-\frac12,-\frac12)$, $\mbox{$\bl c$}=(1,1,1)$, and constructed the degree 5 best approximating polynomial patch \[ \mbox{$\mathsf P$}_5(\bl x):=\sum_{\sbl k\in\Theta_5}p_{\sbl k}B^5_{\sbl k}(\bl x), \qquad \bl x\in T, \] under the restriction $p_{\sbl k}=g_{\sbl k}$ for $\bl k\in\Gamma^{\sbl c}_5$, where \[ \Gamma^{\sbl c}_5:=\{\bl k=(k_1,k_2):k_1=0,\; \mbox{or}\; k_2=0, \; \mbox{or}\; |\bl k|=5\}, \] and the set of points 
$g_{\sbl k}$, $\bl k\in\Gamma^{\sbl c}_5$, is obtained in the following way. As is well known, the boundary of the patch \eqref{E:R6} is formed by three degree 6 rational B\'ezier curves. The least squares degree 5 polynomial approximation to each of these rational curves, with the endpoints preservation, is constructed using an extension of the method of \cite{LWK12}, described in \cite{LWK15} (the input data: $m=5$, $\alpha=\beta=-\frac12$, $k=l=1$, notation used being that of \cite{LWK15}). Now, the set of points $g_{\sbl k}$ is the appropriate collection of all control points of the three resulting B\'ezier curves. We have repeated the computations for $\mbox{$\bm{\alpha}$}=(0,0,0)$ (with $\alpha=\beta=0$, in \cite{LWK15}), obtaining slightly worse results. The maximum errors $\max_{\sbl x\in T}\Delta(\bm x)$ (cf. \eqref{E:err}) of the obtained results (see Fig.~\ref{fig:Fig2}) are about 50\% less than those reported in \cite[Table 1]{Hu13}. \begin{figure} \caption{\small Constrained degree 5 polynomial approximation of the degree 6 rational triangular B\'ezier surface, with $\mbox{$\bl c$}=(1,1,1)$.} \label{fig:Fig2} \end{figure} \subsection{Example 2}\label{SS:example2} Let $\mbox{$\mathsf R$}^\ast$ be the composite rational surface, \begin{equation} \label{E:Rast} \mbox{$\mathsf R$}^\ast(\bl x):=\left\{\begin{array}{ll} \mbox{$\mathsf R$}^R_5(\bl y), &\qquad\bl y:=(1-|\bl x|,x_1-x_2),\;\bl x\in T_R,\\[2ex] \mbox{$\mathsf R$}^Y_5(\bl z), &\qquad\bl z:=(x_2-x_1,1-|\bl x|),\;\bl x\in T_Y, \end{array}\right. 
\end{equation} where for $C\in\{R,Y\}$, \begin{equation} \label{E:RC} \mbox{$\mathsf R$}^C_5(\bl w):= \dfrac{\displaystyle \sum_{\sbl k\in\Theta_5}\omega^C_{\sbl k}r^C_{\sbl k}B^5_{\sbl k}(\bl w)} {\displaystyle \sum_{\sbl k\in\Theta_5}\omega^C_{\sbl k}B^5_{\sbl k}(\bl w)}, \qquad \bl w\in T, \end{equation} $T$ being the standard triangle \eqref{E:T}, and \begin{align*} T_R:=&\{\bl x=(x_1,x_2): x_1\ge x_2\ge0,\;|\bl x|\leq1\}, \\[1ex] T_Y:=&\{\bl x=(x_1,x_2): x_2\ge x_1\ge0,\;|\bl x|\leq1\}. \end{align*} The control points $r^C_{\sbl k}$ and the associated weights $\omega^C_{\sbl k}$ of the rational patches \eqref{E:RC} can be found at the webpage \texttt{http://www.ii.uni.wroc.pl/\~{}pwo/programs.html}. The surface \eqref{E:Rast} is shown in Fig.~\ref{fig:Fig3} (the left plot). Now, we show how to obtain the degree $m$ polynomial approximations of the rational subpatches, which form a $C^1$-continuous composite surface. $1^o$ Let $\mbox{$\mathsf P$}^Y_m$ be the triangular B\'ezier patch of degree $m$ approximating the rational patch $\mbox{$\mathsf R$}^Y_5$ without constraints, i.e., for $\mbox{$\bl c$}=(0,0,0)$. Let $p^Y_{\sbl k}$ be the control points of the patch $\mbox{$\mathsf P$}^Y_m$. $2^o$ We approximate the rational patch $\mbox{$\mathsf R$}^R_5$ by the triangular B\'ezier patch $\mbox{$\mathsf P$}^R_m$ of degree $m$, with constraints of the type $\mbox{$\bl c$}=(2,0,0)$, where the points $g_{\sbl k}\in\Gamma^{\sbl c}_m$ are chosen so that the $C^1$-continuity is obtained (cf. \cite[Section 17]{Far02}): \[ \begin{array}{ll} g_{(0,i)}:= p^Y_{(i,0)},&\qquad i=0,1,\ldots,m, \\[2ex] g_{(1,i)}:=p^Y_{(i+1,0)}+(p^Y_{(i+1,0)}-p^Y_{(i,1)}),&\qquad i=0,1,\ldots,m-1. \end{array} \] The results, obtained for $m=5$ and $m=6$, with $\mbox{$\bm{\alpha}$}=(-\frac12,-\frac12,-\frac12)$, are shown in Fig.~\ref{fig:Fig3}. 
It can be observed that approximation of the rational composite surface \eqref{E:Rast} by two jointed polynomial patches of degree $m=5$ (the middle plot) resulted in some visible differences. Increasing the degree of the approximating polynomials to $m=6$ (the right plot) already gave a very satisfactory result. \begin{figure} \caption{\small The composite rational B\'ezier surface \eqref{E:Rast}.} \label{fig:Fig3} \end{figure} \section*{Appendix A: The adaptive algorithm for computing the integrals $I_{\sbl j}$} \label{S:AppA} \setcounter{equation}0 \setcounter{subsection}0 \setcounter{thm}0 \renewcommand{\thethm}{A.\arabic{thm}} \renewcommand{\thesection}{\Arabic{section}} \renewcommand{\thesubsection}{A.\arabic{subsection}} \renewcommand{\theequation}{A.\arabic{equation}} We start with proving that the functions $\psi_t$ (\ref{E:psi}), $t\in[0,1]$, and $\Phi$ (\ref{E:Phi}) are analytic in a closed complex region containing the interval $[0,1]$. The assertion is clearly true in the case of $\psi_t(z)=\omega^{*}(z,t)^{-1}$, as the bivariate polynomial $\omega^{*}$ has no roots in $[0,1]\times[0,1]$. Similarly, for any $s\in[0,1]$, the function $z\mapsto\omega^{*}(s,z)^{-1}$ is analytic in a rectangular region $[-\sigma,1+\sigma]\times[-\sigma,\sigma]$, where $\sigma>0$ does not depend on $s$. Thus, if $s\in[0,1]$, then \begin{equation*} \int_{C} \omega^{*}(s,z)^{-1} {\rm d}z = 0 \end{equation*} for any closed contour $C\subset[-\sigma,1+\sigma]\times[-\sigma,\sigma]$. Consequently, if $\alpha,\beta > -1$, then \begin{equation*} \int_{C} \Bigg( \int_{0}^{1} (1-s)^{\alpha}s^{\beta} \omega^{*}(s,z)^{-1} {\rm d}s\Bigg) {\rm d}z = \int_{0}^{1} (1-s)^{\alpha}s^{\beta} \Bigg( \int_{C} \omega^{*}(s,z)^{-1} {\rm d}z\Bigg) {\rm d}s = 0 . \end{equation*} Therefore, by Morera's theorem (see, e.g., \cite[Chapter 2.3]{Ahl79}), the function $\Phi(z) = J(\alpha,\beta,\psi_z)$ is also analytic in $[-\sigma,1+\sigma]\times[-\sigma,\sigma]$. 
The polynomials $S_{M_t}$ and $\hat{S}_M$ in (\ref{E:TchExpan}), which approximate the functions $\psi_t$ and $\Phi$, are determined to satisfy the interpolation conditions \[ \left.\begin{array}{l} S_{M_{k}}(s_j) = \omega^\ast(s_j,t_k)^{-1},\quad 0\leq j\leq M_{k},\\[2ex] \hat{S}_{M}(t_k) = J(c,d;S_{M_{k}}), \end{array}\right\}\qquad 0\leq k\leq M, \] where, for simplicity, we denote $M_{k}\equiv M_{t_k}$, and the interpolation nodes are given by \begin{equation} \label{E:sjtk} s_j = \frac{1}{2} + \frac{1}{2}\cos\frac{j\pi}{M_{k}}, \qquad t_k = \frac{1}{2} + \frac{1}{2}\cos\frac{k\pi}{M}. \end{equation} In such a case, the coefficients $\gamma^{[t_k]}_i$ and $\hat{\gamma}_l$ in (\ref{E:TchExpan}) are given by \begin{equation} \label{E:gammas} \begin{array}{ll} \displaystyle \gamma^{[t_k]}_i = \frac{2-\delta_{i,M_{k}}}{M_{k}}\sum_{j=0}^{M_{k}}{''\,} \omega^{*}(s_j,t_k)^{-1}\cos\frac{ij\pi}{M_{k}},&\quad 0\leq i\leq M_{k},\\[3ex] \displaystyle \hat{\gamma}_l = \frac{2-\delta_{l,M}}{M}\sum_{k=0}^{M}{''\,} J(c,d;S_{M_{k}}) \cos\frac{lk\pi}{M},&\quad 0\leq l\leq M, \end{array} \end{equation} where $\delta_{j,k}$ is the Kronecker delta, and the double prime means that the first and the last term of the sum are to be halved. The sets of coefficients (\ref{E:gammas}) can be very efficiently computed by means of the FFT with only $O\big(M_{k}\log(M_{k})\big)$ and $O\big(M\log(M)\big)$ arithmetic operations (cf.~\cite{Gen72} or \cite[Section 5.1]{DB08}; the authors recall that the FFT is not only fast, but also resistant to round-off errors). The presented approach is very convenient from the practical point of view because if the accuracy of the approximation (\ref{E:TchExpan}) is not satisfactory, then we may double the value of $M_{k}$ (or $M$) and reuse the previously computed results. 
The expansions (\ref{E:TchExpan}) are accepted if \begin{equation} \label{E:stop} \frac{\sum\limits_{i=M_{k}-3}^{M_{k}}|\gamma^{[t_k]}_i|} {\max\big\{1,\,\max\limits_{\,0\leq i\leq 3}|\gamma^{[t_k]}_i|\big\}} \leq 16\varepsilon \quad\, \mathrm{and} \quad\, \frac{\sum\limits_{i=M-3}^{M}|\hat{\gamma}_i|} {\max\big\{1,\,\max\limits_{\,0\leq i\leq 3}|\hat{\gamma}_i|\big\}} \leq 256\varepsilon , \end{equation} where $\varepsilon$ is the computation precision. Here is the complete algorithm for efficient approximation of the whole set of integrals $I_{\sbl j}$ for $\bl j\in\Omega_{n+m}^{\sbl c}$. The functions (parameters) $a$, $b$, $c$, and $d$ are defined in (\ref{E:abcd}). \begin{alg}[Numerical computation of the set of integrals $I_{\sbl j}$, $\bl j\in\Omega^{\sbl c}_{n+m}$] \label{A:I-comp} \ \begin{itemize} \setlength{\itemindent}{-3.3ex} \itemsep2pt \item[] Let $M := M^*$, where $M^*$ is an arbitrary integer greater than 7. \item[] \textbf{Phase I}. For $k\in\{0,1,\dots,M\}$ do the following Steps 1--6:\\[-2ex] \begin{itemize} \setlength{\itemindent}{5ex} \setlength\itemsep{0.1ex} \item[Step 1.] Compute $t_k$ according to \eqref{E:sjtk}, and compute $w_i(t_k)$ in \eqref{E:omegastar} for $i\in\{0,1,\dots,n\}$. \item[Step 2.] Let $M_{k} := M_{k}^{*}$, where $M_{k}^{*}$ is an arbitrary integer greater than 7. \item[Step 3.] Compute the values $\hspace*{0.1ex}\omega^{*}(s_j,t_k)^{-1}$ for $j\in\{0,1,\dots,M_{k}\}$, where $s_j$ is given by (\ref{E:sjtk}). \item[Step 4.] Using the FFT, compute the coefficients $\gamma_i^{[t_k]}$ ($\,0\leq i\leq M_{k}$) defined in (\ref{E:gammas}). \item[Step 5.] If the first condition of (\ref{E:stop}) is not satisfied, then set $M_{k} := 2 M_{k}$, compute the additional values $\hspace*{0.1ex}\omega^{*}(s_j,t_k)^{-1}$ for $j\in\{1,3,5,\dots,M_{k}-1\}$, and go to Step 4. \item[Step 6.] 
Compute the set of quantities \( W[t_k,j_1] := J\left(c(j_1),d(j_1);S_{M_{k}}\right), \) by applying Algorithm \ref{A:PK_1d}, for $j_1\in\{c_1,c_1+1,\dots,N-c_2-c_3\}$, where $ N=n+m$. \end{itemize}\vspace*{1ex} \item[] \textbf{Phase II}. For $j_1\in\{c_1,c_1+1,\dots,N-c_3-c_2\}$ perform the following Steps 7--9:\\[-2ex] \begin{itemize} \setlength{\itemindent}{5ex} \setlength\itemsep{0.1ex} \item[Step 7.] Compute the coefficients $\hat{\gamma}_l$ ($\,0\leq l\leq M$) defined in (\ref{E:gammas}), by means of the FFT, using the stored values $W[t_k,j_1]$, $0\leq k\leq M$, in place of $J\big(c(j_1),d(j_1);S_{M_{k}}\big)$. \item[Step 8.] If the second condition of (\ref{E:stop}) is not satisfied, then set $M := 2 M$, and repeat Steps 1--6 for $k\in\{1,3,5,\dots,M-1\}$. \item[Step 9.] For $j_2\in\{c_2,c_2+1,\dots,N-c_3-j_1\}$, compute the integrals \[ I_{\sbl j}\equiv I_{(j_1,j_2)} := A_{\mbox{\scriptsize$\bla$}}\binom{N}{\bl j}\, J\Big(a(\bl j),b(j_2);\hat{S}_{M}\Big) \] using Algorithm \ref{A:PK_1d}. \end{itemize} \end{itemize} \noindent \textbf{Output}: Set of the integrals $ I_{\sbl j}$ for $\bl j\in\Omega^{\sbl c}_{n+m}$. \end{alg} \begin{rem} In Steps 4 and 7 of the above algorithm the coefficients $\gamma_i^{[t_k]}$ ($\,0\leq i\leq M_{k}$) or $\hat{\gamma}_l$ ($\,0\leq l\leq M$) are recalculated each time the value of $M_k$ or $M$ is doubled. Such a procedure is advised if we use a system (like, e.g., Maple or Matlab) equipped with a fast built-in FFT subroutine. If we are to program the FFT summation algorithm by ourselves, it should rather be done in such a way that practically all results computed for a previous value of $M_k$ or $M$ are reused (cf., e.g.,\ \cite{Gen72}). \end{rem} In Table \ref{Tab:IntTime} we present the results of the efficiency test, where the proposed quadrature (implemented in Maple) is compared to the Maple built-in integration subroutine. 
We have used the B\'ezier surface from Example \ref{SS:example1} ($n=6$), and set the parameters $m$ and $\bl c$ to several different values, to obtain collections of integrals of different sizes (equal to $|\Omega_{n+m}^{\sbl c}|$). The experiment was performed in the 64-bit version of Maple 16 on the computer equipped with the $3.7$GHz i7 processor. All parameters $\alpha_i$ in (\ref{E:w}) were set to $0$ (the efficiency of the proposed method does not depend on $\bl\alpha$, but the Maple built-in integration subroutine works most efficiently with this selection). \begin{table}[!ht] \newcolumntype{C}[1]{>{\centering\arraybackslash}m{#1}} \newcommand{\hp}{\hphantom{0}} \begin{center} \caption{\small Comparison of the computation times of the Maple library function and the proposed adaptive quadrature (Algorithm \ref{A:I-comp}) in the case of several collections of integrals (\ref{E:I}). The number of integrals which are to be computed equals $|\Omega_{n+m}^{\sbl c}|$.} \label{Tab:IntTime} \renewcommand{\arraystretch}{1.025} \setlength\tabcolsep{1.5ex} \begin{tabular}{C{14ex}C{24ex}C{24ex}}\hline \vspace*{1.5ex}$\big|\Omega_{n+m}^{\sbl c}\big|$\vspace*{-1.75ex} & \multicolumn{2}{c}{computation time (in seconds)}\\\cline{2-3} & Maple library function & the proposed method\\\hline $\hphantom{0}\hphantom{0}1$ & $\hphantom{0}0.064$ & $0.30$ \\ $\hphantom{0}\hphantom{0}3$ & $\hphantom{0}0.19\hphantom{0}$ & $0.30$ \\ $\hphantom{0}10$ & $\hphantom{0}0.64\hphantom{0}$ & $0.32$ \\ $\hphantom{0}28$ & $\hphantom{0}1.75\hphantom{0}$ & $0.37$ \\ $\hphantom{0}91$ & $\hphantom{0}6.34\hphantom{0}$ & $0.43$ \\ $276$ & $22.9\hphantom{0}\hphantom{0}$ & $0.59$ \\ $990$ & FAILURE & $0.89$ \\ \hline \end{tabular} \end{center} \end{table} We have to keep in mind that Maple is an interpretative programming language with a pretty slow code interpreter.
Therefore, the $4.7$ times longer computation time of our quadrature, compared to the computation time of the Maple library function, in the case of $1$-element collection of integrals is in fact an excellent result. The last collection of $990$ integrals ($n+m=42$) was too difficult to be computed by the Maple built-in subroutine (in $14$-decimal digit arithmetic, assumed during this test). \section*{Appendix B: Hahn orthogonal polynomials} \label{S:AppB} \setcounter{equation}0 \setcounter{subsection}0 \setcounter{thm}0 \renewcommand{\thethm}{B.\arabic{thm}} \renewcommand{\thesection}{\Arabic{section}} \renewcommand{\thesubsection}{B.\arabic{subsection}} \renewcommand{\theequation}{B.\arabic{equation}} The notation \[ \hyper rs {a_1,\ldots, a_r}{b_1,\ldots,b_s}z :=\sum_{k=0}^{\infty}\frac{(a_1)_k\cdots (a_r)_k} {k!(b_1)_k\cdots (b_s)_k}\,z^k \] is used for the \textit{generalized hypergeometric series} (see, e.g., \cite[\S2.1]{AAR99}); here $r,\,s\in\mathbb{Z}_{+}$, $z$, $a_i, b_j\in \mathbb C$, and $(c)_k$ is the shifted factorial. The \textit{Hahn polynomials} (see, e.g., \cite[\S1.5]{KS98}) \begin{equation} \label{E:Hahn1} h_l(t)\equiv h_l(t;a,b,M):=(a+1)_l(-M)_l\hyper32{-l,l+a+b+1,-t}{a+1,-M}1, \end{equation} where $l=0,1,\ldots,M$, $a,\,b>-1$, and $M\in\mathbb N$, satisfy the recurrence relation \begin{equation} \label{E:Hahn1-rec} h_{l+1}(t)=A_l(t,M)\,h_l(t)+B_l(M)\,h_{l-1}(t), \qquad l\ge0;\; h_{0}(t)\equiv1; \;h_{-1}(t)\equiv0, \end{equation} with the coefficients \begin{equation} \label{E:Hahn1-rec-coeffs} A_l(t,M):=C_l\,(2l+s-1)_2\,t-D_l-E_l,\qquad B_l(M):=-D_l\,E_{l-1}, \end{equation} where $s:=a+b+1$, $C_l:=(2l+s+1)/[(l+s)(2l+s-1)]$, $D_l:=C_l\,l(l+M+s)(l+b)$, and $E_l:=(l+a+1)(M-l)$. \begin{rem}\label{R:Clenshaw} A linear combination of Hahn polynomials, \( s_N(t):=\sum_{i=0}^{N}\gamma_i\,h_i(t;a,b,M), \) can be summed using the following \textit{Clenshaw's algorithm} (see, e.g., \cite[Thm 3.2.11]{DB08}).
Compute the sequence $V_0,V_1,\ldots,V_{N+2}$ from $V_i:=\gamma_i+A_i(t,M)V_{i+1}+B_{i+1}(M)V_{i+2}$, $i=N,N-1,\ldots,0$, with $V_{N+1}=V_{N+2}=0$, where the coefficients $A_i(t,M)$ and $B_i(M)$ are defined by \eqref{E:Hahn1-rec-coeffs}. Then $s_N(t)=V_0$. \end{rem} \small \end{document}
\begin{document} \title{$K_r$-Factors in Graphs with Low Independence Number} \begin{abstract} A classical result by Hajnal and Szemer\'edi from 1970 determines the minimal degree conditions necessary to guarantee that a graph contains a $K_r$-factor. Namely, any graph on $n$ vertices, with minimum degree $\delta(G) \ge \left(1-\frac{1}{r}\right) n $ and $r$ dividing $n$, has a $K_r$-factor. This result is tight but the extremal examples are unique in that they all have a large independent set which is the bottleneck. Nenadov and Pehova showed that by requiring a sub-linear independence number the minimum degree condition in the Hajnal-Szemer\'edi theorem can be improved. We show that, with the same minimum degree and sub-linear independence number, we can find a clique-factor with double the clique size. More formally, we show for every $r\in \mathbb{N}$ and constant $\mu>0$ there is a positive constant $\gamma$ such that every graph $G$ on $n$ vertices with $\delta(G)\ge \left(1-\frac{2}{r}+\mu\right)n$ and $\alpha(G)< \gamma n$ has a $K_r$-factor. We also give examples showing the minimum degree condition is asymptotically best possible. \end{abstract} \section{Introduction} Given two graphs $H$ and $G$, a collection of vertex-disjoint copies of $H$ in $G$ is called an $H$-tiling. A perfect $H$-tiling of $G$, an $H$-factor for short, is an $H$-tiling that covers all the vertices of $G$. Note that a perfect matching corresponds to a $K_2$-factor, thus the notion of $H$-factors is a natural generalization from edges to arbitrary graphs. In extremal graph theory there is a big interest in finding necessary or sufficient conditions for the existence of spanning substructures. Perfect matchings and Hamilton cycles are two commonly studied examples. In particular for perfect matchings, necessary and sufficient conditions are well-known by Hall's and Tutte's theorems.
Often, \emph{global} properties such as factors and Hamilton cycles have \emph{local} necessary conditions. Dirac \cite{dirac1952some} showed that if an $n$-vertex graph $G$ has minimum degree at least $n/2$, then it has a Hamilton cycle, in particular if $n$ is even then $G$ has a perfect matching. This was extended to triangle factors by Corr\'adi and Hajnal \cite{corradi1963maximal} in 1963 and later generalized to $K_r$-factors in a classical result by Hajnal and Szemer\'edi~\cite{hajnal1970proof}, who gave the sufficient minimum degree for $K_r$-factors. \begin{theorem}[Hajnal and Szemer\'edi] \label{thm:hajnal} For every graph $G$ on $n$ vertices and every integer $r\ge 2$, if $r$ divides $n$ and the minimum degree of $G$ is at least $\left(1-\frac{1}{r}\right)n$, then $G$ contains a $K_r$-factor. \end{theorem} A short proof was later found by Kierstead and Kostochka~\cite{kierstead2008short}. The divisibility condition in this theorem is necessary as the number of vertices must be divisible by $|H|$ if we want to have an $H$-factor. The theorem is also tight in the sense that we cannot lower the minimum degree condition and still hope to cover any $n$-vertex graph. Different results relating to the theorem of Hajnal and Szemer\'edi have been published. A degree sequence version of the result was published by Treglown~\cite{treglown2016degree} proving that, for a $({1}/{r})$-fraction of the vertices, the degrees can be smaller than prescribed by the Hajnal-Szemer\'edi theorem. Other results include the minimum degree condition in a 3-partite \cite{magyar2002tripartite}, 4-partite \cite{martin2008quadripartite} or multi-partite \cite{keevash2013multipartite} host graph. In each of these results, the known extremal examples all have one or more large independent sets. Naturally the question arises, what happens if we forbid these large independent sets?
To cover any $n$-vertex graph with independence number\footnote{The independence number of a graph $G$, $\alpha(G)$, is the size of a largest independent set in $G$.} at least $n/r+1$ with cliques of arbitrary size we need at least $n/r+1$ cliques, as no clique can contain more than one vertex from the independent set. Taking an independent set of size exactly $n/r+1$ and adding edges from each of the remaining vertices to all other vertices gives a graph that does not have a $K_r$-factor and minimum degree $n-(n/r+1)=(1-1/r)n-1$. So-called \emph{Ramsey-Tur\'an} type problems, first studied by Erd\H{o}s and S\'os \cite{erdos1970some} in 1970, ask for the minimum number of edges that force the existence of a given subgraph $H$ in a graph with bounded independence number. In particular $\textbf{RT}(H,o(n))$ denotes the smallest number of edges which guarantees that every graph $G$ on $n$ vertices with $\alpha(G)=o(n)$ contains a copy of $H$. More on Ramsey-Tur\'an theory can be found e.g.\ in \cite{erdos1979turan,erdHos1983more,simonovits2001ramsey,SUDAKOV200399}. Continuing this line of research, Balogh, Molla and Sharifzadeh~\cite{balogh2016triangle} proved that the minimum degree requirement for a triangle factor in $G$ decreases if the independence number of $G$ is small, showing that $\delta(G)\ge(1/2+\varepsilon)n$ suffices in this case. Nenadov and Pehova~\cite{nenadov2018ramsey} extended their result to larger cliques and a generalization of the independence number. They show that instead of $\delta(G) \ge \left(1-\frac{1}{r}\right) n$ one only needs roughly $\delta(G) \ge \left(1-\frac{1}{r-1}\right) n$ for the existence of a $K_r$-factor if we restrict the independence number of $G$ to be sub-linear. We further improve the minimum degree condition, doubling the clique size compared to the Hajnal-Szemer\'edi theorem. We see in the following that this is best possible.
\begin{theorem} For every $r \ge 4$ and $\mu>0$ there are constants $\gamma$ and $n_0\in \mathbb{N}$ such that every graph $G$ on $n\ge n_0$ vertices where $r$ divides $n$, with $\delta(G)\ge \left(1-\frac{2}{r}+\mu\right)n$ and $\alpha(G)< \gamma n$ has a $K_r$-factor. \label{thm:mainresult} \end{theorem} Note that the bound is not true for $r=2,3$. Balogh, Molla and Sharifzadeh~\cite{balogh2016triangle} observed that a minimum degree of $(1/2+\varepsilon)n$ is needed in the case $r=3$. This can be seen by considering graphs with a bipartition such that there are no triangles which span over both parts. In particular, for $n$ divisible by 4, the graph $K_{n/2+1} \cup K_{n/2-1}$, the union of two disjoint cliques, has independence number $2$ and minimum degree $n/2-2$ but contains neither a perfect matching nor a triangle factor because $n/2-1$ and $n/2+1$ are both odd and cannot both be divisible by three. Balogh, McDowell, Molla and Mycroft~\cite{balogh2018triangle} showed that the minimum degree condition can be lowered if an additional divisibility condition is added to avoid exactly this case. The tightness of the Hajnal-Szemer\'edi theorem came from large independent sets. So what is the bottleneck if we forbid these? By definition, $\alpha(G)=o(n)$ implies that every set of linear size has at least one edge inside, but if we have a large triangle-free set then we can take at most two vertices from this set for every clique. In particular, if we have an $n$-vertex graph with a triangle-free set of size $2n/r+1$ then we cannot hope to find a $K_r$-factor. The existence of triangle-free graphs with sub-linear independence number is related to the asymmetric Ramsey number $R(3,n)$. This is well studied; results can be found e.g.\ in \cite{erdos1961graph}, \cite{kim1995ramsey}. The above construction shows that Theorem~\ref{thm:mainresult} is asymptotically tight. Look at the following example of an $n$-vertex graph.
Take a triangle-free graph of size $2n/r +1$ and add $\left(1-\frac{2}{r}\right)n -1$ vertices each connected to all other vertices. The triangle-free subgraph of size $2n/r +1$ becomes a bottleneck since we can take at most two vertices from it to complete to a $K_r$ and we cannot cover the graph with $n/r$ many $K_r$ (see Figure~\ref{fig:extremalexample}). So in this graph we have $\delta(G)> \left(1-\frac{2}{r} \right)n$ and $\alpha(G) = o(n)$ but there is no $K_r$-factor. This construction was first given in~\cite{balogh2016triangle}. \captionsetup{justification=raggedright,singlelinecheck=false} \begin{figure} \centering \includegraphics[scale=0.9]{images/extremalexample.pdf} \caption{ An extremal example showing that we cannot improve the degree condition below $\left(1-\frac{2}{r}\right)n$. } \label{fig:extremalexample} \end{figure} \captionsetup{justification=centering} Our proof combines well-known methods like the Regularity Lemma and embedding techniques with new ideas that use the low independence number. Our paper is structured as follows. In Section~\ref{sec:preliminaries} we introduce some notation and definitions and give tools that will be useful in the later proofs. The remainder of the paper contains the proof of Theorem~\ref{thm:mainresult}. The proof consists of two parts. First, in Section~\ref{sec:absorbers}, we use the absorbing method. This is a technique mainly pushed forward by R\"odl, Ruci\'nski and Szemer\'edi~\cite{rodl2006dirac, rodl2006perfect, rodl2008approximate}. The method implies that, under the appropriate circumstances, it is enough to find a $K_r$-tiling covering everything but a small fraction. The method sets aside a small set of vertices at the beginning which we can cover flexibly enough so that we can ``absorb'' any small fraction of the other vertices which may remain. For the more precise definition see Definition~\ref{def:absorber}.
Second, in Section~\ref{sec:almost}, we prove that the minimum degree and independence number conditions are enough to cover everything but a small $\xi$-fraction of all vertices with a $K_r$-tiling. This is also known as an almost cover of the vertices. To show that there is an almost cover with $K_r$'s in the graph we find a fractional tiling in the reduced graph after applying the Regularity Lemma and convert this back. We adapt some well-known techniques to make use of the fact that the independence number of $G$ is low. Embedding independent sets into a cluster of the reduced graph of the Regularity Lemma is standard, but we sometimes want to embed edges instead of single vertices. In fact we use that the low independence number implies we can find paths of small length in any small linear-sized subset of the vertices. We are required to differentiate between edges in the reduced graph which represent densities above $1/2 + \beta$ and those only above $\beta$. We believe this approach might also work for embedding other graphs into a host graph with a low independence number. \section{Preliminaries} \label{sec:preliminaries} Since many of our constructions are specifically built for making use of the low independence number we first introduce some definitions. We also prove an embedding lemma that will be useful in multiple places to convert structures we found in the reduced graph back to the original graph. We begin with the notion of $\epsilon$-regular. Throughout the paper we use standard graph theory notation (see e.g.\ \cite{diestel2012graph}). For a graph $G$, we use $|G|$ to refer to the number of vertices in $G$. In general, variables represented by Latin letters will be variables in $\mathbb{N}$ and variables represented by Greek letters will be small positive real numbers. $N_G(v)$ denotes the neighborhood of a vertex in $G$ which is the set $N_G(v) = \{ u \in V(G) | \{v, u\} \in E(G) \} $.
We omit the index $G$ if the graph is clear from the context. On the contrary, $\deg_G(v)$ denotes the number of outgoing edges from $v$ in the graph $G$, counting a double-edge twice in the case of multigraphs. We write $\deg_G(v, S)$ if we restrict to edges to a subset $S$ of $V(G)$ and again we omit the index if not needed. Further for two vertex sets $U$ and $W$, we denote by $\deg_G(U, W)$ the combined degrees over all vertices in $U$, $\sum_{u\in U}\deg_G(u, W)$. \begin{definition}[$\epsilon$-regular] Given a graph $G$ and disjoint subsets $V_1, V_2 \subseteq V (G)$, we say that the pair $(V_1, V_2)$ is $\epsilon$-regular if for all $X\subseteq V_1, |X| \ge \epsilon |V_1|$ and $Y\subseteq V_2, |Y | \ge \epsilon|V_2|$ we have $|d(X, Y ) - d(V_1, V_2)| \le \epsilon$ where $d(X, Y ) = \deg(X, Y ) / (|X||Y|)$. \end{definition} The following fact is an easy consequence of the definition of regularity. It is sometimes known as the Slicing Lemma (cf.\ \cite{komlos96szemerediregularity}). \begin{fact} Let $B=(V_1\cup V_2,E)$ be an $\varepsilon$-regular bipartite graph, let $\alpha>\varepsilon$ and let $V_1'\subset V_1$ and $V_2'\subset V_2$ be subsets with $|V_1'|\ge \alpha|V_1|$ and $|V_2'|\ge \alpha |V_2|$. Then for $\varepsilon'\ge \max\{\varepsilon/\alpha,2\varepsilon\}$ the graph $B'=B[V_1'\cup V_2']$ induced by $V_1'$ and $V_2'$ is $\varepsilon'$-regular with $|d_B(V_1,V_2)-d_{B'}(V_1',V_2')|<\varepsilon$. \label{fact:regsub} \end{fact} Our proof builds upon the famous Regularity Lemma by Szemer\'edi. Originally from~\cite{szemeredi1975regular} there have been many variants making it slightly stronger or adapted to a particular problem. The following is the degree variant of the Regularity Lemma.
\begin{lemma}[Regularity Lemma \cite{komlos96szemerediregularity}, Theorem 1.10] For every $\epsilon>0$ there is an $M=M(\epsilon)$ such that if $G$ is a graph on $n\ge M$ vertices and $\beta \in[0,1]$ is a real number, then there exists a partition $V(G)=V_0\cup\ldots\cup V_k$ and a spanning subgraph $G'\subseteq G$ with the following properties: \begin{enumerate} \item \label{lem:reg:k} $k\le M$, \item \label{lem:reg:V0} $|V_0|\le \epsilon n$, \item \label{lem:reg:Vi} $|V_i|=m$ for all $1\le i\le k$ with $m\le \epsilon n$, \item \label{lem:reg:deg}$\deg_{G'}(v)>\deg_G(v)-(\beta+\epsilon)n$ for all $v\in V(G)$, \item \label{lem:reg:indsets} $V_i$ is an independent set in $G'$ for all $i\in [k]$, \item \label{lem:reg:pairs} all pairs $(V_i,V_j)$ are $\epsilon$-regular with density $0$ or at least $\beta$. \end{enumerate} \label{lem:reg} \end{lemma} What is new in our case is that we must differentiate between dense and very dense pairs of partitions. The following definition replaces the usual reduced graph of the Regularity Lemma. We call it the reduced multigraph throughout the paper. \begin{definition}[reduced multigraph] \label{def:reducedmultigraph} For a graph $G$ and $\beta,\epsilon>0$ let $V(G)=V_0\cup\ldots\cup V_k$ be a partition and $G'\subseteq G$ a subgraph fulfilling the properties of Lemma~\ref{lem:reg}. We denote by $R_{\beta, \epsilon}$ the \emph{reduced multigraph} of this partition, which is defined as follows. Let $V(R_{\beta, \epsilon})=\{1,\ldots,k\}$ and for two distinct vertices $i$ and $j$ we draw two edges between $i$ and $j$ if $d_{G'}(V_i,V_j)\ge 1/2+\beta$, one edge if $d_{G'}(V_i,V_j)\ge \beta$ and no edge otherwise. \end{definition} In this reduced multigraph we sometimes refer to the vertices as clusters because of the correspondence to sets of vertices in the original graph.
We omit the subscripts $\beta$ and $\epsilon$ whenever it is clear from the context or the parameters are not used. The following fact connects a minimum degree condition in $G$ to a minimum degree condition in the reduced multigraph. \begin{fact}\label{fact:min_deg_r} Let $G$ be a graph with $\delta(G)\ge \left(1-\frac{2}{r}+\mu\right)n$ and $ V_1\cup\ldots\cup V_k$ be the partition given by the Regularity Lemma with the corresponding reduced multigraph $R_{ \beta, \epsilon}$ for $\epsilon$ and $\beta$ smaller than $\mu/10$. Then for every $i\in V(R_{\beta, \epsilon})$ we have \[\deg_{R_{\beta, \epsilon}}(i)\ge 2\left(1-\frac{2}{r}+\mu/2\right)k.\] \end{fact} \begin{proof} For every $i\in V(R_{\beta, \epsilon})$ we have $\deg_{G'}(V_i, \bigcup_{j\ne i} V_j)$ is at least \[|V_i|\left(\left(1-\frac{2}{r}+\mu -(\epsilon+\beta) \right) n -|V_0| \right) \ge \left(1-\frac{2}{r}+\mu -2\epsilon -\beta \right) nm .\] Every edge in $R_{\beta, \epsilon}$ represents less than $(1/2 + \beta) m^2$ edges in $G' \setminus {V_0}$. So $R_{\beta, \epsilon}$ must have minimum degree at least \[\deg_{R_{\beta, \epsilon}}(i)\ge \frac{ \left(1-\frac{2}{r}+\mu -2\epsilon - \beta\right) nm}{(1/2+ \beta )m^2}\ge 2\left(1-\frac{2}{r}+\mu/2\right)k,\] where in the last step we use the upper bounds on $\beta$ and $\epsilon$, $m\le n/k$ and $(1/2+\beta)^{-1}\ge 2(1-2\beta)$.\end{proof} Then to formalize the intuition of embedding two vertices into a cluster of the reduced graph we define a multi-embedding. \begin{definition}[$H$-multi-embedding]\label{def:embedding} Let $R$ be a reduced multigraph.
We say that a simple graph $H$ is embeddable into the multigraph $R$ if there is a mapping $f:V(H)\to V(R)$ such that the following holds: \begin{enumerate} \item \label{def:emb:1} For any $i\in V(R)$ the induced subgraph on the vertices $f^{-1}(i)$ in $H$ is either an isolated vertex, an edge or a path of length 2. \item \label{def:emb:2} If $\{u,v\}\in E(H)$, then $f(u)$ and $f(v)$ are connected by at least one edge in $R$ (as long as $f(u)$ and $f(v)$ differ). \item \label{def:emb:3} If for $i,j\in V(R)$ we have that $f^{-1}(i)$ and $f^{-1}(j)$ have at least two vertices and are connected in $H$, then $i$ and $j$ are connected with two edges. \item \label{def:emb:4} The joint neighborhood of the vertices embedded into a single cluster has at most two vertices embedded in any other cluster. That is \[ | N_{H}(f^{-1}(i)) \cap f^{-1}(j) | \le 2 \qquad \forall i, j \in V(R) \ (i \ne j) \] where $N_{H}(f^{-1}(i)) = \bigcup_{w \in f^{-1}(i)} N_{H}(w)$ is the combined neighborhood of all vertices embedded in cluster $i$. \end{enumerate} We call $f$ a multi-embedding of $H$. \end{definition} \begin{figure}[b] \centering \includegraphics[scale=0.6]{images/embedding.pdf} \caption{Multi-embedding of a $K_{4}$-diamond path} \label{fig:embedding} \end{figure} We prove that this embedding is useful in the intended way. Whenever we can find a multi-embedding of a graph $H$ in a reduced multigraph of $G$ then we can also find many copies of $H$ as a subgraph of $G$. \begin{lemma}[Embedding Lemma]\label{lem:embedable_struct} For every graph $H$ with $|H| = h $ and $\beta >0$ there exist $\epsilon, \gamma> 0$ and $n_0 \in \mathbb{N}$ such that the following holds for every graph $G$ on $n > n_0$ vertices and with independence number $\alpha(G) \le \gamma n$ and the sets $ V_1\cup\ldots\cup V_k$ with $|V_i|=m$ given by the Regularity Lemma with the corresponding reduced multigraph $R_{ \beta, \epsilon}$.
Let $f$ be a multi-embedding of $H$ into $R_{ \beta, \epsilon}$ with $f(V(H))=\mathcal{I}=\{i_1,\ldots,i_t\}$ for some $1\le t\le |H|$. Then let $V'_{i_1},\ldots,V'_{i_t}$ be subsets of $V_{i_1},\ldots,V_{i_t}$ respectively of size at least $({2 }/ {\beta})^{h} \epsilon m$. There exists a copy of $H$ as a subgraph of $G$ such that $v \in V'_{f(v)}$ for each vertex $v$ in $H \subset G$. \end{lemma} \begin{proof} Ensure $\beta^{h}\ge (h+1)\varepsilon$ and $\epsilon m \ge 3 \gamma n$. The subgraph can be chosen greedily and we show this by induction on the size of $\mathcal{I}$. The base case is clear: if we only have $| \mathcal{I}| = 1$ the multi-embedding can be at most a path of length two. But since $V_{i_1}'$ is of size at least $({2 }/ {\beta})^{h} \epsilon m \ge 3 \gamma n \ge 3 \alpha(G)$ there is always a path of length two in $V_{i_1}'$. For the induction we first check if there is any vertex of $R$ which has a single vertex embedded. If so, choose one of these, say $i_v$ and $f^{-1}(i_v) = v \in H$. By Property~\eqref{def:emb:2} of a multi-embedding, there are edges between cluster-vertices in $R$ if their vertices in $H$ are in the neighborhood of $v$. Consider any of these edges $\{i_w, i_v\}$. This means that $V_{i_w}$ and $V_{i_v}$ must be $\varepsilon$-regular with density at least $\beta$. Hence at most $\epsilon m$ vertices of $V'_{i_v}$ have a neighborhood smaller than $(\beta - \epsilon) |V'_{i_w}|$ in $V'_{i_w}$. This holds for any neighbor of $v$ in $H$. As $\epsilon m h < m$, there is at least one vertex in $V'_{i_v}$ which has at least $(\beta/2) |V'_{i_j}|$ neighbors in $V'_{i_j}$ for all $i_j \in f(N_H(v))$; choose one arbitrarily, say $s_v$. Choose $V''_{i_j}$ to be the neighborhood of $s_v$ if $f^{-1}(i_j)$ contains a neighbor of $v$ or set it equal to $V'_{i_j}$ if not.
Note that $|V''_{j}| \ge ({{2 }/ \beta})^{h-1}\epsilon m $ for all $j \in \mathcal{I} $ and that $f$ restricted to $H \setminus v$ is still a multi-embedding into $\mathcal{I} \setminus i_v$. So we can apply the induction hypothesis and find the subgraph $H \setminus v$ of $G$ such that the vertices are chosen from $V''_{i_j}$. Since all of the necessary $V''_{i_j}$ are in the neighborhood of $s_v$, we have that the graph from the induction together with $s_v$ form $H \subset G$ as desired. The case where each cluster has at least two vertices embedded works analogously. Choose a vertex in $\mathcal{I} \subset R$ arbitrarily, say $i_v$, and let $f^{-1}(i_v)$ be the vertices $v_1, v_2$ and possibly $v_3$ of $H$. We call $f(N_H( f^{-1}(i_v))) \subset R$, excluding $i_v$, the set of corresponding neighbors of $i_v$. Because there are double-edges between $i_v$ and its corresponding neighbors, for any of the corresponding neighbors $i_w$ we have that $V_{i_v}$ and $V_{i_w}$ are regular with density at least $1/2 + \beta$. So all but at most $\epsilon m$ vertices of $V'_{i_v}$ have degree at least $(1/2 + \beta - \epsilon) |V'_{i_w}|$ in $V'_{i_w}$. Removing these bad vertices, which are at most $ \epsilon m h$ many, from $V'_{i_v}$ still leaves us with at least $3 \gamma n$ vertices, so there must exist a 2-path (or edge). We choose one of these arbitrarily. By Property~\eqref{def:emb:4} of the multi-embedding, at most two of its vertices need to satisfy a neighborhood condition to any other cluster $V'_{i_w}$ and since the degree of each is at least $(1/2 + \beta -\epsilon) |V'_{i_w}|$ also the common neighborhood of these two vertices is larger than $(\beta /2) |V'_{i_w}|$. Take this neighborhood to be $V''_{i_w}$ for all corresponding neighbors of $i_v$ (and $V'_{j} = V''_{j}$ where there is no neighborhood condition to be fulfilled).
We apply the induction hypothesis on the remaining graph $H \setminus \{v_1, v_2, v_3 \}$ with its restricted multi-embedding and the sets $V''_{j} \ \forall j \in \mathcal{I} \setminus \{i_{v_1}, i_{v_2}, i_{v_3} \}$ to find $H \setminus \{v_1, v_2, v_3 \}$ as a subgraph of $G$ which we can extend by the path we chose in $V'_{i_v}$ to get graph $H \subset G$. \end{proof} \begin{corollary} For every graph $H$ with $|H| = h $ and $\beta >0$ there exist $\epsilon, \gamma> 0$ and $n_0 \in \mathbb{N}$ such that the following holds for every graph $G$ on $n > n_0$ vertices and with independence number $\alpha(G) \le \gamma n$ and the sets $ V_1\cup\ldots\cup V_k$ with $|V_i|=m$ given by the Regularity Lemma with the corresponding reduced multigraph $R_{ \beta, \epsilon}$. Let $f$ be a multi-embedding of $H$ into $R_{ \beta, \epsilon}$ with $f(V(H))=\mathcal{I}=\{i_1,\ldots,i_t\}$ for some $1\le t\le |H|$. Then let $V'_{i_1},\ldots,V'_{i_t}$ be subsets of $V_{i_1},\ldots,V_{i_t}$ respectively of size at least $({2 }/ {\beta})^{h} \epsilon m$. Additionally, let $u,v\in V(H)$ and $u_G,v_G\in V(G)$. Then there is an embedding of $H$ in $G$ such that $u$ is mapped to $u_G$ and $v$ is mapped to $v_G$ if the following hold. \begin{enumerate}[(i)] \item There is a multi-embedding $f$ of $H \setminus \{ u, v\}$ into $R_{\beta, \epsilon}$. \item For all edges of the form $\{u, x\}$ and $\{v, y\}$ in $H$ also $\deg(u_G, V_{f(x)})\ge \beta |V_{f(x)}|$ and $\deg(v_G, V_{f(y)}) \ge \beta |V_{f(y)}|$ in $G$ respectively. \item $u$ and $v$ have distance at least 3 in $H$. \end{enumerate} \label{cor:embedable_struct} \end{corollary} \begin{proof} The embedding works the same as Lemma~\ref{lem:embedable_struct}. First fix $u$ and $v$ as $u_G$ and $v_G$, and then for each neighbor $x$ of $u$ choose $V'_{f(x)} = N(u_G) \cap V_{f(x)}$, same for neighbors of $v$. For all other vertices in $H$ simply choose $V'_{f(i)} = V_{f(i)} $.
So all $|V'_i| \ge \beta |V_i|$ and by Lemma~\ref{lem:embedable_struct} we can find an embedding of $H\setminus \{u, v\}$ and the embedding of neighbors $u$ and $v$ will also be neighbors of $u_G$ and $v_G$ respectively. The distance $3$ is used to ensure no vertex in $H$ is neighbor to both $u$ and $v$. \end{proof} \section{Absorbers} \label{sec:proof_strategy} \label{sec:absorbers} Absorbers are a well-known tool and they allow us to prove statements about spanning subgraph structures. Often when working with the Regularity Lemma, we only find subgraph structures which cover almost all of the vertices, so all but a small linear fraction. Absorbers allow us to go the last step: they are structures we set aside in advance and which can ``absorb'' this small fraction of leftover vertices. \begin{definition} \label{def:absorber} Let $H$ be a graph with $h$ vertices and let $G$ be a graph with $n$ vertices. \begin{itemize} \item We say that a subset $A \subseteq V(G)$ is \emph{$\xi$-absorbing} for some $\xi > 0$ if for every subset $R \subseteq V(G) \setminus A$ such that $h$ divides $|A| + |R|$ and $|R| \le \xi n$ the induced subgraph $G[A \cup R]$ contains an $H$-factor. \item Given a subset $S \subseteq V(G)$ of size $h$ and an integer $t \in \mathbb{N}$, we say that a subset $A_S \subseteq V(G) \setminus S$ is \emph{$(S, t)$-absorbing} if $|A_S| = h t$ and both $G[A_S]$ and $G[A_S \cup S]$ contain an $H$-factor. \end{itemize} \end{definition} We use the following lemma which gives a sufficient condition for the existence of $\xi$-absorbers based on abundance of disjoint $(S, t)$-absorbers. The proof of Lemma~\ref{lemma:absorbing} is based on ideas of Montgomery \cite{montgomery2014embedding} and relies on the existence of `robust' sparse bipartite graphs. \begin{lemma}[Nenadov, Pehova \cite{nenadov2018ramsey}] \label{lemma:absorbing} Let $H$ be a graph with $h$ vertices and let $\phi > 0$ and $t \in \mathbb{N}$.
Then there exist $\xi>0$ and $n_0\in \mathbb{N}$ such that the following is true. Suppose that $G$ is a graph with $n \ge n_0$ vertices such that for every $S \in \binom{V(G)}{h}$ there is a family of at least $\phi n$ vertex-disjoint $(S, t)$-absorbers. Then $G$ contains a $\xi$-absorbing set of size at most $\phi n$. \end{lemma} We define the following structure as it will be used as the main building block in the remainder of this section. \begin{definition}[$K_{r}$-diamond path] A $K_{r}$-diamond path between vertices $u$ and $v$ is the graph formed by a sequence of disjoint vertices $u = v_1, v_2, \ldots, v_\ell = v$ and disjoint cliques of size $r-1$ in the joint neighborhood of each pair of consecutive vertices. The length of the $K_{r}$-diamond path is $\ell$, the number of vertices in the sequence. \end{definition} \begin{figure}[ht] \centering \includegraphics[scale=1]{images/kr_diamondpath.pdf} \caption{$K_{r}$-diamond path} \label{fig:diamondpath} \end{figure} \subsection{Finding $K_{r}$-diamond paths} To make use of this lemma we additionally need to find vertex-disjoint $(S, t)$-absorbers in our graph. Observe that if we can find a $K_r$ with disjoint $K_r$-diamond paths attached to each of its vertices, then this structure is $(S,t)$-absorbing for the set $S$ of the $r$ free endpoints of the $K_r$-diamond paths. To find vertex-disjoint $(S, t)$-absorbers it is sufficient to find many disjoint $K_{r}$-diamond paths between any two vertices. \begin{lemma} \label{lem:absorber_diamondpaths} For every $r \ge 4$ and $\mu>0$ there exist $\gamma >0$ and $n_0\in \mathbb{N}$ such that in every graph $G$ on $n\ge n_0$ vertices with minimum degree $\delta(G)\ge \left(1-\frac{2}{r}+\mu\right)n $ and $\alpha(G)\le \gamma n$, after deleting $(\mu/2) n$ many vertices we can still find a $K_{r}$-diamond path of length at most $7$ between any two remaining vertices. 
\end{lemma} Note that, for connectivity issues, the lemma only holds for $r\ge4$. To prove this lemma we find a multi-embedding of a $K_{r}$-diamond path in a reduced multigraph and then extract from that a $K_{r}$-diamond path in the original graph. We introduce the notion of a $K_r$-neighborhood $\Upsilon_r(v)$. These are the neighbors of $v$ such that additionally we can find a multi-embedding of a $K_r$ into the reduced multigraph covering both the vertex and $v$. \begin{definition} Let $R$ be a reduced multigraph. Then for any vertex $v$ the \emph{$K_r$-neighborhood} $\Upsilon_r(v)$ is defined as follows. \begin{align*} \Upsilon_r(v) = \{ w \in V(R) \mid \exists \text{ a multi-embedding } \psi : V(K_r) \rightarrow V(R)& \text{ s.t. } \\ \psi^{-1}(v) \ne\emptyset & \text{ and } \psi^{-1}(w) \ne \emptyset \} \end{align*} Further we define $\Upsilon^2_r(v) = \bigcup_{u \in \Upsilon_{r}(v)} \Upsilon_{r}(u)$ to be the second-$K_r$-neighborhood. \end{definition} Note that by definition any vertex $v$ is in its own $K_r$-neighborhood, assuming there is at least one $K_r$ multi-embedding containing $v$. Then also $\Upsilon_r(v) \subseteq \Upsilon^2_r(v)$. In order to find $K_{r}$-diamond paths we first show that the $K_{r+1}$-neighborhood of every vertex in the reduced graph is large. \begin{proposition} \label{prop:upsilonexpand} For $r\ge 4$, let $R$ be a reduced multigraph on $k$ vertices with $\delta(R) > (1-2/r)2 k$. Then we have \[ \left| \Upsilon^2_{r+1}(v) \right| \ge \frac{k}{2} \quad \quad \forall v\in V(R). \] \end{proposition} Before we prove this proposition, we prove a series of lemmas about the size of $K_r$-neighborhoods. Note that this is easier for large $r$, thus we have to consider some special cases for small values of $r$. We start with some general lemmas that hold for all $r$. 
In the following, a clique of double-edges denotes a clique where all edges are double-edges, and the double-edge-neighborhood of a vertex $v$ is the set of neighbors connected to $v$ with a double-edge. \begin{lemma} \label{lem:double-edgeconnection} For $r\ge 4$, let $R$ be a reduced multigraph on $k$ vertices with $\delta(R) > (1-2/r)2 k$. For any vertex $v$ the vertices connected to $v$ by double-edges are contained in the $K_{r+1}$-neighborhood $\Upsilon_{r+1}(v)$. \end{lemma} \begin{proof} In order to embed $K_{r+1}$ we need a clique of double-edges of size $\ell$ and a clique of size $r+1-2\ell$ in the neighborhood of this clique. Note that the double-edge itself is already a clique of size $2$. In the following, we show that for every $2\le \ell\le (r+1)/2$ we can find such an embedding given that $\ell$ is the size of a maximal clique of double-edges. Fix any double-edge of $v$ and take the largest clique of double-edges containing the double-edge. Let $\ell$ be the size of the clique, and let $S$ be the set of all vertices which lie in the joint neighborhood of all vertices of the clique. As we assumed the clique of double-edges to be maximal we know that every vertex in $S$ has at most $2\ell-1$ edges into the clique. Every vertex that is not in $S$ has to have at least one non-neighbor in the clique and can thus not have more than $2(\ell-1)$ edges into the clique. Moreover, by our minimum degree condition in $R$ we know that every vertex in the clique has more than $\left(1-\frac{2}{r}\right)2k$ edges. Combining this, we get \[ (2\ell-1)|S|+2(\ell-1)(k-|S|) > \ell\left(1-\frac{2}{r}\right)2k, \] from which we conclude that \[|S| > 2k-\frac{4k\ell}{r} .\numberthis\label{eq:lowerS} \] For any vertex $v \in R$ the neighborhood satisfies \[|N(v)| \ge \deg(v) /2 > \left(1 - \frac{2}{r}\right) k. 
\] In particular, the number of vertices not in the neighborhood of a vertex is less than $\frac{2k}{r}$. So by greedily picking vertices one by one we can choose at least \[\left\lceil \frac{\ |S|\ }{\frac{2 k}{r} } \right\rceil \ge r-2\ell+1 \] many vertices. This gives us a clique of double-edges of size $\ell$ and, in the joint neighborhood, a clique of size $r-2\ell+1$, which together yield a multi-embedding of $K_{r+1}$. \end{proof} \begin{lemma} \label{lem:highsingle} For $r\ge 3$, let $R$ be a reduced multigraph on $k$ vertices with $\delta(R) > (1-2/r)2 k$. For any vertex $v$ in $R$, if the neighborhood of $v$ is of size at least $ (1-1/r)k$, then $N(v) \subseteq \Upsilon_{r+1}(v)$. \end{lemma} \begin{proof} We apply induction on $r$, by looking at the neighborhood of a vertex and finding that the appropriate minimum degree conditions hold. The lemma is true for $r= 3$ since then any neighbor $u$ of $v$ has at least one vertex $w$ in the joint neighborhood with $v$, and we can create a multi-embedding $\psi$ which maps one vertex of a $K_4$ to $v$, one to $u$, and the two remaining vertices to $w$. This is a valid multi-embedding of a $K_{4}$ and proves $N(v) \subseteq \Upsilon_{4}(v)$. This builds our induction base. For $r>3$ consider for any vertex $u\in N(v)$ the joint neighborhood with $v$. \[ \deg(u, {N(v)}) > (1-2/r) 2k - 2(k - |N(v)|) \ge (1-2/(r-1)) 2|N(v)| , \] where in the last step we use that $|N(v)| \ge \frac{r-1}{r} k$. To prove that $u \in \Upsilon_{r+1}(v)$ it suffices to show that there is a $K_r$ multi-embedding containing $u$ in $R[N(v)]$, the subgraph induced by $N(v)$. 
Now $\delta(R[N(v)]) > (1-2/(r-1)) 2|N(v)|$, so for any vertex $u \in R[N(v)]$, by counting the edges, there must be either a double-edge containing $u$, in which case Lemma~\ref{lem:double-edgeconnection} gives at least one $K_r$ multi-embedding, or $u$ has a large neighborhood, $(1-2/(r-1))2|N(v)| \ge (1-1/(r-1))|N(v)|$, in which case we apply the induction on the subgraph $R[N(v)]$; so, in fact, in the subgraph $R[N(v)]$ any neighbor of $u$ is in $\Upsilon_{r}(u)$ and also $u$ is contained in a $K_r$. \end{proof} \begin{lemma} \label{lem:requal4}For $r = 4$, let $R$ be a reduced multigraph on $k$ vertices with $\delta(R) > (1-2/r)2 k$. Then $N(v) \subseteq \Upsilon_{r+1}(v)$ for every vertex $v$. \end{lemma} \begin{proof} For any neighbor of $v$ we want to find a multi-embedding of $K_5$ mapping to $v$ and that neighbor. By Lemma~\ref{lem:double-edgeconnection}, every double-edge-neighbor of $v$ is in $\Upsilon_{r+1}(v)$. For all other vertices $w \in N(v)$ we claim that either there is an edge between $w$ and a vertex $x$ in the double-edge-neighborhood of $v$, in which case we can map two vertices to $x$, two vertices to $v$ and one to $w$ to get a multi-embedding of $K_5$ or, in the other case, there is a double-edge between $w$ and another vertex $x$ in $N(v)$ and then we can map two vertices each to $x$ and $w$ and one to $v$ to get a multi-embedding of $K_5$. Let $D$ be the double-edge-neighborhood of $v$ and $S = N(v) \setminus D$. Then for any vertex $w \in N(v)$, if $w$ has no edge to any vertex in $D$ and at most one edge to any vertex in $S$, then \[ \deg(w) \le |S| + 2(k - |S| - |D|) \le 2k - (2|D| + |S|) \osref{$(2|D|+|S|)=\deg(v)$}< k \] which is a contradiction to the assumption that every vertex in the reduced graph $R$ has degree greater than $(1-2/r)2k = k$ for $r=4$. \end{proof} \begin{lemma} \label{lem:requal5} For $r = 5$, let $R$ be a reduced multigraph on $k$ vertices with $\delta(R) > (1-2/r)2 k$. Then $N(v) \subseteq \Upsilon^2_{r+1}(v)$ for every vertex $v$. 
\end{lemma} \begin{proof} Let $D$ be the double-edge-neighborhood of $v$. If $|D| \le 2k/5$ we have $|N(v)| \ge 4k/5$ and by Lemma \ref{lem:highsingle} we have that $N(v) \subseteq \Upsilon_{r+1}(v)$, so we assume $|D| \ge 2k/5$. Consider any fixed $u \in N(v)\setminus\Upsilon^2_{r+1}(v)$. If $u$ has a double-edge to $D$, then by Lemma~\ref{lem:double-edgeconnection} it has distance two with regards to the $K_r$-neighborhood $\Upsilon_{r+1}$ and we are done. So we can assume it has only single edges or no edges to vertices in $D$. In particular, the double-edge-neighborhood of $u$ does not intersect $D$, so its size is at most $k-|D|$. So since $|D| \ge 2k/5$, we have that \[|N(u)| \ge \frac{6k}{5} - \left(k-|D|\right) \ge \frac{k}{5} + |D| \ge \frac{7k}{5} - |N(v)|, \] where the last step follows from $|N(v)| > 6k/5 - |D|$. Since the minimum neighborhood size of any other vertex is $3k/5$, the common intersection of the neighborhood of $u$ with that of any other vertex $x$ must be more than $7k/5 - |N(v)|+ 3k/5 -k = k - |N(v)|$, so we can choose a vertex $y$ in the joint neighborhood of $v$, $u$ and $x$. Now choose $x$ in $D$. The multi-embedding of $K_6$ follows by embedding two vertices each in $v$ and $x$, and one each in $u$ and $y$. So then $u \in \Upsilon_{r+1}(v)$. In any case $u \in \Upsilon^2_{r+1}(v)$ and the lemma follows. \end{proof} Combining the previous lemmas, we are now ready to prove Proposition \ref{prop:upsilonexpand}. \begin{proof}[Proof of Proposition \ref{prop:upsilonexpand}] For $r \ge 8$ the double-edge-neighborhood of every vertex is greater than $k/2$, so by Lemma \ref{lem:double-edgeconnection} this follows immediately. For $r \ge 6$, by looking at the degree, for each vertex either the double-edge-neighborhood is greater than $k/2$ or the total neighborhood is greater than $(1-1/r)k$, so by Lemma \ref{lem:double-edgeconnection} or Lemma \ref{lem:highsingle} the proposition follows. 
For $r = 4,5$ we have Lemmas \ref{lem:requal4} and \ref{lem:requal5} respectively, where in both cases it is easy to see that $|N(v)|\ge k/2$. \end{proof} The next lemma is about connecting one fixed vertex $v$ in $G$ to $K_{r+1}$-embeddable structures as follows. Given $v$, we want to find a multi-embedding of $K_{r-1}$ into the neighborhood of $v$, i.e.\ clusters that $v$ has many edges to. We then want to extend this $K_{r-1}$ to a $K_{r}$ by finding a vertex in the joint neighborhood of the clique (not necessarily in $N(v)$). This is a preparation step to apply Corollary~\ref{cor:embedable_struct}. \begin{lemma} \label{lem:start} Fix a vertex $v$ in $G$ and a reduced multigraph $R_{\beta, \varepsilon}$ of $G$. Let $Q_v$ be the set of vertices $i \in V(R_{\beta, \varepsilon})$ such that for their corresponding clusters $V_i \subseteq V(G)$ it holds that $\deg(v,V_i) \ge \beta |V_i|$. Then there exists a multi-embedding of a $K_{r}$ into $R_{\beta, \varepsilon}$ embedding at most one vertex into $V(R_{\beta, \varepsilon}) \setminus Q_v$. \end{lemma} \begin{proof} Note that the number of edges from $v$ to $V_0$ or to any cluster not in $Q_v$ is at most $\varepsilon n$ and $\beta k m \le \beta n$ respectively. The degree of $v$ is at least $(1-2/r+\mu)n$ in $G$ and, choosing $\beta, \varepsilon< \mu/10$, the number of edges from $v$ to clusters of $Q_v$ is at least $(1-2/r + 2\mu/3) n$. In particular, since every cluster has size at most $n/k$, this means \[|Q_v| \ge \left(1-\frac{2}{r}+\mu/2 \right)k. \numberthis\label{eq:lowerQv}\] The proof follows similar arguments as the proof of Lemma~\ref{lem:double-edgeconnection}. Let the largest clique with double-edges in $Q_v$ be $C$ of size $\ell$. Let $S \subseteq Q_v$ be the joint neighborhood of the vertices from this clique inside $Q_v$ and $T \subseteq V(R) \setminus Q_v$ all vertices which are in the joint neighborhood of the clique but not in $Q_v$. 
We want to find a $K_{r-2\ell}$ in $S\cup T$ with at most one vertex in $T$. Because $C$ is maximal, every vertex in $S$ has at most $2\ell -1$ edges to $C$ and every other vertex in $Q_v$ has at most $2\ell -2$ edges to $C$. But also every vertex in $C$ has degree greater than $ (1-\frac{2}{r})2k $. So we get two bounds for $\deg(C, Q_v)$, the sum of degrees between $C$ and $Q_v$. \[\deg(C, {Q_v}) > \ell \left( (1-2/r )2k - 2(k-|Q_v|) \right) > \left(1- 2 / (r-2) \right)2 \ell |Q_v|, \] where in the last step we use from \eqref{eq:lowerQv} that $k < \frac{r}{r-2}|Q_v|$. \begin{align*} \deg(C, {Q_v}) & \le \deg (C, S) + \deg(C, {Q_v\setminus S}) \\ & \le (2\ell-1)|S|+(2\ell-2)(|Q_v|-|S|) \\ & < |S|+(2\ell-2)|Q_v|. \end{align*} Together we get a bound on $|S|$. Namely \[ |S| > \left(1- 2\ell / (r-2) \right)2|Q_v| > (r-2\ell-2) \frac{2k}{r} \numberthis\label{eq:lowerSv}.\] Next, we bound the size of $S\cup T$ with a similar argument, counting the edges $\deg(C, {V(R)})$. Again, vertices in $ V(R)$ but not in $S\cup T$ can have at most $2\ell -2$ edges to $C$. \begin{align*} (1-2/r)2k\ell & \le \deg(C,{V(R)}) \\ & = \deg(C, S) + \deg(C, T) + \deg(C, {V(R)\setminus (S\cup T)}) \\ & \le (2\ell-1)|S|+ 2\ell |T|+(2\ell-2)(k-|S| -|T|) \\ & < |S|+ 2|T| +(2\ell-2)k. \end{align*} We get a bound on $|S \cup T|$. Namely, $|S| + 2|T| > (1-2\ell/r)2k = (r-2\ell) \frac{2k}{r} $ and in particular, since $|T| \le k-|Q_v| < 2k/r$ because of \eqref{eq:lowerQv}, this means \[|S| + |T| > (1-2\ell/r)2k -2k/r = (r-2\ell-1) \frac{2k}{r}. \numberthis\label{eq:lowerSTv} \] Furthermore, observe that for every vertex $w \in V(R)$ we have $|N(w)| \ge k - 2k/r$, thus every vertex has at most $2k/r$ non-neighbors. 
This directly implies we can sequentially choose \[ \left\lceil \frac{\ |S|\ }{\frac{2k}{r}} \right\rceil \osref{\eqref{eq:lowerSv}}\ge r-2\ell-1 \] many vertices from $S$ to form a clique and still have at least one vertex from $S\cup T$, because of \eqref{eq:lowerSTv}, to form the $K_{r-2\ell}$. This together with the $K_\ell$ of double-edges allows for a multi-embedding of $K_r$ and concludes the proof. \end{proof} We now prove Lemma \ref{lem:absorber_diamondpaths}. \begin{proof}[Proof of Lemma~\ref{lem:absorber_diamondpaths}] Choose two arbitrary vertices $s,t \in V(G)$ for which we want to find a $K_r$-diamond path. For $s$ and $t$, apply Lemma~\ref{lem:start} to find two multi-embeddings of $K_{r}$'s such that at most one of the vertices in $R$ has $\deg(s, V_i) < \beta |V_i|$ and $\deg(t, V_j) < \beta |V_j|$ respectively. Call these vertices $s_1$ and $t_1$ respectively. With Proposition~\ref{prop:upsilonexpand} we find a multi-embedding of at most four $K_{r+1}$'s connecting $s_1$ and $t_1$, since the second $\Upsilon_{r+1}$ neighborhoods overlap. This almost gives a multi-embedding of a $K_r$-diamond path connecting $s$ and $t$. It remains to deal with the multiple usage of a cluster in the reduced graph. For the mapping to be a multi-embedding as in Definition \ref{def:embedding} we need that each vertex/cluster in the reduced graph has only a single vertex, edge or 2-path mapped to it. For this we partition each cluster arbitrarily into enough parts such that we can assign each vertex, edge or 2-path to a unique part. Note that, as we have at most six $K_r$'s, we only need to split the clusters into constantly many parts. If we arbitrarily split each cluster of the reduced graph into $ 6r$ equal parts, then the new partition still satisfies the conditions of the Regularity Lemma, because $\varepsilon$-regularity is inherited by Fact~\ref{fact:regsub}, just with slightly different $\varepsilon'$ and $\beta'$. 
So we obtain a reduced multigraph $R'$ of this new partition which is just a blow-up of $R$. In particular, we can embed each isolated vertex, edge or 2-path into a separate cluster. In $R'$ the consecutive $K_r$ multi-embeddings in fact form a multi-embedding of a $K_r$-diamond path of length at most seven, excluding the endpoints $s$ and $t$. It follows by Corollary~\ref{cor:embedable_struct} that we get a $K_r$-diamond path in $G$. \end{proof} With Lemma \ref{lem:absorber_diamondpaths} we can find $K_r$-diamond paths from any tuple of $r$ vertices, matching them to a different vertex of a $K_r$ somewhere else in the graph. This is now an $(S,t)$-absorber from Definition~\ref{def:absorber}, and together with Lemma~\ref{lemma:absorbing} this is enough to find an absorber of the first kind as in Definition~\ref{def:absorber}. \section{Almost Spanning Structure} \label{sec:almost} For the second part of the proof we want to show that we can cover most of the vertices with a $K_r$-tiling. Combining this with the absorber gives a $K_r$-factor. \begin{lemma} For every $r\in \mathbb{N}$ and $\xi, \mu > 0$, there exist $\gamma > 0$ and $n_0 \in \mathbb{N}$ such that for every graph $G$ on $n>n_0$ vertices with $\delta(G)\ge \left(1-\frac{2}{r}+\mu\right)n$ and $\alpha(G) \le \gamma n$, we can find a $K_r$-tiling which covers at least $(1- \xi)n$ vertices in $G$. \label{lem:almost} \end{lemma} We make use of a known result for small subgraphs in the same setting. To find a $K_{r}$ in a graph with small independence number we only need a certain average degree. The following lemma states this. \begin{lemma}[Erd\H{o}s, S\'os \cite{erdos1970some}] For every $r\in \mathbb{N}$ and $\mu>0$ there exist $\gamma >0 $ and $n_0 \in \mathbb{N}$ such that every graph $G$ on $n>n_0$ vertices with \emph{average degree} $d(G)\ge \left(1-\frac{2}{r-1}+\mu\right) n$ and $\alpha(G)\le \gamma n$ satisfies $K_r\subseteq G$. 
\label{lem:RT} \end{lemma} First we would like to show that there exists at least a fractional almost cover of the vertices. A fractional cover is defined as follows: \begin{definition} A fractional $K_r$-tiling $\mathcal{T}$ of a graph $G$ is a weight function from the set $\mathcal{S}$ of all $K_r \subseteq G$ to the interval $[0, 1]$ such that for every vertex of $G$ it holds that \[\hspace{6em} w_{\mathcal{T}}(v) = \sum_{\substack{\mathcal{K}_i \in \mathcal{S}, \\ v \in\mathcal{K}_i} } w_{\mathcal{T}}( \mathcal{K}_i ) \le 1 \hspace{6em} \forall v \in G. \] We call $\sum_{v \in G } w_{\mathcal{T}}(v)$ the \emph{total weight} of a tiling, and it is a \emph{perfect} fractional tiling if equality holds for every vertex. \end{definition} Fractional $K_r$-tilings are somewhat easier to find and we will prove the following lemma later in this section. \begin{restatable}{lemma}{fracmat} For every $r\in \mathbb{N}$ and $\eta$, $\mu >0$ there exist $\gamma >0$ and $n_0\in \mathbb{N}$ such that every graph $G$ on $n\ge n_0$ vertices with $\delta(G)\ge \left(1-\frac{2}{r}+\mu\right)n$ and $\alpha(G)< \gamma n$ has a fractional $K_r$-tiling $\mathcal{T}$ such that \[|\{v\in G\colon w_{\mathcal{T}}(v)<1-\eta \}|\le \eta n.\] \label{lem:fracmat} \end{restatable} Observe that the weight of this tiling is at least $(1-2\eta) n$. We would like to transform the fractional into an actual tiling. We construct a fractional tiling in the reduced multigraph first, then transfer it to the original graph greedily. We will slightly abuse notation for the fractional tiling to extend the definition to the reduced multigraph. By a fractional tiling with $K_r$-embeddable structures we mean that we assign the weights to all possible multi-embeddings of $K_r$ into the reduced multigraph and require that, for every vertex, all multi-embeddings mapping to that vertex have a total weight of at most one, counting multiplicity. 
\begin{lemma} For every $r \in \mathbb{N}$ and $\eta, \beta>0$ there exist $\varepsilon, \gamma >0$ and $n_0\in \mathbb{N}$ such that for every graph $G$ on $n\ge n_0$ vertices, if a reduced multigraph $R_{\beta, \varepsilon}$ of $G$ has a fractional tiling with $K_r$-embeddable structures of total weight at least $(1-\eta) k$, then $G$ has a $K_r$-tiling that covers at least $(1-2\eta)n$ vertices. \label{lem:frac_multi_to_almost} \end{lemma} \begin{proof} Set $\varepsilon$ and $\gamma$ small enough for Lemma~\ref{lem:embedable_struct} and such that $(2/\beta)^r\varepsilon \le \eta/2$. The first step is to rescale the tiling. Let $\mathcal{T}$ be the fractional tiling of $R$ as given by the statement. Construct $\mathcal{T}'$ by scaling every $K_r$-embeddable structure with a factor of $(1-(2/\beta)^r\varepsilon)$, i.e.\ for any $K_r$-multi-embedding $\mathcal{K}$ we have $w_{\mathcal{T}'}(\mathcal{K})=(1-(2/\beta)^r\varepsilon)w_{\mathcal{T}}(\mathcal{K})$. We construct the $K_r$-tiling in $G$ by greedily taking $w_{\mathcal{T}'}(\mathcal{K})|V_i|$ many $K_r$'s given by Lemma~\ref{lem:embedable_struct} and removing them from $G$. Note that, because of the rescaling, the sum of the weights of all $K_r$-embeddable structures touching one vertex is at most $(1-(2/\beta)^r\varepsilon)$. Thus, in every step of the greedy removal we have at least $(2/\beta)^r\varepsilon |V_i|$ vertices left, which ensures that we can always apply Lemma~\ref{lem:embedable_struct}. Even after rescaling, $\mathcal{T}'$ has total weight at least $(1-(2/\beta)^r\varepsilon - \eta) k$, and $V_0$ contains at most $\varepsilon n$ many vertices. So the greedy $K_r$-tiling of $G$ covers at least a $(1-((2/\beta)^r\varepsilon + \eta + \varepsilon)) \ge (1-2\eta)$ fraction of the vertices, which concludes the proof. 
\end{proof} For our proof, we need triangle-free graphs with low independence number that we can connect with relatively high density without creating a copy of $K_4$. A construction by Bollob\'as and Erd\H{o}s shows that these graphs exist. We state their results in a slightly different way, but it directly follows from their construction. \begin{lemma}[\cite{bollobas1976ramsey}] \label{lem:K4free} For $\zeta,\gamma>0$ there is an $n_0\in \mathbb{N}$ such that for $n\ge n_0$ there is a graph $G$ on $2n$ vertices with a partition into $V_1, V_2$ that has the following properties. \begin{enumerate} \item $|V_1|=|V_2|=n$, \item $G[V_1]$ is isomorphic to $G[V_2]$ and they are triangle-free, \item $G$ is $K_4$-free, \item $G[V_1,V_2]$ has density at least $1/2-\zeta$, \item $\alpha(G)\le \gamma n$. \end{enumerate} \end{lemma} The next lemma connects almost tilings and fractional tilings. In order to find an almost tiling in a graph $G$ we apply the Regularity Lemma and need a fractional tiling in the reduced graph. We make use of a second auxiliary graph $\Gamma$, which is similar to a blow-up of the reduced graph. \begin{lemma} For every $r \in \mathbb{N}$ and $\mu, \eta >0$ there exist $\beta, \varepsilon, \gamma >0$ and $n_0\in \mathbb{N}$ such that for every graph $G$ on $n\ge n_0$ vertices with minimum degree $\delta(G)\ge \left(1-\frac{2}{r}+\mu\right)n $ and $\alpha(G)\le \gamma n$ there is a graph $\Gamma $ with $\delta(\Gamma )\ge \left(1-\frac{2}{r}+\frac{\mu}{4} \right)|\Gamma | $ and $\alpha(\Gamma )\le \gamma |\Gamma |$ such that the following holds. If $\Gamma $ has a fractional $K_r$-tiling with weight at least $(1-\eta)|\Gamma |$, then $G$ has a $K_r$-tiling covering at least $(1-2 \eta)n$ vertices. 
\label{lem:tilingtransferlemma} \end{lemma} \begin{proof} Choose $\beta$, $\varepsilon$ and $\gamma$ small enough such that Lemma~\ref{lem:embedable_struct} and Lemma~\ref{lem:frac_multi_to_almost} are satisfied and smaller than $\mu/10$. Apply the Regularity Lemma (Lemma \ref{lem:reg}) to $G$ with $\beta$ and $\varepsilon$. Let $V_0\cup V_1\cup\ldots\cup V_k$ be the regular partition resulting from the Regularity Lemma and let $R_{\beta, \varepsilon}$ be the reduced multigraph of this partition. Let $y_1$ be a constant that is larger than $n_0$ from Lemma \ref{lem:K4free} with $\gamma_{\ref{lem:K4free}} = \gamma$ and $\zeta_{\ref{lem:K4free}}=\mu/8$. Construct $\Gamma$ by taking $y_0 = k\cdot y_1$ vertices and split $V(\Gamma)$ into $W_1,\ldots,W_k$, each of size $y_1$, where we associate $W_i$ with $V_i$ from the regular partition. On every vertex set $W_i$ we put a triangle-free graph from Lemma~\ref{lem:K4free}. Then add a complete bipartite graph between two clusters $W_i$ and $W_j$ if $i$ and $j$ are connected by a double-edge in $R$. Add a $K_4$-free construction given by Lemma \ref{lem:K4free} between $W_i$ and $W_j$ if $i$ and $j$ are connected by a single edge, and the empty graph otherwise. Note that, as the graphs inside the clusters are all isomorphic, we are guaranteed that the $K_4$-free graph construction of Lemma \ref{lem:K4free} is possible between any two clusters. We consider the minimum degree of $\Gamma $. As $G$ has minimum degree $\delta(G) \ge (1-2/r+\mu)n$, using Fact~\ref{fact:min_deg_r} we get $\delta(R_{\beta,\varepsilon})\ge \left(1-2/r+\mu/2\right) 2k$, which finally means that in $\Gamma $ every edge from a cluster-vertex $i$ in $R_{\beta, \varepsilon}$ contributes at least $(1/2 - \zeta)y_1 = (1/2 - \zeta)|\Gamma| /k$ many edges for a vertex in the corresponding set $W_i$ of $\Gamma$. 
Thus $\Gamma$ has minimum degree \[\delta(\Gamma) \ge (1-2/r+\mu/2 - 2\zeta) |\Gamma| = (1-2/r+\mu/4) |\Gamma |,\] where $\zeta=\mu/8$ as we chose for Lemma~\ref{lem:K4free}. The important observation now is that every $K_r$ in $\Gamma$ corresponds to a multi-embedding of $K_r$ in $R$. This is an easy consequence of the construction of $\Gamma$. For every $K_r$ in $\Gamma$ take the mapping which maps to the vertex $i$ if the vertex of the $K_r$ lies in the set $W_i$ in $\Gamma$. We never embed three vertices into a vertex of $R$ because all $W_i$'s are triangle-free, and if there are two clusters $W_i$ and $W_j$ into which we embed two vertices each, these vertices form a $K_4$, which means that in $R$, $i$ and $j$ must be connected by a double-edge. By construction, the largest independent set of every cluster $W_i$ of $\Gamma$ is at most $\gamma |W_i|$, so $\alpha(\Gamma) \le \gamma |\Gamma|$. Then by the assumption of the lemma we have a fractional $K_r$-tiling $\mathcal{T}$ of $\Gamma$. We convert the fractional $K_r$-tiling of $\Gamma $ into a fractional $K_r$-tiling $\mathcal{T}'$ of $R$ by applying the mapping from $K_r$'s to $K_r$-multi-embeddings of $R$. So, for every multi-embedding $\mathcal{K}$ of a $K_r$ into $R$ we can define the set $L_{\mathcal{K}}$ to be the set of all $K_r$ in $\Gamma $ whose vertices lie in the sets $W_i$ corresponding to the clusters $V_i$ that the multi-embedding $\mathcal{K}$ maps to. Then \[ w_{\mathcal{T}'}(\mathcal{K}) \ge \sum_{K \in L_{\mathcal{K}} } \frac{ w_\mathcal{T}( K )}{y_1} . \] So the total weight of $\mathcal{T}'$ must be at least $(1-\eta ) k$ in $R$. Then by Lemma~\ref{lem:frac_multi_to_almost} we can convert the fractional tiling into an almost cover of $G$ that covers at least $(1-2\eta)n$ vertices. 
\end{proof} For the proof of Lemma~\ref{lem:fracmat} we need the following lemma, which gives us a stepwise improvement of any tiling we have, as long as we do not yet cover a $(1-\eta)$ fraction of the vertices. Here a $\{K_r, K_{r+1}\}$-tiling is a disjoint union of $K_r$'s and $K_{r+1}$'s as subgraphs. \begin{lemma} For every $r\in \mathbb{N}$ and $\eta, \mu >0$ there exist $\rho$, $\gamma>0$ and $n_0\in \mathbb{N}$ such that every graph $G$ on $n\ge n_0$ vertices with $\delta(G)\ge \left(1-\frac{2}{r}+\mu\right)n$ and $\alpha(G)< \gamma n$ has the following property: Let $\mathcal{T}$ be a maximum $K_r$-tiling in $G$ with $|V(\mathcal{T})|\le (1-\eta)n$. Then there is a $\{K_r,K_{r+1}\}$-tiling which covers at least $|V(\mathcal{T})|+\rho n$ vertices. \label{lem:KrKr+1} \end{lemma} \begin{proof} Let $\mathcal{R}= V(G) \setminus V(\mathcal{T})$ be the set of all uncovered vertices in $G$. By Lemma~\ref{lem:RT} we know that the average degree inside $\mathcal{R}$ is less than $\left(1-2/(r-1) + \mu \right)|\mathcal{R}|$, as else there would be a $K_r$ inside $\mathcal{R}$ that we could add to $\mathcal{T}$, contradicting the maximality of $\mathcal{T}$. We show that this implies that we can extend at least $\rho n$ of the $K_r$'s in $\mathcal{T}$ to $K_{r+1}$'s, where $\rho$ is some constant to be chosen later. Let $T=V(\mathcal{T})$; we are guaranteed that $|T|\ge \mu n$, as otherwise every vertex in $\mathcal{R}$ would have $\deg(v,\mathcal{R})\ge (1-2/r+\mu)n-\mu n>(1-2/(r-1))|\mathcal{R}|$, contradicting our upper bound on the average degree. 
Moreover, as every vertex in $\mathcal{R}$ has degree at least $(1-2/r+\mu)n$ and inside $\mathcal{R}$ we have an average degree less than $(1-2/(r-1))|\mathcal{R}|$, we know that the number of edges in between, $\deg(\mathcal{R}, T)$, is at least \[ \left(1-\frac{2}{r}+\mu\right)n|\mathcal{R}|-\left(1-\frac{2}{r-1}\right)|\mathcal{R}|^2\ge \left(1-\frac{2}{r}+\mu\right)|T||\mathcal{R}|+\frac{2}{r(r-1)}|\mathcal{R}|^2. \] Let $\mathcal{R}'\subseteq \mathcal{R}$ be the set of all vertices in $\mathcal{R}$ that have $\deg(v,T)> \left(1-\frac{2}{r}+\mu\right)|T|$. As we know that $\deg(v,T)\le |T|\le n$, we conclude that \[|\mathcal{R}'|\ge \frac{\frac{2}{r(r-1)}|\mathcal{R}|^2}{n}\ge \varphi |\mathcal{R}|\] for $\varphi = \frac{2}{r(r-1)}\eta$. \begin{figure} \centering \includegraphics[scale=0.8]{images/greedykr.pdf} \caption{Greedy extending to $K_{r+1}$} \label{fig:greedyext} \end{figure} We now use vertices or edges from $\mathcal{R}'$ to extend some $K_r$ from $\mathcal{T}$ to a $K_{r+1}$. Let $\mathcal{T}'$ be the set of all $K_{r}$'s that we did not yet extend in this process and $\mathcal{R}'' \subseteq \mathcal{R}'$ the set of so far unused vertices in $ \mathcal{R}'$. The following claim asserts that the greedy process works. \begin{claim} If $\mathcal{R}'' \subseteq \mathcal{R}'$ is such that $(\mu/2r)|\mathcal{R}''|\ge \gamma n$ and for every vertex $v\in \mathcal{R}''$ we have ${T'} = V(\mathcal{T}')\subseteq T$ with $\deg(v, T') \ge \left(1-\frac{2}{r}+\frac{\mu}{2}\right)|T'|$, then we can find a $K_r$ in $\mathcal{T}'$ which can be extended to a $K_{r+1}$. \label{claim:extendKr} \end{claim} \begin{proof} If there is a $K_r$ in $\mathcal{T}'$ such that there is a vertex in $\mathcal{R}''$ which is connected to all vertices from this $K_r$, then we can extend it to a $K_{r+1}$. We can thus assume that every vertex in $\mathcal{R}''$ has at most $r-1$ edges to any $K_r$ in $\mathcal{T}'$. 
Then, the minimum degree condition implies that every vertex has at least $(\mu/2)|T'|$ copies of $K_r$ in $\mathcal{T}'$ such that $v$ is connected to exactly $r-1$ vertices of this $K_r$. We can construct an auxiliary bipartite graph where the vertices in one partition are the copies of $K_r$ in $\mathcal{T}'$ and the other partition is formed by the vertices in $\mathcal{R}''$. Then the previous observation implies that this bipartite graph has at least $(\mu/2)|T'||\mathcal{R}''|$ edges, and we can thus find a $K_r$ in $\mathcal{T}'$ such that at least $(\mu/2)|\mathcal{R}''|$ vertices from $\mathcal{R}''$ have exactly $r-1$ edges to this particular $K_r$. Call the set of these vertices $\mathcal{R}'''$; we can then further partition $\mathcal{R}'''$ into $\mathcal{R}'''_1,\ldots,\mathcal{R}'''_r$, where we put a vertex $v\in \mathcal{R}'''$ in $\mathcal{R}'''_i$ if and only if $v$ does not have an edge to the $i$th vertex in the $K_r$ (where the order of the vertices is arbitrary but fixed). Then there is some index $j$ such that $|\mathcal{R}'''_j|\ge (\mu/2r)|\mathcal{R}''|$. As we required that $\alpha(G)<\gamma n \le (\mu/2r)|\mathcal{R}''| \le |\mathcal{R}'''_j|$, there is an edge $e$ in $\mathcal{R}'''_j$. We can thus construct a $K_{r+1}$ by removing the $j$th vertex from the $K_r$ and adding the edge $e$ to the $K_r$. \end{proof} Note that for every $K_{r+1}$ we construct, we remove one $K_r$ from $\mathcal{T}'$ and at most two vertices from $\mathcal{R}''$. We choose $\rho$ maximal such that $\rho n\le (\mu / 2 r)|T|$ and $2 \rho n \le |\mathcal{R}'| - (2r / \mu)\gamma n$. After the removal of at most $\rho n $ greedily formed $K_{r+1}$'s we are thus left with at least $|\mathcal{R}'|- 2 \rho n$ vertices in $\mathcal{R}''$, each of which has $\deg(v,T') \ge (1-2/r+\mu/2)|T'|$. Then Claim~\ref{claim:extendKr} gives that we can choose the $K_{r+1}$'s in a greedy manner until we have extended $\rho n$ many $K_r$'s. 
\end{proof} Now we are ready to prove Lemma~\ref{lem:fracmat}. We restate the lemma for convenience of the reader. \fracmat* \begin{proof}[Proof of Lemma~\ref{lem:fracmat}] We start by taking a maximum $K_r$-tiling in $G$. If this covers more than $(1-\eta)n$ vertices, then we are done immediately. Else we repeatedly apply Lemma~\ref{lem:KrKr+1} while at every step blowing up each vertex of our graph $G$ with $r$ vertices. This follows the idea which emerged from \cite{treglown2016degree}. After a constant number of blow-ups we can cover all but an $\eta^2$ fraction of the vertices with $K_r$'s. We then convert this tiling of the blown-up graph into a fractional tiling of the original graph which misses at most $\eta^2 n$ of total weight, which directly implies that at most $\eta n$ vertices can have $w_{\mathcal{T}}(v)< 1-\eta $. In each of the steps we blow up the graph by a factor of $r$, that is, we replace every vertex in the previous graph with a set of $r$ vertices and put complete bipartite graphs between all clusters that were connected by an edge in the previous graph. Note that this implies that for a $K_{r+1}$ in the previous graph we can find a perfect $K_r$-tiling in the blown-up graph. We will repeat two steps: \begin{itemize} \item The first step, the \emph{enlargement step}, turns a $K_r$-tiling which covers a $\lambda $ fraction of the vertices into a $\{K_r,K_{r+1}\}$-tiling that covers a $\lambda' >\lambda + \rho_{\ref{lem:KrKr+1}} (\eta^2, \mu, r)$ fraction. \item The second step, the \emph{blow-up step}, blows up the graph and converts the given $\{K_r,K_{r+1}\}$-tiling into a $K_r$-tiling that covers a $\lambda'$ fraction. \end{itemize} Note that a $K_r$-tiling of any graph corresponding to a constant blow-up by a factor of $s$ of $G$ which covers a $\lambda$ fraction of the vertices can be converted into a fractional $K_r$-tiling in $G$ with weight $\lambda n$. 
This can be done as follows. Let $\mathcal{T'}$ be a $K_r$-tiling in the blown-up graph. We construct the fractional $K_r$-tiling $\mathcal{T}$ in $G$ in the following way. For every $K_r\in \mathcal{T'}$ there is by construction a copy of $K_r$ in $G$ which corresponds to this $K_r$ (in particular, we cannot have two vertices which originate from the same vertex in $G$, as these vertices would come from an independent set). We add this $K_r$ to $\mathcal{T}$ with weight $1/s$. When there are multiple instances that correspond to the same $K_r$ in $G$, we just increase the weight by $1/s$ for each copy in the blown-up graph. Let $G^s$ be the blow-up of $G$ by a factor of $s$; as the tiling we constructed covers $\lambda |G^s|$ vertices in $G^s$, we get that \[ \sum_{v\in V(G)}w_{\mathcal{T}}(v) = \sum_{K_r\in \mathcal{T'}}r \frac{1}{s}=\lambda |G^s| \frac{1}{s}=\lambda |G|.\] It thus suffices to show that for some number $s$, independent of $\gamma$ and $n$, we can find a $K_r$-tiling that covers $(1-\eta^2) |G^s|$ vertices in $G^s$. Let $\gamma =\gamma_{\ref{lem:KrKr+1}}(\eta^2,\mu,r)$ and $\rho=\rho_{\ref{lem:KrKr+1}}(\eta^2,\mu,r)$. Every time we apply Lemma~\ref{lem:KrKr+1} we newly cover a $\rho$ fraction of the vertices. We thus need to apply this lemma at most $1/\rho$ times. In each blow-up step we replace each vertex from the previous graph by $r$ vertices. As we have to do at most $1/\rho$ blow-up steps, we know that $s\le r^{1/\rho}$. \end{proof} Lemma~\ref{lem:almost} follows directly by applying Lemma~\ref{lem:fracmat} with $\mu_{\ref{lem:fracmat}}=\mu/4$ to $\Gamma$ from Lemma~\ref{lem:tilingtransferlemma} with $\mu$ and $\eta=\xi/4$. \section{Finishing the proof} All that is left to do is to combine the results from the previous sections to prove the main theorem. \begin{proof}[Proof of Theorem \ref{thm:mainresult}] Choose $\phi \le \mu/(14r^2)$ but independent from all other variables. 
Let $\xi=\xi_{\ref{lemma:absorbing}}$ where we apply Lemma~\ref{lemma:absorbing} with $\phi$, $h=r$ and $t=6r+1$. Choose $\gamma$ small enough such that it satisfies Lemma~\ref{lem:absorber_diamondpaths} as well as Lemma~\ref{lem:almost} dependent on the parameters $\mu$, $\phi$ and $\xi$. In order to apply Lemma~\ref{lem:absorber_diamondpaths} to get a $\xi$-absorbing set, we show that for every choice of an $r$-vertex subset $S$ of $V(G)$ we can find $\phi n$ vertex-disjoint $(S,6r+1)$-absorbers. We do this as follows. Start with an arbitrary $K_r$ that does not share any vertex with $S$ using Lemma~\ref{lem:RT}. Take an arbitrary bijection $g\colon V(K_r)\to S$ of the vertices of this $K_r$ to the vertices in $S$. Then use Lemma~\ref{lem:absorber_diamondpaths} to find disjoint $K_{r+1}$-diamond paths of length at most $7$ between each pair $(v,g(v))$ for all $v\in V(K_r)$. Add arbitrary $K_r$'s in case some paths were shorter until there are exactly $6r^2 + r$ vertices in total. We can repeat this $\phi n$ times without removing more than $(6r^2 + r)\phi n < (\mu/2)n$ vertices from the graph. Having these $\phi n$ many $(S,t)$-absorbers implies by Lemma~\ref{lemma:absorbing} that there is some constant $\xi$ such that there is a $\xi$-absorbing set of size at most $\phi n$. Take such a set $A$ and put it aside. Note that as $|A|\le \phi n$ we know that for $G'= G\setminus A$ we have $\delta(G')\ge (1-2/r+\mu')n'$ and $\alpha(G') \le \gamma' n' $ where $n'=|V(G')|$, $\mu'=\mu/2$ and $\gamma' = 2\gamma$. We then apply Lemma~\ref{lem:almost} with $\xi$ from Lemma~\ref{lemma:absorbing} to $G'$ to get a tiling that covers all but at most $\xi n'$ vertices. Let $V_R$ be the set of vertices that remain uncovered in Lemma~\ref{lem:almost}. By construction we have $|V_R|\le \xi n'\le \xi n$ and thus we can use the absorber $A$ to cover $A\cup V_R$. 
\end{proof} \section{Final Remarks} In Section~\ref{sec:absorbers} we prove the existence of an absorber as in Definition~\ref{def:absorber} and in Section~\ref{sec:almost} we find an almost cover by Lemma~\ref{lem:almost}. These together imply Theorem \ref{thm:mainresult}. Note that for $r < 4$ the same properties hold in spirit, but we have a different problem with divisibility of the connected components as described in \cite{balogh2016triangle}. We showed that additionally having a low independence number in a graph significantly improves the statement from the famous Hajnal-Szemer\'edi theorem. We can take twice the size of a clique and this shows that large independent sets are really the bottleneck of the theorem. The methods we apply might work for embedding any kind of structure into graphs with low independence number. In particular, since sparse random graphs have this property, as an immediate corollary we get that after adding a dense graph (with $\delta(G) \ge (1-2/r + \mu)n$) on top of a sparse random graph ($G_{n,p}$ with $p>C\log n / n$) in any adversarial way, we can still find a $K_r$-factor with high probability. Still, we have that the bottleneck seems to be that there is a large triangle-free set of size $2n / r+1$. From this set we can take at most two vertices per clique, so it is impossible to cover with $n/r$ cliques of size $r$. A natural question would be whether this extends to the natural generalization of the independence number. That is, instead of requiring at least an edge in any subgraph of size $\gamma n$, we even require a triangle. More formally, let $\alpha_\ell(G)$ be the size of the largest set of vertices in $G$ that does not contain an induced $K_\ell$. The following question is a generalization of Theorem~\ref{thm:mainresult}. 
\begin{question}\label{q:generalalpha} Is it true that for every $\ell,r\in \mathbb{N}$ with $\ell\le r$ and $\mu>0$ there is a constant $\gamma$ and $n_0\in \mathbb{N}$ such that every graph on $n\ge n_0$ vertices where $r$ divides $n$, with $\delta(G)\ge\max\{(1/2+\mu)n,(1-\ell/r+\mu)n\}$ and $\alpha_\ell(G)<\gamma n$ has a $K_r$-factor? \end{question} In our results, the independence number is always dependent on the $\mu$ of the minimum degree, even though the examples that we know of only require $\alpha(G)$ to be smaller than $n/r$. Does there exist a fixed constant $\gamma$ dependent only on $r$ such that all graphs with minimum degree $\delta(G) \ge (1-2/r+ \mu)n $ and $\alpha(G)<\gamma n$ have a $K_r$-factor? Combined with Question \ref{q:generalalpha} we ask whether the following is true. \begin{question} \label{q:generalgamma} Is there a $\gamma > 0$ dependent only on $r$ and $\ell$ such that for every $\mu>0$ there is an $n_0$ large enough such that every graph on $n\ge n_0$ vertices where $r$ divides $n$ with $\delta(G)\ge\max\{(1/2+\mu)n,(1-\ell/r+\mu)n\}$ and $\alpha_\ell(G)<\gamma n$ has a $K_r$-factor? \end{question} As a first step it would be interesting to answer Question \ref{q:generalgamma} for $\ell=2$ and $r=4$. Clearly, we cannot hope for $\gamma\ge 1/r$ but it would be interesting to see how far we can push $\gamma$ towards this bound. \section*{Acknowledgment} We want to thank Rajko Nenadov for bringing the problem to our attention and helpful discussions when we started the project. We thank the anonymous referee for the useful comments and suggestions. \pagebreak \end{document}
\begin{document} \title{Entanglement between Collective Operators in the Linear Harmonic Chain} \begin{abstract} We investigate entanglement between collective operators of two blocks of oscillators in an infinite linear harmonic chain. These operators are defined as averages over local operators (individual oscillators) in the blocks. On the one hand, this approach of "physical blocks" meets realistic experimental conditions, where measurement apparatuses do not interact with single oscillators but rather with a whole bunch of them, i.e., where in contrast to usually studied "mathematical blocks" not every possible measurement is allowed. On the other, this formalism naturally allows the generalization to blocks which may consist of several non-contiguous regions. We quantify entanglement between the collective operators by a measure based on the Peres-Horodecki criterion and show how it can be extracted and transferred to two qubits. Entanglement between two blocks is found even in the case where none of the oscillators from one block is entangled with an oscillator from the other, showing genuine bipartite entanglement between collective operators. Allowing the blocks to consist of a periodic sequence of subblocks, we verify that entanglement scales at most with the total boundary region. We also apply the approach of collective operators to scalar quantum field theory. \end{abstract} \date{\today} \author{Johannes Kofler} \affiliation {Institut für Experimentalphysik, Universität Wien, Boltzmanngasse 5, 1090 Wien, Austria} \author{Vlatko Vedral} \affiliation {The School of Physics and Astronomy, University of Leeds, Leeds LS2 9JT, United Kingdom} \affiliation {Institut für Experimentalphysik, Universität Wien, Boltzmanngasse 5, 1090 Wien, Austria} \author{Myungshik S. 
Kim} \affiliation {School of Mathematics and Physics, Queen’s University, Belfast BT7 1NN, United Kingdom} \author{\v{C}aslav Brukner} \affiliation {Institut für Experimentalphysik, Universität Wien, Boltzmanngasse 5, 1090 Wien, Austria} \affiliation {Institut für Quantenoptik und Quanteninformation, Österreichische Akademie der Wissenschaften,\\ Boltzmanngasse 3, 1090 Wien, Austria} \pacs{03.65.-w, 03.67.-a, 03.67.Mn} \maketitle \section{Introduction} Quantum entanglement is a physical phenomenon in which the quantum states of two or more systems can only be described with reference to each other, even though the individual systems may be spatially separated. This leads to correlations between observables of the systems that cannot be understood on the basis of classical (local realistic) theories \cite{Bell1964}. Its importance today exceeds the realm of the foundations of quantum physics and entanglement has become an important physical resource, like energy, that allows performing communication and computation tasks with efficiency which is not achievable classically \cite{Niel1998}. In the near future we will certainly see more and more experiments on entanglement of increasing complexity. Moving to higher entangled systems or entangling more systems with each other, will eventually push the realm of quantum physics well into the macroscopic world. It will be therefore important to investigate under which conditions entanglement within or between ''macroscopic'' objects, each consisting of a sample containing a large number of the constituents, can arise. Recently, it was shown that macroscopic entanglement can arise ''naturally'' between constituents of various complex physical systems. Examples of such systems are chains of interacting spin systems \cite{Arne2001,Niel1998}, harmonic oscillators \cite{Aude2002,Sera2005} and quantum fields \cite{Rezn2003}. 
Entanglement can have an effect on the macroscopic properties of these systems \cite{Gosh2003,Bruk2005,Wies2005} and can be in principle extractable from them for quantum information processing \cite{Rezn2003,Pate2004,deCh2005,Retz2005}. With the aim of better understanding macroscopical entanglement we will investigate entanglement between \textit{collective operators} in this paper. A simple and natural system is the ground state of a linear chain of harmonic oscillators furnished with harmonic nearest-neighbor interaction. The mathematical entanglement properties of this system were extensively investigated in \cite{Aude2002,Sera2005,Pate2005,Bote2004}. Entanglement was computed in the form of logarithmic negativity for general bisections of the chain and for contiguous blocks of oscillators that do not comprise the whole chain. It was shown that the log-negativity typically decreases exponentially with the separation of the groups and that the larger the groups, the larger the maximal separation for which the log-negativity is non-zero \cite{Aude2002}. It also was proven that an area law holds for harmonic lattice systems, stating that the amount of entanglement between two complementary regions scales with their boundary \cite{Cram2005}. In a real experimental situation, however, we are typically not able to determine the complete mathematical amount of entanglement (as measured, e.g., by log-negativity) which is non-zero even if two blocks share only one arbitrarily weak entangled pair of oscillators. Our measurement apparatuses normally cannot resolve single oscillators, but rather interact with a whole bunch of them in one way, potentially even in \textit{non-contiguous regions}, thus measuring certain \textit{global properties}. 
Here we will study entanglement between ''physical blocks'' of harmonic oscillators --- existing only if there is entanglement between the \textit{collective operators} defined on the entire blocks --- as a function of their size, relative distance and the coupling strength. Our aim is to quantify (experimentally accessible) entanglement between global properties of two groups of harmonic oscillators. Surprisingly, we will see that such collective entanglement can be demonstrated even in the case where none of the oscillators from one block is entangled with an oscillator from the other block (i.e., it cannot be understood as a cumulative effect of entanglement between pairs of oscillators), which is in agreement with \cite{Aude2002}. This shows the existence of bipartite entanglement between collective operators. Because of the area law \cite{Cram2005}\ the amount of entanglement is relatively small in the first instance. We suggest a way to overcome this problem by allowing the collective blocks to consist of a \textit{periodic sequence of subblocks}. Then the total boundary region between them is increased and we verify that indeed a larger amount of entanglement is found for periodic blocks, where the entanglement scales at most with the \textit{total} boundary region. We give an analytical approximation of this amount of entanglement and motivate how it can in principle be extracted from the chain \cite{Rezn2003,Pate2004,deCh2005,Retz2005}. Methodologically, we will quantify the entanglement between collective operators of two blocks of harmonic oscillators by using a measure for continuous variable systems based on the Peres-Horodecki criterion \cite{Pere1996,Horo1997,Simo2000,Kim2002}. The collective operators will be defined as sums over local operators for all single oscillators belonging to the block. The infinite harmonic chain is assumed to be in the ground state and since the blocks do not comprise the whole chain, they are in a mixed state. 
\section{Linear Harmonic Chain} We investigate a linear harmonic chain, where each of the $N$ oscillators is situated in a harmonic potential with frequency $\omega$ and each oscillator is coupled with its neighbors by a harmonic potential with the coupling frequency $\Omega$. The oscillators have mass $m$ and their positions and momenta are denoted as $\overline{q}_{i}$ and $\overline{p}_{i}$, respectively. Assuming periodic boundary conditions ($\overline{q}_{N+1} \equiv\overline{q}_{1}$), the Hamiltonian thus reads \cite{Schw2003} \begin{equation} H= {\displaystyle\sum\limits_{j=1}^{N}} \left( \frac{\overline{p}_{j}^{2}}{2\,m}+\frac{m\,\omega^{2}\,\overline {q}_{j}^{2}}{2}+\frac{m\,\Omega^{2}\,(\overline{q}_{j}-\overline{q}_{j-1} )^{2}}{2}\right) \!. \end{equation} We canonically go to dimensionless variables:\ $q_{j}\equiv C\,\overline {q}_{j}$ and $p_{j}\equiv\overline{p}_{j}/C$, where $C\equiv\sqrt {m\omega(1+2\,\Omega^{2}/\omega^{2})^{1/2}}$ \cite{Bote2004}. By this means the Hamiltonian becomes \begin{equation} H=\frac{E_{0}}{2}\, {\displaystyle\sum\limits_{j=1}^{N}} (p_{j}^{2}+q_{j}^{2}-\alpha\,q_{j}\,q_{j+1})\,, \end{equation} with the abbreviations $\alpha\equiv2\,\Omega^{2}/(2\,\Omega^{2}+\omega^{2})$ and $E_{0}\equiv\sqrt{2\,\Omega^{2}+\omega^{2}}$. The coupling constant is restricted to values $0<\alpha<1$, where $\alpha\rightarrow0$ in the weak coupling limit ($\Omega/\omega\rightarrow0$) and $\alpha\rightarrow1$ in the strong coupling limit ($\Omega/\omega\rightarrow\infty$). 
In the language of second quantization the positions and momenta are converted into operators ($q_{j}\rightarrow\hat{q}_{j}$, $p_{j}\rightarrow\hat{p}_{j}$) and are expanded into modes of their annihilation and creation operators, $\hat{a}$ and $\hat{a}^{\dagger}$, respectively: \begin{align} \hat{q}_{j} & =\frac{1}{\sqrt{N}}\, {\displaystyle\sum\limits_{k=0}^{N-1}} \,\frac{1}{\sqrt{2\,\nu(\theta_{k})}}\left[ \hat{a}(\theta_{k})\,\text{e} ^{\text{i}\,\theta_{k}\,j}+\text{H.c.}\right] \!,\label{qj}\\ \hat{p}_{j} & =\frac{-\text{i}}{\sqrt{N}}\, {\displaystyle\sum\limits_{k=0}^{N-1}} \,\sqrt{\frac{\nu(\theta_{k})}{2}}\left[ \hat{a}(\theta_{k})\,\text{e} ^{\text{i}\,\theta_{k}\,j}-\text{H.c.}\right] \!. \label{pj} \end{align} Here $\theta_{k}\equiv2\,\pi\,k/N$ (with $k=0,1,...,N-1$) is the dimensionless pseudo-momentum and $\nu(\theta_{k})\equiv\sqrt{1-\alpha\cos\theta_{k}}$ is the dispersion relation. The annihilation and creation operators fulfil the well known commutation relation $\left[ \hat{a}(\theta_{k}),\hat{a}^{\dagger }(\theta_{k^{\prime}})\right] =\delta_{kk^{\prime}}$, since $[\hat{q} _{i},\hat{p}_{j}]=\;$i$\,\delta_{ij}$ has to be guaranteed. The ground state (vacuum), denoted as $\left| 0\right\rangle $, is defined by $\hat{a} (\theta_{k})\left| 0\right\rangle =0$ holding for all $\theta_{k}$. The two-point vacuum correlation functions \begin{align} g_{|i-j|} & \equiv\left\langle 0\right| \hat{q}_{i}\,\hat{q}_{j}\left| 0\right\rangle \equiv\left\langle \!\right. \hat{q}_{i}\,\hat{q}_{j}\left. \!\right\rangle ,\label{g}\\ h_{|i-j|} & \equiv\left\langle 0\right| \hat{p}_{i}\,\hat{p}_{j}\left| 0\right\rangle \equiv\left\langle \!\right. \hat{p}_{i}\,\hat{p}_{j}\left. \!\right\rangle , \label{h} \end{align} are given by $g_{l}=(2\,N)^{-1}\, {\textstyle\sum\nolimits_{k=0}^{N-1}} \nu^{-1}(\theta_{k})\cos(l\,\theta_{k})$ and $h_{l}=(2\,N)^{-1}\, {\textstyle\sum\nolimits_{k=0}^{N-1}} \nu(\theta_{k})\cos(l\,\theta_{k})$, where $l\equiv|i-j|$. 
In the limit of an infinite chain ($N\rightarrow\infty$) --- which we will study below --- and for $l<N/2$ they can be expressed in terms of the hypergeometric function $_{2}F_{1}$ \cite{Bote2004}:\ $g_{l}=(z^{l}/2\,\mu)\tbinom{l-1/2}{l} \,_{2}F_{1}(1/2,l+1/2,l+1,z^{2})$, $h_{l}=(\mu\,z^{l}/2)\tbinom{l-3/2} {l}\,_{2}F_{1}(-1/2,l-1/2,l+1,z^{2})$, where $z\equiv(1-\sqrt{1-\alpha^{2} })/\alpha$ and $\mu\equiv1/\sqrt{1+z^{2}}$. \section{Defining Collective Operators} In the following, we are interested in entanglement between two ''physical blocks'' of oscillators, where the blocks are represented by a specific form of \textit{collective operators} which are normalized sums of individual operators. By means of such a formalism we seek to fulfil experimental conditions and constraints, since \textit{finite experimental resolution implies naturally the measurement of, e.g., the average momentum of a bunch of oscillators rather than the momentum of only one}. On the other hand, this formalism can easily take account of blocks that\textit{ consist of non-contiguous regions}, leading to interesting results which will be shown below. We want to point out that this convention of the term \textit{block} is not the same as it is normally used in the previous literature. In contrast to the latter, for which one allows any possible measurement, our simulation of realizable experiments already lacks some information due to the averaging. Let us now consider two non-overlapping blocks of oscillators, $A$ and $B$, within the closed harmonic chain in its ground state, where each block contains $n$ oscillators. The blocks are separated by $d\geq0$ oscillators (Fig.\ \ref{Fig Blocks}). We assume $n,d\ll N$ and $N\rightarrow\infty$ for the numerical calculations of the two-point correlation functions.\begin{figure} \caption{Two blocks of a harmonic chain $A$ and $B$. 
Each block consists of $n$ oscillators and the blocks are separated by $d$ oscillators.} \label{Fig Blocks} \end{figure} By a Fourier transform we map the $n$ oscillators of each block onto $n$ (''orthogonal'') \textit{frequency-dependent} collective operators \begin{align} \hat{Q}_{A}^{(k)} & \equiv\dfrac{1}{\sqrt{n}}\; {\displaystyle\sum\limits_{j\in A}} \;\hat{q}_{j}\;\text{e}^{\tfrac{2\,\pi\,\text{i}\,j\,k}{n}},\\ \hat{P}_{A}^{(k)} & \equiv\dfrac{1}{\sqrt{n}}\; {\displaystyle\sum\limits_{j\in A}} \;\hat{p}_{j}\;\text{e}^{-\tfrac{2\,\pi\,\text{i}\,j\,k}{n}}, \end{align} with the frequencies $k=0,...,n-1$, and analogously for block $B$. The commutator of the collective position and momentum operators is \begin{equation} \lbrack\hat{Q}_{A}^{(k)},\hat{P}_{A}^{(k^{\prime})}]=\text{i}\,\delta _{kk^{\prime}}\,. \label{commutator QP} \end{equation} This means that collective operators for different frequencies $k\neq k^{\prime}$ commute. For different blocks the commutator vanishes:\ $[\hat {Q}_{A}^{(k)},\hat{P}_{B}^{(k^{\prime})}]=0$. If the individual positions and momenta of all oscillators are written into a vector \begin{equation} \hat{\mathbf{x}}\equiv(\hat{q}_{1},\hat{p}_{1},\hat{q}_{2},\hat{p} _{2},...,\hat{q}_{N},\hat{p}_{N})^{\text{T}}, \end{equation} then there holds the commutation relation \begin{equation} \lbrack\hat{x}_{i},\hat{x}_{j}]=\text{i}\,\Omega_{ij} \end{equation} with $\mathbf{\Omega}$ the $n$-fold direct sum of $2\!\times\!2$ symplectic matrices: \begin{equation} \mathbf{\Omega}\equiv {\displaystyle\bigoplus\limits_{j=1}^{n}} \left( \! \begin{array} [c]{cc} 0 & 1\\ -1 & 0 \end{array} \!\right) \!. \end{equation} A matrix $\mathbf{S}$ transforms $\hat{\mathbf{x}}$ into a vector of collective (and uninvolved individual) oscillators: \begin{equation} \hat{\mathbf{X}}\equiv\mathbf{S}\,\hat{\mathbf{x}}=(\{\hat{Q}_{A}^{(k)} ,\hat{P}_{A}^{(k)}\}_{k},\{\hat{Q}_{B}^{(k)},\hat{P}_{B}^{(k)}\}_{k},\{\hat {q}_{j},\hat{p}_{j}\}_{j})^{\text{T}}. 
\end{equation} Here $\{\hat{Q}_{A}^{(k)},\hat{P}_{A}^{(k)}\}_{k}=(\hat{Q}_{A}^{(0)},\hat {P}_{A}^{(0)},...,\hat{Q}_{A}^{(n-1)},\hat{P}_{A}^{(n-1)})$ denotes all collective oscillators of block $A$ and analogously for block $B$, whereas $\{\hat{q}_{j},\hat{p}_{j}\}_{j}$ denotes the $2\,(N-2\,n)$ position and momentum entries of those $N-2\,n$ oscillators which are not part of one of the two blocks. The matrix $\mathbf{S}$ corresponds to a Gaussian operation \cite{Eise2003}. It has determinant det$\,\mathbf{S}=1$ and preserves the symplectic structure \begin{equation} \mathbf{\Omega}=\mathbf{S}^{\text{T}}\,\mathbf{\Omega}\,\mathbf{S}\,, \label{sympl struct} \end{equation} and hence \begin{equation} \lbrack\hat{X}_{i},\hat{X}_{j}]=\text{i}\,\Omega_{ij} \label{comm sympl} \end{equation} for all $i,j$, in particular verifying (\ref{commutator QP}). This means that the Gaussianness of the ground state of the harmonic chain (i.e., the fact that the state is completely characterized by its first and second moments, see below) was preserved by the (Fourier) transformation to the frequency-dependent collective operators. \section{Quantifying Entanglement between Collective Operators} In reality, we are typically not capable of single particle resolution measurements and only of measuring the collective operators with one frequency, namely $k=0$. Note that in general the correlations of higher-frequency collective operators, e.g., $\left\langle \!\right. (\hat {Q}_{A}^{(k)})^{2}\left. \!\right\rangle $ or $\left\langle \!\right. \hat{Q}_{A}^{(k)}\hat{Q}_{B}^{(k)}\left. \!\right\rangle $ with $k\neq0$, are not real numbers. 
Therefore, as a natural choice, we denote as the collective operators \begin{align} \hat{Q}_{A} & \equiv\hat{Q}_{A}^{(0)}=\dfrac{1}{\sqrt{n}}\, {\displaystyle\sum\limits_{j\in A}} \,\hat{q}_{j}\,,\label{def Q}\\ \hat{P}_{A} & \equiv\hat{P}_{A}^{(0)}=\dfrac{1}{\sqrt{n}}\, {\displaystyle\sum\limits_{j\in A}} \,\hat{p}_{j}\,, \label{def P} \end{align} and analogously for block $B$. It seems to be a very natural situation that the experimenter only has access to these collective properties and we are interested in the amount of (physical) entanglement one can extract from the system if only the collective observables $\hat{Q}_{A,B}$ and $\hat{P}_{A,B}$ are measured. Reference \cite{Simo2000} derives a separability criterion which is based on the Peres-Horodecki criterion \cite{Pere1996,Horo1997} and the fact that --- in the continuous variables case --- the partial transposition allows a geometric interpretation as mirror reflection in phase space. Following largely the notation in the original paper, we introduce the vector \begin{equation} \hat{\mathbf{\xi}}\equiv(\hat{Q}_{A},\hat{P}_{A},\hat{Q}_{B},\hat{P}_{B}) \end{equation} of collective operators. The commutation relations have the compact form $[\hat{\xi}_{\alpha},\hat{\xi}_{\beta}]=\;$i$\,K_{\alpha\beta}$ with $\mathbf{K}\equiv {\textstyle\bigoplus\nolimits_{j=1}^{2}} \!\left( \genfrac{}{}{0pt}{1}{0}{-1} \genfrac{}{}{0pt}{1}{1}{0} \right) $. The separability criterion bases on the covariance matrix (of first and second moments) \begin{equation} V_{\alpha\beta}\equiv\dfrac{1}{2}\left\langle \!\right. \Delta\hat{\xi }_{\alpha}\Delta\hat{\xi}_{\beta}+\Delta\hat{\xi}_{\beta}\Delta\hat{\xi }_{\alpha}\left. \!\right\rangle , \end{equation} where $\Delta\hat{\xi}_{\alpha}\equiv\hat{\xi}_{\alpha}-\left\langle \!\right. \hat{\xi}_{\alpha}\left. \!\right\rangle $ with $\left\langle \!\right. \hat{\xi}_{\alpha}\left. \!\right\rangle =0$ in our case (state around the origin of phase space). 
The covariance matrix $\mathbf{V}$ is real (which would not be the case for higher-frequency collective operators) and symmetric:\ $\left\langle \!\right. \hat{Q}_{A}\hat{Q}_{B}\left. \!\right\rangle =\left\langle \!\right. \hat{Q}_{B}\hat{Q}_{A}\left. \!\right\rangle $ and $\left\langle \!\right. \hat{P}_{A}\hat{P}_{B}\left. \!\right\rangle =\left\langle \!\right. \hat{P}_{B}\hat{P}_{A}\left. \!\right\rangle $, coming from the fact that the two-point correlation functions (\ref{g}) and (\ref{h}) only depend on the absolute value of the position index difference. On the other hand, using (\ref{qj}) and (\ref{pj}), we verify that $\left\langle \!\right. \hat{q}_{i}\,\hat{p}_{j}\left. \!\right\rangle =\;$i$\,(2\,N)^{-1}\, {\textstyle\sum\nolimits_{k=0}^{N-1}} \exp[$i$\,\theta_{k}(i-j)]$ and $\left\langle \!\right. \hat{p}_{j}\,\hat {q}_{i}\left. \!\right\rangle =-$i$\,(2\,N)^{-1}\, {\textstyle\sum\nolimits_{k=0}^{N-1}} \exp[$i$\,\theta_{k}(j-i)]$. For $i\neq j$ both summations vanish ($\theta _{k}\equiv2\,\pi\,k/N$ and $i,j$ integer) and for $i=j$ they are the same but with opposite sign. Thus, in all cases $\left\langle \!\right. \hat{q} _{i}\,\hat{p}_{j}\left. \!\right\rangle =-\left\langle \!\right. \hat{p} _{j}\,\hat{q}_{i}\left. \!\right\rangle $. These symmetries also hold for the collective operators and hence we obtain \begin{equation} \mathbf{V}=\left( \! \begin{array} [c]{cccc} G & 0 & G_{AB} & 0\\ 0 & H & 0 & H_{AB}\\ G_{AB} & 0 & G & 0\\ 0 & H_{AB} & 0 & H \end{array} \!\right) \!. \label{V} \end{equation} The matrix elements are \begin{align} G & \equiv\left\langle \!\right. \hat{Q}_{A}^{2}\left. \!\right\rangle =\left\langle \!\right. \hat{Q}_{B}^{2}\left. \!\right\rangle =\frac{1}{n}\, {\displaystyle\sum\limits_{j\in A}} \, {\displaystyle\sum\limits_{i\in A}} \,g_{|j-i|}\,,\\ H & \equiv\left\langle \!\right. \hat{P}_{A}^{2}\left. \!\right\rangle =\left\langle \!\right. \hat{P}_{B}^{2}\left. 
\!\right\rangle =\frac{1}{n}\, {\displaystyle\sum\limits_{j\in A}} \, {\displaystyle\sum\limits_{i\in A}} \,h_{|j-i|}\,,\\ G_{AB} & \equiv\left\langle \!\right. \hat{Q}_{A}\hat{Q}_{B}\left. \!\right\rangle =\frac{1}{n}\, {\displaystyle\sum\limits_{j\in A}} \, {\displaystyle\sum\limits_{i\in B}} \,g_{|j-i|}\,,\\ H_{AB} & \equiv\left\langle \!\right. \hat{P}_{A}\hat{P}_{B}\left. \!\right\rangle =\frac{1}{n}\, {\displaystyle\sum\limits_{j\in A}} \, {\displaystyle\sum\limits_{i\in B}} \,h_{|j-i|}\,. \end{align} To quantify entanglement between two collective blocks we use the degree of entanglement $\varepsilon$, given by the absolute sum of the negative eigenvalues of the partially transposed density operator:\ $\varepsilon \equiv\;$Tr$|\rho^{\text{T}_{B}}|-1$, i.e., by measuring how much the mirror reflected state fails to be positive definite. This measure (negativity) is based on the Peres-Horodecki criterion \cite{Pere1996,Horo1997} and was shown to be an entanglement monotone \cite{Lee2000,Vida2002}. For covariance matrices of the form (\ref{V}) it reads \cite{Kim2002} \begin{equation} \varepsilon=\max\left( 0,\frac{(\delta_{1}\delta_{2})_{0}}{\delta_{1} \delta_{2}}-1\right) \!, \label{epsilon} \end{equation} where $\delta_{1}\equiv G-|G_{AB}|$ and $\delta_{2}\equiv H-|H_{AB}|$. In general, the numerator is defined by the square of the Heisenberg uncertainty relation \begin{equation} (\delta_{1}\delta_{2})_{0}\equiv\left( \frac{1}{2}\,|\!\left\langle \!\right. [\hat{Q}_{A,B},\hat{P}_{A,B}]\left. \!\right\rangle \!|\right) ^{\!2}, \end{equation} with $(\delta_{1}\delta_{2})_{0}=1/4$ due to (\ref{commutator QP}). We note that $\varepsilon$ is a \textit{degree} of entanglement (in the sense of necessity and sufficiency) only for Gaussian states which are completely characterized by their first and second moments, as for example the ground state of the harmonic chain we are studying. 
However, we left out the higher-frequency collective operators (and all the oscillators which are not part of the blocks) and therefore, the entanglement $\varepsilon$ has to be understood as the Gaussian part of the amount of entanglement which exists between (and can be extracted from) the two blocks when only the collective properties $\hat{Q}_{A,B}$ and $\hat{P}_{A,B}$, as defined in (\ref{def Q}) and (\ref{def P}), are accessible. There also exists an entanglement witness in form of a separability criterion based on variances, where $\Delta\equiv\left\langle \!\right. (\hat{Q} _{A}-\hat{Q}_{B})^{2}\left. \!\right\rangle +\left\langle \!\right. (\hat {P}_{A}+\hat{P}_{B})^{2}\left. \!\right\rangle =2\,(G-G_{AB}+H+H_{AB})<2$ is a sufficient condition for the state to be entangled \cite{Duan2000}. We note that the above negativity measure (\ref{epsilon}) is ''stronger'' than this witness in the whole parameter space ($\alpha,n$). In particular, there are cases where $\varepsilon>0$ although $\Delta\geq2$. This is in agreement with the finding that the variance criterion is weaker than a generalized negativity criterion \cite{Shch2005}. We further note that the amount of entanglement (\ref{epsilon}) is invariant under a change of potential redefinitions of the collective operators, e.g., $\hat{Q}_{A}\equiv\, {\textstyle\sum\nolimits_{j\in A}} \,\hat{q}_{j}$ or $\hat{Q}_{A}\equiv(1/n)\, {\textstyle\sum\nolimits_{j\in A}} \,\hat{q}_{j}$, as then the modified scaling in the correlations ($G$, $G_{AB}$, $H$, and $H_{AB}$) is exactly compensated by the modified scaling of the Heisenberg uncertainty in the numerator. Figure \ref{Fig Degr} shows the results for $d=0$ and $d=1$. In the first case --- if the blocks are neighboring --- there exists entanglement for all possible coupling strengths $\alpha$ and block sizes $n$. 
In the latter case --- if there is one oscillator between the blocks --- due to the strongly decaying correlation functions $g$ and $h$ there is no entanglement between two single oscillators ($n=1$), but entanglement for larger blocks (up to $n=4$, depending on $\alpha$). The statement that entanglement can emerge by going to larger blocks was also found in \cite{Aude2002}. But there the blocks were abstract objects, containing all the information of their constituents. In the case of collective operators, however, increasing the block size (averaging over more oscillators) is also connected with a loss of information. In spite of this loss and the mixedness of the state, two blocks can be entangled, although none of the individual pairs between the blocks is entangled --- indicating true bipartite entanglement between collective operators. For $d\geq2$, however, no entanglement can be found anymore.\begin{figure} \caption{Degree of collective entanglement $\varepsilon$ for two blocks of oscillators as a function of their size $n$. (a) The blocks are neighboring ($d=0$) and entanglement exists for all $n$ and coupling strengths $\alpha$. Plotted are $\alpha=0.99$ (diamonds), $\alpha=0.9$ (squares) and $\alpha=0.5$ (triangles). (b) The same for two blocks which are separated by one oscillator ($d=1$). The two blocks are unentangled for $n=1$ but can be entangled if one increases the block size ($n>1$), although none of the individual pairs between the blocks is entangled.} \label{Fig Degr} \end{figure}
We therefore propose to increase the number of boundaries by considering two non-overlapping blocks, where we allow a \textit{periodic continuation} of the situation above, i.e.\ a sequence of $m\geq1$ subblocks, separated by $d$ oscillators and each consisting of $s\geq1$ oscillators, where $ms=n$ (Fig.\ \ref{Fig Blocksgen}).\begin{figure} \caption{Two periodic blocks of a harmonic chain $A$ and $B$. Each block can consist of $m$ subblocks with $s$ oscillators each, separated by $d$ oscillators. In the picture $d=1$, $m=2$, $s=3$ and the number of oscillators per block is $n=ms=6$.} \label{Fig Blocksgen} \end{figure} The degree of entanglement between two periodic blocks of non-separated ($d=0$) one-particle subblocks ($s=1$) is larger for stronger coupling constant $\alpha$ and grows with the overall number of oscillators $n$ (Fig.\ \ref{Fig Degrgen}a). For given $\alpha$ and $n$ and no separation between the subblocks ($d=0$) the entanglement is larger for the case of small subblocks, as then there are many of them, causing a large total boundary (Fig.\ \ref{Fig Degrgen}b). Entanglement can be even found for larger separation ($d=1,2$) with a more complicated dependence on the size $s$ of the subblocks. There is a trade-off between having a large number of boundaries and the fact that one should have large subblocks as individual separated oscillators are not entangled (Fig.\ \ref{Fig Degrgen}c,d). For $d\geq3$ no entanglement can be found anymore. (In a realistic experimental situation, where the separation $d$ is not sharply defined, e.g., where there are weighted contributions for $d=0,1,...,d_{\text{max}}$, entanglement can persist even for $d_{\text{max}}\geq3$, depending on the weighting factors.)\begin{figure} \caption{Degree of collective entanglement $\varepsilon$ for two periodic blocks of oscillators as a function of their total size $n$. (a) Neighboring one-particle subblocks ($d=0$, $s=1$). 
Let us assume that the subblocks are directly neighboring, $d=0$.
We close this section by noting that the entanglement (\ref{epsilon}) between collective blocks of oscillators --- being the Gaussian part --- can in principle (for sufficient control of the block separation $d$) be transferred to two remote qubits via a Jaynes--Cummings type interaction \cite{Rezn2003,Pate2004,Retz2005}. For the interaction with periodic blocks ``gratings'' have to be employed in the experimental setup. The interaction Hamiltonian is of the form \begin{align} \hat{H}_{\text{int}} & \sim(\text{e}^{-\text{\thinspace i\thinspace}\omega_{1}\,t}\,\hat{\sigma}_{1}^{+}+\text{e}^{+\text{\thinspace i\thinspace}\omega_{1}\,t}\,\hat{\sigma}_{1}^{-})\,\hat{Q}_{A}\nonumber\\ & \quad\quad+(\text{e}^{-\text{\thinspace i\thinspace}\omega_{2}\,t}\,\hat{\sigma}_{2}^{+}+\text{e}^{+\text{\thinspace i\thinspace}\omega_{2}\,t}\,\hat{\sigma}_{2}^{-})\,\hat{Q}_{B}\,, \end{align} where $\omega_{i}$ is the Rabi frequency and $\hat{\sigma}_{i}^{+}=(\hat{\sigma}_{i}^{-})^{\dagger}=\left| e\right\rangle \!_{i}\,_{i}\!\left\langle g\right|$ is the raising operator (with $\left| g\right\rangle \!_{i}$ and $\left| e\right\rangle \!_{i}$ the ground and the excited state) of the $i$-th qubit ($i=1,2$).
It satisfies the Klein--Gordon equation (in natural units $\hbar=c=1$) with mass $m$ \begin{equation} \ddot{\phi}-\nabla^{2}\phi+m^{2}\,\phi=0\,. \end{equation} With the canonical quantization procedure $\phi$ and $\pi$ become operators satisfying the non-trivial commutation relation $[\hat{\phi}(x,t),\hat{\pi }(x^{\prime},t)]=\;$i$\,\delta(x-x^{\prime})$. The field operator can be expanded into a Fourier integral over elementary plane wave solutions \cite{Bjo2003} \begin{align} \hat{\phi}(x,t) & = {\displaystyle\int} \frac{\text{d}k}{\sqrt{4\,\pi\,\omega_{k}}}\left[ \hat{a}(k)\,\text{e} ^{\text{i}\,k\,x-\text{i}\,\omega_{k}\,t}+\text{H.c.}\right] \!,\label{field} \\ \hat{\pi}(x,t) & =-\text{i} {\displaystyle\int} \frac{\text{d}k\,\omega_{k}}{\sqrt{4\,\pi}}\left[ \hat{a}(k)\,\text{e} ^{\text{i}\,k\,x-\text{i}\,\omega_{k}\,t}-\text{H.c.}\right] \!, \end{align} where $k$ is the wave number and $\omega_{k}=+\sqrt{k^{2}+m^{2}}$ is the dispersion relation. The annihilation and creation operators fulfil $\left[ \hat{a}(k),\hat{a}^{\dagger}(k^{\prime})\right] =\delta(k-k^{\prime})$. We write the field operator as a sum of two contributions $\hat{\phi}=\hat{\phi }^{(+)}+\hat{\phi}^{(-)}$, where $\hat{\phi}^{(+)}$ ($\hat{\phi}^{(-)}$) is the contribution with positive (negative) frequency. Thus, $\hat{\phi}^{(+)}$ corresponds to the term with the annihilation operator in (\ref{field}). The vacuum correlation function is given by the (equal-time) commutator of the positive and the negative frequency part: \begin{equation} \left\langle 0\right| \hat{\phi}(x,t)\,\hat{\phi}(y,t)\left| 0\right\rangle =[\hat{\phi}^{(+)}(x,t),\hat{\phi}^{(-)}(y,t)]\,. \end{equation} It is a peculiarity of the idealization of quantum field theory that for $x=y$ this propagator diverges in the ground state: \begin{equation} \left\langle 0\right| \hat{\phi}^{2}(x,t)\left| 0\right\rangle \rightarrow\infty\,. 
The commutator is \begin{equation} \lbrack\hat{\Phi}_{L}(x_{0},t),\hat{\Pi}_{L}(x_{0},t)]=\frac{1}{L}\, {\displaystyle\int\nolimits_{-L/2}^{L/2}} \, {\displaystyle\int\nolimits_{-L/2}^{L/2}} \,\text{i}\,\delta(x-y)\,\text{d}x\,\text{d}y=\text{i}\,, \label{commutator PhiPi} \end{equation}
The spatial integration in (\ref{Phi}) and (\ref{Pi}) can be carried out analytically: \begin{align} \hat{\Phi}_{L}(x_{0},t) & =\frac{1}{\sqrt{\pi\,L}}\, {\displaystyle\int\nolimits_{-\infty}^{\infty}} \,\frac{\text{d}k}{k\,\sqrt{\omega_{k}}}\,\sin\!\left( \frac{k\,L}{2}\right) \nonumber\\ & \quad\times\left[ \hat{a}(k)\,\text{e}^{\text{i}\,k\,x_{0}-\text{i} \,\omega_{k}\,t}+\text{H.c.}\right] \!,\label{field Phi}\\ \hat{\Pi}_{L}(x_{0},t) & =\frac{-\text{i}}{\sqrt{\pi\,L}}\, {\displaystyle\int\nolimits_{-\infty}^{\infty}} \,\frac{\text{d}k\,\sqrt{\omega_{k}}}{k}\,\sin\!\left( \frac{k\,L}{2}\right) \nonumber\\ & \quad\times\left[ \hat{a}(k)\,\text{e}^{\text{i}\,k\,x_{0}-\text{i} \,\omega_{k}\,t}-\text{H.c.}\right] \!. \label{field Pi} \end{align} The final step is to calculate the propagators of the field and the conjugate momentum.\ We find \begin{align} D_{\hat{\Phi},L}(r) & \equiv\left\langle 0\right| \hat{\Phi}_{L} (x_{0},t)\,\hat{\Phi}_{L}(y_{0},t)\left| 0\right\rangle \label{DPhi}\\ & =\frac{1}{\pi\,L}\, {\displaystyle\int\nolimits_{-\infty}^{\infty}} \,\frac{\text{d}k}{k^{2}\,\sqrt{k^{2}+m^{2}}}\,\sin^{2}\!\left( \frac {k\,L}{2}\right) \cos(k\,r)\,,\nonumber\\ D_{\hat{\Pi},L}(r) & \equiv\left\langle 0\right| \hat{\Pi}_{L} (x_{0},t)\,\hat{\Pi}_{L}(y_{0},t)\left| 0\right\rangle \label{DPi}\\ & =\frac{1}{\pi\,L}\, {\displaystyle\int\nolimits_{-\infty}^{\infty}} \,\frac{\text{d}k\,\sqrt{k^{2}+m^{2}}}{k^{2}}\,\sin^{2}\!\left( \frac {k\,L}{2}\right) \cos(k\,r)\,,\nonumber \end{align} with $r\equiv|x_{0}-y_{0}|$ the distance between the centers of the two regions, reflecting the spatial symmetry. Thus $D_{\hat{\Phi},L}(0)$ and $D_{\hat{\Pi},L}(0)$ are the analogs of $\left\langle \!\right. \hat{Q} _{A,B}^{2}\left. \!\right\rangle $ and $\left\langle \!\right. \hat{P} _{A,B}^{2}\left. 
We believe this is due to the fact that any spatial integration immediately corresponds to an infinitely large block in the discrete harmonic chain, and that the information loss caused by the collective operators is already too large (compared to the exponentially small entanglement that does exist mathematically \cite{Rezn2003}). Nonetheless, defining collective operators as in (\ref{Phi}) and (\ref{Pi}) and using the measure (\ref{epsilon}) may reveal entanglement between spatially separated regions for other quantum field states, which is the subject of future research.
This has relevance for schemes of extracting entanglement where the probe particles normally interact with whole (periodic) groups of oscillators rather than single oscillators. The results are also relevant for the transition from the quantum to the classical domain as they suggest that entanglement between collective operators (global properties) may persist even in the limit of a large number of particles. It is obvious that our approach of collective observables can be extended to more dimensions. Furthermore, we demonstrated its potential application to scalar quantum field theory. \end{document}
\begin{document} \title{$M$-ideal properties in Orlicz-Lorentz spaces} \keywords{$M$-ideals, Orlicz-Lorentz spaces, dual norm} \subjclass[2010]{46B20, 46E30, 47B38} \author{Anna Kami\'nska} \address{Department of Mathematical Sciences, The University of Memphis, TN 38152-3240} \email{[email protected]} \author{Han Ju Lee} \address{Department of Mathematical Education, Dongguk University, Seoul, 100-715, Republic of Korea} \email{[email protected]} \author{Hyung-Joon Tag} \address{Department of Mathematical Sciences, The University of Memphis, TN 38152-3240} \email{[email protected]} \date{\today} \begin{abstract} We provide explicit formulas for the norm of bounded linear functionals on Orlicz-Lorentz function spaces $\Lambda_{\varphi,w}$ equipped with two standard Luxemburg and Orlicz norms. Any bounded linear functional is a sum of regular and singular functionals, and we show that the norm of a singular functional is the same regardless of the norm in the space, while the formulas of the norm of general functionals are different for the Luxemburg and Orlicz norm. The relationship between equivalent definitions of the modular $P_{\varphi,w}$ generating the dual space to Orlicz-Lorentz space is discussed in order to compute the norm of a bounded linear functional on $\Lambda_{\varphi,w}$ equipped with Orlicz norm. As a consequence, we show that the order-continuous subspace of Orlicz-Lorentz space equipped with the Luxemburg norm is an $M$-ideal in $\Lambda_{\varphi,w}$, while this is not true for the space with the Orlicz norm when $\varphi$ is an Orlicz $N$-function not satisfying the appropriate $\Delta_2$ condition. The analogous results on Orlicz-Lorentz sequence spaces are given. 
If $Y$ is an $M$-ideal in $X$, then each $y^* \in Y^*$ has a unique norm-preserving extension to $x^* \in X^*$~\cite{HWW}.
The K\"othe associate space of $X$, denoted by $X'$, is a subset of $L^0(I)$, where $I = [0, \gamma)$, $0 < \gamma \leq \infty$, or $I = \mathbb{N}$, consisting of all $y \in L^0(I)$ satisfying $\|y\|_{X'}=\sup\{\int_I |xy|: \|x\|_X \leq 1\} < \infty$.
The set of all regular linear functionals from $X^*$ will be denoted by $X_r^*$. In the case where $X_a=X_b$ and $X$ has the Fatou property, we have that $(X_a)^*$ is isometric to $X'$, and so $X^* = (X_a)^* \oplus (X_a)^\perp$ is isometric to $X' \oplus (X_a)^\perp$. The set $(X_a)^\perp$ is called the space of singular functionals and it coincides with those $S\in X^*$ for which $S(x) = 0$ for all $x\in X_a$. It follows that any $F\in X^*$ is represented uniquely as the sum $H+S$ where $H$ is a regular functional and $S$ a singular functional \cite{Z}. A distribution function $d_x$ of $x \in X$ is defined by $d_x(\lambda) = \mu\{t \in I : |x(t)| > \lambda\}$, $\lambda > 0$, where $\mu = m$ is the Lebesgue measure on $I = [0, \gamma)$, $0 < \gamma \leq \infty$ and the counting measure on $I = \mathbb{N}$. The decreasing rearrangement of $x$, denoted by $x^*$, is given as $x^*(t) = \inf\{\lambda > 0: d_x(\lambda) \leq t\}$, $t \in [0, \gamma)$. For a sequence $x=(x(i))$, its decreasing rearrangement $x^*$ may be identified with the sequence $ (x^*(i))$ such that $x^*(i) = \inf\{\lambda > 0: d_x(\lambda) < i\}$ for $i \in \mathbb{N}$. The functions $x, y$ are said to be equimeasurable if $d_x(\lambda) = d_y(\lambda)$ for all $\lambda > 0$, denoted by $x \sim y$. It is clear that $x$ and $x^*$ are equimeasurable. A Banach function lattice $(X, \|\cdot\|)$ is called a rearrangement invariant Banach space if $x \in X$ and $y \in L^0$ with $x \sim y$, we have $y \in X$ and $\|x\| = \|y\|$. An Orlicz function $\varphi: \mathbb{R}_{+} \rightarrow \mathbb{R}_{+}$ is a convex function such that $\varphi(0) = 0$ and $\varphi(t) > 0$ for $t >0$. It is said to be an Orlicz $N$-function when $\lim_{t \rightarrow 0}{\varphi(t)}/{t} = 0$ and $\lim_{t \rightarrow \infty} {\varphi(t)}/{t} = \infty$ \cite{Chen}. The complementary function of $\varphi$, denoted by $\varphi_*$, is defined as $\varphi_*(v) = \sup\{uv - \varphi(u): u \geq 0\}$, $v\ge 0$. 
We have that $\varphi$ is an $N$-function if and only if $\varphi_*$ is an $N$-function. Let $p$ and $q$ stand for the right derivatives of $\varphi$ and $\varphi_*$, respectively. The functions $p$ and $q$ are non-negative, right-continuous and increasing on $\mathbb{R}_+$. If $\varphi$ is an $N$-function then $p(0)= p(0+)= q(0) = q(0+)=0$ and $\lim_{t \rightarrow \infty}p(t)=\lim_{t \rightarrow \infty}q(t)= \infty$.
The spaces $\Lambda_{\varphi,w}$ and $\Lambda_{\varphi,w}^0$ are rearrangement invariant Banach spaces. Also, it is well known that $(\Lambda_{\varphi,w})_a = (\Lambda_{\varphi,w})_b = \{x \in \Lambda_{\varphi,w} : \rho_{\varphi,w}(\lambda x) < \infty \ \text{for all} \ \lambda > 0\}$ \cite{K}. In the case of sequence spaces let $w = (w(i))$ be a positive decreasing real sequence and $W(n) = \sum_{i=1}^n w(i)$ for all $n \in \mathbb{N}$ and $W(\infty)= \infty$. For a sequence $x \in \ell^0$, we define the modular $\alpha_{\varphi,w}(x) = \sum_{i=1}^{\infty} \varphi(x^*(i))w(i) $ and then the Orlicz-Lorentz sequence space $\lambda_{\varphi,w}$ is the set of all real sequences $x= (x(i))$ satisfying $\alpha_{\varphi,w}(\eta x)< \infty$ for some $\eta >0$. The Luxemburg and the Orlicz norm on $\lambda_{\varphi,w}$ are defined similarly as in the function case where the modular $\rho_{\varphi,w}$ is replaced by $\alpha_{\varphi,w}$, and $\lambda_{\varphi, w}$ denotes the Orlicz-Lorentz sequence space equipped with the Luxemburg norm, and $\lambda_{\varphi,w}^0$ with the Orlicz norm. The both norms are equivalent and the spaces are rearrangement invariant Banach spaces. We also have $(\lambda_{\varphi,w})_a = (\lambda_{\varphi,w})_b = \{x \in \lambda_{\varphi,w} : \alpha_{\varphi,w}(\eta x) < \infty \ \text{for all} \ \eta >0\}$ \cite{KR2}. An Orlicz function $\varphi$ satisfies $\Delta_2$ (resp., $\Delta_2^{\infty}$; $\Delta_2^0$) condition if there exist $K > 0$ (resp., $K>0$ and $u_0\geq 0$; $K>0$ and $u_0 >0$) such that $\varphi(2u) \leq K\varphi(u)$ for all $u \geq 0$ (resp., $u \geq u_0$; $0< u \leq u_0$). {\it Appropriate $\Delta_2$ condition} means $\Delta_2$ and $\Delta_2^\infty$ in the case of the function spaces for $\gamma = \infty$ and $\gamma<\infty$, respectively, and $\Delta_2^0$ for the sequence spaces. 
Indeed, if on the contrary $k^{**} = \infty$, then there exists a non-negative sequence $(k_n)$ such that $k_n \uparrow \infty$ and $\int_I \varphi_*(p(k_nf)^*)w \leq 1$. Hence for $t_0 = m\{f^* > 1\} < \infty$, \[ \varphi_*(p(k_n))W(t_0) = \int_0^{t_0}\varphi_*(p(k_n))w = \int_0^{m\{f^* >1\}} \varphi_*(p(k_n))w \leq \int_I \varphi_*(p(k_n f^*))w \leq 1. \] This implies that $\varphi_*(p(k_n))/p(k_n) \leq 1/(W(t_0)p(k_n))$, where the left side tends to $\infty$ since $\varphi_*$ is an $N$-function, and the right side approaches $0$ since $p(k_n) \rightarrow \infty$.
Then, \begin{enumerate} \item[$(1)$] If there exists $k>0$ such that $\rho_{\varphi_*,w} (p(kf)) = 1$, then $\|f\|^0 = \int_0^{\gamma} f^*p(kf^*) = \frac{1}{k}(1 + \rho_{\varphi,w} (kf))$. \item[$(2)$] For any $f \in \Lambda_{\varphi,w}^0$, $\|f\|^0 = \inf_{k>0} \frac{1}{k} (1 + \rho_{\varphi,w}(kf))$. \item[$(3)$] $k \in K(f)$ if and only if $\|f\|^0 = \frac{1}{k}(1 + \rho_{\varphi,w}(kf))$. \end{enumerate} The analogous statements occur in Orlicz-Lorentz sequence space when the modular $\rho_{\varphi,w}$ is replaced by the modular $\alpha_{\varphi,w}$. \end{Theorem} This article has three parts. In section 2, we compute the norm of a singular linear functional $S$ on Orlicz-Lorentz spaces. We show that $\|S\|$ is the same for both the Luxemburg norm and the Orlicz norm. In section 3, we compute the norm of a bounded linear functional on $\Lambda_{\varphi,w}$ and $\Lambda_{\varphi,w}^0$. The formulas differ dependently on the norm of the space. Furthermore, we show that $(\Lambda_{\varphi,w})_a$ is an $M$-ideal of $\Lambda_{\varphi,w}$, but $(\Lambda_{\varphi,w}^0)_a$ is not an $M$-ideal of $\Lambda_{\varphi,w}^0$ when $\varphi$ is an Orlicz $N$-function and does not satisfy the appropriate $\Delta_2$ condition. The analogous results for the sequence spaces are also given. \section{Singular linear functionals on Orlicz-Lorentz spaces} In this section, we show that the formula for $\|S\|$ is the same regardless of Luxemburg or Orlicz norm on Orlicz-Lorentz function or sequence spaces. Letting $f \in L^0$, define $\theta = \theta(f) = \inf\{\lambda > 0 : \rho_{\varphi,w}(f/\lambda) < \infty\}$. It is clear that $\theta(f) < \infty$ for any $f\in \Lambda_{\varphi,w}$. If $f\in (\Lambda_{\varphi,w})_a$, then $\rho_{\varphi,w}\left(\frac{f}{\lambda}\right) < \infty$ for all $\lambda > 0$, so we see that $\theta(f) = 0$. Clearly, $\theta(f) \le \|f\|$. The analogous definitions and facts also hold for Orlicz-Lorentz sequence spaces. 
Even though the next two results and their proofs in Orlicz-Lorentz spaces are similar to those in Orlicz spaces \cite{Chen}, we state and prove them in detail because they require slightly different techniques, mostly dealing with decreasing rearrangements. \begin{Theorem} \label{thm5} For any $f \in \Lambda_{\varphi,w}$, $\lim_n \|f - f_n\| = \lim_n\|f - f_n\|^0 = \theta(f)$, for $f_n = f \chi_{\{\frac{1}{n} \leq |f| \leq n\}}$. For any $x = (x(i)) \in \lambda_{\varphi,w}$, $\lim_n\|x-x_n\| = \lim_n \|x - x_n\|^0 = \theta(x)$ for $x_n = x \chi_{\{1,2,...,n\}}$. \end{Theorem} \begin{proof} Let first $f \in (\Lambda_{\varphi,w})_a$. Then, clearly $\theta(f) = 0$. Moreover, in view of $d_f(\lambda) < \infty$ for all $\lambda > 0$, the functions $f_n = f \chi_{\{\frac{1}{n} \leq |f| \leq n\}}$ are bounded with supports of finite measure, and $f_n \rightarrow f$ a.e. and $|f_n| \leq |f|$. Since $(\Lambda_{\varphi,w})_a = (\Lambda_{\varphi,w})_b$, from Proposition 1.3.6 in \cite{BS}, we have that $\|f - f_n\| \rightarrow 0$. Moreover, by the equivalence of $\|\cdot \|$ and $\|\cdot\|^0$, we also get $\|f - f_n\|^0 \rightarrow 0$. Now, consider $f \in \Lambda_{\varphi,w} \setminus (\Lambda_{\varphi,w})_a$ and $f_n$ as above. In this case, we have $\theta(f) > 0$. Since $d_f(\lambda) < \infty$ for all $\lambda > 0$, and $|f-f_n| \downarrow 0$ a.e., we have $(f- f_n)^* \rightarrow 0$ (\cite{KPS}, pg 68). Hence $\|f-f_n\|$ and $\|f-f_n\|^0$ are monotonically decreasing, and so the limits for both $\|f-f_n\|$ and $\|f-f_n\|^0$ exist. Letting $\epsilon \in (0, \theta)$ we have $\rho_{\varphi,w} \left(\frac{f}{\theta - \epsilon}\right) = \infty$. By the orthogonal subadditivity of $\rho_{\varphi,w}$, we have $\infty = \rho_{\varphi,w} \left(\frac{f}{\theta-\epsilon}\right) \leq \rho_{\varphi,w}\left(\frac{f_n}{\theta - \epsilon}\right) + \rho_{\varphi,w} \left(\frac{f - f_n}{\theta - \epsilon}\right)$. Clearly, the functions $f_n$ are bounded with supports of finite measure. 
This implies that $\rho_{\varphi,w}\left(\frac{f_n}{\theta - \epsilon}\right) < \infty$. Hence, we have $\|f - f_n\| \geq \theta - \epsilon$ from the fact that $\rho_{\varphi,w} \left(\frac{f - f_n}{\theta - \epsilon}\right)=\infty$. On the other hand for $\epsilon > 0$, we have $\rho_{\varphi,w} \left(\frac{f}{\theta + \epsilon}\right) < \infty$ by the definition of $\theta(f)$. Consequently, since $(f - f_n)^* \rightarrow 0$, we get $\lim_{n \rightarrow \infty} \rho_{\varphi,w} \left(\frac{f-f_n}{\theta + \epsilon}\right) = 0$ by the Lebesgue dominated convergence theorem. Hence, in view of Theorem \ref{WC}.(2), we see that \[ \|f - f_n\|^0 \leq (\theta + \epsilon) \left(1+ \rho_{\varphi,w} \left(\frac{f - f_n}{\theta + \epsilon}\right)\right) \rightarrow (\theta + \epsilon), \] as $n \rightarrow \infty$. Since $\|f\| \leq \|f\|^0$, we finally get \[ \theta - \epsilon \leq \|f - f_n\| \leq \|f - f_n\|^0 \leq \theta + \epsilon \] for sufficiently large $n$ and arbitrary $\epsilon > 0$, and the proof is complete in the function case. The proof in the sequence case is similar, so we skip it. \end{proof} Now, we compute the norm of a singular functional $S$ on Orlicz-Lorentz function spaces. \begin{Theorem}\label{theta} For any singular functional $S$ of $\Lambda_{\varphi,w}$ equipped with the Luxemburg norm or the Orlicz norm, $\|S\| = \|S\|_{(\Lambda_{\varphi,w})^*} = \|S\|_{(\Lambda_{\varphi,w}^0)^*} = \sup\{S(f) : \rho_{\varphi,w}(f) < \infty\} = \sup\{\frac{S(f)}{\theta(f)} : f \in \Lambda_{\varphi,w} \setminus (\Lambda_{\varphi,w})_a\}$. The analogous formulas hold for Orlicz-Lorentz sequence spaces. \end{Theorem} \begin{proof} Here we also provide the proof only in the function spaces. For a function $f \in \Lambda_{\varphi,w} \setminus (\Lambda_{\varphi,w})_a$, take again $f_n = f \chi_{\{\frac{1}{n} \leq |f| \leq n\}}$. 
From the fact that $f_n \in (\Lambda_{\varphi,w}^0)_a$ we have $S(f) = S(f- f_n)$ and $S(f) \leq \|S\|_{(\Lambda_{\varphi,w}^0)^*} \|f - f_n\|^0$. By Theorem \ref{thm5}, $\|f- f_n\|^0 \rightarrow \theta(f)$, and so we obtain $\frac{S(f)}{\theta(f)} \leq \|S\|_{(\Lambda_{\varphi,w}^0)^*}$. If $\rho_{\varphi,w}(f) < \infty$ then $\rho_{\varphi,w}(f - f_n) \rightarrow 0$. Thus for sufficiently large $n$, $\rho_{\varphi,w} (f - f_n) \leq 1$, and so $\|f - f_n\| \leq 1$. Hence by Theorem \ref{thm5}, $\theta(f) = \lim_{n \rightarrow \infty} \|f-f_n\| \leq 1$. Since $S(f) = 0$ for all $f \in (\Lambda_{\varphi,w})_a$, we have $\sup\left\{ S(f) : \rho_{\varphi,w}(f) < \infty\right\} = \sup\left\{ S(f): f \in \Lambda_{\varphi,w} \setminus (\Lambda_{\varphi,w})_a, \ \rho_{\varphi,w}(f) < \infty\right\}$. Notice that $S(f) \leq \frac{S(f)}{\theta(f)}$ since $\theta(f) \le 1$. Therefore, taking into account that $\|S\|_{(\Lambda_{\varphi,w}^0)^*} \leq \|S\|_{(\Lambda_{\varphi,w})^*}$ in view of the inequality $\|\cdot\| \le \|\cdot\|^0$ and that $\|f\| \leq 1$ if and only if $\rho_{\varphi,w}(f) \leq 1$, we obtain \begin{eqnarray*} \|S\|_{(\Lambda_{\varphi,w}^0)^*} \leq \|S\|_{(\Lambda_{\varphi,w})^*} &=& \sup\{S(f) : \rho_{\varphi,w}(f) \leq 1\}\\ &\leq& \sup\left\{ S(f) : \rho_{\varphi,w}(f) < \infty\right\}\\ &\leq& \sup\left\{ \frac{S(f)}{\theta(f)} : f \in \Lambda_{\varphi,w} \setminus (\Lambda_{\varphi,w})_a, \,\,\, \rho_{\varphi,w}(f) < \infty\right\}\\ &\leq& \sup\left\{ \frac{S(f)}{\theta(f)} : f \in \Lambda_{\varphi,w} \setminus (\Lambda_{\varphi,w})_a\right\}\\ &\leq& \|S\|_{(\Lambda_{\varphi,w}^0)^*}. \end{eqnarray*} \end{proof} \section{Norm of bounded linear functionals} We need to recall first the K\"othe associate space to an Orlicz-Lorentz space. For any non-negative integrable function $f\in L^0$ and $0\le a < b < \infty$, denote $F(a,b) = \int_a^b f$. Let $h \in L^0$ be non-negative and locally integrable on $I$. 
Then the interval $(a, b) \subset I$ is called a level interval of $h$ with respect to the weight $w$, if $R(a,t) := \frac{H(a,t)}{W(a,t)} \leq \frac{H(a,b)}{W(a,b)} = R(a, b)$ for all $a < t < b$ and $R(a,b) > 0$. In the case where $b = \infty$, define $R(a,b) = R(a, \infty) = \limsup_{t \rightarrow \infty}R(a, t)$. If the level interval $(a,b)$ is not contained in a larger level interval, we say that $(a,b)$ is a maximal level interval. Halperin's level function of $h$, denoted by $h^0$, is defined as \[ h^0(t) = \begin{cases} R(a_j, b_j)w(t) = \frac{H(a_j, b_j)}{W(a_j, b_j)}w(t) , & t \in (a_j, b_j) \ \ \text{for some} \ \ j, \\ h(t), & t \notin \cup_j(a_j, b_j), \end{cases} \] \noindent provided that each $(a_j, b_j)$ is a maximal level interval. Similarly, for a non-negative sequence $h= (h(i)) \in l^0$ and a positive decreasing weight $w= (w(i))$, the interval $(a, b] = \{a+1, a+2, ... , b\}$ is called a level interval if $r(a,j) = \frac{h(a,j)}{w(a,j)} \leq \frac{h(a,b)}{w(a,b)} = r(a,b)$ for every $a+1 \leq j \leq b$ and $r(a,b) >0$, where $h(a,j) = \sum_{i=a+1}^j h(i)$ and $w(a, j) = \sum_{i=a+1}^j w(i)$. The level sequence $h^0$ is defined as \[ h^0(i) = \begin{cases} r(a_j, b_j)w(i) , & i \in (a_j, b_j] \ \ \text{for some} \ \ j , \\ h(i), & i \notin \cup_j(a_j, b_j], \end{cases} \] where each $(a_j, b_j]$ is a maximal level interval. Letting $h\in L^0$ define $P_{\varphi,w}(h) = \inf\left\{\int_I \varphi\left(\frac{|h|}{v}\right)v : v \prec w\right\} $, and then the space $\mathcal{M}_{\varphi,w}$ as the set of all $h \in L^0$ such that $P_{\varphi,w}(\lambda h) < \infty$ for some $\lambda > 0$. By Theorem 4.7 in \cite{KLR} we have $P_{\varphi,w}(h) = \int_I \varphi((h^*)^0/w) w$ if $\varphi$ is $N$-function. 
The Luxemburg norm
Similarly in the sequence case we have \begin{equation*} \left(\lambda _{\varphi ,w}\right) ^{\prime }=\mathfrak{m}_{\varphi _{\ast},w}^{0}\ \ (\text{resp.} \ \ (\lambda_{\varphi,w}^0)' = \mathfrak{m}_{\varphi_*,w}) \end{equation*} with equality of norms. \end{Theorem} Let $X$ be an Orlicz-Lorentz function or sequence space equipped with either norm. Then, $X^* = X_r \oplus X_s$, where $X_r$ is isomorphically isometric to its K\"othe associate space $X'$, and $X_s = (X_a)^\perp$. \begin{Theorem}\label{th:lux} Assume $\varphi$ is $N$-function. Let $F$ be a bounded linear functional on $\Lambda_{\varphi,w}$. Then $F= H + S$, where $H(f) = \int_I fh$ for some $h\in \mathcal{M}^0_{\varphi_*,w}$, $\|H\|= \|h\|^0_{\mathcal{M}_{\varphi_*,w}}$, $S(f)=0$ for all $f\in (\Lambda_{\varphi,w})_a$, and $\|F\|_{(\Lambda_{\varphi,w})^*} = \|h\|_{\mathcal{M}_{\varphi_*, w}}^0 + \|S\|$. \end{Theorem} \begin{proof} By Theorem \ref{th:01} and the remark above, $F= H+S$ uniquely, where $H(f) = \int_I hf$ for some $h\in \mathcal{M}^0_{\varphi_*,w}$ with $\|H\|= \|h\|^0_{\mathcal{M}_{\varphi_*,w}}$, and $S(f)=0$ for all $f\in (\Lambda_{\varphi,w})_a$. Observe by Theorem \ref{theta} that the norm of the singular functional $\|S\|$ is the same under either the Luxemburg norm or the Orlicz norm. Clearly $\|F\|_{(\Lambda_{\varphi,w})^*} = \|H+S\|_{(\Lambda_{\varphi,w})^*} \leq \|H\|_{(\Lambda_{\varphi,w})^*} + \|S\| = \|h\|_{\mathcal{M}_{\varphi_*, w}}^0 + \|S\|$. Now we show the opposite inequality. Let $\epsilon >0$ be arbitrary. From the definitions of $\|h\|_{\mathcal{M}_{\varphi_*,w}}^0$ and $\|S\|$, we can choose $f, g \in \Lambda_{\varphi,w}$ with $\|f\| \leq 1, \|g\| \leq 1$ such that \begin{equation} \label{DN} \|h\|_{\mathcal{M}_{\varphi_*,w}}^0 - \epsilon < \int_I hf \,\,\, \text{and} \,\,\, \|S\| - \epsilon < S(g). \end{equation} We can assume that $f$ is bounded. 
Indeed, let $z \in S_{\Lambda_{\varphi,w}}$ be such that $\|h\|_{\mathcal{M}_{\varphi_*,w}}^0- \frac{\epsilon}{2} < \int_I |hz|$. Let $(z_n)_{n=1}^{\infty}$ be a sequence of non-negative bounded functions with supports of finite measure defined on $[0,n)$ such that $z_n \uparrow |z|$ a.e. Then, $\int_I |h| |z| = \lim_{n \rightarrow \infty} \int_I |h| z_n$ by the monotone convergence theorem, which implies that for all $\epsilon > 0$, there exists $z_{n_0}$ such that $\int_I |hz| - \frac{\epsilon}{2} \leq \int_I |h| z_{n_0}$. Hence, \[ \|h\|_{\mathcal{M}_{\varphi_*, w}}^0 - \frac{\epsilon}{2} - \frac{\epsilon}{2} \leq \int_I |hz| - \frac{\epsilon}{2} \leq \int_I |h|z_{n_0}. \] \noindent Let $f = (\sign{h})z_{n_0}$. Thus, we found a bounded function $f$ of support of finite measure such that $\|f\| \leq 1$ and $\|h\|_{\mathcal{M}_{\varphi_*, w}}^0 - \epsilon < \int_I hf$.\ Since $H$ is a bounded linear functional on $\Lambda_{\varphi,w}$, $hf$ is integrable, so there exists $\delta > 0$ such that for every measurable subset $E \subset I$, with $mE < \delta$, we have \begin{equation}\label{Ex} \int_{E} |hf| < \epsilon. \end{equation} Now, we show that there exist $n \in \mathbb{N}$ and a measurable subset $E \subset I$ such that $mE < \delta$ and \begin{equation}\label{Ey} \int_E |hg| < \epsilon,\ \ \int_0^{mE} \varphi(g^*) w < \frac{\epsilon}{2},\ \ \int_I \varphi((g\chi_{[n, \gamma)})^*)w < \frac{\epsilon}{2},\ \ \text{and} \ \ \int_n^{\gamma} |hg|< \epsilon. \end{equation} \noindent Indeed, let $E_n = \{g^* > n\} = [0, t_n)$ and define $g_n^* = g^* \chi_{[0, t_n)}$. We see that $g_n^* \leq g^*$ and $ g_n^* \downarrow 0$ a.e., so by the Lebesgue dominated convergence theorem, $\lim_{n \rightarrow \infty} \int_I \varphi(g_n^*) w = 0$. 
This implies that for any $\epsilon >0$, there exists $N_1$ such that for every $n \geq N_1$, \begin{equation} \label{Eyn} \int_I \varphi(g_n^*)w = \int_I \varphi(g^*\chi_{[0, t_n)})w = \int_0^{t_n} \varphi(g^*)w = \int_0^{mE_n} \varphi(g^*)w<\frac{\epsilon}{2}. \end{equation} \noindent Also, $E_{n+1} \subset E_n$ for all $n \in \mathbb{N}$ and $m(\cap E_n) = m\{g^* =\infty\} = 0$. By continuity of measure, $0= m(\cap E_n) = \lim_{n \rightarrow \infty} m\{g^*>n\}$.\ Since $g \sim g^*$, we see that $\lim_{n \rightarrow \infty} m\{|g| > n\} = 0$. The function $hg$ is integrable, so we have $\lim_{n \rightarrow \infty} \int_{\{|g| > n\}} |hg| = 0$. Then, there exists $N_2$ such that $\int_{\{|g| > n\}} |hg| < \epsilon$ for $n \geq N_2$. Since $\rho_{\varphi,w}(g) < \infty$, we choose sufficiently large $n \geq N = \max\{N_1, N_2\}$ satisfying $mE_n = m \{|g|>n\} < \delta$, $\supp{f} \cap [n, \gamma) = \emptyset$, $\int_I \varphi((g\chi_{[n, \gamma)})^*)w < \frac{\epsilon}{2}$, and $\int_{[n,\gamma)} |hg|< \epsilon$. By letting $E = \{|g|>n\}$ for such $n$, we found $n \in \mathbb{N}$ and a measureable subset $E \subset I$ satisfying (\ref{Ey}). Note that $\supp{f} \subset [0, n)$ from the construction. Define \[u(t) = \begin{cases} f(t), & t \in G_1 = \supp{f} \setminus E\\ g(t), & t \in G_2 = E \cup [n, \gamma)\\ 0, & \text{otherwise}. 
\end{cases} \] \noindent By the orthogonal subadditivity of the modular $\rho_{\varphi, w}$, we have \begin{eqnarray*} \rho_{\varphi,w}(u) = \int_I \varphi(f \chi_{G_1} + g \chi_{G_2})^*w &\leq& \int_I \varphi((f\chi_{G_1})^*)w + \int_I \varphi((g\chi_ {G_2})^*)w\\ &\leq& \int_0^{mG_1} \varphi(f^*)w + \int_I \varphi(g\chi_E + g\chi_{[n, \gamma) \setminus E})^* w \\ &\leq& \int_0^{mG_1} \varphi(f^*)w + \int_I \varphi(g\chi_E)^* w + \int_I \varphi(g\chi_{[n, \gamma)\setminus E})^* w\\ &\leq& \int_0^{mG_1} \varphi(f^*)w + \int_0^{mE} \varphi(g^*) w + \int_I \varphi(g\chi_{[n, \gamma)})^* w\\ &\leq& 1 + \epsilon, \end{eqnarray*} \noindent which implies that $\rho_{\varphi,w}(\frac{u}{1+\epsilon}) \leq 1$, and so $\|\frac{u}{1+\epsilon}\| \leq 1$. We see that $S(f\chi_{G_1}) = 0$ from $f\in (\Lambda_{\varphi,w})_a$. Also, $g \chi_{G_1} \in (\Lambda_{\varphi,w})_a$ because $mG_1 = m(\supp{f} \setminus E) \leq m([0,n) \setminus E) < \infty$ and $g$ is bounded on $G_1$. This implies that $S(g \chi_{G_1}) = 0$. Hence, $S(g) = S(g \chi_{G_1}) + S(g \chi_{G_2}) = S(g \chi_{G_2})$. Moreover, from (\ref{Ey}), we have $\left|\int_{E \setminus [n, \gamma)} hg\right| \leq \int_{E \setminus [n, \gamma)} |hg| \leq \int_E |hg| < \epsilon$. 
It follows that \begin{eqnarray*} (1+ \epsilon)\|F\| \geq (1+ \epsilon) F\left(\frac{u}{1+\epsilon}\right) = F(u) &=& F(f \chi_{G_1} + g \chi_{G_2})\\ &=& \int_I h(f \chi_{G_1}+ g\chi_{G_2}) + S((f \chi_{G_1} + g\chi_{G_2})) \\ &=& \int_I hf \chi_{G_1} + \int_I hg\chi_ {G_2} +S(f \chi_{G_1}) + S(g\chi_{G_2}) \\ &=& \int_{\supp{f} \setminus E} hf +\int_{E \cup [n, \gamma)} hg + S(g\chi_{G_2})\\ &=& \int_I hf -\int_E hf +\int_{E \setminus [n, \gamma)} hg + \int_{[n, \gamma)} hg + S(g)\\ &>& \|h\|_{\mathcal{M}_{\varphi_*,w}}^0 - 2\epsilon - 2\epsilon + S(g)\,\,\, (\text{by (\ref{Ex}) and (\ref{Ey})})\\ &>& \|h\|_{\mathcal{M}_{\varphi_*, w}}^0 -2\epsilon -2\epsilon + \|S\| - \epsilon \,\,\, (\text{by (\ref{DN})})\\ &=& \|h\|_{\mathcal{M}_{\varphi_*, w}}^0 + \|S\| - 5\epsilon. \end{eqnarray*} \noindent As $\epsilon \rightarrow 0$, the proof is done. \end{proof} The sequence version below has analogous (simpler) proof so we skip it. \begin{Theorem}\label{th:luxseq} Suppose $\varphi$ is $N$-function and let $F$ be a bounded linear functional on $\lambda_{\varphi,w}$. Then $F= H+S$, where $H(x) = \sum_{i=1}^{\infty} x(i) y(i)$, $\|H\| = \| y\|^0_{\mathfrak{m}_{\varphi_*,w}}$, $S$ is a singular functional vanishing on $(\lambda_{\varphi,w})_a$ and $\|F\|_{(\lambda_{\varphi,w})^*} = \|y\|^0_{\mathfrak{m}_{\varphi_*,w}} + \|S\|$. \end{Theorem} As a consequence of Theorems \ref{th:lux} and \ref{th:luxseq} we obtain the following result. \begin{Corollary}\label{cor:luxideal} If $\varphi$ does not satisfy the appropriate $\Delta_2$ condition then the order-continuous subspaces $(\Lambda_{\varphi,w})_a$ and $(\lambda_{\varphi,w})_a $ are non-trivial $M$-ideals of $\Lambda_{\varphi,w}$ and $\lambda_{\varphi,w}$, respectively. 
\end{Corollary} Recall \cite{KR, KLR} that for an Orlicz $N$-function $\varphi$ and $h \in L^0$ we have \begin{equation}\label{form:1} P_{\varphi,w}(h) = \inf\left\{\int_I \varphi\left(\frac{h^*}{v}\right)v : v \prec w, v\downarrow\right\}=\int_I \varphi \left(\frac{(h^*)^0}{w}\right) w, \end{equation} and that similar formula holds true for any sequence $x\in\ell^0$ \cite{KLR}. Hence, we have \[ p_{\varphi,w}(h) = \sum_{i=1}^{\infty} \varphi\left(\frac{(h^*)^0(i)}{w(i)}\right)w(i). \] Consider the decreasing simple function $h^* = \sum_{i=1}^n a_i \chi_{(t_{i-1}, t_i)}$ where $ a_1 > a_2 > \cdots > a_n >0$ and $t_0 = 0$. Let $H^*(a,b) = \int_a^b h^*$. By Algorithm A provided in \cite{KLR}, the maximal level intervals of $h^*$ are of the form $(t_{i_j}, t_{i_{j+1}})$ where $(t_{i_j})_{j=0}^{l-1}$ is a subsequence of $(t_i)_{i=1}^n$ with $0= t_0 = t_{i_0}< t_{i_1} < ... < t_{i_{l}} = t_n < \infty$. Then, we have \begin{equation}\label{level} \frac{(h^*)^0}{w} = \frac{\sum_{j=0}^{l-1}R(t_{i_j}, t_{i_{j+1}})w\chi_{(t_{i_j}, t_{i_{j+1}})}}{w} = \sum_{j=0}^{l-1} R(t_{i_j}, t_{i_{j+1}}) \chi_{(t_{i_j}, t_{i_{j+1}})} =\sum_{j=0}^{l-1} \frac{H^*(t_{i_j}, t_{i_{j+1}})}{W(t_{i_j}, t_{i_{j+1}})}\chi_{(t_{i_j}, t_{i_{j+1}})} . \end{equation} Observe that the sequence $(R(t_{i_j}, t_{i_{j+1}}))_{j=0}^{l-1}$ is decreasing since $\frac{(h^*)^0}{w}$ is decreasing (\cite{Hal}, Theorem 3.6). Furthermore, we obtain \[ P_{\varphi,w}(h) = \int_I \varphi \left(\sum_{j=0}^{l-1} \frac{H^*(t_{i_j}, t_{i_{j+1}})}{W(t_{i_j}, t_{i_{j+1}})}\chi_{(t_{i_j}, t_{i_{j+1}})}\right) w = \sum_{j=0}^{l-1} \varphi \left(\frac{H^*(t_{i_j}, t_{i_{j+1}})}{W(t_{i_j}, t_{i_{j+1}})} \right) \cdot W(t_{i_j}, t_{i_{j+1}}). \] The next lemma is a key ingredient for computation of the norm of a bounded linear functional on $\Lambda_{\varphi,w}^0$ or $\lambda^0_{\varphi,w}$. \begin{Lemma}\label{lem3} Let $h \in L^0$ be a non-negative simple function with support of finite measure. 
Then, there exists a non-negative simple function $v$ such that \begin{equation*} P_{\varphi_*, w}(h) = \int_I \varphi_* \left(\frac{h}{v}\right)v \,\,\, \text{and} \,\,\, \int_I \varphi\left(q\left(\frac{h}{v}\right)\right) v = \int_I \varphi\left(q\left(\frac{h}{v}\right)^*\right) w. \end{equation*} The similar formula holds for modular $p_{\varphi_*, w} (x)$ for any $x\in \ell^0$. \end{Lemma} \begin{proof} Let $h = \sum_{i=1}^n a_i \chi_{A_i}$ with $ a_1 > a_2 > \cdots > a_n >0$ and $\{A_i\}_{i=1}^n$ be a family of disjoint measurable subsets of $I$ with finite measure. Since $h$ and $h^*$ are equimeasurable, we see that $mA_i = t_i- t_{i-1}$ for $i=1,\dots,n$. It is well known by \cite{Hal} and \cite{KLR} that each $(t_{i-1}, t_i)$ is a level interval of $h^*$, contained in at most one maximal level interval $(t_{i_j}, t_{i_{j+1}})$ for some $0 \leq j \leq l-1$ \cite{KLR}. So, for every $j$, we can see \[ m(t_{i_j}, t_{i_{j+1}}) = m(\cup_{i_j < i \leq i_{j+1}}(t_{i-1}, t_i)) = m(\cup_{i_j < i \leq i_{j+1}}A_i), \] \noindent and this implies \begin{equation}\label{tc} H^*(t_{i_j}, t_{i_{j+1}}) = \int_{t_{i_j}}^{t_{i_{j+1}}} h^* = \sum_{i = i_j+1}^{i_{j+1}} \int_{t_{i-1}}^{t_i} a_i = \sum_{i = i_j+1}^{i_{j+1}} a_i(t_i - t_{i-1}) = \sum_{i_j < i \leq i_{j+1}} a_i mA_i. \end{equation} \noindent By (\ref{level}), we have \begin{equation*} \frac{(h^*)^0}{w} =\sum_{j=0}^{l-1} \sum_{i_j < i \leq i_{j+1}} \frac{H^*(t_{i_j}, t_{i_{j+1}})}{W(t_{i_j}, t_{i_{j+1}})} \chi_{(t_{i-1}, t_i)} = \left(\sum_{j=0}^{l-1} \sum_{i_j < i \leq i_{j+1}} \frac{H^*(t_{i_j}, t_{i_{j+1}})}{W(t_{i_j}, t_{i_{j+1}})} \chi_{A_i}\right)^*. \end{equation*} \noindent Hence, by right-continuity of $q$, we also have $q\left(\frac{(h^*)^0}{w}\right) = q\left(\sum_{j=0}^{l-1} \sum_{i_j < i \leq i_{j+1}} \frac{H^*(t_{i_j}, t_{i_{j+1}})}{W(t_{i_j}, t_{i_{j+1}})} \chi_{A_i} \right)^*$. 
Let $v= \sum_{j=0}^{l-1} \sum_{i_j < i \leq i_{j+1}} \frac{W(t_{i_j}, t_{i_{j+1}})}{H^*(t_{i_j}, t_{i_{j+1}})} a_i \chi_{A_i}$. Then, $q\left(\frac{(h^*)^0}{w}\right) = q\left(\frac{h}{v}\right)^*$. The functions $h$ and $v$ have the same supports, so the quotient $h/v$ is set to be zero outside of the supports of $h$ and $v$. Now, we compute $\int_I \varphi_* \left(\frac{h}{v}\right)v$ and $\int_I \varphi\left(q\left(\frac{h}{v}\right)\right) v$. \begin{eqnarray*} \int_I \varphi_* \left(\frac{h}{v}\right)v &=& \int_I \varphi_* \left(\frac{\sum_{i=1}^n a_i \chi_{A_i}}{ \sum_{j=0}^{l-1} \sum_{i_j < i \leq i_{j+1}} \frac{W(t_{i_j}, t_{i_{j+1}})}{H^*(t_{i_j}, t_{i_{j+1}})} a_i \chi_{A_i}}\right) \cdot \sum_{j=0}^{l-1} \sum_{i_j < i \leq i_{j+1}} \frac{W(t_{i_j}, t_{i_{j+1}})}{H^*(t_{i_j}, t_{i_{j+1}})} a_i \chi_{A_i}\\ &=& \sum_{j=0}^{l-1} \sum_{i_j < i \leq i_{j+1}} \int_I \varphi_* \left(\frac{H^*(t_{i_j}, t_{i_{j+1}})}{W(t_{i_j}, t_{i_{j+1}})} \right) \cdot \frac{W(t_{i_j}, t_{i_{j+1}})}{H^*(t_{i_j}, t_{i_{j+1}})} a_i \chi_{A_i}\\ &=& \sum_{j=0}^{l-1} \varphi_* \left(\frac{H^*(t_{i_j}, t_{i_{j+1}})}{W(t_{i_j}, t_{i_{j+1}})} \right) \cdot \frac{W(t_{i_j}, t_{i_{j+1}})}{H^*(t_{i_j}, t_{i_{j+1}})} \sum_{i_j < i \leq i_{j+1}} a_i \cdot mA_i\\ &=& \sum_{j=0}^{l-1} \varphi_* \left(\frac{H^*(t_{i_j}, t_{i_{j+1}})}{W(t_{i_j}, t_{i_{j+1}})} \right) \cdot W(t_{i_j}, t_{i_{j+1}})\,\,\, \text{(by (\ref{tc}))}\\ &=& P_{\varphi_*,w}(h). 
\end{eqnarray*} \noindent and \begin{eqnarray*} \int_I \varphi\left(q\left(\frac{h}{v}\right)\right)v &=& \int_I \varphi\left(q\left(\frac{\sum_{i=1}^n a_i \chi_{A_i}}{ \sum_{j=0}^{l-1} \sum_{i_j < i \leq i_{j+1}} \frac{W(t_{i_j}, t_{i_{j+1}})}{H^*(t_{i_j}, t_{i_{j+1}})} a_i \chi_{A_i}}\right)\right) \cdot \sum_{j=0}^{l-1} \sum_{i_j < i \leq i_{j+1}} \frac{W(t_{i_j}, t_{i_{j+1}})}{H^*(t_{i_j}, t_{i_{j+1}})} a_i \chi_{A_i}\\ &=& \sum_{j=0}^{l-1} \sum_{i_j < i \leq i_{j+1}} \int_I \varphi \left(q\left(\frac{H^*(t_{i_j}, t_{i_{j+1}})}{W(t_{i_j}, t_{i_{j+1}})} \right)\right) \cdot \frac{W(t_{i_j}, t_{i_{j+1}})}{H^*(t_{i_j}, t_{i_{j+1}})} a_i \chi_{A_i}\\ &=& \sum_{j=0}^{l-1} \varphi\left(q\left(\frac{H^*(t_{i_j}, t_{i_{j+1}})}{W(t_{i_j}, t_{i_{j+1}})} \right)\right) \cdot \frac{W(t_{i_j}, t_{i_{j+1}})}{H^*(t_{i_j}, t_{i_{j+1}})} \sum_{i_j < i \leq i_{j+1}} a_i \cdot mA_i\\ &=& \sum_{j=0}^{l-1} \int_I \varphi\left(q\left(\frac{H^*(t_{i_j}, t_{i_{j+1}})}{W(t_{i_j}, t_{i_{j+1}})} \right)\right) \cdot w\chi_{(t_{i_j}, t_{i_{j+1}})} \,\,\, (\text{by (\ref{tc})}) \\ &=& \int_I \varphi\left(q\left(\sum_{j=0}^{l-1} \frac{H^*(t_{i_j}, t_{i_{j+1}})}{W(t_{i_j}, t_{i_{j+1}})}\chi_{(t_{i_j}, t_{i_{j+1}})}\right)\right) w\\ &=& \int_I \varphi \left(q \left(\frac{(h^*)^0}{w}\right) \right) w = \int_I \varphi \left(q\left(\frac{h}{v}\right)^*\right)w. \end{eqnarray*} \end{proof} Now, we are ready to compute the norm of a bounded linear functional in $\Lambda_{\varphi,w}^0$. \begin{Theorem}\label{Orlicz} Let $\varphi$ be an Orlicz $N$-function and $F$ be a bounded linear functional on $\Lambda^0_{\varphi,w}$. Then $F= H + S$, where $H(f) = \int_I fh$ for some $h\in \mathcal{M}_{\varphi_*,w}$, $\|H\|= \|h\|_{\mathcal{M}_{\varphi_*,w}}$, $S(f)=0$ for all $f\in (\Lambda_{\varphi,w})_a$, and $\|F\| = \inf\{\lambda>0 : P_{\varphi_*,w}(\frac{h}{\lambda}) + \frac{1}{\lambda}\|S\| \leq 1\}$. 
\end{Theorem} \begin{proof} Similarly as in Theorem \ref{th:lux}, we have $F = H + S$, where $H(f) = \int_I hf$ for some $h \in \mathcal{M}_{\varphi_*,w}$ with $\|H\| = \|h\|_{\mathcal{M}_{\varphi_*,w}}$ and $S(f) = 0$ for all $f \in (\Lambda_{\varphi,w}^0)_a$ in view of Theorem \ref{th:01}. Thus, we only need to show the formula for $\|F\|$. Without loss of generality, assume $\|F\| = 1$. Let $f \in S_{\Lambda_{\varphi,w}^0}$. Since $h \in \mathcal{M}_{\varphi_*,w}$, we have $P_{\varphi_*,w}(\frac{h}{\lambda}) < 1$ for some $\lambda>0$. So, we can choose $\lambda>0$ such that $P_{\varphi_*,w}(\frac{h}{\lambda})+ \frac{1}{\lambda} \|S\| \leq 1$. Let $k \in K(f)$. By Theorem \ref{WC}.(3), $1 = \|f\|^0 = \frac{1}{k}(1 + \rho_{\varphi,w}(kf))$, and this implies that $\rho_{\varphi,w}(kf) < \infty$. For every $v \prec w,\, v \downarrow$, we have \[ \frac{1}{\lambda}F(kf) = \frac{1}{\lambda}\left(\int_I khf + S(kf)\right) \leq \frac{1}{\lambda} \left(\int_I kh^*f^* + S(kf)\right) = \int_I \frac{kh^*f^*v}{\lambda v} + \frac{1}{\lambda} S(kf). \] \noindent By Young's inequality, we see that $\int_I \frac{kh^*f^*v}{\lambda v} + \frac{1}{\lambda} S(kf) \leq \int_I \varphi(kf^*)v + \int_I \varphi_*\left(\frac{h^*}{\lambda v}\right)v + \frac{1}{\lambda}S(kf)$. Since by (\ref{form:1}) this is for all $v \prec w$, $v\downarrow$, and by Hardy's lemma (\cite{BS}, Proposition 3.6), we get \[ \frac{1}{\lambda}F(kf) \leq \int_I \varphi(kf^*)v + \int_I \varphi_*\left(\frac{h^*}{\lambda v}\right)v + \frac{1}{\lambda}S(kf) \leq\rho_{\varphi,w}(kf) + P_{\varphi_*,w}\left(\frac{h}{\lambda}\right) + \frac{1}{\lambda}S(kf)). \] \noindent Furthermore, $S(kf) \leq \|S\|$ because $\rho_{\varphi,w}(kf) < \infty$. 
Hence, \[ \frac{1}{\lambda}F(kf) \leq \rho_{\varphi,w}(kf) + P_{\varphi_*,w}\left(\frac{h}{\lambda}\right) + \frac{1}{\lambda}\|S\| \leq 1+ \rho_{\varphi,w}(kf), \] \noindent which implies that $F(f) \leq \lambda \cdot \frac{1}{k}(1 + \rho_{\varphi,w}(kf)) \leq \lambda \|f\|^0=\lambda$. Since $f$ and $\lambda$ are arbitrary, we showed that $\|F\| \leq \inf\{\lambda>0 : P_{\varphi_*,w}(\frac{h}{\lambda}) + \frac{1}{\lambda} \|S\| \leq 1 \}$. Now, suppose that \[ 1= \|F\| < \inf\{\lambda>0 : P_{\varphi_*,w}\left(\frac{h}{\lambda}\right) + \frac{1}{\lambda}\|S\| \leq 1\}. \] \noindent Then, there exists $\delta>0$ such that \[ P_{\varphi_*,w}(h) + \|S\| > 1 + 3\delta. \] \noindent From Theorem \ref{theta}, $\|S\| = \sup\{S(f): \rho_{\varphi,w}(f) < \infty\}$. So, there exists $f \in \Lambda_{\varphi,w}^0$ such that $\rho_{\varphi,w}(f) < \infty$ and $\|S\| < S(f) + \delta$. This implies that \[ P_{\varphi_*,w}(h) + S(f)+ \delta >P_{\varphi_*,w}(h) + \|S\| > 1 + 3\delta, \] \noindent and so \[ P_{\varphi_*,w}(h) + S(f) > 1 + 2\delta. \] Without loss of generality, let $h \geq 0$. Let $(h_n)_{n=1}^{\infty}$ be a sequence of simple functions with support of finite measure such that $h_n \uparrow h$. By Lemma 4.6 in \cite{KR}, we get $P_{\varphi_*,w}(h_n) \uparrow P_{\varphi_*,w}(h)$. Hence, there exists a non-negative simple function $h_0$ with $m(\supp{h_0}) < \infty$ such that $0\le h_0 \le h$ a.e. and \[ P_{\varphi_*,w}(h) < P_{\varphi_*,w}(h_0) + \delta. \] \noindent This implies that \[ P_{\varphi_*,w}(h_0) + S(f) > P_{\varphi_*,w}(h) + S(f) - \delta > 1 + 2\delta - \delta = 1 + \delta. \] Now, consider a function $f_n = f \chi_{\{\frac{1}{n}\leq |f| \leq n\}}$. The function $|f-f_n| \downarrow 0$ a.e. Hence, we have $(f- f_n)^* \rightarrow 0$, and so $\rho_{\varphi,w}(f-f_n) \downarrow 0$ by the Lebesgue dominated convergence theorem. 
Since $H$ is a bounded linear functional on $\Lambda_{\varphi,w}^0$, we have $\int_I |f-f_n| h \le \int_I |f| h < \|H\|\|f\|^0< \infty$, and so $\int_I |f-f_n| h \to 0$. For $\delta > 0$, there exists $N_0$ such that for $n\ge N_0$, we have \[ \rho_{\varphi,w}(f-f_n) \le 1 \ \ \ \text{and} \ \ \ \int_I |f-f_n| h < \frac{\delta}{8}. \] \noindent Let $g_1 = f-f_n$ for some $n\ge N_0$. The function $f_n$ is bounded with support of finite measure since $\supp{f_n} \subset \{|f| > \frac{1}{n}\}$ and $m\{|f| > \frac{1}{n}\} < \infty$. Thus, we have $S(f) = S(g_1) + S(f_n) = S(g_1)$ and \begin{equation}\label{eq1} \rho_{\varphi,w}(g_1) \leq 1, \,\,\, \int_I |g_1| h < \frac{\delta}{8}, \ \ \text{and} \,\,\, P_{\varphi_*,w}(h_0) + S(g_1) > 1 + \delta. \end{equation} Let $v$ be the non-negative simple function constructed in Lemma \ref{lem3} for $h_0$. By Young's equality, we obtain \[ \int_I q\left(\frac{h_0}{v}\right)h_0 = \int_I q\left(\frac{h_0}{v}\right)\frac{h_0}{v} v = \int_I \varphi\left(q\left(\frac{h_0}{v}\right)\right)v + \int_I \varphi_*\left(\frac{h_0}{v}\right)v = \int_I \varphi\left(q\left(\frac{h_0}{v}\right)\right)v + \int_I \varphi_*\left(\frac{h_0}{v}\right)v. \] \noindent Let $g_2 = q\left(\frac{h_0}{v}\right)$. It is a simple function with support of finite measure, so $g_2 \in (\Lambda_{\varphi,w}^0)_a$. In view of Lemma \ref{lem3}, we get \begin{equation}\label{eq2} P_{\varphi_*,w}(h_0) = \int_I \varphi_*\left(\frac{h_0}{v}\right)v = \int_I q\left(\frac{h_0}{v}\right)h_0 - \int_I \varphi\left(q\left(\frac{h_0}{v}\right)\right)v = \int_I g_2 h_0 - \int_I \varphi(g_2^*)w. \end{equation} The function $g_2h$ is integrable. So, there exists $\eta>0$ such that for any measurable subset $E \subset I$ with $mE < \eta$, we have $\int_E |g_2h| < \frac{\delta}{2}$. 
We will now show that for $\delta>0$, there exist $n \in \mathbb{N}$ and $E \subset I$ such that $mE <\eta$, \begin{equation}\label{eq3} \int_0^{mE} \varphi (g_1^*) w < \frac{\delta}{4}, \ \ \int_E |g_2 h| < \frac{\delta}{2}, \ \ \text{and} \ \ \rho_{\varphi,w}(g_1 \chi_{[n, \gamma)}) = \int_I\varphi((g_1 \chi_{[n, \gamma)})^*)w < \frac{\delta}{8}. \end{equation} \noindent Let $E_n=\{g_1^*> n\}= [0, t_n)$. We see that $g_1^* \chi_{E_n} \leq g_1^*$ for all $n$ and $g_1^* \chi_{E_n} \rightarrow 0$ a.e. By the Lebesgue dominated convergence theorem, for $\delta > 0$, there exists $N_1$ such that for all $n \geq N_1$, \[ \int_I \varphi(g_1^*\chi_{E_n})w = \int_0^{mE_n} \varphi(g_1^*)w< \frac{\delta}{4}. \] \noindent Since $g_1$ and $g_1^*$ are equimeasurable, we have $m\{|g_1|>n\} = m\{g_1^* > n\} = mE_n$ for all $n$. Choose $n > N_1$ such that $mE_n < \eta$, $\supp{h_0} \cap [n, \gamma) = \emptyset$, and $\rho_{\varphi,w}(g_1 \chi_{[n, \gamma)}) = \int_I\varphi((g_1 \chi_{[n, \gamma)})^*)w < \frac{\delta}{8}$. Finally, by letting $\{|g_1|>n\} = E$ for such $n$, we obtain $n \in \mathbb{N}$ and a measurable subset $E \subset I$ satisfying (\ref{eq3}). Note that $\supp{h_0} \subset [0, n)$. Now, we define \[\bar{u}(t) = \begin{cases} g_2(t), & t \in A_1 = \supp{h_0} \setminus E\\ g_1(t),& t \in A_2 = E \cup [n, \gamma) \\ 0, & \text{Otherwise}. \end{cases} \] \noindent The function $g_1$ is bounded on the set $A_2^c$. Moreover, $A_2^c$ is a subset of $[0,n)$. So, $g_1\chi_{A_2^c} \in (\Lambda_{\varphi,w}^0)_a$, and this implies that $S(g_1) = S(g_1 \chi_{A_2})$. Since $g_2$ is a simple function with support of finite measure, $S(g_2\chi_{A_1}) = 0$. 
By orthogonal subadditivity of $\rho_{\varphi,w}$, we get \[ \rho_{\varphi,w}(\bar{u}) \leq \rho_{\varphi,w}(g_2 \chi_{A_1}) + \rho_{\varphi,w}(g_1 \chi_{A_2}) \leq \rho_{\varphi,w}(g_2 \chi_{A_1}) + \rho_{\varphi,w}(g_1 \chi_E) + \rho_{\varphi,w}(g_1\chi_{[n, \gamma)}), \] \noindent and by (\ref{eq3}), we have \[ \rho_{\varphi,w}(\bar{u}) < \rho_{\varphi,w}(g_2 \chi_{A_1}) + \rho_{\varphi,w}(g_1 \chi_E) + \frac{\delta}{8}. \] \noindent Hence, we see that \begin{equation}\label{eq6} \int_I \bar{u}h + S(\bar{u}) - \rho_{\varphi,w}(\bar{u}) \geq \int_{A_1} g_2 h + \int_{A_2} g_1 h + S(g_1) - \rho_{\varphi, w} (g_2\chi_{A_1}) - \rho_{\varphi,w}(g_1 \chi_E) - \frac{\delta}{8}. \end{equation} \noindent Since $g_2 \ge 0$ and $h \ge h_0 \ge 0$, we have \[ \int_{A_1} g_2 h\ge \int_{A_1} g_2 h_0 = \int_{I \setminus E} g_2 h_0. \] \noindent Also, in view of (\ref{eq1}) and (\ref{eq3}), we see that \[ \int_{A_2} |g_1 h| < \int_I |g_1 h| < \frac{\delta}{8} \ \ \ \text{and} \ \ \ \int_E g_2h_0 \leq \int_E g_2h < \frac{\delta}{2}. \] \noindent Then, the inequality (\ref{eq6}) becomes \[ \int_I h\bar{u} + S(\bar{u}) - \rho_{\varphi,w}(\bar{u}) \ge \int_{I \setminus E} g_2 h_0 -\frac{\delta}{4} + S(g_1) - \rho_{\varphi, w} (g_2\chi_{A_1}) - \rho_{\varphi,w}(g_1\chi_{E}). \] \noindent Hence, we obtain \begin{eqnarray*} \int_I \bar{u}h + S(\bar{u}) - \rho_{\varphi,w}(\bar{u}) &\geq& \int_{I \setminus E} g_2 h_0 -\frac{\delta}{4} + S(g_1) - \rho_{\varphi, w} (g_2\chi_{A_1}) - \rho_{\varphi,w}(g_1\chi_{E}) \\ &\geq& \int_I g_2 h_0 - \int_E g_2 h_0 - \frac{\delta}{4} + S(g_1) - \rho_{\varphi, w} (g_2) - \rho_{\varphi,w}(g_1 \chi_{E})\\ &\geq& \int_I g_2 h_0 - \int_E g_2 h_0 - \frac{\delta}{4}+ S(g_1) - \rho_{\varphi, w} (g_2) - \frac{\delta}{4}\,\,\, \text{by (\ref{eq3})}\\ &=& P_{\varphi_*,w}(h_0) -\int_E g_2h_0 + S(g_1) - \frac{\delta}{2} \,\,\, \text{by (\ref{eq2})}\\ &\geq& P_{\varphi_*,w}(h_0)- \frac{\delta}{2} + S(g_1) - \frac{\delta}{2} \\ &>& 1+ \delta - \delta = 1. 
\,\,\, \text{by (\ref{eq1})} \end{eqnarray*} \noindent Finally, this implies that \[ 1 = \|F\| \geq F\left(\frac{\bar{u}}{\|\bar{u}\|^0}\right) = \frac{H(\bar{u}) + S(\bar{u})}{\|\bar{u}\|^0} = \frac{\int_I \bar{u}h+ S(\bar{u})}{\|\bar{u}\|^0}> \frac{1 + \rho_{\varphi,w}(\bar{u})}{\|\bar{u}\|^0} > 1, \] \noindent which leads to a contradiction. \end{proof} Next result is the sequence analogue of the formula for the norm of a bounded linear functional on $\lambda^0_{\varphi,w}$. \begin{Theorem}\label{Orliczseq} If $\varphi$ is an Orlicz $N$-function and $F$ is a bounded linear functional on $\lambda^0_{\varphi,w}$ then $F = H+S$, where $H(x) = \sum_{i=1}^{\infty} x(i) y(i)$, $ \|H\| = \| y\|_{\mathfrak{m}_{\varphi_*,w}}$, $S$ is a singular functional vanishing on $(\lambda_{\varphi,w})_a$ and $\|F\| = \inf\{\eta>0 : p_{\varphi_*,w}(\frac{h}{\eta}) + \frac{1}{\eta}\|S\| \leq 1\}$. \end{Theorem} Contrary to Corollary \ref{cor:luxideal} about $M$-ideals in the Orlicz-Lorentz spaces equipped with the Luxemburg norm, we conclude this paper by showing that $(\Lambda_{\varphi,w}^0)_a$ and $(\lambda_{\varphi,w}^0)_a$ are not $M$-ideals in $\Lambda_{\varphi,w}^0$ and $\lambda_{\varphi,w}^0$ respectively, if the Orlicz $N$-function $\varphi$ does not satisfy the appropriate $\Delta_2$ condition. \begin{Corollary} Let $\varphi$ be an Orlicz $N$-function which does not satisfy the appropriate $\Delta_2$ condition. Then the order-continuous subspaces $(\Lambda_{\varphi,w}^0)_a$ or $(\lambda_{\varphi,w}^0)_a$ are not $M$-ideals in $\Lambda_{\varphi,w}^0$ or $\lambda_{\varphi,w}^0$, respectively. \end{Corollary} \begin{proof} We give a proof only in the case of function spaces. Let $\varphi$ be an Orlicz $N$-function, which does not satisfy the appropriate $\Delta_2$ condition. Then $(\Lambda_{\varphi,w}^0)_a$ is a proper subspace of $\Lambda_{\varphi,w}^0$, and in view of Theorem \ref{Orlicz} there exists $S \in (\Lambda_{\varphi,w}^0)^*$ such that $S \neq 0$. 
So, choose a singular functional $S \in (\Lambda_{\varphi,w}^0)^*$ such that $0 < \|S\| < 1$. We show that there exist $u>0$ and $0 < t_0 < \gamma$ such that $h = uw\chi_{(0,t_0)}$ and $\|h\|_{\mathcal{M}_{\varphi_*,w}} + \|S\| =1$. Indeed, choose $u$ satisfying $\varphi_*(u) > 1/W(\gamma)$, where $1/W(\infty) = 0$. Then $\frac{1}{\varphi_*(u/(1-\|S\|))} < W(\gamma)$. Since $W$ is continuous on $(0, \gamma)$, there exists $0 < t_0 < \gamma$ such that $W(t_0) = \frac{1}{\varphi_*(u/(1-\|S\|))}$. Let $h = uw\chi_{(0,t_0)}$ for such $u$ and $t_0$. Clearly $h$ is a decreasing function. Furthermore, the interval $(0, t_0)$ is its maximal level interval since $R(0, t) = \frac{uW(t)}{W(t)} = \frac{uW(t_0)}{W(t_0)} = R(0,t_0) = u$ for all $0 < t < t_0$, and $R(0,t_0) < R(0,t)$ for $\gamma > t > t_0$. Hence $\frac{h^0}{w} = u \chi_{(0, t_0)}$, and so $P_{\varphi_*,w}(h) = \int_I \varphi_*\left(\frac{h^0}{w}\right)w = \varphi_*(u)W(t_0)$. It follows that \begin{eqnarray*} \|h\|_{\mathcal{M}_{\varphi_*,w}} &=& \inf\left\{\epsilon > 0 : P_{\varphi_*,w}\left(\frac{h}{\epsilon}\right) \leq 1\right\} = \inf\left\{\epsilon > 0 : \varphi_* \left(\frac{u}{\epsilon}\right) \leq \frac{1}{W(t_0)}\right\}\\ &=& \inf\left\{\epsilon > 0 : \varphi_*\left(\frac{u}{\epsilon}\right) \leq \varphi_*\left(\frac{u}{1-\|S\|}\right)\right\} = \inf\{\epsilon > 0 : \epsilon \geq 1 - \|S\|\} = 1 - \|S\|. \end{eqnarray*} Thus, we have $\|h\|_{\mathcal{M}_{\varphi_*,w}} + \|S\| = 1$, which implies that $P_{\varphi_*,w} (\frac{h}{1 - \|S\|}) \leq 1$. Now since $\varphi$ is an $N$-function, $\varphi_*$ is also an $N$-function, and so $\varphi_*$ is not identical to a linear function $ku$ for any $k>0$. Hence for all $u>0$ and $\lambda> 1$ we have $\varphi_*(\lambda u) > \lambda \varphi_*(u)$.
Therefore, since $\frac{1}{1-\|S\|} > 1$, \[ 1 \geq P_{\varphi_*,w}\left(\frac{h}{1-\|S\|}\right) = \varphi_*\left(\frac{u}{1-\|S\|}\right)W(t_0) > \frac{1}{1 - \|S\|} P_{\varphi_*,w}(h), \] which shows that \begin{equation}\label{eq:00} P_{\varphi_*,w}(h) < 1 - \|S\| = \|h\|_{\mathcal{M}_{\varphi_*,w}}. \end{equation} On the other hand, if we assume that $(\Lambda_{\varphi,w}^0)_a$ is an $M$-ideal of $\Lambda_{\varphi,w}^0$, then $1 = \|H + S\| = \|h\|_{\mathcal{M}_{\varphi_*,w}} + \|S\| \geq P_{\varphi_*,w}(h) + \|S\|$. It follows that $P_{\varphi_*,w}(h) + \|S\| = 1$. Indeed, suppose that $P_{\varphi_*,w}(h) + \|S\| < 1$. Define $g(\lambda) = P_{\varphi_*,w}(\lambda h) + \lambda \|S\|$ for $\lambda > 0$. The function $g$ is convex, $g(0) = 0$, and $\lim_{\lambda \rightarrow \infty} g(\lambda) = \infty$. Since $g(1) = P_{\varphi_*,w}(h) + \|S\| < 1$, there exists $\lambda_0$ with $\frac{1}{\lambda_0} > 1$ such that $P_{\varphi_*,w}\left(\frac{h}{\lambda_0}\right) + \frac{1}{\lambda_0}\|S\| = 1$. But then, from Theorem \ref{Orlicz}, we have $1 = \|H+S\| = \inf\{\lambda>0 : P_{\varphi_*,w}(\frac{h}{\lambda}) + \frac{1}{\lambda}\|S\| \leq 1\} \leq \lambda_0 < 1$, which is a contradiction. However, $P_{\varphi_*,w}(h) + \|S\| = 1$ contradicts (\ref{eq:00}), which completes the proof.
\begin{document} \begin{abstract} We prove that the partition function for tripartite double-dimer configurations of a planar bipartite graph satisfies a recurrence related to the Desnanot-Jacobi identity from linear algebra. A similar identity for the dimer partition function was established nearly 20 years ago by Kuo and has applications to random tiling theory and the theory of cluster algebras. This work was motivated in part by the potential for applications in these areas. Additionally, we discuss an application to a problem in Donaldson-Thomas and Pandharipande-Thomas theory. The proof of our recurrence requires generalizing work of Kenyon and Wilson; specifically, lifting their assumption that the nodes of the graph are black and odd or white and even. \end{abstract} \title{Combinatorics of the double-dimer model} \section{Introduction} \begin{wrapfigure}{r}{0.35\textwidth} \centering \begin{tikzpicture}[scale=.5] \def7{7} \node at (-0.5,0) {$1$}; \node at (3,-0.5) {$2$}; \node at (6,-0.5) {$3$}; \node at (7.5,2) {$4$}; \node at (7, 7.5) {$5$}; \node at (4, 7.5) {$6$}; \node at (1, 7.5) {$7$}; \node at (-.5,5) {$8$}; \draw[line width = .25mm] (0, 0) -- (1, 0); \draw[line width = .25mm] (2, 0) -- (2, 1); \draw[line width = .25mm] (1, 1) -- (0, 1); \draw[line width = .25mm] (1, 3) -- (1, 4); \draw[line width = .25mm] (0, 2)-- (1, 2); \draw[line width = .25mm] (0, 5) -- (1, 5); \draw[line width = .25mm] (-.1, 3) -- (-.1, 4); \draw[line width = .25mm] (0.1, 3) -- (0.1, 4); \draw[line width = .25mm] (-.1, 6) -- (-.1, 7); \draw[line width = .25mm] (.1, 6) -- (.1, 7); \draw[line width = .25mm] (2.1, 2) -- (2.1, 3); \draw[line width = .25mm] (1.9, 2) -- (1.9, 3); \draw[line width = .25mm] (1.9, 4) -- (1.9, 5); \draw[line width = .25mm] (2.1, 4) -- (2.1, 5); \draw[line width = .25mm] (3.1, 3) -- (3.1, 4); \draw[line width = .25mm] (2.9, 3) -- (2.9, 4); \draw[line width = .25mm] (1, 0) -- (2, 0); \draw[line width = .25mm] (2, 1) -- (1, 1); \draw[line width = .25mm] 
(0, 1) -- (0, 2); \draw[line width = .25mm] (1, 2) -- (1, 3); \draw[line width = .25mm] (1, 4) -- (1, 5); \draw[line width = .25mm] (1, 7) -- (2, 7); \draw[line width = .25mm] (3, 7) -- (4, 7); \draw[line width = .25mm] (3, 7) -- (4, 7); \draw[line width = .25mm] (3, 6) -- (4, 6); \draw[line width = .25mm] (5, 6) -- (6, 6); \draw[line width = .25mm] (3, 5) -- (4, 5); \draw[line width = .25mm] (5, 5) -- (6, 5); \draw[line width = .25mm] (1, 5.9) -- (2, 5.9); \draw[line width = .25mm] (1, 6.1) -- (2, 6.1); \draw[line width = .25mm] (5, 7.1) -- (6, 7.1); \draw[line width = .25mm] (5, 6.9) -- (6, 6.9); \draw[line width = .25mm] (2, 7) -- (3, 7); \draw[line width = .25mm] (4, 6) -- (5, 6); \draw[line width = .25mm] (4, 5) -- (5, 5); \draw[line width = .25mm] (6, 6) -- (6, 5); \draw[line width = .25mm] (3, 5) -- (3, 6); \draw[line width = .25mm] (3, 0) -- (3, 1); \draw[line width = .25mm] (3, 2) -- (4, 2); \draw[line width = .25mm] (4,3)-- (4, 4); \draw[line width = .25mm] (5, 4) -- (6, 4); \draw[line width = .25mm] (7, 4) -- (7,5); \draw[line width = .25mm] (7, 6) -- (7,7); \draw[line width = .25mm] (3, 1) -- (3, 2); \draw[line width = .25mm] (4, 2) -- (4,3); \draw[line width = .25mm] (4, 4) -- (5, 4); \draw[line width = .25mm] (6, 4) -- (7, 4); \draw[line width = .25mm] (7, 5) -- (7,6); \draw[line width = .25mm] (6, 0) -- (7, 0); \draw[line width = .25mm] (7, 1) -- (6, 1); \draw[line width = .25mm] (6, 2) -- (6, 3); \draw[line width = .25mm] (7, 3) -- (7, 2); \draw[line width = .25mm] (4.9, 2) -- (4.9, 3); \draw[line width = .25mm] (5.1, 2) -- (5.1, 3); \draw[line width = .25mm] (5, 0) -- (5, 1); \draw[line width = .25mm] (4, 1) -- (4, 0); \draw[line width = .25mm] (7, 0) -- (7, 1); \draw[line width = .25mm] (6, 1) -- (6, 2); \draw[line width = .25mm] (6, 3)-- (7, 3); \draw[line width = .25mm] (4, 0) -- (5, 0); \draw[line width = .25mm] (5, 1) -- (4, 1); \foreach \x [count = \n] in {0, 2, 4, 6}{ \foreach \y in {0, 2, 4, 6}{ \filldraw[fill=black, draw=black] (\x,\y) 
circle (0.15cm); \filldraw[fill=white, draw=black] (\x+1,\y) circle (0.15cm); } } \foreach \x [count = \n] in {1, 3, 5, 7}{ \foreach \y in {1, 3, 5, 7}{ \filldraw[fill=black, draw=black] (\x,\y) circle (0.15cm); \filldraw[fill=white, draw=black] (\x-1,\y) circle (0.15cm); } } \end{tikzpicture} \caption{A double-dimer configuration on a grid graph with $8$ nodes. In this configuration, the pairing of the nodes is $((1, 8), (3, 4), (5, 2), (7, 6))$.} \label{fig:DDconfig} \end{wrapfigure} Let $G = (V_1, V_2, E)$ be a finite edge-weighted bipartite planar graph embedded in the plane with $|V_1| = |V_2|$. Let ${\bf N}$ denote a set of special vertices called {\em nodes} on the outer face of $G$ numbered consecutively in counterclockwise order. A {\em double-dimer configuration} on $(G, {\bf N})$ is a multiset of the edges of $G$ with the property that each internal vertex is the endpoint of exactly two edges, and each vertex in ${\bf N}$ is the endpoint of exactly one edge. In other words, it is a configuration of paths connecting the nodes in pairs, doubled edges, and disjoint cycles of length greater than two (called {\em loops}). Define a probability measure Pr where the probability of a configuration is proportional to the product of its edge weights times $2^{\ell}$, where $\ell$ is the number of loops in the configuration. Kenyon and Wilson initiated the study of the double-dimer model in \cite{KW2006}, when they showed how to compute the probability that a random double-dimer configuration has a particular node pairing. Before going into the details of Kenyon and Wilson's work, we will describe Kuo's recurrence for {\em dimer configurations}, which is the motivation for this paper, and state one of our main results. A dimer configuration (or perfect matching) of $G$ is a collection of the edges that covers all of the vertices exactly once. The weight of a dimer configuration is the product of its edge weights. 
Let $Z^{D}(G)$ denote the sum of the weights of all possible dimer configurations on $G$. In \cite{Kuo}, Kuo proved that $Z^{D}(G)$ satisfies an elegant recurrence. \begin{thm}\cite[Theorem 5.1]{Kuo} \label{thm:kuo} Let $G = (V_1, V_2, E)$ be a planar bipartite graph with a given planar embedding in which $|V_1| = |V_2|$. Let vertices $a, b, c,$ and $d$ appear in a cyclic order on a face of $G$. If $a, c \in V_1$ and $b, d \in V_2$, then \begin{equation} \label{eqn:kuo} Z^{D}(G)Z^{D}(G -\{a, b, c, d\}) =Z^{D}(G - \{a, b\})Z^{D}(G - \{c, d\}) + Z^{D}(G - \{a, d\})Z^{D}(G - \{b, c\}). \end{equation} \end{thm} His proof uses a technique called {\em graphical condensation}, which is named for its resemblance to {\em Dodgson condensation}, a method for computing the determinants of square matrices. \begin{wrapfigure}{l}{0.5\textwidth} \centering \begin{tikzpicture}[scale = .7] \filldraw[fill=green, draw=black] (0,0) circle (0.1cm); \filldraw[fill=green, draw=black] (1,0) circle (0.1cm); \filldraw[fill=green, draw=black] (2,0) circle (0.1cm); \filldraw (3,0) circle (0.1cm); \filldraw[fill=blue, draw=black] (4,0) circle (0.1cm); \filldraw[fill=red, draw=black] (0,1) circle (0.1cm); \filldraw[fill=white, draw=black] (1,1) circle (0.1cm); \filldraw (2,1) circle (0.1cm); \filldraw[fill=white, draw=black] (3,1) circle (0.1cm); \filldraw[fill=blue, draw=black] (4,1) circle (0.1cm); \filldraw[fill=red, draw=black] (0,2) circle (0.1cm); \filldraw (1,2) circle (0.1cm); \filldraw[fill=white, draw=black] (2,2) circle (0.1cm); \filldraw (3,2) circle (0.1cm); \filldraw[fill=red, draw=black] (4,2) circle (0.1cm); \node at (-.25,2) {$1$}; \node at (-.25,1) {$2$}; \node at (-.25,0) {$3$}; \node at (4.25,2) {$8$}; \node at (4.25,1) {$7$}; \node at (1,-.4) {$4$}; \node at (2,-.4) {$5$}; \node at (4.25,0) {$6$}; \draw[thick] (2.1,0) -- (3.9,0); \draw[thick] (1, 0.1) -- (1, .9); \draw[thick] (1, 1.1) -- (1, 2) -- (0.1, 2); \draw[thick] (0, 0.1) -- (0, .9); \draw[thick] (4, 1.1) -- (4, 1.9); 
\draw[thick] (2,1) -- (2.9,1); \draw[thick] (3,1.1)-- (3,2) --(2.1, 2); \draw[thick] (2,1.9) -- (2, 1); \end{tikzpicture} \hspace{.5cm} \begin{tikzpicture}[scale = .7] \filldraw[fill=red, draw=black] (0,0) circle (0.1cm); \filldraw[fill=green, draw=black] (1,0) circle (0.1cm); \filldraw[fill=green, draw=black] (2,0) circle (0.1cm); \filldraw (3,0) circle (0.1cm); \filldraw[fill=blue, draw=black] (4,0) circle (0.1cm); \filldraw[fill=red, draw=black] (0,1) circle (0.1cm); \filldraw[fill=white, draw=black] (1,1) circle (0.1cm); \filldraw (2,1) circle (0.1cm); \filldraw[fill=white, draw=black] (3,1) circle (0.1cm); \filldraw[fill=blue, draw=black] (4,1) circle (0.1cm); \filldraw[fill=yellow, draw=black] (0,2) circle (0.1cm); \filldraw (1,2) circle (0.1cm); \filldraw[fill=white, draw=black] (2,2) circle (0.1cm); \filldraw (3,2) circle (0.1cm); \filldraw[fill=yellow, draw=black] (4,2) circle (0.1cm); \node at (-.25,2) {$1$}; \node at (-.25,1) {$2$}; \node at (-.25,0) {$3$}; \node at (4.25,2) {$8$}; \node at (4.25,1) {$7$}; \node at (1,-.4) {$4$}; \node at (2,-.4) {$5$}; \node at (4.25,0) {$6$}; \draw[thick] (2.1,0) -- (3.9,0); \draw[thick] (0, 1.9) -- (0, 1.1); \draw[thick] (0.1, 0) -- (.9, 0); \draw[thick] (4, 1.1) -- (4, 1.9); \draw[thick] (1.1,1) -- (2.9,1); \draw[thick] (3,1.1)-- (3,2) --(2.1, 2); \draw[thick] (1.9, 2) -- (1,2) -- (1, 1.1); \end{tikzpicture} \hspace{.1cm} \caption{ \small{Two double-dimer configurations on a grid graph. The pairing of the nodes on the left is a tripartite pairing because the nodes can be colored contiguously using three colors so that no pair contains nodes of the same RGB color. 
The pairing on the right is not a tripartite pairing because four colors are required.}} \label{fig:tripartite} \end{wrapfigure} In this paper, we will show that when $\sigma$ is a {\em tripartite pairing}, a similar identity to (\ref{eqn:kuo}) holds for $Z^{DD}_{\sigma}(G, {\bf N})$, the weighted sum of all double-dimer configurations on $(G, {\bf N})$ with pairing $\sigma$. A planar pairing $\sigma$ is a tripartite pairing if the nodes can be divided into three circularly contiguous sets $R, G$, and $B$ so that no node is paired with a node in the same set (see Figure~\ref{fig:tripartite}). We often color the nodes in the sets red, green, and blue, in which case $\sigma$ is the unique planar pairing in which like colors are not paired. The following double-dimer version of equation (\ref{eqn:kuo}) is a corollary to Theorem~\ref{thm:cond} in Section \ref{sec:organization}. \begin{thm} \label{cor:cond} Let $G = (V_1, V_2, E)$ be a finite edge-weighted planar bipartite graph with a set of nodes {\bf N}. Divide the nodes into three circularly contiguous sets $R$, $G$, and $B$ such that $|R|$, $|G|$ and $|B|$ satisfy the triangle inequality and let $\sigma$ be the corresponding tripartite pairing\footnotemark.\footnotetext{If $|R|, |G|$, and $|B|$ do not satisfy the triangle inequality, there is no corresponding tripartite pairing $\sigma$.} Let $x, y, w, v$ be nodes appearing in a cyclic order such that the set $\{x,y,w,v\}$ contains at least one node of each RGB color\footnotemark. 
If $x, w \in V_1$ and $y, v \in V_2$ then \begin{eqnarray*} Z^{DD}_{\sigma}(G, {\bf N}) Z^{DD}_{\sigma_{xywv}}(G, {\bf N} - \{x, y, w, v\}) \hspace{-.2cm} &=& \hspace{-.2cm} Z^{DD}_{\sigma_{xy}}(G, {\bf N} - \{x, y\}) Z^{DD}_{\sigma_{wv}}(G, {\bf N} - \{w, v\})\\ & &\hspace{-.1cm} + Z^{DD}_{\sigma_{xv}}(G, {\bf N} - \{x, v\}) Z^{DD}_{\sigma_{wy}}(G, {\bf N} - \{w, y\}), \end{eqnarray*} \normalsize where for $i, j \in \{x, y, w, v\}$, $\sigma_{ij}$ is the unique planar pairing on ${\bf N} - \{i, j\}$ in which like colors are not paired together. \end{thm} \footnotetext{The nodes of $G$ have two colors: the black-white coloring from the bipartite assumption, and the RGB coloring. The coloring we are referring to is often clear from context, but to avoid ambiguity we will often write RGB color to emphasize that we are referring to the red, green, blue coloring of the nodes rather than the black-white coloring.} We illustrate Theorem~\ref{cor:cond} with an example. \begin{example} \label{ex:thmillustration} If $G$ is a graph with eight nodes colored red, green, and blue as shown below, then $\sigma = ((1, 8), (3, 4), (5, 2), (7, 6))$. 
If $x = 8, y = 1, w= 2, v= 5$, then by Theorem~\ref{cor:cond}, \begin{center} \small \hspace{.2cm} $ Z^{DD}_{\sigma}({\bf N}) Z^{DD}_{\sigma_{1258}}({\bf N} - \{1, 2, 5, 8\})$\hspace{.025cm} $=$ \hspace{.025cm} $Z^{DD}_{\sigma_{18}}({\bf N} -\{ 1, 8\}) Z^{DD}_{\sigma_{25}}({\bf N} - \{2, 5\})$ $+$ $Z^{DD}_{\sigma_{12}}({\bf N} - \{1, 2\}) Z^{DD}_{\sigma_{58}}({\bf N} - \{5, 8\}) $ \end{center} \normalsize \hspace{-.6cm} \begin{minipage}{.175\textwidth} \begin{tikzpicture}[scale=.3] \def7{7} \node at (-0.5,0) {\color{red}\footnotesize{$1$}}; \node at (3,-0.5) {\color{red}\footnotesize{$2$}}; \node at (6,-0.5) {\color{red}\footnotesize{$3$}}; \node at (7.5,2) {\color{green}\footnotesize{$4$}}; \node at (7, 7.5) {\color{green}\footnotesize{$5$}}; \node at (4, 7.5) {\color{green}\footnotesize{$6$}}; \node at (1, 7.5) {\color{blue}\footnotesize{$7$}}; \node at (-.5,5) {\color{blue}\footnotesize{$8$}}; \normalsize \draw (0, 0) -- (2, 0) -- (2, 1) -- (0, 1) -- (0, 2) -- (1, 2) -- (1, 5) -- (0, 5); \draw (0, 3) -- (0, 4); \draw (0, 6) -- (0, 7); \draw (2, 2) -- (2, 3); \draw (2, 4) -- (2, 5); \draw (3, 3) -- (3, 4); \draw (1, 7) -- (4, 7); \draw (1, 6) -- (2, 6); \draw (3, 6) -- (6, 6) -- (6, 5) -- (3, 5) -- (3, 6); \draw (5, 7) -- (6, 7); \draw (3, 0) -- (3, 2) -- (4, 2) -- (4, 4) -- (7, 4) -- (7,7); \draw (6, 0) -- (7, 0) -- (7, 1) -- (6, 1) -- (6, 3)-- (7, 3) -- (7, 2); \draw (5, 2) -- (5, 3); \draw (4, 0) -- (5, 0) -- (5, 1) -- (4, 1) -- (4, 0); \foreach \x [count = \n] in {0, 2, 4, 6}{ \foreach \y in {0, 2, 4, 6}{ \filldraw[fill=black, draw=black] (\x,\y) circle (0.15cm); \filldraw[fill=white, draw=black] (\x+1,\y) circle (0.15cm); } } \foreach \x [count = \n] in {1, 3, 5, 7}{ \foreach \y in {1, 3, 5, 7}{ \filldraw[fill=black, draw=black] (\x,\y) circle (0.15cm); \filldraw[fill=white, draw=black] (\x-1,\y) circle (0.15cm); } } \end{tikzpicture} \end{minipage} \begin{minipage}{.175\textwidth} \begin{tikzpicture}[scale=.3] \def7{7} \node at (6,-0.5) 
{\color{red}\footnotesize{$3$}}; \node at (7.5,2) {\color{green}\footnotesize{$4$}}; \node at (4, 7.5) {\color{green}\footnotesize{$6$}}; \node at (1, 7.5) {\color{blue}\footnotesize{$7$}}; \normalsize \draw (0, 0) -- (3, 0) -- (3, 1) -- (0, 1) -- (0, 0); \draw (0, 6) -- (0, 7); \draw (0, 2) -- (1, 2); \draw (2, 2) -- (3, 2); \draw (4, 2) -- (5, 2); \draw (0, 3) -- (1, 3); \draw (2, 3) -- (3, 3); \draw (4, 3) -- (5, 3); \draw (1, 5) -- (2, 5); \draw (1, 7) -- (4, 7); \draw (1, 6) -- (2, 6); \draw (3, 6) -- (6, 6) -- (6, 5) -- (3, 5) -- (3, 6); \draw (5, 7) -- (6, 7); \draw (0, 5) -- (0, 4); \draw (1, 4) -- (2, 4); \draw (3, 4) -- (4, 4); \draw (5, 4) -- (6, 4); \draw (7, 4) -- (7,5); \draw (7, 6) -- (7,7); \draw (6, 0) -- (7, 0) -- (7, 1) -- (6, 1) -- (6, 3)-- (7, 3) -- (7, 2); \draw (4, 0) -- (5, 0) -- (5, 1) -- (4, 1) -- (4, 0); \foreach \x [count = \n] in {0, 2, 4, 6}{ \foreach \y in {0, 2, 4, 6}{ \filldraw[fill=black, draw=black] (\x,\y) circle (0.15cm); \filldraw[fill=white, draw=black] (\x+1,\y) circle (0.15cm); } } \foreach \x [count = \n] in {1, 3, 5, 7}{ \foreach \y in {1, 3, 5, 7}{ \filldraw[fill=black, draw=black] (\x,\y) circle (0.15cm); \filldraw[fill=white, draw=black] (\x-1,\y) circle (0.15cm); } } \end{tikzpicture} \end{minipage} \begin{minipage}{.175\textwidth} \begin{tikzpicture}[scale=.3] \def7{7} \node at (3,-0.5) {\color{red}\footnotesize{$2$}}; \node at (6,-0.5) {\color{red}\footnotesize{$3$}}; \node at (7.5,2) {\color{green}\footnotesize{$4$}}; \node at (7, 7.5) {\color{green}\footnotesize{$5$}}; \node at (4, 7.5) {\color{green}\footnotesize{$6$}}; \node at (1, 7.5) {\color{blue}\footnotesize{$7$}}; \normalsize \draw (0, 0) -- (2, 0) -- (2, 1) -- (0, 1) -- (0, 0); \draw (0, 2) -- (1, 2) -- (1, 5) -- (0, 5) -- (0, 2); \draw (0, 3) -- (0, 4); \draw (0, 6) -- (0, 7); \draw (2, 2) -- (2, 3); \draw (2, 4) -- (2, 5); \draw (3, 3) -- (3, 4); \draw (1, 7) -- (4, 7); \draw (1, 6) -- (2, 6); \draw (3, 6) -- (6, 6) -- (6, 5) -- (3, 5) -- (3, 6); \draw 
(5, 7) -- (6, 7); \draw (3, 0) -- (3, 2) -- (4, 2) -- (4, 4) -- (7, 4) -- (7,7); \draw (6, 0) -- (7, 0) -- (7, 1) -- (6, 1) -- (6, 3)-- (7, 3) -- (7, 2); \draw (5, 2) -- (5, 3); \draw (4, 0) -- (5, 0) -- (5, 1) -- (4, 1) -- (4, 0); \foreach \x [count = \n] in {0, 2, 4, 6}{ \foreach \y in {0, 2, 4, 6}{ \filldraw[fill=black, draw=black] (\x,\y) circle (0.15cm); \filldraw[fill=white, draw=black] (\x+1,\y) circle (0.15cm); } } \foreach \x [count = \n] in {1, 3, 5, 7}{ \foreach \y in {1, 3, 5, 7}{ \filldraw[fill=black, draw=black] (\x,\y) circle (0.15cm); \filldraw[fill=white, draw=black] (\x-1,\y) circle (0.15cm); } } \end{tikzpicture} \end{minipage} \hspace{-.75cm} \begin{minipage}{.175\textwidth} \begin{tikzpicture}[scale=.3] \def7{7} \node at (-0.5,0) {\color{red}\footnotesize{$1$}}; \node at (6,-0.5) {\color{red}\footnotesize{$3$}}; \node at (7.5,2) {\color{green}\footnotesize{$4$}}; \node at (4, 7.5) {\color{green}\footnotesize{$6$}}; \node at (1, 7.5) {\color{blue}\footnotesize{$7$}}; \node at (-.5,5) {\color{blue}\footnotesize{$8$}}; \normalsize \draw (0, 0) -- (2, 0) -- (2, 1) -- (0, 1) -- (0, 2) -- (1, 2) -- (1, 5) -- (0, 5); \draw (0, 3) -- (0, 4); \draw (0, 6) -- (0, 7); \draw (2, 2) -- (2, 3); \draw (2, 4) -- (2, 5); \draw (1, 7) -- (4, 7); \draw (1, 6) -- (2, 6); \draw (3, 6) -- (6, 6) -- (6, 5) -- (3, 5) -- (3, 6); \draw (5, 7) -- (6, 7); \draw (7, 7) -- (7, 6); \draw (7, 5) -- (7, 4); \draw (6, 4) -- (5, 4); \draw (3, 2) -- (4, 2) -- (4,4) -- (3, 4) -- (3, 2); \draw (3, 0) -- (3, 1); \draw (6, 0) -- (7, 0) -- (7, 1) -- (6, 1) -- (6, 3)-- (7, 3) -- (7, 2); \draw (5, 2) -- (5, 3); \draw (4, 0) -- (5, 0) -- (5, 1) -- (4, 1) -- (4, 0); \foreach \x [count = \n] in {0, 2, 4, 6}{ \foreach \y in {0, 2, 4, 6}{ \filldraw[fill=black, draw=black] (\x,\y) circle (0.15cm); \filldraw[fill=white, draw=black] (\x+1,\y) circle (0.15cm); } } \foreach \x [count = \n] in {1, 3, 5, 7}{ \foreach \y in {1, 3, 5, 7}{ \filldraw[fill=black, draw=black] (\x,\y) circle (0.15cm); 
\filldraw[fill=white, draw=black] (\x-1,\y) circle (0.15cm); } } \end{tikzpicture} \end{minipage} \begin{minipage}{.175\textwidth} \begin{tikzpicture}[scale=.3] \def7{7} \node at (6,-0.5) {\color{red}\footnotesize{$3$}}; \node at (7.5,2) {\color{green}\footnotesize{$4$}}; \node at (7, 7.5) {\color{green}\footnotesize{$5$}}; \node at (4, 7.5) {\color{green}\footnotesize{$6$}}; \node at (1, 7.5) {\color{blue}\footnotesize{$7$}}; \node at (-.5,5) {\color{blue}\footnotesize{$8$}}; \normalsize \draw (0, 0) -- (3, 0) -- (3, 1) -- (0, 1) -- (0, 0); \draw (0, 6) -- (0, 7); \draw (4, 2) -- (4, 3); \draw (3, 2) -- (3, 3); \draw (2, 2) -- (2, 3); \draw (1, 2) -- (1, 3); \draw (0, 2) -- (0, 3); \draw (1, 5) -- (2, 5); \draw (1, 7) -- (4, 7); \draw (1, 6) -- (2, 6); \draw (3, 6) -- (6, 6) -- (6, 5) -- (3, 5) -- (3, 6); \draw (5, 7) -- (6, 7); \draw (0, 5) -- (0, 4) -- (7, 4) -- (7,7); \draw (6, 0) -- (7, 0) -- (7, 1) -- (6, 1) -- (6, 3)-- (7, 3) -- (7, 2); \draw (5, 2) -- (5, 3); \draw (4, 0) -- (5, 0) -- (5, 1) -- (4, 1) -- (4, 0); \foreach \x [count = \n] in {0, 2, 4, 6}{ \foreach \y in {0, 2, 4, 6}{ \filldraw[fill=black, draw=black] (\x,\y) circle (0.15cm); \filldraw[fill=white, draw=black] (\x+1,\y) circle (0.15cm); } } \foreach \x [count = \n] in {1, 3, 5, 7}{ \foreach \y in {1, 3, 5, 7}{ \filldraw[fill=black, draw=black] (\x,\y) circle (0.15cm); \filldraw[fill=white, draw=black] (\x-1,\y) circle (0.15cm); } } \end{tikzpicture} \end{minipage} \hspace{-.5cm} \begin{minipage}{.175\textwidth} \begin{tikzpicture}[scale=.3] \def7{7} \node at (-0.5,0) {\color{red}\footnotesize{$1$}}; \node at (3,-0.5) {\color{red}\footnotesize{$2$}}; \node at (6,-0.5) {\color{red}\footnotesize{$3$}}; \node at (7.5,2) {\color{green}\footnotesize{$4$}}; \node at (4, 7.5) {\color{green}\footnotesize{$6$}}; \node at (1, 7.5) {\color{blue}\footnotesize{$7$}}; \normalsize \draw (0, 0) -- (2, 0) -- (2, 1) -- (0, 1) -- (0, 2) -- (1, 2) -- (1, 5) -- (0, 5); \draw (0, 3) -- (0, 4); \draw (0, 6) -- (0, 7); 
\draw (2, 2) -- (2, 3); \draw (2, 4) -- (2, 5); \draw (3, 3) -- (3, 4); \draw (0, 5) -- (0, 6); \draw (0, 7) -- (1, 7); \draw (2, 7) -- (3, 7); \draw (4, 7) -- (5, 7); \draw (6, 7) -- (7, 7); \draw (1, 6) -- (2, 6); \draw (3, 6) -- (6, 6) -- (6, 5) -- (3, 5) -- (3, 6); \draw (5, 7) -- (6, 7); \draw (3, 0) -- (3, 2) -- (4, 2) -- (4, 4) -- (7, 4) -- (7,7); \draw (6, 0) -- (7, 0) -- (7, 1) -- (6, 1) -- (6, 3)-- (7, 3) -- (7, 2); \draw (5, 2) -- (5, 3); \draw (4, 0) -- (5, 0) -- (5, 1) -- (4, 1) -- (4, 0); \foreach \x [count = \n] in {0, 2, 4, 6}{ \foreach \y in {0, 2, 4, 6}{ \filldraw[fill=black, draw=black] (\x,\y) circle (0.15cm); \filldraw[fill=white, draw=black] (\x+1,\y) circle (0.15cm); } } \foreach \x [count = \n] in {1, 3, 5, 7}{ \foreach \y in {1, 3, 5, 7}{ \filldraw[fill=black, draw=black] (\x,\y) circle (0.15cm); \filldraw[fill=white, draw=black] (\x-1,\y) circle (0.15cm); } } \end{tikzpicture} \end{minipage} \end{example} We were motivated to find an analogue of Theorem~\ref{thm:kuo} by its potential applications, which we discuss in the next section. \subsection{Applications} Kuo's work has a variety of applications. For example, Kuo uses graphical condensation to give a new proof that the number of tilings of an order-$n$ Aztec diamond is $2^{n(n+1)/2}$ \cite[Theorem 3.2]{Kuo} and a new proof for MacMahon's generating function for plane partitions that are subsets of a box \cite[Theorem 6.1]{Kuo}. His results also have applications to random tiling theory (see \cite[Section 4.1]{Kuo}) and the theory of cluster algebras. Cluster algebras are a class of commutative rings introduced by Fomin and Zelevinsky \cite{FZ} to study total positivity and dual canonical bases in Lie theory. The theory of cluster algebras has since been connected to many areas of math, including quiver representations, Teichm{\"u}ller theory, Poisson geometry, and integrable systems \cite{Williams}. 
In \cite{Lai1, Lai2}, Tri Lai and Gregg Musiker study toric cluster variables for the quiver associated to the cone over the del Pezzo surface $dP_3$, giving algebraic formulas for these cluster variables as Laurent polynomials. Using identities similar to Kuo's Theorem~\ref{thm:kuo}, they give combinatorial interpretations of most of these formulas \cite{Lai1}. We expect Theorem~\ref{cor:cond} to have similar applications. In addition, by using both Theorem~\ref{thm:kuo} and Theorem~\ref{cor:cond} we can give a direct proof of a problem in Donaldson-Thomas and Pandharipande-Thomas theory. \subsubsection{Application to Donaldson-Thomas and Pandharipande-Thomas theory.} \label{sec:DTapp} Donaldson-Thomas (DT) theory, Pandharipande-Thomas (PT), and Gromov-Witten (GW) theory are branches of enumerative geometry closely related to mirror symmetry and string theory. The DT and GW theories give frameworks for counting curves\footnotemark~on a threefold $X$. One of the conjectures in \cite{MNOP1, MNOP} gives a correspondence between the DT and GW frameworks, which has been proven in special cases, such as when $X$ is toric \cite{MNOP2}. PT theory gives a third framework for counting curves when $X$ is a nonsingular projective threefold that is Calabi-Yau. The correspondence between the DT and PT frameworks was first conjectured in \cite{PT16} and was proven in \cite{Bridgeland}, which is closely related to the work in \cite{Toda}. Specifically, let $X$ be a toric Calabi-Yau 3-fold. Define $Z_{DT}(q) = \sum\limits_{n} I_{n} q^{n}$, where $I_{n}$ counts length $n$ subschemes of $X$, and $Z_{PT}(q) = \sum\limits_{n} P_{n} q^n$, where $P_{n}$ counts stable pairs on $X$ (see \cite{PT16}). Bridgeland proved that these generating functions coincide up to a factor of $M(q) = \prod\limits_{n = 1}^{\infty} \dfrac{1}{(1 - q^{n})^{n} }$, which is the total $q$-weight of all plane partitions \cite{MacMahon}. 
\footnotetext{The frameworks differ in what is meant by a curve on $X$.} \begin{thm}\cite[Theorem 1.1]{Bridgeland} \label{conj32} $Z_{DT}(q) = Z_{PT}(q) M(q) \footnotemark$. \end{thm} \footnotetext{In \cite{Bridgeland, PT2009, MNOP1, MNOP2} and elsewhere in the geometry literature, the formulas have $-q$ rather than $q$. The sign is there for geometric reasons which are immaterial to us.} The application of Theorem~\ref{cor:cond} that we describe relates to Theorem~\ref{conj32} at the level of the topological vertex. Define $V_{\lambda, \mu, \nu} =q^{c(\lambda, \mu, \nu)} \sum\limits_{\pi} q^{|\pi|}$, where the sum is taken over all plane partitions $\pi$ {\em asymptotic to} $(\lambda, \mu, \nu)$. Maulik, Nekrasov, Okounkov, and Pandharipande \cite{MNOP1, MNOP} proved that $Z_{DT}(q) = V_{\lambda, \mu, \nu}$ and thus $V_{\lambda, \mu, \nu}$ is called the DT topological vertex. Let $W_{\lambda, \mu, \nu}= q^{c(\lambda, \mu, \nu)} \sum\limits_{i} d_{i} q^{i}$ where $d_{i}$ is a certain weighted enumeration of {\em labelled box configurations} of length $i$ \cite{PT2009}. In \cite[Theorem/Conjecture 2]{PT2009} Pandharipande and Thomas conjecture that $W_{\lambda, \mu, \nu}$ is the stable pairs vertex, i.e. that $Z_{PT}(q) = W_{\lambda, \mu, \nu}$. In a forthcoming paper with Gautam Webb and Ben Young (for an extended abstract, see \cite{JWY}), we prove that \begin{conj}\cite[Calabi-Yau case of Conjecture 4]{PT2009} \label{conj:vwm} $V_{\lambda, \mu, \nu} = W_{\lambda, \mu, \nu} M(q)$. \end{conj} Pandharipande and Thomas remark that a straightforward (but long) approach to this conjecture using DT theory exists \cite{PT2009}. Our proof interprets $V_{\lambda, \mu, \nu}$ using the dimer model and $W_{\lambda, \mu, \nu}$ using the double-dimer model, and then uses Theorems~\ref{thm:kuo} and~\ref{cor:cond} to show that both $V_{\lambda, \mu, \nu}/M(q)$ and $W_{\lambda, \mu, \nu}$ satisfy the same recurrence. 
Conjecture~\ref{conj:vwm}, taken together with a substantial body of geometric work, proves the aforementioned Theorem/Conjecture 2 of \cite{PT2009}. For further details, see \cite{JWY}. \subsection{Proof of Theorem~\ref{cor:cond}} \label{sec:proofsketch} Presently, we discuss the main ideas behind the proof of Theorem~\ref{cor:cond}. We start by giving an overview of the results from \cite{KW2006, KW2009} that are needed for our work. \subsubsection{Background} \label{sec:KWwork} Kenyon and Wilson gave explicit formulas for the probability that a random double-dimer configuration has a particular node pairing $\sigma$. When $\sigma$ is a tripartite pairing, this probability is proportional to the determinant of a matrix. To be more precise, we need to introduce some notation and definitions. Since $G$ is bipartite, we can color its vertices black and white so that each edge connects a black vertex to a white vertex. Let $G^{BW}$ be the subgraph of $G$ formed by deleting the nodes except for the ones that are black and odd or white and even. Define $G^{WB}$ analogously, but with the roles of black and white reversed. Let $G^{BW}_{i, j}$ be the graph $G^{BW}$ with nodes $i$ and $j$ included if and only if they were not included in $G^{BW}$. For convenience, Kenyon and Wilson assume the nodes alternate in color, so all nodes are black and odd or white and even. (If a graph $G$ does not have this property, we can add edges of weight 1 to each node that has the wrong color to obtain a graph whose double-dimer configurations are in a one-to-one weight-preserving correspondence with double-dimer configurations of $G$.) 
For each planar pairing $\sigma$, Kenyon and Wilson showed the normalized probability $$\widehat{ \Pr }(\sigma) := \Pr(\sigma) \dfrac{ Z^D(G^{WB})}{Z^D(G^{BW})} = \dfrac{Z^{DD}_{\sigma}(G, {\bf N}) }{(Z^D(G^{BW}))^2}$$ that a random double-dimer configuration has pairing $\sigma$ is an integer-coefficient homogeneous polynomial in the quantities $X_{i, j} := \dfrac{Z^D(G^{BW}_{i, j})}{Z^D(G^{BW})}$ \cite[Theorem 1.3]{KW2006}. For example, the normalized probability $\widehat{\Pr}$ that a random double-dimer configuration on eight nodes has the pairing $((1, 8), (3, 4), (5, 2), (7, 6))$ (see Figure~\ref{fig:DDconfig}) is \begin{eqnarray*} \setlength{\arraycolsep}{2.5pt} \widehat{\Pr} \footnotesize \left(\hspace{-.1cm} \begin{array}{ c| c | c | c} 1 & 3 & 5 & 7\\ 8 & 4 & 2 & 6 \end{array} \hspace{-.1cm}\right) \normalsize &= & X_{1, 8} X_{3, 4} X_{5,2} X_{7, 6} - X_{1, 4} X_{3, 8} X_{5, 2} X_{7, 6} + X_{1, 6}X_{3, 4}X_{5, 8}X_{7, 2} - X_{1, 8}X_{3, 6}X_{5, 2}X_{7, 4} \\ & &- X_{1, 4}X_{3, 6}X_{5, 8}X_{7, 2} + X_{1, 6}X_{3, 8}X_{5, 2}X_{7, 4}. \end{eqnarray*} Kenyon and Wilson gave an explicit method for computing these polynomials: they defined a matrix $\mathcal{P}^{(DD)}$ with rows indexed by planar pairings and columns indexed by odd-even pairings. They showed how to calculate the columns of the matrix completely combinatorially and proved that for any planar pairing $\sigma$, \begin{equation} \label{eqn:kwthm14} \widehat{\Pr}(\sigma) = \sum\limits_{\text{ odd-even pairings } \tau} \mathcal{P}^{(DD)}_{\sigma, \tau} X'_{\tau}. \end{equation} where $X'_{\tau} = (-1)^{\text{\# crosses of }\tau} \prod\limits_{i \text{ odd} } X_{i,\tau(i)}$ \cite[Theorem 1.4]{KW2006}. In the case where $\sigma$ is a tripartite pairing, $\widehat{ \Pr }(\sigma)$ is a determinant of a matrix whose entries are $X_{i, j}$ or $0$. 
\begin{thm}\cite[Theorem 6.1]{KW2009} \label{thm:kw61} Suppose that the nodes are contiguously colored red, green, and blue (a color may occur zero times), and that $\sigma$ is the (unique) planar pairing in which like colors are not paired together. Let $\sigma(i)$ denote the item that $\sigma$ pairs with item $i$. We have $$\widehat{ \Pr }(\sigma) = \det [1_{i, j \text{ RGB-colored differently } } X_{i, j} ]^{i = 1, 3, \ldots, 2n-1}_{j = \sigma(1), \sigma(3), \ldots, \sigma(2n-1) }.$$ \end{thm} Initially, it seems that Theorem~\ref{cor:cond} will follow immediately from combining Theorem~\ref{thm:kw61} with the Desnanot-Jacobi identity. \begin{thm}[Desnanot-Jacobi identity] Let $M = (m_{i, j})_{i, j=1}^{n}$ be a square matrix. For $1 \leq i, j \leq n$, let $M_{i}^{j}$ be the matrix that results from $M$ by deleting the $i$th row and the $j$th column, and for $i \neq j$ let $M_{i, j}^{i, j}$ be the matrix that results from $M$ by deleting both rows $i$ and $j$ and both columns $i$ and $j$. Then $$\det(M) \det(M_{i, j}^{i, j}) = \det(M_{i}^{i}) \det(M_{j}^{j}) - \det(M_{i}^{j}) \det(M_{j}^{i}).$$ \end{thm} However, we run into some technical obstacles, which we illustrate with an example. \subsubsection{Example} Suppose we wish to prove the equation from Example~\ref{ex:thmillustration}: $$ Z^{DD}_{\sigma}({\bf N}) Z^{DD}_{\sigma_{1258}}({\bf N} - \{1, 2, 5, 8\})\hspace{-.1cm} =\hspace{-.1cm} Z^{DD}_{\sigma_{18}}({\bf N} -\{ 1, 8\}) Z^{DD}_{\sigma_{25}}({\bf N} - \{2, 5\}) + Z^{DD}_{\sigma_{12}}({\bf N} - \{1, 2\}) Z^{DD}_{\sigma_{58}}({\bf N} - \{5, 8\}) $$ where recall that $\sigma = ((1,8), (3,4), (5, 2), (7, 6))$. Then the matrix $M$ from Theorem~\ref{thm:kw61} is $$M = \begin{pmatrix} X_{1,8} &X_{1, 4} & 0 & X_{1, 6} \\ X_{3, 8} & X_{3, 4} & 0 & X_{3, 6} \\ X_{5, 8} & 0 & X_{5, 2} & 0 \\ 0 & X_{7, 4} & X_{7, 2} & X_{7, 6} \\ \end{pmatrix}.
$$ Since the first row and column of $M$ correspond to nodes 1 and 8, respectively, and the third row and column correspond to nodes 5 and 2, we apply the Desnanot-Jacobi identity with $i = 1$ and $j = 3$: $$\det(M) \det(M_{1, 3}^{1, 3}) = \det(M_{1}^{1}) \det(M_{3}^{3}) - \det(M_{1}^{3}) \det(M_{3}^{1}).$$ By Theorem~\ref{thm:kw61}, $$\det(M) = \dfrac{Z^{DD}_{\sigma}(G, {\bf N}) }{(Z^{D}(G^{BW}))^2 }.$$ We also need to prove, for example, that \begin{equation} \label{eqn:exampleminor} \det(M_{3}^{3} ) = \dfrac{Z^{DD}_{\sigma_{25}}(G, {\bf N} - \{2, 5\}) }{(Z^{D}(G^{BW}))^2 }, \end{equation} where $$M_{3}^{3} = \begin{pmatrix} X_{1,8} &X_{1, 4} & X_{1, 6} \\ X_{3, 8} & X_{3, 4} & X_{3, 6} \\ 0 & X_{7, 4} & X_{7, 6} \end{pmatrix}.$$ An example of a double-dimer configuration counted by $Z^{DD}_{\sigma_{25}}(G, {\bf N} - \{2, 5\}) $ is shown in Figure~\ref{fig:examplefromtalk}. \begin{figure} \caption{Left: A double-dimer configuration on a grid graph with node set ${\bf N} \label{fig:examplefromtalk} \end{figure} We cannot apply Theorem~\ref{thm:kw61} to prove equation~(\ref{eqn:exampleminor}) because the nodes are not numbered consecutively. We might hope to resolve this by relabeling the nodes, as shown in Figure 3. But since Kenyon and Wilson assume that all nodes are black and odd or white and even, in order to satisfy the assumptions of Kenyon and Wilson's theorem, we need to add edges of weight 1 to nodes $2$ and $3$. Call the resulting graph $\widetilde{G}$ and let $\widetilde{X}_{i,j} = \dfrac{Z^{D}(\widetilde{G}^{BW}_{i, j}) }{Z^{D}(\widetilde{G}^{BW})}$. The matrix from Theorem~\ref{thm:kw61} is $$\widetilde{M} = \begin{pmatrix} \widetilde{X}_{1,6} &0& \widetilde{X}_{1, 4} \\ \widetilde{X}_{3, 6} & \widetilde{X}_{3, 2} & 0 \\ 0 & \widetilde{X}_{5, 2} & \widetilde{X}_{5, 4} \end{pmatrix}. 
$$ To prove equation (\ref{eqn:exampleminor}) it suffices to show \begin{equation} \label{eqn:messy} (Z^{D}(\widetilde{G}^{BW}))^2 \det(\widetilde{M}) = (Z^{D}(G^{BW}))^2 \det(M_{3}^{3}), \end{equation} since $\det(\widetilde{M}) = \dfrac{Z^{DD}_{\sigma_2}(\widetilde{G}, {\bf N} - \{2, 5\}) }{(Z^{D}(\widetilde{G}^{BW}))^2 }$ by Theorem~\ref{thm:kw61}. Verifying equation (\ref{eqn:messy}) is a straightforward computation, but as we consider graphs with more nodes, the computations quickly become more involved. To be able to interpret the minors of Kenyon and Wilson's matrix outside of small examples, we need to lift their assumption that the nodes of the graph are black and odd or white and even. Notice that under the assumption that the nodes of the graph are black and odd or white and even, $X_{i, j} = \dfrac{Z^{D}(G^{BW}_{i, j})}{Z^{D}(G^{BW})} = \dfrac{Z^{D}(G_{i, j})}{Z^{D}(G)}$. This suggests that the correct generalization of Kenyon and Wilson's matrix will have entries $\dfrac{Z^{D}(G_{i, j})}{Z^{D}(G)}$. \subsubsection{Our approach} \label{sec:ourapproach} The previous remark motivates our approach, which is to define $Y_{i, j} :=\dfrac{Z^D(G_{i, j})}{Z^D(G)}$ and $\widetilde{\Pr}(\sigma) = \dfrac{ Z^{DD}_{\sigma}(G, {\bf N}) }{ (Z^{D}(G))^{2} }$. When $G$ is a graph with nodes that are either black and odd or white and even, $Z^D(G) = Z^D(G^{BW})$, so $Y_{i, j} = X_{i,j}$ and $\widetilde{\Pr}(\sigma) = \widehat{\Pr}(\sigma)$. In this paper, we will prove analogues of many of Kenyon and Wilson's results from \cite{KW2006, KW2009} in the variables $Y_{i, j}$. Once we have established our generalization of Theorem~\ref{thm:kw61}, we will be able to apply the Desnanot-Jacobi identity to prove Theorem~\ref{cor:cond}. \subsection{Organization of paper} \label{sec:organization} This paper is structured as follows. In Section 2, we generalize some of Kenyon and Wilson's results from \cite{KW2006}. 
The main result of Section 2 is an analogue of \cite[Theorem 1.4]{KW2006}: we show that we can write $\widetilde{\Pr}(\sigma)$ as an integer-coefficient homogeneous polynomial in the quantities $Y_{i, j}$. To this end, we define $$Y'_{\rho} = (-1)^{\text{\# crosses of }\rho} \prod\limits_{i \text{ black} } Y_{i,\rho(i)}$$ for any black-white pairing $\rho$. Note that we work with black-white pairings rather than odd-even pairings since we are not requiring that the nodes are either black and odd or white and even. In \cite{KW2006, KW2009}, black-white pairings and odd-even pairings coincide, so $X_{i, j} = 0$ when $i$ and $j$ have the same parity, which occurs exactly when they have the same color\footnotemark. In our general setting, $Y_{i, j}$ may be nonzero when $i$ and $j$ have the same parity, but if $i$ and $j$ are the same color then there are no dimer configurations of $G_{i, j}$, so $Y_{i,j} = 0$. \footnotetext{Here, and elsewhere in Section~\ref{sec2},``same color'' refers to the black-white coloring from the bipartite assumption.} Our analogue of Kenyon and Wilson's matrix $\mathcal{P}^{(DD)}$ (see equation (\ref{eqn:kwthm14})) is $\mathcal{Q}^{(DD)}$. The rows of $\mathcal{Q}^{(DD)}$ are indexed by planar pairings and columns are indexed by black-white pairings. To prove that $\mathcal{Q}^{(DD)}$ is integer-valued, we show that the columns of this matrix can be computed combinatorially, and in Section~\ref{sec:firstmajorproof} we prove the following theorem: \begin{thm} \label{thm:thm1} Let $G$ be a finite edge-weighted planar bipartite graph with a set of nodes. For any planar pairing $\sigma$, $$ \widetilde{\Pr}(\sigma) = \sum_{\text{black-white pairings } \rho} \mathcal{Q}^{(DD)}_{\sigma, \rho} Y'_{\rho},$$ where the coefficients $\mathcal{Q}^{(DD)}_{\sigma, \rho}$ are all integers. \end{thm} To prove Theorem~\ref{thm:thm1}, we use Kenyon and Wilson \cite{KW2006} as a road map, proving analogues of Lemmas $3.1-3.5$ and Theorem 3.6 from \cite{KW2006}. 
Because we follow their work so closely, before presenting each of our lemmas we state the corresponding lemma from \cite{KW2006}. In some cases the proofs are very similar. In others, substantially more work is required. In Section 3, we use our results from Section 2 to generalize Kenyon and Wilson's determinant formula from Theorem~\ref{thm:kw61}. Before stating our version of their formula, we observe that \small $$\det [1_{i, j \text{ RGB-colored differently } } X_{i, j} ]^{i = 1, 3, \ldots, 2n-1}_{j = \sigma(1), \sigma(3), \ldots, \sigma(2n-1) } = \text{sign}_{OE}(\sigma)\det [1_{i, j \text{ RGB-colored differently } } X_{i, j} ]^{i = 1, 3, \ldots, 2n-1}_{j = 2, 4, \ldots, 2n },$$ \normalsize where $\text{sign}_{OE}(\sigma)$ is the parity of the permutation $\begin{pmatrix} \frac{\sigma(1)}{2} & \frac{\sigma(3)}{2} & \cdots & \frac{\sigma(2n-1)}{2} \end{pmatrix}$ written in one-line notation. We prove that \begin{thm} \label{thm61} Let $G$ be a finite edge-weighted planar bipartite graph with a set of nodes. Suppose that the nodes are contiguously colored red, green, and blue (a color may occur zero times), and that $\sigma$ is the (unique) planar pairing in which like colors are not paired together. We have $$\widetilde{\Pr}(\sigma)= \text{sign}_{OE}(\sigma) \det [1_{i, j \text{ RGB-colored differently } } Y_{i, j} ]^{i = b_1, b_2, \ldots, b_{n}}_{j = w_1, w_2, \ldots, w_{n} },$$ where $b_1 < b_2 < \cdots < b_n$ are the black nodes and $w_1 < w_2 < \cdots < w_n$ are the white nodes. \end{thm} By combining Theorem~\ref{thm61} with the Desnanot-Jacobi identity, we prove our main result: \begin{thm} \label{thm:cond} Let $G= (V_1, V_2, E)$ be a finite edge-weighted planar bipartite graph with a set of nodes {\bf N}. Divide the nodes into three circularly contiguous sets $R$, $G$, and $B$ such that $|R|, |G|,$ and $|B|$ satisfy the triangle inequality and let $\sigma$ be the corresponding tripartite pairing. 
If $x, w \in V_1$ and $y, v \in V_2$ then \begin{eqnarray*} & & \text{sign}_{OE}(\sigma) \text{sign}_{OE}(\sigma'_{xywv})Z^{DD}_{\sigma}(G, {\bf N}) Z^{DD}_{\sigma_{xywv}}(G, {\bf N} - \{x, y, w, v\}) \hspace{.4cm}\\ &=& \text{sign}_{OE}(\sigma'_{xy}) \text{sign}_{OE}(\sigma'_{wv}) Z^{DD}_{\sigma_{xy}}(G, {\bf N} - \{x, y\}) Z^{DD}_{\sigma_{wv}}(G, {\bf N} - \{w, v\}) \\ && - \text{sign}_{OE}(\sigma'_{xv}) \text{sign}_{OE}(\sigma'_{wy}) Z^{DD}_{\sigma_{xv}}(G, {\bf N} - \{x, v\}) Z^{DD}_{\sigma_{wy}}(G, {\bf N} - \{w, y\}), \end{eqnarray*} where for $i, j \in \{x, y, w, v\}$, $\sigma_{ij}$ is the unique planar pairing on ${\bf N} - \{i, j\}$ in which like colors are not paired together, and $\sigma_{ij}'$ is the pairing after the node set ${\bf N} - \{i, j\}$ has been relabeled so that the nodes are numbered consecutively. \end{thm} Theorem~\ref{cor:cond} follows as a corollary; the additional assumptions in Theorem~\ref{cor:cond} lead to a nice simplification of the signs in Theorem~\ref{thm:cond}. As discussed, Theorems~\ref{thm:thm1} and \ref{thm61} generalize the combinatorial results of \cite{KW2006, KW2009, KW11}. The main questions of interest in these bodies of work involve asymptotic and probabilistic properties of the double-dimer model, which were further studied in \cite{K14, Dubedat, GR}. In \cite{KP}, Kenyon and Pemantle give a connection between the double-dimer model and cluster algebras. None of these results required taking minors of the matrices from Theorem~\ref{thm:kw61}, so the assumption that the nodes of $G$ are black and odd or white and even was convenient and suitable for their purposes. \section{Proof of Theorem \ref{thm:thm1}} \label{sec2} In this paper, $G$ always denotes a finite edge-weighted bipartite planar graph embedded in the plane with a set of $2n$ nodes ${\bf N}$ on the outer face of $G$ numbered consecutively in counterclockwise order.
Kenyon and Wilson \cite{KW2006, KW2009} assume that the nodes alternate in color so that the black nodes are odd and the white nodes are even. We allow the nodes to have any coloring, as long as ${\bf N}$ has an equal number of black and white nodes. To prove Theorem \ref{thm:thm1}, we need to prove analogues of Lemmas $3.1 -3.5$ and Theorem 3.6 from Kenyon and Wilson \cite{KW2006} in this more general setting. For ease of exposition, we prove our lemmas in a slightly different order. \subsection{Lemma 3.4 from Kenyon and Wilson} \label{sec:lem34} The purpose of this section is to prove an analogue of the following lemma from Kenyon and Wilson \cite{KW2006} for black-white pairings. \begin{lemma}\cite[Lemma 3.4]{KW2006} \label{lem:kw34} For odd-even pairings $\rho$, $$\text{sign}_{OE}(\rho) \prod\limits_{(i, j) \in \rho} (-1)^{(|i-j|-1)/2} = (-1)^{\# \text{ crosses of } \rho}.$$ \end{lemma} A {\em cross} of a pairing $\rho$ is a set of two pairs $(a, c)$ and $(b, d)$ of $\rho$ such that $a < b < c < d$. Recall from Section~\ref{sec:organization} that the sign of an odd-even pairing $\rho = ((1, \rho(1)), (3, \rho(3)), \ldots, (2n-1, \rho(2n-1)))$ is the parity of the permutation $\begin{pmatrix} \frac{\rho(1)}{2} & \frac{\rho(3)}{2} & \cdots & \frac{\rho(2n-1)}{2} \end{pmatrix}$ written in one-line notation. For our version of this lemma, we need to define the sign of a black-white pairing $\rho$, which we will denote $\text{sign}_{BW}(\rho)$. \begin{defn} \label{def:signbw} If $\rho$ is a black-white pairing, then we can write $\rho = ((b_1, w_1), (b_2, w_2), \ldots, (b_n, w_n))$, where $b_1 < b_2 < \cdots < b_n$. Let $r: \{w_1, \ldots, w_n\} \to \{1, \ldots, n\}$ be the map defined by \\ \mbox{$r(k) = \#\{i:w_i \leq w_k\}.$} Then the sign of $\rho$, denoted $\text{sign}_{BW}(\rho)$, is the parity of the permutation $$\sigma_{\rho} = \begin{pmatrix} r(w_1) & r(w_2) & \cdots & r(w_n) \end{pmatrix}$$ written in one-line notation. 
\end{defn} When $\rho$ is a pairing that is both black-white and odd-even, these signs agree. \begin{lemma} \label{lem:OEandBWsigns} If $\rho$ is a black-white pairing that is also odd-even, then $\text{sign}_{OE}(\rho) = \text{sign}_{BW}(\rho)$. \end{lemma} The proof of Lemma~\ref{lem:OEandBWsigns} is straightforward, but it is postponed to Section~\ref{sec:OEandBW} for clarity of exposition. In Lemma~\ref{lem:kw34}, the sign of a pair $(i, j)$ of $\rho$ is $(-1)^{(|i-j|-1)/2}$. If $\rho$ is a black-white pairing that is not odd-even and $(b, w)$ is a pair in $\rho$, it is not necessarily the case that $\frac{ |b-w| - 1}{2}$ is an integer. Therefore we need a different way to define the sign of a pair. To motivate this definition, notice that if two nodes of the opposite color $b$ and $w$ have the same parity, it cannot be the case that the nodes between $b$ and $w$ alternate black and white. Therefore we must keep track of the number of consecutive nodes of the same color between $b$ and $w$. Consecutive nodes of the same color appear in pairs. For example, if we have a graph with eight nodes so that nodes $1, 3, 4,$ and $6$ are black and nodes $2, 5, 7, 8$ are white, there are two pairs of consecutive nodes of the same color: $(3, 4)$ and $(7, 8)$. Since we frequently use the term pair when describing pairings of the nodes, we will refer to pairs of consecutive nodes as {\em couples of consecutive nodes} instead. \begin{defn} If $(b, w)$ is a pair in a black-white pairing, let $a_{b, w}$ be the number of couples of consecutive nodes of the same color in the interval $[\min\{b, w\}, \min\{b, w\} + 1, \ldots, \max\{b, w\}]$. \end{defn} We note that a triple of consecutive nodes that are all the same color contributes 2 to $a_{b,w}$. \begin{rem} \label{rem:signiswelldefined} If $(b,w)$ is a pair in a black-white pairing, then $\frac{|b-w| +a_{b, w} -1}{2}$ is an integer. 
\end{rem} \begin{proof} Let $(n_1, n_1 + 1), (n_2, n_2 + 1), \ldots, (n_{2k}, n_{2k} + 1)$ be a complete list of couples of consecutive nodes of the same color in ${\bf N}$ so that $n_1 < n_2 < \cdots < n_{2k}$, where it is possible that $n_{i+1} = n_{i} + 1$. Every time we reach a couple of consecutive nodes, the black nodes and white nodes switch parity. That is, if the black nodes in the interval $[n_{\ell} + 1,n_{\ell} + 2, \ldots, n_{\ell+1}]$ are odd, then the black nodes in the interval $[n_{\ell+1} + 1,n_{\ell+1} + 2, \ldots, n_{\ell+2}]$ are even. (Note that these intervals could be length 1). It follows that if $b$ and $w$ are the same parity, then there are an odd number of couples of consecutive nodes in the interval $[\min\{b, w\}, \min\{b, w\} + 1, \ldots, \max\{b, w\}]$. So in this case $\frac{|b-w| +a_{b, w} -1}{2}$ is an integer. If $b$ and $w$ are opposite parity, then there are an even number of couples of consecutive nodes in the interval $[\min\{b, w\}, \min\{b, w\} + 1, \ldots, \max\{b, w\}]$. So $\frac{|b-w| +a_{b, w} -1}{2}$ is an integer in this case as well. \end{proof} \begin{defn} \label{def:signpair} If $(b, w)$ is a pair in a black-white pairing, let \[ \text{sign}(b, w) = (-1)^{(|b-w|+ a_{b, w}-1)/2} . \] \end{defn} We observe that when the nodes of $G$ alternate black and white, $a_{b, w} = 0$ for all pairs $(b, w)$, so this definition of the sign of a pair agrees with Kenyon and Wilson's definition. \begin{rem} \label{rem:notation} For the remainder of the paper, we use the following notation. 
We let \begin{itemize} \item $(n_1, n_1 + 1), (n_2, n_2 + 1), \ldots, (n_{2k}, n_{2k} + 1)$ be a complete list of couples of consecutive nodes of the same color so that $n_1 < \cdots < n_{2k}$, \item $(s_1, s_1 + 1), (s_2, s_2 + 1), \ldots, (s_k, s_k+1)$ be a complete list of couples of consecutive black nodes so that $s_1 < \cdots < s_{k}$, and \item $(u_1, u_1 + 1), (u_2, u_2 + 1), \ldots, (u_k, u_k+1)$ be a complete list of couples of consecutive white nodes so that $u_1 < \cdots < u_k$. \end{itemize} Note that we could have $n_{i+1} = n_{i} + 1$, $s_{i+1} = s_i + 1$, or $u_{i+1} = u_i + 1$. \end{rem} Since we are allowing arbitrary node colorings, many of our results contain a global sign that depends on the order in which the couples of consecutive nodes appear. For example, suppose a node set {\bf N} has two couples of consecutive nodes: a couple of consecutive black nodes $(s, s+1)$ and a couple of consecutive white nodes $(u, u+1)$. Then the global sign will be $1$ if $u < s$ and $-1$ if $s < u$. To emphasize that this sign only depends on the relative ordering of the couples of consecutive nodes of the same color, we use the notation $\text{sign}_{\cons}({\bf N})$. \begin{defn} \label{def:nodesign} Using the notation from Remark~\ref{rem:notation}, if node $1$ is black, define the map $\varphi: \{n_1, n_2, \ldots, n_{2k} \} \to \{1,2, \ldots, 2k\}$ by $$\varphi(n_j) = \begin{cases} 2i-1 & \mbox{if } n_j = u_i \\ 2i & \mbox{if } n_j = s_i \end{cases}. $$ Then the image of $\{n_1, n_2, \ldots, n_{2k}\}$ under the map $\varphi$ can be considered as a permutation in one-line notation: $$\sigma_{\bf N} = \begin{pmatrix} \varphi(n_1) & \varphi(n_2) & \cdots & \varphi(n_{2k}) \end{pmatrix}.$$ Define $\text{sign}_{\cons}({\bf N})$ to be the sign of this permutation. Note that if $u_1 < s_1 < u_2 < s_2< \cdots < u_k < s_k$ then $\sigma_{\bf N} = \begin{pmatrix} 1 & 2 & \cdots & 2k \end{pmatrix}$, so $\text{sign}_{\cons}({\bf N})= 1$. 
If node $1$ is white, define the map $\varphi: \{n_1, n_2, \ldots, n_{2k} \} \to \{1,2, \ldots, 2k\}$ by $$\varphi(n_j) = \begin{cases} 2i-1 & \mbox{if } n_j = s_i \\ 2i & \mbox{if } n_j = u_i \end{cases}. $$ As above, the image of $\{n_1, n_2, \ldots, n_{2k}\}$ under the map $\varphi$ can be considered as a permutation in one-line notation and we define $\text{sign}_{\cons}({\bf N})$ to be the sign of this permutation. Note that if $s_1 < u_1 < s_2 < u_2< \cdots < s_k < u_k$, $\text{sign}_{\cons}({\bf N})= 1$. In the case where there are no consecutive nodes of the same color, we define $\text{sign}_{\cons}({\bf N}) =1$. \end{defn} In Definition \ref{def:nodesign}, if node $1$ is black, it is possible that $s_k = 2n$. Similarly, if node $1$ is white, it is possible that $u_k = 2n$. \begin{defn} \label{defn:inversioninnodecolors} Since the image of $\{n_1, n_2, \ldots, n_{2k} \}$ under the map $\varphi$ can be considered a permutation in one-line notation, we say that a pair $(u_\ell, s_m)$ is an inversion with respect to the node coloring of {\bf N} if $(\varphi(u_{\ell}), \varphi(s_m))$ is an inversion of $\sigma_{{\bf N}}$. \end{defn} \begin{example} Let ${\bf N}$ be a set of nodes where node 1 is black. \begin{itemize} \item If {\bf N} has four couples of consecutive nodes of the same color with $u_1 < s_1 < s_2 < u_2$, then $\sigma_{\bf N} = \begin{pmatrix} 1& 2& 4 & 3 \end{pmatrix}$, so $\text{sign}_{\cons}({\bf N}) = -1$. The pair $(s_2, u_2)$ is an inversion with respect to the node coloring. \item If instead $s_1 < u_1 < s_2 < u_2$, then $\sigma_{\bf N} = \begin{pmatrix} 2& 1& 4 & 3 \end{pmatrix}$, so $\text{sign}_{\cons}({\bf N}) = 1$. The pairs $(s_1, u_1)$ and $(s_2, u_2)$ are inversions. \end{itemize} \end{example} \begin{example} Let ${\bf N}$ be a set of nodes where node 1 is white.
If {\bf N} has six couples of consecutive nodes of the same color with $s_1 < s_2 < u_1 < u_2 < u_3 < s_3$, then $\sigma_{\bf N} = \begin{pmatrix} 1&3& 2 & 4 & 6 & 5 \end{pmatrix}$. The pairs $(s_2, u_1)$ and $(u_3, s_3)$ are inversions. \end{example} \begin{rem} \label{rem:inversioninnodecolors} If node 1 is black, $(u_\ell, s_m)$ is an inversion with respect to the node coloring when $u_\ell < s_m$ and $\ell > m$. The pair $(s_m, u_\ell)$ is an inversion when $s_m < u_\ell$ and $m \geq \ell$. If node 1 is white, $(u_\ell, s_m)$ is an inversion with respect to the node coloring when $u_\ell < s_m$ and $\ell \geq m$. The pair $(s_m, u_\ell)$ is an inversion when $s_m < u_\ell$ and $m > \ell$. \end{rem} We have now established the definitions needed for our version of Kenyon and Wilson's lemma. \begin{lemma}[analogue of Lemma 3.4 from \cite{KW2006}] \label{lemma34} If $\rho$ is a black-white pairing, \begin{equation*} \text{sign}_{\cons}({\bf N}) \text{sign}_{BW}(\rho) \prod\limits_{(b, w) \in \rho} \text{sign}(b, w) = (-1)^{\# \text{ crosses of } \rho}. \end{equation*} \end{lemma} We remark that in Kenyon and Wilson's case where all black nodes are odd and all white nodes are even, there are no consecutive nodes of the same color, so for all $(b, w) \in \rho$, $a_{b, w} = 0$ and thus $\text{sign}(b, w)= (-1)^{(|b-w|+ a_{b, w}-1)/2} = (-1)^{(|b-w| -1)/2}$. If all black nodes are odd and all white nodes are even, a black-white pairing is also an odd-even pairing, and by Lemma~\ref{lem:OEandBWsigns}, $\text{sign}_{BW}(\rho) = \text{sign}_{OE}(\rho)$. Finally, by Definition~\ref{def:nodesign}, $\text{sign}_{\cons}({\bf N}) = 1$. So in this case, Lemma~\ref{lemma34} agrees exactly with Lemma~\ref{lem:kw34}. 
Before proving Lemma~\ref{lemma34}, we will prove the following: \begin{lemma} \label{firstlemma34} There exists a planar black-white pairing $\rho$ such that \begin{equation*} \text{sign}_{BW}(\rho) \prod\limits_{(b, w) \in \rho} \text{sign}(b, w) = \text{sign}_{\cons}({\bf N}) . \end{equation*} \end{lemma} \subsubsection{Proof of Lemma \ref{firstlemma34}} We will prove Lemma~\ref{firstlemma34} by induction on $k$, where {\bf N} is a set of $2n$ nodes with $2k$ couples of consecutive nodes of the same color. The following lemma is the base case $k=1$. \begin{lemma}[Base case of Lemma~\ref{firstlemma34}] \label{lem:planarBWrho} For any node coloring such that there are exactly two couples of consecutive nodes of the same color, there is a planar black-white pairing $\rho$ such that $$\text{sign}_{BW}(\rho) \prod\limits_{(b, w) \in \rho} \text{sign}(b, w) = \text{sign}_{\cons}({\bf N}).$$ \end{lemma} \begin{proof} Let $(n_1, n_1 + 1), (n_2, n_2 + 1)$ be the list of the couples of consecutive nodes of the same color so that $n_1 < n_2$. There are two cases to consider: Either $n_1$ and $1$ are opposite colors, or $n_1$ and $1$ are the same color. If $n_1$ and $1$ are opposite colors, the pairing $\rho = ((1,2), (3, 4), \ldots, (2n-1, 2n))$ is black-white. To see this, note that since $n_1$ and $1$ are opposite colors, $n_1$ is even, so the only pairs of adjacent nodes that are both the same color are of the form $(x, x+1)$, where $x$ is even, or $(2n, 1)$. Since all pairs of $\rho$ are of the form $(i, i +1)$ where $i$ is odd and $i+1$ is even, $\rho$ is a black-white pairing. By the previous definitions we deduce that $\text{sign}_{BW}(\rho) = 1$ and $\prod\limits_{(b, w) \in \rho} \text{sign}(b, w) = 1$. Since we assumed $n_1$ and $1$ are opposite colors, either $u_1 < s_1$ (if $1$ is black) or $s_1 < u_1$ (if $1$ is white) and thus $\text{sign}_{\cons}({\bf{N}}) =1$ by Definition~\ref{def:nodesign}. Therefore the claim holds. 
If $n_1$ and $1$ are the same color, the pairing $\rho = ((2n, 1), (2, 3), \ldots, (2n-2, 2n-1))$ is black-white. The reasoning is analogous to the previous case: $n_1$ is odd, so the only pairs of adjacent nodes that are both the same color are of the form $(x, x+1)$, where $x$ is odd. In this case, $\text{sign}_{BW}(\rho) = (-1)^{n-1}$ and $\prod\limits_{(b, w) \in \rho} \text{sign}(b, w) = (-1)^{(2n-1 + a_{2n, 1} -1)/2} = (-1)^{n-1}(-1)^{a_{2n, 1}/2}$, so $$\text{sign}_{BW}(\rho) \prod\limits_{(b, w) \in \rho} \text{sign}(b, w) = (-1)^{a_{2n, 1}/2} =(-1)^{2/2} = -1.$$ Similar to the previous case, since we assumed $n_1$ and $1$ are the same color, $\text{sign}_{\cons}({\bf{N}}) = -1$, and thus the claim holds. \end{proof} \begin{defn} \label{def:invofrho} Suppose $\rho$ is a black-white pairing. Then recall that we can write \\ $\rho = ((b_1, w_1), (b_2, w_2), \ldots, (b_n, w_n))$, where $b_1 < b_2 < \cdots < b_n$. We say that $(w_i, w_j)$ is an inversion of $\rho$ if $i < j$ and $w_i > w_j$. Note that $(w_i, w_j)$ is an inversion of $\rho$ if and only if $(r(w_i), r(w_j) )$ is an inversion of $\sigma_{\rho}$ (see Definition \ref{def:signbw}). \end{defn} \begin{defn} \label{defn:invofpi} Similarly, if $\pi$ is an odd-even pairing, then we can write\\ $\pi = ((1, \pi(1)), (3, \pi(3)), \ldots, (2n-1, \pi(2n-1)))$ and we will say $(\pi(i), \pi(j))$ is an inversion of $\pi$ if $i < j$ and $\pi(i) > \pi(j)$. This is equivalent to defining an inversion of $\pi$ to be an inversion of the permutation $\begin{pmatrix} \frac{\pi(1)}{2} & \frac{\pi(3)}{2} & \cdots & \frac{\pi(2n-1)}{2} \end{pmatrix}$. \end{defn} \begin{figure} \caption{An inversion of a planar pairing $\pi$ corresponds to a nesting. Left: the pairing $((1, 2), (3, 4))$ has no inversions and its diagram has no nestings. Center: The pairing $((1,4), (3, 2))$ has one inversion and its diagram has one nesting. 
Right: The pairing $((1, 8), (3, 4), (5, 2), (7, 6))$ has four inversions and four nestings: $\{(1, 8), (3, 4 )\} \label{fig:nesting} \end{figure} \begin{rem} \label{rem:inversionsarenestings} In the special case where an odd-even pairing $\pi$ is also planar, we remark that an inversion of $\pi$ corresponds to a {\em nesting} in the diagram constructed by placing the nodes in order on a line and linking pairs in the upper half-plane. More precisely, two arcs $(a_1, b_1)$, $(a_2, b_2)$ are said to be {\em nesting} if $a_1 < a_2 < b_2 < b_1$ (see Figure~\ref{fig:nesting}). This correspondence between inversions and nestings follows immediately from the four node case, where the only planar pairings are $((1, 2), (3, 4))$ and $((1, 4), (3, 2))$. \end{rem} \begin{proof}[Proof of Lemma \ref{firstlemma34}] The proof of the lemma is technical, so we first identify a few easy cases. \\ \noindent {\bf Easy case 1.} If \begin{itemize} \item node 1 is black and $u_1 < s_1 < u_2 < s_2 < \cdots < u_k < s_k$, or \item node 1 is white and $s_1 < u_1 < s_2 < u_2 < \cdots <s_k < u_k$, \end{itemize} then as in the proof of the first case of Lemma~\ref{lem:planarBWrho}, the pairing $((1, 2), (3, 4), \ldots, (2n-1, 2n))$ is a planar black-white pairing with $\text{sign}_{BW}(\rho) = 1$, $\prod\limits_{(b, w) \in \rho} \text{sign}(b, w) = 1$, and $\text{sign}_{\cons}({\bf N}) = 1$. Thus the claim holds. \\ \noindent {\bf Easy case 2.} If \begin{itemize} \item node 1 is black and $s_1 < u_1 < s_2 < u_2 < \cdots <s_k < u_k$, or \item node 1 is white and $u_1 < s_1 < u_2 < s_2 < \cdots < u_k < s_k$, \end{itemize} then the pairing $\rho = ((2n, 1), (2, 3), \ldots, (2n-2, 2n-1))$ is black-white (as in the second case of Lemma~\ref{lem:planarBWrho}). 
In this case, \begin{itemize} \item $\text{sign}_{BW}(\rho) =(-1)^{n-1}$, and \item $\prod\limits_{(b, w) \in \rho} \text{sign}(b, w) = (-1)^{(2n-1 + a_{2n, 1} -1)/2} = (-1)^{n-1}(-1)^{a_{2n, 1}/2} = (-1)^{n-1}(-1)^{k}$, \end{itemize} so $\text{sign}_{BW}(\rho) \prod\limits_{(b, w) \in \rho} \text{sign}(b, w) = (-1)^{k} = \text{sign}_{\cons} ({\bf N})$. \\ \noindent {\bf General case.} For the general case, we proceed by induction on the number of couples of consecutive nodes of the same color. The base case is when there are two couples of consecutive nodes of the same color, which is Lemma \ref{lem:planarBWrho}. Assume the claim holds when we have a set of nodes that has $2(k-1)$ couples of consecutive nodes of the same color and let ${\bf N}$ be a set of nodes with $2k$ couples of consecutive nodes of the same color. \noindent \begin{minipage}{.75\textwidth} \hspace{10pt} Using the notation from Remark~\ref{rem:notation}, let $h$ be the smallest integer so that $n_{h-1}$ and $n_{h}$ are different colors. Then $\rho_1 = ((n_{h-1} + 1, n_{h-1} + 2), \ldots, (n_{h}-1, n_{h}))$ is a black-white pairing that contains at least one pair. \hspace{10pt} Throughout this proof, we will illustrate the main ideas with the example where ${\bf N}$ is a set of 12 nodes colored so that nodes 1, 3, 4, 5, 7, and 10 are black, as shown to the right. In this example, the couples of consecutive nodes of the same color are $(3, 4), (4, 5), (8, 9),$ and $(11, 12)$. Since $n_1 = 3$ and $n_2 = 4$ are black and $n_3 = 8$ is white, $h = 3$. So the pairing $\rho_1$ is $((5, 6), (7, 8))$. 
\end{minipage} \begin{minipage}{.23\textwidth} \begin{center} \begin{tikzpicture}[scale=.75] \draw (0,0) circle (2); \foreach \x in {1,2,...,12} { \node[shape=circle,fill=black, scale=0.5,label={{((\x-1)*360/12)+90}:\x}] (n\x) at ({((\x-1)*360/12)+90}:2) {}; }; \foreach \x in {2, 6, 8, 9, 11, 12} { \node[shape=circle,fill=white, scale=0.4] (n\x) at ({((\x-1)*360/12)+90}:2) {}; }; \end{tikzpicture} \end{center} \end{minipage} Consider ${\bf N'} = \{1, \ldots, |{\bf N}| - (n_h - n_{h-1}) \}$. Define $\psi: {\bf N} - \{n_{h-1} + 1, \ldots, n_h \} \to {\bf N'}$ by \begin{equation} \label{eqn:relabeling} \psi(\ell) = \begin{cases} \ell &\mbox{ if }\ell \leq n_{h-1} \\ \ell - (n_h - n_{h-1}) &\mbox{ if }\ell > n_{h} \\ \end{cases} \end{equation} That is, $\psi$ defines a relabeling of the nodes of ${\bf N} - \{n_{h-1} + 1, \ldots, n_h \}$ so that node 1 is labeled $1,\ldots,$ node $n_{h-1}$ is labeled $n_{h-1}$, node $n_{h} + 1$ is labeled $n_{h-1} +1,\ldots,$ node $2n$ is labeled $2n - (n_{h} - n_{h-1})$. Since ${\bf N'}$ has $2k-2$ couples of consecutive nodes of the same color, by the induction hypothesis there is a black-white planar pairing $\rho_2$ of the nodes of ${\bf N'}$ such that $$\text{sign}_{BW}(\rho_2) \prod\limits_{(b, w) \in \rho_2} \text{sign}(b, w) = \text{sign}_{\cons}({\bf N'}).$$ Let $\psi^{-1}(\rho_2)$ denote the pairing that results from applying $\psi^{-1}$ to each node in $\rho_2$. That is, $\psi^{-1}(\rho_2)$ is the pairing obtained by returning the nodes of $\rho_2$ to their original labels in ${\bf N}$. Let $\rho = \rho_1 \cup \psi^{-1} (\rho_2)$. Observe that $\rho$ is a planar black-white pairing of ${\bf N}$. In our example, the map $\psi$ defines a relabeling of ${\bf N} - \{5, 6, 7, 8\}$ so that node 9 is labeled $5,\ldots,$ node 12 is labeled 8. The node set ${\bf N'}$ has two couples of consecutive pairs of the same color. 
By Lemma \ref{lem:planarBWrho}, the pairing $\rho_2$ is $((1, 8), (3, 2), (5, 4), (7, 6))$, so the pairing $\psi^{-1}(\rho_2)$ is $((1, 12), (3, 2), (9, 4), (11, 10))$ and thus $\rho = ((1, 12), (3, 2), (5, 6), (7, 8), (9, 4), (11, 10))$, as shown in Figure~\ref{fig:lem5ex}. We will next \begin{enumerate} \item[(1)] Compare $ \prod\limits_{(b, w) \in \rho_2} \text{sign}(b, w)$ to $ \prod\limits_{(b, w) \in \rho} \text{sign}(b, w),$ \item[(2)] Compare $\text{sign}_{BW}(\rho_2)$ to $\text{sign}_{BW}(\rho)$, and \item[(3)] Compare $\text{sign}_{\cons}({\bf N'})$ to $\text{sign}_{\cons}({\bf N})$. \end{enumerate} \begin{figure} \caption{Left: The pairing $\rho_2$ of ${\bf N'}$.} \label{fig:lem5ex} \end{figure} \paragraph{(1) Comparing $ \prod\limits_{(b, w) \in \rho_2} \text{sign}(b, w)$ to $\prod\limits_{(b, w) \in \rho} \text{sign}(b, w)$.} If $(b, w)$ is a pair in $\rho$ that is a pair of $\rho_1$, then $\text{sign}(b, w) = 1$. If $(b, w)$ is a pair in $\rho$ that is a pair of $\psi^{-1} (\rho_2)$, then consider $(\psi(b), \psi(w))$ (the corresponding pair of $\rho_2$). If $b, w\leq n_{h-1}$ or $b, w\geq n_{h} + 1$, then $\text{sign}(b, w) = \text{sign}( \psi(b), \psi(w) )$ because $a_{b, w} = a_{\psi(b), \psi(w) }$. If $w\leq n_{h-1}$ and $b \geq n_{h} +1$ then \begin{eqnarray*} \text{sign}( \psi(b), \psi(w) ) = (-1)^{ ( \psi(b) - \psi(w) + a_{\psi(b), \psi(w)} -1 )/2 } & = &(-1)^{( b - (n_{h} - n_{h-1}) - w + a_{b, w} - 2 -1 )/2} \\ & = & (-1)^{ (-(n_{h} - n_{h-1}) - 2 )/2} \text{sign}(b, w) \\ & = & (-1)^{ (n_{h} - n_{h-1} + 2 )/2} \text{sign}(b, w), \end{eqnarray*} so $$\prod\limits_{(b, w)\in \rho_2 } \text{sign}(b, w) = \prod\limits_{ \substack{ (b, w) \in \rho: \\ \min(b, w) \leq n_{h-1} \text{ and} \\ \max(b, w) \geq n_{h} + 1 } } (-1)^{ (n_{h} - n_{h-1} + 2 )/2} \prod\limits_{(b, w) \in \rho} \text{sign}(b, w). $$ In the example, there are two pairs $(b, w)$ with $\min(b, w) \leq n_2$ and $\max(b, w) \geq n_3 + 1$: the pairs $(1, 12)$ and $(4, 9)$.
\paragraph{(2) Comparing $\text{sign}_{BW}(\rho_2)$ to $\text{sign}_{BW}(\rho)$.} Comparing $\text{sign}_{BW}(\rho_2)$ to $\text{sign}_{BW}(\rho)$ requires comparing the number of inversions of $\rho$ to the number of inversions of $\rho_2$ (see Definition \ref{def:invofrho}). Since $\rho_1$ contains only pairs of the form $(i, i+1)$, $\rho_1$ contains no inversions. Since the pairings under consideration are planar we can use the fact that inversions of a planar pairing correspond to nestings in the corresponding diagram (see Remark \ref{rem:inversionsarenestings}). Since there are $\frac{ n_h - n_{h-1} }{2}$ pairs in $\rho_1$, $\rho$ has $\frac{ n_h - n_{h-1} }{2}$ additional inversions compared to $\rho_2$ for each pair $(b, w)$ such that $\min(b, w) \leq n_{h-1}$ and $\max(b, w) \geq n_h + 1$. So, $$\text{sign}_{BW}(\rho_2) = \text{sign}_{BW}(\rho) \prod\limits_{ \substack{ (b, w) \in \rho: \\ \min(b, w) \leq n_{h-1} \text{ and} \\ \max(b, w) \geq n_{h} + 1 } } (-1)^{ (n_{h} - n_{h-1} )/2}.$$ In the example, since there are two pairs $(b, w)$ with $\min(b, w) \leq n_2$ and $\max(b, w) \geq n_3 + 1$ and the pairing $\rho_1$ consists of two pairs, there are four more inversions in $\rho$ than in $\rho_2$. \paragraph{(3) Comparing $\text{sign}_{\cons}({\bf N'})$ to $\text{sign}_{\cons}({\bf N})$.} We will show that $$\text{sign}_{\cons}({\bf N'}) = (-1)^{h-1} \text{sign}_{\cons}({\bf N})$$ by comparing the number of inversions with respect to the node coloring of ${\bf N}$ to the number of inversions with respect to the node coloring of ${\bf N'}$ (see Definition \ref{defn:inversioninnodecolors}). In particular, we show that there are $(h-2) + (h-1)$ inversions in ${\bf N}$ that do not have a counterpart in ${\bf N'}$ and that there are $h-2$ inversions in ${\bf N'}$ that do not have a counterpart in ${\bf N}$. 
Recall the notation from Remark~\ref{rem:notation}: $s_i$ is the first in a couple of consecutive black nodes in ${\bf N}$ and $u_i$ is the first in a couple of consecutive white nodes in ${\bf N}$. Define $s'_i$ and $u'_i$ analogously for ${\bf N'}$. First assume node 1 is black and that we have $$s_1 < \cdots < s_{h-1} < u_1 < \cdots.$$ \noindent {\em Inversions with respect to the node coloring of ${\bf N}$.} By Remark \ref{rem:inversioninnodecolors}, there are two types of inversions with respect to the node coloring of {\bf N}. \begin{enumerate} \item[(1)] Nodes $x$ and $y$ in ${\bf N}$ such that $x < y$, $x= s_a$, $y = u_b$, and $a \geq b$. \item[(2)] Nodes $x$ and $y$ in ${\bf N}$ such that $x < y$, $x = u_a$, $y = s_b$, and $a > b$. \end{enumerate} Considering the first type of inversion, there are several cases: \begin{enumerate} \item[(a)] If $a \leq h-2$ and $b > 1$, then $\psi(x) = s'_a$ and $\psi(y) = u'_{b-1}$. Since $a \geq b$, $a \geq b-1$, so in this case there is a corresponding inversion in ${\bf N'}$. \item[(b)] If $a \leq h-2$ and $b = 1$, then $y \notin {\bf N} - \{n_{h-1} + 1, \ldots, n_h \}$, so in this case there is not a corresponding inversion in ${\bf N'}$. \item[(c)] If $a = h-1$ and $b \leq h-1$, then $x \notin {\bf N} - \{n_{h-1} + 1, \ldots, n_h \}$, so in this case there is not a corresponding inversion in ${\bf N'}$. \item[(d)] If $a > h-1$ and $b \leq a$, then $b > 1$ (since $u_1 < s_a$). In this case, $\psi(x) = s'_{a-1}$ and $\psi(y) = u'_{b-1}$, so there is a corresponding inversion in ${\bf N'}$. \end{enumerate} Note that (b) gives $h-2$ inversions in ${\bf N}$ that are not in ${\bf N'}$ and (c) gives $h-1$ inversions in ${\bf N}$ that are not in ${\bf N'}$. Considering the second type of inversion, since $s_{h-1} < u_1$ we must have $a > h$. In this case, $\psi(x) = u'_{a-1}$ and $\psi(y) = s'_{b-1}$, so there is a corresponding inversion in ${\bf N'}$. 
In the example, the pairs $(s_1, u_1), (s_2, u_1)$, and $(s_2, u_2)$ are inversions with respect to the node coloring of ${\bf N}$. Since $h = 3$, the inversion $(s_1, u_1)$ is in case (b) of the first type and the inversions $(s_2, u_1)$ and $(s_2, u_2)$ are in case (c) of the first type. So in this example, all of the inversions with respect to the node coloring of ${\bf N}$ do not have corresponding inversions in ${\bf N'}$. \\ \noindent {\em Inversions with respect to the node coloring of ${\bf N'}$.} Similarly, there are two types of inversions in ${\bf N'}$. \begin{enumerate} \item[(1)] Nodes $w$ and $z$ in ${\bf N'}$ such that $w < z$, $w = s'_a$, $z = u'_b$, and $a \geq b$. \item[(2)] Nodes $w$ and $z$ in ${\bf N'}$ such that $w < z$, $w = u'_a$, $z = s'_b$, and $a > b$. \end{enumerate} Considering the first type of inversion, there are two cases: \begin{enumerate} \item[(a)] If $a \leq h-2$, then $\psi^{-1}(w)= s_a$ and $\psi^{-1}(z) = u_{b+1}$. \begin{enumerate} \item[(i)] If $a \geq b+1$ then there is a corresponding inversion in {\bf N}. \item[(ii)] If $a = b$ there is not a corresponding inversion in {\bf N}. \end{enumerate} \item[(b)] If $a \geq h-1$, then $\psi^{-1}(w) = s_{a+1}$ and $\psi^{-1}(z) = u_{b+1}$, so there is a corresponding inversion in ${\bf N}$. \end{enumerate} We see that case (a)(ii) gives $h-2$ inversions in ${\bf N}'$ that are not in ${\bf N}$. Considering the second type of inversion, since $s'_{h-2} < u'_1$ the only possibility is that $a > h-1$. In this case, $\psi^{-1}(w) = u_{a+1}$ and $\psi^{-1}(z) = s_{b+1}$, so there is a corresponding inversion in ${\bf N}$. In the example, the only inversion with respect to the node coloring of ${\bf N'}$ is $(s'_1, u'_1)$, which is an example of case (a)(ii), so there is not a corresponding inversion in ${\bf N}$. 
We conclude that in the case where node 1 is black and we have $s_1 < \cdots < s_{h-1} < u_1 < \cdots,$ the equation $\text{sign}_{\cons}({\bf N'}) = (-1)^{h-1} \text{sign}_{\cons}({\bf N})$ holds. Combining this with \begin{itemize} \item $\text{sign}_{BW}(\rho_2) \prod\limits_{(b, w) \in \rho_2} \text{sign}(b, w) = \text{sign}_{\cons}({\bf N'}),$ \item $\prod\limits_{(b, w) \in \rho_2 } \text{sign}(b, w) = \prod\limits_{ \substack{ (b, w)\in \rho: \\ \min(b, w)\leq n_{h-1} \text{ and} \\ \max(b, w) \geq n_{h} + 1 } } (-1)^{ (n_{h} - n_{h-1} + 2 )/2} \prod\limits_{(b, w) \in \rho} \text{sign}(b, w)$, and \item $\text{sign}_{BW}(\rho_2) = \text{sign}_{BW}(\rho) \prod\limits_{ \substack{ (b, w) \in \rho: \\ \min(b, w) \leq n_{h-1} \text{ and} \\ \max(b, w) \geq n_{h} + 1 } } (-1)^{ (n_{h} - n_{h-1} )/2}$, \end{itemize} we have $$ \text{sign}_{BW}(\rho) \prod\limits_{(b, w) \in \rho} \text{sign}(b, w) = \text{sign}_{\cons}({\bf N}) \cdot (-1)^{h-1} \cdot \prod\limits_{ \substack{ (b, w) \in \rho: \\ \min(b, w) \leq n_{h-1} \text{ and} \\ \max(b, w) \geq n_{h} + 1 } } (-1). $$ So it remains to observe that the number of pairs $(b, w) \in \rho$ such that $\min(b, w) \leq s_{h-1}$ and $\max(b, w) \geq u_1 + 1$ has the same parity as $h-1$. There are exactly $h-1$ more black nodes than white nodes in the interval $[1, \ldots, s_{h-1}]$ because there are $h-1$ black nodes that are not followed by a white node in this interval. So there are $h-1$ black nodes that must all be paired with a white node with label $\geq u_1 + 1$. Therefore there are at least $h-1$ pairs $(b, w) \in \rho$ such that $\min(b, w) \leq s_{h-1}$ and $\max(b, w) \geq u_1 + 1$. There may be more than $h-1$ such pairs, but there must be $h-1+ 2m$ pairs for some $m \geq 0$. 
There are three other cases: when node 1 is white and we have $s_1 < \cdots < s_{h-1} < u_1 < \cdots$, when node 1 is black and we have $u_1 < \cdots < u_{h-1} < s_1 < \cdots$, and when node 1 is white and we have $u_1 < \cdots < u_{h-1} < s_1 < \cdots$. These are omitted because the analyses are nearly identical to the case we just considered. \end{proof} \subsubsection{Proof of Lemma~\ref{lemma34}} Recall that we want to show that if $\rho$ is a black-white pairing on a graph $G$ with node set ${\bf N}$, \begin{equation} \label{eqn0:lemma34} \text{sign}_{\cons}({\bf N}) \text{sign}_{BW}(\rho) \prod\limits_{(b, w) \in \rho} \text{sign}(b, w) = (-1)^{\# \text{ crosses of } \rho}. \end{equation} By Lemma \ref{firstlemma34} there is a black-white planar pairing $\rho$ such that $$\text{sign}_{BW}(\rho) \prod\limits_{(b, w) \in \rho} \text{sign}(b, w) =\text{sign}_{\cons} ({\bf N}).$$ Since $\rho$ is planar, $(-1)^{\# \text{crosses of } \rho} = 1$, so equation (\ref{eqn0:lemma34}) holds. \\ To prove equation (\ref{eqn0:lemma34}) holds for all black-white pairings we consider ways we can modify black-white pairings to obtain new black-white pairings and determine how these modifications affect equation (\ref{eqn0:lemma34}). \begin{defn} Let $\sigma$ be a (not necessarily black-white) pairing on $\{1,\ldots,2n\}$ and let $x$ and $y$ be two nodes such that $x$ is not paired with $y$. When we {\em swap the locations of $x$ and $y$ in $\sigma$} we create a new pairing $\sigma'$ that is identical to $\sigma$ except that it contains the pairs $(x, \sigma(y))$ and $(y, \sigma(x))$ rather than $(x, \sigma(x))$ and $(y, \sigma(y))$. \end{defn} \begin{example} Suppose $\sigma$ is the pairing $((1, 3), (2, 4), (5, 6))$. If we swap the locations of $3$ and $4$ in $\sigma$ we obtain the pairing $\sigma' = ((1, 4), (2, 3), (5, 6))$. 
\end{example} \begin{rem} \label{rem:swappingandparity} If $\rho$ is a black-white pairing and $\rho'$ is obtained from $\rho$ by swapping the locations of two nodes of the same color, $\text{sign}_{BW}(\rho') = -\text{sign}_{BW}(\rho)$. \end{rem} Now we observe that we can obtain any black-white pairing on ${\bf N}$ from a given black-white pairing $\rho$ using the following types of swaps: \begin{enumerate} \item[(1)] Swapping the locations of $u$ and $u+1$ in $\rho$, where $(u, u+1)$ is a couple of consecutive white nodes. \item[(2)] Swapping the locations of $x$ and $y$ in $\rho$, where $x < y$ are white nodes and all $\ell$ nodes appearing between $x$ and $y$ are black, where $\ell \geq 1$. \end{enumerate} To see that these swaps are sufficient, let $w_1, \ldots, w_n$ be the white nodes in increasing order. The swaps described are the adjacent transpositions $(w_1, w_2), (w_2, w_3), \ldots, (w_{n-1}, w_{n})$. We will show that equation (\ref{eqn0:lemma34}) holds after applying each type of swap. This requires a few additional lemmas. Note that the proofs of Lemmas \ref{swapu} through \ref{lem:swapxy2} follow immediately from Definition \ref{def:signpair}. \begin{lemma} \label{swapu} Let $b$ be a black node and let $(u, u+1)$ be a couple of consecutive white nodes. Then $\text{sign}(b, u) = - \text{sign}(b, u+1)$. \end{lemma} \begin{proof} If $b < u$, then $a_{b, u+1} = a_{b, u} + 1$. So \[ \text{sign}(b, u) = (-1)^{(u - b + a_{b, u} - 1)/2} =- (-1)^{(u +1- b + a_{b, u+1} - 1)/2} = - \text{sign}(b, u+1) . \] If $b > u+1$, then $a_{b, u+1} = a_{b, u} - 1$. So \[ \text{sign}(b, u) = (-1)^{(b-u + a_{b, u} - 1)/2} =- (-1)^{(b-(u+1) + a_{b, u+1} - 1)/2} =- \text{sign}(b, u+1) . \] \end{proof} \begin{lemma} \label{lem:swapxy} Assume the nodes $x$ and $y$ with $x < y$ are white and all $\ell$ nodes between $x$ and $y$ are black, where $\ell \geq 1$. 
If $b$ is a black node not in the interval $[x + 1, \ldots, y-1]$, then $\text{sign}(b, x) = (-1)^{\ell} \text{sign}(b, y)$. \end{lemma} \begin{proof} If $b < x$, then $a_{b, y} = a_{b, x} + \ell - 1$. Then since $y = x + \ell + 1$, \begin{eqnarray*} \text{sign}(b, x) = (-1)^{ (x - b + a_{b, x} - 1)/2 } = (-1)^{ (y - (\ell + 1) - b + a_{b, y} - \ell + 1 - 1)/2 } & = & (-1)^{\ell} (-1)^{ (y - b + a_{b, y} - 1)/2 } \\ & = & (-1)^{\ell} \text{sign}(b, y). \end{eqnarray*} If $b > y$, then $a_{b, y} = a_{b, x} - (\ell - 1)$. Then \begin{eqnarray*} \text{sign}(b, x) = (-1)^{ (b-x + a_{b, x} - 1)/2 } = (-1)^{ (b - (y - (\ell + 1) ) + a_{b, y} + (\ell - 1) - 1)/2 } & = & (-1)^{\ell} (-1)^{ (b-y + a_{b, y} - 1)/2 } \\ & = & (-1)^{\ell} \text{sign}(b, y). \end{eqnarray*} \end{proof} \begin{lemma} \label{lem:swapxy2} Assume the nodes $x$ and $y$ with $x < y$ are white and all $\ell$ nodes between $x$ and $y$ are black, where $\ell \geq 1$. If $b$ is a black node in the interval $[x + 1, \ldots, y-1]$, so $b = x+j$ for some $j \leq \ell$, then $\text{sign}(b, x) = (-1)^{\ell-1} \text{sign}(b, y)$. \end{lemma} \begin{proof} Since $b = x+j$ and $a_{b, x} = j-1$, we see that \[ \text{sign}(b, x) = (-1)^{ (b-x + a_{b, x} - 1)/2 } = (-1)^{ (j + j-1 - 1)/2 } = (-1)^{j-1}. \] Using the fact that $y - b = \ell + 1 - j$ and $a_{b, y} = \ell - j$, we have \[ \text{sign}(b, y) = (-1)^{ (y - b + a_{b, y} - 1)/2 } = (-1)^{ (\ell + 1 - j + \ell - j - 1)/2} = (-1)^{\ell - j}. \] So $\text{sign}(b, x) = (-1)^{\ell-1} \text{sign}(b, y)$. \end{proof} \begin{rem} \label{crossingremark} The symmetric group $S_{2n}$ acts on the set of pairings on $\{1, \ldots, 2n\}$: the transposition $(i,i+1)$ acts on a pairing $\rho$ by swapping the locations of $i$ and $i+1$. If $i$ is paired with $i+1$, acting with $(i,i+1)$ leaves the pairing fixed; otherwise, $(i,i+1)$ acts nontrivially and changes the parity of the number of crossings. 
Let $\rho$ be a (not necessarily black-white) pairing on $\{1,\ldots,2n\}$. Let $x$ and $y$ be two nodes such that $x <y$. Assume no node in the interval $[x, y]$ is paired with any other node in this interval. Then $$(x, y) \rho = (x, x+1) \cdots (y-1, y) \cdots (x+1, x+2) (x, x+1) \rho$$ where each transposition of the form $(i, i+1)$ acts nontrivially. \end{rem} \begin{lemma} \label{crossinglemma3} Let $\rho$ be a (not necessarily black-white) pairing on $\{1,\ldots,2n\}$. Let $x$ and $y$ be two nodes such that $x <y$ and $x$ is not paired with $y$. Assume that no node in the interval $[x+1, \ldots, y-1]$ is paired with any other node in this interval. Then when the locations of $x$ and $y$ in $\rho$ are swapped, \begin{itemize} \item[(1)] if $x$ and $y$ were both paired with nodes in the interval $[x+1, \ldots, y-1]$, the number of crossings of $\rho$ changes parity, \item[(2)] if exactly one of $x$ and $y$ was paired with a node in the interval $[x+1, \ldots, y-1]$, then the number of crossings of $\rho$ does not change parity, and \item[(3)] if neither $x$ nor $y$ was paired with a node in the interval $[x+1, \ldots, y-1]$ then the number of crossings of $\rho$ changes parity. \end{itemize} \end{lemma} \begin{proof} Let $\rho$ be a pairing on $\{1,\ldots,2n\}$ and consider $(x, y) \rho$. There are several cases. The strategy is to factor $(x, y)$ into adjacent transpositions and determine which transpositions act nontrivially. \\ \noindent {\bf Case 1.} If the nodes $\rho(x)$ and $\rho(y)$ are both in the interval $[x+1, \ldots, y-1]$, then $(x, y) \rho = (\rho(x), \rho(y)) \rho$. Let $a = \min(\rho(x), \rho(y))$ and let $b = \max(\rho(x), \rho(y))$. Then $$(\rho(x), \rho(y)) \rho = (a, a+1) \cdots (b-1, b)\cdots (a+1, a+2) (a, a+1) \rho$$ We have written $(\rho(x), \rho(y))$ as a product of an odd number of transpositions of the form $(i, i+1)$. 
Since no node in the interval $[a, \ldots, b]$ is paired with any other node in this interval, all these transpositions act nontrivially by Remark \ref{crossingremark}. Thus the parity of the number of crossings changes. \\ \noindent {\bf Case 2.} If exactly one of the nodes $\rho(x)$ or $\rho(y)$ is in the interval $[x+1, \ldots, y-1]$, then $$(x, y) \rho = (x, x+1) \cdots (y-1, y)\cdots (x+1, x+2) (x, x+1) \rho$$ and exactly one of these transpositions acts trivially. For if $x$ is paired with $x+k$, then after applying the transposition $(x, x+1)$ to $\rho$, $x+1$ and $x+k$ are paired. Similarly, after applying the transposition $(x+1, x+2)$ to $(x, x+1) \rho$, $x+2$ and $x+k$ are paired. It follows that the transposition $(x+k-1, x+k)$ acts trivially because when we reach this transposition, $x+k-1$ and $x+k$ are paired. Then, the transposition $(x+k-1, x+k-2)$ acts nontrivially and similarly we see that the remaining transpositions act nontrivially. Since an even number of transpositions of the form $(i, i+1)$ act nontrivially, the parity of the number of crossings does not change. \ \\ \noindent {\bf Case 3.} If neither of the nodes $\rho(x)$ and $\rho(y)$ are in the interval $[x+1, \ldots, y-1]$, then $$(x, y) \rho = (x, x+1) \cdots (y-1, y)\cdots (x+1, x+2) (x, x+1) \rho$$ so we have written $(x, y)$ as a product of an odd number of transpositions of the form $(i, i+1)$. Since no node in the interval $[x, y]$ is paired with any other node in this interval, all of these transpositions act nontrivially by Remark \ref{crossingremark}. Thus the parity of the number of crossings changes. \\ \end{proof} Now that we have established Lemmas \ref{swapu} through \ref{crossinglemma3} we can show that equation (\ref{eqn0:lemma34}) holds after applying both types of swaps to $\rho$. By Remark \ref{rem:swappingandparity}, each swap changes $\text{sign}_{BW}(\rho)$. 
\\ \noindent (1) {\bf Swapping the locations of $u$ and $u+1$.} \\ Let $b_1$ be the node paired with $u$ and let $b_2$ be the node paired with $u+1$. By Lemma \ref{swapu}, $\text{sign}(b_1, u) = -\text{sign}(b_1, u+1)$ and $\text{sign}(b_2, u+1) = -\text{sign}(b_2, u)$. So when we swap the locations of $u$ and $u+1$, $\prod\limits_{(b, w) \in \rho} \text{sign}(b, w)$ does not change. Since $\text{sign}_{BW}(\rho)$ changes, the sign of the LHS of (\ref{eqn0:lemma34}) changes. Swapping $u$ and $u+1$ changes $(-1)^{\# \text{ crosses of } \rho}$, so swapping the locations of $u$ and $u+1$ does not affect equation (\ref{eqn0:lemma34}). \\ \noindent (2) {\bf Swapping the locations of $x$ and $y$, where $x < y$ are white nodes and all $\ell$ nodes between $x$ and $y$ are black.} \\ \noindent {\bf Case 1.} If $x$ and $y$ are both paired with black nodes in the interval $[x+1, x+2, \ldots, y-1]$, then $(-1)^{\# \text{ crosses of } \rho}$ changes sign by Lemma \ref{crossinglemma3}. By Lemma \ref{lem:swapxy2}, $$\text{sign}(\rho(x), x) \text{sign}(\rho(y), y) = ( (-1)^{\ell -1 })^2 \text{sign}(\rho(x), y) \text{sign}(\rho(y), x)$$ so $\prod\limits_{(b, w) \in \rho} \text{sign}(b, w)$ does not change. Since $\text{sign}_{BW}(\rho)$ changes, the sign of the LHS of (\ref{eqn0:lemma34}) changes.\\ \noindent {\bf Case 2.} If exactly one of $x$ and $y$ is paired with a black node in the interval $[x+1, x+2, \ldots, y-1]$, then $(-1)^{\# \text{ crosses of } \rho}$ does not change sign by Lemma \ref{crossinglemma3}. By Lemmas \ref{lem:swapxy} and \ref{lem:swapxy2}, $$\text{sign}(\rho(x), x) \text{sign}(\rho(y), y) = (-1)^{\ell - 1}(-1)^{\ell} \text{sign}(\rho(x), y) \text{sign}(\rho(y), x)$$ so $\prod\limits_{(b, w) \in \rho} \text{sign}(b, w)$ changes. Since $\text{sign}_{BW}(\rho)$ changes, the sign of the LHS of (\ref{eqn0:lemma34}) does not change sign. 
\\ \noindent {\bf Case 3.} If neither $x$ nor $y$ is paired with a black node in the interval $[x+1, x+2, \ldots, y-1]$, then $(-1)^{\# \text{ crosses of } \rho}$ changes sign. By Lemma \ref{lem:swapxy}, $$\text{sign}(\rho(x), x) \text{sign}(\rho(y), y) = ( (-1)^{\ell })^2 \text{sign}(\rho(x), y) \text{sign}(\rho(y), x)$$ so $\prod\limits_{(b, w) \in \rho} \text{sign}(b, w)$ does not change. Since $\text{sign}_{BW}(\rho)$ changes, the sign of the LHS of (\ref{eqn0:lemma34}) changes.\\ This completes the proof of Lemma~\ref{lemma34}. We conclude Section~\ref{sec:lem34} by proving Lemma~\ref{lem:OEandBWsigns}, which states that when a black-white pairing $\rho$ is also odd-even, $\text{sign}_{OE}(\rho) = \text{sign}_{BW}(\rho)$. \subsubsection{Proof of Lemma~\ref{lem:OEandBWsigns}} \label{sec:OEandBW} Before we prove Lemma~\ref{lem:OEandBWsigns}, we prove the lemma in the case where $\rho$ is planar. \begin{lemma} \label{lem:OEandBWsignsold} When $\rho$ is a planar black-white pairing, $$\text{sign}_{OE}(\rho) = \text{sign}_{BW}(\rho)$$ \end{lemma} \begin{proof} Let $\rho$ be a planar black-white pairing. Recall from Definition \ref{def:invofrho} that all black-white pairings can be written $\rho = ((b_1, \rho(b_1)), (b_2, \rho(b_2)), \ldots, (b_n, \rho(b_n)))$, where $b_1 < b_2 < \cdots < b_n$, and we say that $(\rho(b_i), \rho(b_j))$ is an inversion of $\rho$ if $i < j$ and $\rho(b_i) > \rho(b_j)$. All planar pairings are odd-even, and recall from Definition~\ref{defn:invofpi} that if\\ $\rho = ((1, \rho(1)), (3, \rho(3)), \ldots, (2n-1, \rho(2n-1)))$ is an odd-even pairing, we say $(\rho(i), \rho(j))$ is an inversion if $i < j$ and $\rho(i) > \rho(j)$. We will show that there is a one-to-one correspondence between inversions of $\rho$ when it is considered as a black-white pairing (which we will call black-white inversions) and inversions of $\rho$ when it is considered as an odd-even pairing (which we will call odd-even inversions). 
Consider a black-white inversion, that is, some $b_i < b_j$ such that $\rho(b_i) > \rho(b_j)$. There are several cases to consider: \\ \noindent {\bf Case 1.} $b_i, b_j$ are both odd. \\ In this case, $b_i = 2k-1$ and $b_j = 2 \ell -1$ for some $k < \ell$, so $(\rho(b_i), \rho(b_j))$ is an odd-even inversion. \\ \noindent {\bf Case 2.} $b_i, b_j$ are both even. \\ Since $b_i < b_j$ and $\rho(b_i) > \rho(b_j)$, $(b_j, b_i)$ is an odd-even inversion. \\ \noindent {\bf Case 3.} $b_i$ is odd and $b_j$ is even.\\ There are two subcases to consider. If $\rho(b_j) < b_i$, then it must be the case that $b_j > \rho(b_i)$. To see this, observe that if $b_j < \rho(b_i)$, then $\rho(b_j) < b_i < b_j < \rho(b_i)$, but then we have a crossing, which contradicts the planarity of $\rho$. So $(b_j, \rho(b_i))$ is an odd-even inversion. If $\rho(b_j) > b_i$, then $\rho(b_i) > b_j$ (otherwise $b_i < \rho(b_j) < \rho(b_i) < b_j$, so $\rho$ has a crossing). So $(\rho(b_i), b_j)$ is an odd-even inversion. \\ \noindent {\bf Case 4.} $b_i$ is even and $b_j$ is odd.\\ If $\rho(b_i) > b_j$, then $\rho(b_j) > b_i$ (otherwise $\rho(b_j) < b_i < b_j < \rho(b_i)$ is a crossing), so $(\rho(b_j), b_i)$ is an odd-even inversion. If $\rho(b_i) < b_j$, then $b_i > \rho(b_j)$ (otherwise $b_i < \rho(b_j) < \rho(b_i) < b_j$ is a crossing), so $(b_i, \rho(b_j))$ is an odd-even inversion. \\ A similar argument shows that for each odd-even inversion, there is a black-white inversion. Since there is a one-to-one correspondence between odd-even inversions and black-white inversions, $\text{sign}_{OE}(\rho) = \text{sign}_{BW}(\rho)$. 
\end{proof} \noindent {\bf Lemma~\ref{lem:OEandBWsigns}.} When $\rho$ is a black-white pairing that is also odd-even, $$\text{sign}_{OE}(\rho) = \text{sign}_{BW}(\rho).$$ \begin{proof} One can get from an odd-even black-white pairing $\rho_1$ to any other odd-even black-white pairing $\rho_2$ by applying a series of moves, where each move swaps the locations of two nodes of the same color and parity. Since each of these moves changes $\text{sign}_{OE}$ and $\text{sign}_{BW}$, the claim follows from Lemma \ref{lem:OEandBWsignsold}. \end{proof} \subsection{Lemmas 3.1 and 3.2 from Kenyon and Wilson} \label{sec:lem31} Throughout this section, $S$ denotes a balanced subset of nodes (a subset containing an equal number of black and white nodes). In \cite{KW2006}, Kenyon and Wilson show that $Z^{D}(G \setminus S) Z^{D}(G \setminus S^{c})$ is a sum of double-dimer partition functions $Z^{DD}_{\pi}(G, {\bf N})$, where the sum is over all pairings $\pi$ that do not connect nodes in $S$ to nodes in $S^c$. \begin{lemma}\cite[Lemma 3.1]{KW2006} \label{lem3.1} If $S$ is a balanced subset of nodes then $Z^{D}(G \setminus S) Z^{D}(G \setminus S^{c})$ is a sum of double-dimer configurations for all connection topologies $\pi$ for which $\pi$ connects no element of $S$ to an element of $S^{c}:={\bf N} \setminus S$. That is, $$Z^{D}(G \setminus S) Z^{D}(G \setminus S^{c}) = Z^{DD}(G) \sum\limits_{\pi} M_{S, \pi} \Pr (\pi),$$ where $M_{S, \pi}$ is 0 or 1 according to whether $\pi$ connects nodes in $S$ to $S^{c}$ or not. \end{lemma} This lemma relates the quantity $Z^{D}(G \setminus S) Z^{D}(G \setminus S^{c})$ to $\Pr(\pi)$. Next, Kenyon and Wilson show that $\dfrac{Z^{D}(G \setminus S) Z^{D}(G \setminus S^{c})}{(Z^{D}(G))^2}$ is a determinant in the quantities $X_{i, j}$. \begin{lemma}\cite[Lemma 3.2]{KW2006} \label{lem:kwlem32} Let $S$ be a balanced subset of $\{1, \ldots, 2n\}$. 
Then $$\dfrac{Z^{D}(G \setminus S) Z^{D}(G \setminus S^{c})}{(Z^{D}(G))^{2}} = \det[(1_{i, j \in S} + 1_{i, j \notin S}) \times (-1)^{(|i - j| -1)/2} X_{i, j} ]^{i = 1, 3, \ldots, 2n-1}_{j = 2, 4, \ldots, 2n}.$$ \end{lemma} The combination of these results shows that $\widehat{\Pr}(\pi)$ is a homogeneous polynomial in the $X_{i, j}$, since the matrix $M$ from \cite[Lemma 3.1]{KW2006} has full rank \cite[Lemma 3.3]{KW2006}. Our analogues of these lemmas have several differences (such as the additional global signs in our version of Lemma~\ref{lem:kwlem32}, see Lemma~\ref{lemma32gen}), but our proofs are quite similar to their proofs. We begin with Lemma~\ref{lem3.1}. For a graph $G$ with node set ${\bf N}$ that does not necessarily have the property that all nodes are black and odd or white and even, a statement very similar to Lemma \ref{lem3.1} holds. For the remainder of this section, we let $T \subseteq {\bf N}$ be the set of nodes that are odd and white or even and black. Since ${\bf N}$ is assumed to have an equal number of black and white nodes, $|T|$ is even. Let $\widetilde{G}$ be $G$ with an extra vertex and edge with weight 1 added to each node in $T$, so all of the nodes in $\widetilde{G}$ are black and odd or white and even. We note that $Z^{D}(\widetilde{G} \setminus S) = Z^{D}(G \setminus (S \triangle T) ),$ where $S \triangle T$ denotes the symmetric difference of the sets $S$ and $T$. For example, if ${\bf N}$ is a set of 12 nodes colored so that nodes 1, 3, 4, 5, 7, and 10 are black (see the proof of Lemma~\ref{firstlemma34}) then $T = \{4, 9, 10, 11\}$. If $S =\{2, 3, 9, 10\}$, then $S \triangle T = \{2, 3, 4, 11\}$. Lemma \ref{lem3.1} implies the following. \begin{cor} Let $S$ be a balanced subset of nodes. $Z^{D}(G \setminus (S \triangle T)) Z^{D}(G \setminus (S \triangle T)^{c})$ is a sum of double-dimer configurations for all connection topologies $\pi$ for which $\pi$ connects no element of $S$ to an element of $S^{c}$. 
That is, $$Z^{D}(G \setminus (S \triangle T)) Z^{D}(G \setminus (S \triangle T)^{c}) = Z^{DD}(G) \sum\limits_{\pi} M_{S, \pi} \Pr (\pi),$$ where $M_{S, \pi}$ is 0 or 1 according to whether $\pi$ connects nodes in $S$ to $S^{c}$ or not. \end{cor} If $V = S \triangle T$, then $S = V\triangle T$, so we have: \begin{cor} \label{corlem31} Let $V$ be a balanced subset of nodes. $Z^{D}(G \setminus V) Z^{D}(G \setminus V^{c})$ is a sum of all connection topologies $\pi$ for which $\pi$ connects no elements of $V \triangle T$ to $(V \triangle T)^c$. That is, $$Z^{D}(G \setminus V) Z^{D}(G \setminus V^{c}) = Z^{DD}(G) \sum\limits_{\pi} M_{V \triangle T, \pi} \Pr (\pi),$$ where $M_{V \triangle T, \pi}$ is 0 or 1 depending on whether $\pi$ connects nodes in $V \triangle T$ to $(V \triangle T)^{c}$. \end{cor} Corollary \ref{corlem31} is the version of Lemma \ref{lem3.1} that we will need to prove Theorem \ref{thm:thm1}. Our version of \cite[Lemma 3.2]{KW2006} is the following. \begin{lemma} \label{lemma32gen} Let $S$ be a balanced subset of ${\bf N} = \{1, \ldots, 2n\}$. Then \small \begin{equation} \label{lem32} \dfrac{Z^{D}(G \setminus S) Z^{D}(G \setminus S^{c})}{(Z^{D}(G))^{2}} = \text{sign}_{\cons}({\bf N}) \text{sign}(S) \det \left[(1_{i, j \in S} + 1_{i, j \notin S}) \times \text{sign}(i, j) Y_{i, j} \right]^{i = b_1, b_2, \ldots, b_n}_{j = w_1, w_2, \ldots, w_n} \end{equation} \normalsize where $b_1, b_2, \ldots, b_n$ are the black nodes of $\{1, 2, \ldots, 2n\}$ listed in ascending order, $w_1, w_2, \ldots, w_n$ are the white nodes of $\{1, 2, \ldots, 2n\}$ listed in ascending order, $\text{sign}(i, j)$ is defined in Definition \ref{def:signpair}, $Y_{i, j} =\dfrac{Z^D(G_{i, j})}{Z^D(G)}$, and \[\text{sign}(S) = (-1)^{\# \text{ crosses of $\rho$} }, \] where $\rho$ is a black-white pairing that does not connect\footnotemark~$S$ to $S^c$ and is planar when restricted to $S$ and planar when restricted to $S^c$. 
\end{lemma} \footnotetext{The statement ``$\rho$ does not connect $S$ to $S^c$'' is an abbreviation for ``$\rho$ does not connect nodes in $S$ to nodes in $S^c$''.} \begin{rem} The fact that such a pairing $\rho$ always exists is a consequence of Lemma \ref{firstlemma34}, which states that for any node coloring there is a planar black-white pairing $\rho$ satisfying \\ $\text{sign}_{BW}(\rho) \prod\limits_{(b, w) \in \rho} \text{sign}(b, w) = \text{sign}_{\cons}({\bf N})$. Since $S$ is balanced, the existence of a planar black-white pairing of $S$ and a planar black-white pairing of $S^c$ follows. \end{rem} The proof of Lemma \ref{lemma32gen} requires some Kasteleyn theory. The reader familiar with basic facts about Kasteleyn matrices can skip the following section. \subsubsection{Kasteleyn matrices} Recall that $G = (V_1, V_2, E)$ is a finite edge-weighted bipartite planar graph embedded in the plane. Let $\omega((i, j))$ denote the weight of an edge $(i, j) \in E$. \begin{defn} A Kasteleyn (or flat) weighting of $G$ is a choice of sign for each edge with the property that each face with 0 mod 4 edges has an odd number of $-$ signs and each face with 2 mod 4 edges has an even number of $-$ signs. \end{defn} For the remainder of this section we will let $\sigma: E \to \pm 1$ denote the Kasteleyn weighting of $G$. A Kasteleyn matrix of $G$ is a weighted, signed bipartite adjacency matrix of $G$. More precisely, define a $|V_1| \times |V_2|$ matrix $K$ by $$ K_{i, j} = \begin{cases} \sigma( (i, j) ) \omega( (i, j) ) & \mbox{ if } (i, j) \in E \\ 0 & \mbox{ otherwise} \end{cases} $$ Kasteleyn showed that every bipartite planar graph with an even number of vertices has a Kasteleyn matrix \cite{Kas67}. Furthermore, if $|V_1| = |V_2|$ then $|\det K |$ is the weighted sum of all dimer configurations of $G$. The proof of Lemma~\ref{lemma32gen} uses a few straightforward facts about Kasteleyn weightings. 
First, we will show that if $G = (V_1, V_2, E)$ has a Kasteleyn weighting $\sigma$, and we add edges to $G$ to obtain $G'$, we can choose weights for the added edges to obtain a Kasteleyn weighting $\sigma'$ of $G'$ with the property that $\sigma'(e) = \sigma(e)$ for all $e \in E$. \begin{lemma} \label{kasteleynlemma1} Let $b$ and $w$ be two vertices of opposite color on a face $F$ of $G = (V_1, V_2, E)$. Let $E' = E \cup \{ \tilde{e} \}$, where $\tilde{e} \notin E$ is an edge connecting $b$ and $w$ that separates $F$ into two faces and let $G' = (V_1, V_2, E')$. Define $\sigma': E \cup \{\tilde{e}\} \to \pm 1$ so that $\sigma'(e) = \sigma(e)$ for all $e \in E$ and choose $\sigma'(\tilde{e})$ so that one of the faces bounded by $\tilde{e}$ is flat (i.e., it has an odd number of $-$ signs if it has 0 mod 4 edges, and an even number of $-$ signs otherwise). Then $\sigma'$ is a Kasteleyn weighting of $G'$. \end{lemma} \begin{proof} By assumption, the edge $\tilde{e}$ separates $F$ into two faces: the face consisting of the edges of a path $\mathcal{Q}$ and the edge $\tilde{e}$, and the face consisting of the edges of a path $\mathcal{P}$ and the edge $\tilde{e}$. The path $\mathcal{P}$ consists of $1$ mod $4$ edges or $3$ mod $4$ edges. Define \[ \sigma'(\tilde{e}) = \begin{cases} \prod\limits_{e \in \mathcal{P}} \sigma(e) & \text{ if $\mathcal{P}$ has $1$ mod 4 edges } \\ -\prod\limits_{e \in \mathcal{P}} \sigma(e) & \text{ if $\mathcal{P}$ has $3$ mod 4 edges } \end{cases} \] and define $\sigma'(e) = \sigma(e)$ for all $e \in E$. Now the face consisting of the path $\mathcal{P}$ and the edge $\tilde{e}$ is flat. It remains to check that the face $F'$ consisting of $\mathcal{Q}$ and $\tilde{e}$ is flat, which is done by breaking into cases based on whether the paths $\mathcal{P}$, $\mathcal{Q}$ have $1$ or $3$ edges mod 4. 
\end{proof} \begin{lemma} \label{kasteleynlemma2} Let $W = \{v_1, \ldots, v_{2m}\}$ be a set of vertices on the outer face of $G = (V_1, V_2, E)$. Pair the vertices of $W$ so that we can add edges $e_1, \ldots, e_m$ connecting the pairs without introducing any edge crossings. Let $E^{(m)} = E \cup \{e_1, \ldots, e_m \}$ and let $G^{(m)} = (V_1, V_2, E^{(m)}, \omega)$. Define $\sigma_i: E \cup \{e_i\} \to \pm 1$ as in Lemma \ref{kasteleynlemma1}: $\sigma_i(e) = \sigma(e)$ for all $e \in E$ and $\sigma_i(e_i)$ is chosen so that one of the faces bounded by $e_i$ is flat. By Lemma \ref{kasteleynlemma1}, $\sigma_i$ is a Kasteleyn weighting for all $1 \leq i \leq m$. Then $\tau: E \cup \{e_1, \ldots, e_m \} \to \pm 1$ defined by $\tau(e) = \sigma(e)$ for all $e \in E$ and $\tau(e_i) = \sigma_i(e_i)$ for $1 \leq i \leq m$ is a Kasteleyn weighting of $G^{(m)}$. \end{lemma} \begin{proof} We prove the claim by induction on $m$. When $m =1$, there is nothing to show. Assume the claim holds when we add $m-1$ edges to $G$. Now suppose we add $m$ edges $e_1, \ldots, e_m$. Choose an ``innermost" edge $e_j$, i.e. an edge with the property that one of its faces is bounded only by edges of $G$ and $e_j$. By the induction hypothesis, $\tau: E \cup \{e_1, \ldots,e_{j-1}, e_{j+1}, \ldots, e_{m} \} \to \pm 1$ defined by $\tau(e) = \sigma(e)$ for all $e \in E$ and $\tau(e_i) = \sigma_i(e_i)$ for $i = 1, 2, \ldots, j-1, j+1, \ldots, m$ is a Kasteleyn weighting of $G^{(m-1)}= (V_1, V_2, E \cup \{ e_1, \ldots,e_{j-1}, e_{j+1}, \ldots e_{m} \} )$. 
Since $e_j$ is an innermost edge and $\sigma_j: E \cup \{e_j \} \to \pm 1$ was defined so that when $e_j$ is added to $G$, one of the faces bounded by $e_j$ is flat, we may apply Lemma \ref{kasteleynlemma1} to conclude that $\tau: E \cup \{e_1, \ldots, e_m \} \to \pm 1$ defined by $\tau(e) = \sigma(e)$ for all $e \in E$ and $\tau(e_i) = \sigma_i(e_i)$ for $1 \leq i \leq m$ is a Kasteleyn weighting of $G^{(m)} = (V_1, V_2, E \cup \{e_1, \ldots, e_m \} )$. \end{proof} We also need to show that if we delete rows and columns from a Kasteleyn matrix of a graph, the resulting submatrix is a Kasteleyn matrix of the corresponding graph. \begin{lemma} \label{kasteleynlemma3} Let $K$ be a Kasteleyn matrix of $G$. Let $S$ be a balanced subset of vertices on the outer face of $G$. Then $K_{\setminus S}$, the submatrix of $K$ formed by deleting the rows and columns from $S$, is a Kasteleyn matrix of $G \setminus S$. \end{lemma} To prove this, we need the following lemma and corollary, which are proven in \cite{Kuperberg}. \begin{lemma}\cite[Theorem 2.1]{Kuperberg} If $G$ is a planar bipartite graph with an even number of vertices, there are an even number of faces with $4k$ sides. \end{lemma} \begin{cor}\cite[Theorem 2.2]{Kuperberg} \label{Kcor} Every signed graph with an even number of vertices has an even number of non-flat faces. \end{cor} \begin{proof}[Proof of Lemma \ref{kasteleynlemma3}] $G \setminus S$ is flat at each internal face because $G$ is flat at each internal face, so it remains to show that it is flat on the outer face as well. Since $G \setminus S$ has an even number of vertices, it has an even number of non-flat faces by Corollary~\ref{Kcor}, so it must be flat on the outer face. \end{proof} \subsubsection{Proof of Lemma~\ref{lemma32gen}} \begin{proof}[Proof of Lemma~\ref{lemma32gen}] Assume there are $2k$ couples of consecutive nodes of the same color. 
As in Remark~\ref{rem:notation}, we label the couples of consecutive white nodes $(u_i, u_{i} +1)$ and the couples of consecutive black nodes $(s_i, s_{i} +1)$ for $1 \leq i \leq k$. Following the proof of \cite[Lemma 3.2]{KW2006}, we adjoin $2n-2k$ edges to the graph $G$, connecting all adjacent nodes except nodes $s_i$ and $s_i+1$ and nodes $u_i$ and $u_i+1$. The resulting graph is still bipartite by the assumption that the nodes alternate between black and white except for the nodes $s_i$ and $s_i+1$ and the nodes $u_i$ and $u_i+1$. Now add $4k$ more edges as follows. Since $G$ is bipartite, there is a white vertex $t_i$ on the outer face of $G$ between nodes $s_i$ and $s_i+1$ and a black vertex $v_i$ on the outer face of $G$ between nodes $u_i$ and $u_i+1$. Add edges connecting nodes $s_i$ and $t_i$ and $t_i$ and $s_i+1$, and edges connecting nodes $u_i$ and $v_i$ and $v_i$ and $u_i+1$. Give the $2n-2k + 4k = 2n + 2k$ edges we have added weight $\epsilon$ (and then take the limit $\epsilon \to 0$). Let $G'$ denote the resulting graph. Given a Kasteleyn weighting of a graph, the signs of edges incident to a vertex may be reversed, and each face will still have the correct number of minus signs. Fix a Kasteleyn weighting of the graph $G'$. List the vertices from the set ${\bf{N}} \cup \{ t_{i} \}_{i=1}^{k} \cup \{ v_{i} \}_{i=1}^{k}$ in counterclockwise order. For each vertex in this list, if the edge from a vertex $i$ to the next vertex $j$ in the list has a minus sign, reverse the signs of all edges incident to vertex $j$. This ensures that the edges of weight $\epsilon$ we added to $G$ have positive sign, with the possible exception of the edge from node $2n$ to $1$, which must have sign $-(-1)^{n+k}$ for the outer face to have the correct number of minus signs (because if $n+k$ is even, the outer face has 0 mod 4 edges, and if $n+k$ is odd, the outer face has 2 mod 4 edges). Let $S$ be a balanced subset of $\{1, \ldots, 2n\}$. 
Let $(w_1, b_1), \ldots, (w_j, b_j)$ be any noncrossing pairing of the nodes of $S$, where $w_1, \ldots, w_j$ are the white nodes of $S$ and $b_1, \ldots, b_j$ are the black nodes of $S$. Adjoin edges of weight $W$ connecting $w_i$ to $b_i$ for $1 \leq i \leq j$. Because of the edges of weight $\epsilon$ we adjoined to $G$, we let the sign of a new edge of weight $W$ connecting black node $b$ and white node $w$ be \[ \text{sign}(b, w) = (-1)^{(|b-w|+ a_{b, w}-1)/2}, \] where recall that $a_{b, w}$ is the number of couples of consecutive nodes of the same color in the interval $[\min\{b, w\}, \min\{b, w\} + 1, \ldots, \max\{b, w\}]$. Observe that with this choice of signs, when we add any one of the edges $e_i = (b_i, w_i)$ to $G'$ so that it separates the outer face of $G'$ into two faces, one of the faces bounded by $e_i$ is flat. So by Lemma \ref{kasteleynlemma2}, this is a Kasteleyn weighting. Let $K_W$ be the Kasteleyn matrix of the resulting graph, with rows and columns ordered so that $b_1, \ldots, b_j$ are the first $j$ rows and $w_1, \ldots, w_j$ are the first $j$ columns. Let $K = K_0$ be the corresponding Kasteleyn matrix when $W = 0$. Then $Z^{D}(G \setminus S) = \pm [W^j] \det(K_W)$ where $[W^j] \det(K_W)$ denotes the coefficient of $W^j$ in the polynomial $\det(K_W)$. (Because $[W^j] \det(K_W)$ is, up to a sign, the weighted sum of matchings that include all of the edges of weight $W,$ which is exactly the weighted sum of matchings of $G \setminus S$.) Since each term of $\det(K_W)$ has the same sign, \[ \dfrac{ Z^{D}(G \setminus S)}{Z^{D}(G)} = \dfrac{ [W^j] \det(K_W)}{[W^0] \det(K_W)}. \] Next let $K_{\setminus S}$ denote the submatrix of $K$ formed by deleting the rows and columns from $S$. By Lemma \ref{kasteleynlemma3}, $K_{\setminus S}$ is a Kasteleyn matrix of $G \setminus S$. The sign of $\det(K_{\setminus S})$ and the sign of $[W^j] \det(K_W)$ differ by the product of the signs of the edges of weight $W$. 
So, noting that $[W^0] \det(K_W)= \det(K)$, we have \[ \dfrac{ [W^j] \det(K_W)}{[W^0] \det(K_W)} = \prod\limits_{\ell=1}^{j} \text{sign}(b_{\ell}, w_{\ell}) \dfrac{ \det (K_{\setminus S} )}{ \det(K)}. \] By Jacobi's determinant identity, \[ \prod\limits_{\ell=1}^{j} \text{sign}(b_{\ell}, w_{\ell}) \dfrac{ \det (K_{\setminus S}) }{ \det(K)} = \prod\limits_{\ell=1}^{j} \text{sign}(b_{\ell}, w_{\ell}) \det[ K_{b, w}^{-1}]_{w = w_1, \ldots, w_j}^{b = b_1, \ldots, b_j}. \] So we have \begin{equation} \label{proofeqn} \dfrac{ Z^{D}(G \setminus S)}{Z^{D}(G)} = \prod\limits_{\ell=1}^{j} \text{sign}(b_{\ell}, w_{\ell}) \det[ K_{b, w}^{-1}]_{w = w_1, \ldots, w_j}^{b = b_1, \ldots, b_j}. \end{equation} Letting $S = \{b, w\}$ in equation (\ref{proofeqn}), we get \[ Y_{b, w} = \dfrac{Z^{D}(G_{b, w})}{Z^{D}(G)} = \text{sign}(b, w) K_{b, w}^{-1}. \] From this and equation (\ref{proofeqn}) we find that when $\rho_1 = (w_1, b_1), \ldots, (w_j, b_j)$ is a noncrossing pairing of the nodes of $S$ and $\rho_2 = (w_{j+1}, b_{j+1}), \ldots, (w_n, b_n)$ is a noncrossing pairing of the nodes of $S^c$, \small \begin{eqnarray*} && \dfrac{Z^{D}(G \setminus S) Z^{D}(G \setminus S^{c})}{(Z^{D}(G))^{2}}\\ & =& \prod\limits_{\ell=1}^{j} \text{sign}(b_{\ell}, w_{\ell}) \prod\limits_{\ell=j+1}^{n} \text{sign}(b_{\ell}, w_{\ell}) \det \left[ \text{sign}(b, w) Y_{b, w} \right]_{w = w_1, \ldots, w_j}^{b = b_1, \ldots, b_j} \det \left[ \text{sign}(b, w) Y_{b, w} \right]_{w = w_{j+1}, \ldots, w_n}^{b = b_{j+1}, \ldots, b_n} \\ &=& \prod\limits_{\ell=1}^{n} \text{sign}(b_{\ell}, w_{\ell}) \det \left[ (1_{i, j \in S} + 1_{i, j \notin S})\text{sign}(b, w) Y_{b, w} \right]_{w = w_1, \ldots, w_n}^{b = b_1, \ldots, b_n} \end{eqnarray*} \normalsize which is equation (\ref{lem32}), except for the global sign and the order of the rows and columns (since $w_1, \ldots, w_n$ and $b_1, \ldots, b_n$ are not necessarily in ascending order). Let $\rho = \rho_1 \cup \rho_2$. 
Reorder the rows so that the black nodes are in ascending order. For each row swap, make the corresponding column swap. Then $\rho$ pairs the node corresponding to row $i$ with the node corresponding to column $i$. Since the row swaps and column swaps we have made are in one-to-one correspondence, we have not changed the sign of the determinant. Finally, we need to put the columns in ascending order. The sign change incurred by the swaps required to do this is exactly $\text{sign}_{BW}(\rho)$. So after reordering the rows and columns so that they are listed in ascending order, the global sign is: $$ \prod\limits_{\ell=1}^{n} \text{sign}(b_{\ell}, w_{\ell}) \text{sign}_{BW}(\rho)$$ which is equal to $\text{sign}_{\cons}({\bf N}) (-1)^{\# \text{ crosses of $\rho$} } $ by Lemma \ref{lemma34}. \end{proof} \subsection{Defining $\mathcal{Q}^{(DD)}$.} Let $Y'$ be the vector of monomials $Y'_{\rho}$ indexed by black-white pairings, where $Y'_{\rho} = (-1)^{\# \text{ crosses of $\rho$}} \prod\limits_{(i, j) \in \rho } Y_{i, j}$. In this section, we define $\mathcal{Q}^{(DD)}$, which is the matrix satisfying $P = \mathcal{Q}^{(DD)}Y'$, where $P$ is the vector indexed by planar pairings $\pi$ with entries $\widetilde{\Pr}(\pi)$. Recall from Section~\ref{sec:organization} that $\widetilde{\Pr}(\pi) = \dfrac{ Z^{DD}_{\pi}(G, {\bf N}) }{ (Z^{D}(G))^{2} }$. We begin with a few definitions. \begin{defn} If $\sigma$ and $\tau$ are two pairings on a set of nodes $\{1, 2, \ldots, 2n\}$, construct the undirected multigraph $C$ with vertex set $\{1, 2, \ldots, 2n\}$ by adding an edge between vertices $i$ and $j$ for each pair $(i, j)$ of $\sigma$, and similarly for $\tau$. The {\em number of components in $\sigma \cup \tau$} is the number of connected components in $C$. Note that all connected components of $C$ are cycles. \end{defn} \begin{example} If $\sigma = ((1, 2), (3, 4), (5, 6))$ and $\tau = ((1, 5), (2, 6), (3, 4))$ then there are two components in $\sigma \cup \tau$, as shown below. 
\[\begin{tikzpicture}[scale = .5] \node[vertex][fill] (n1) at (0, 0) [label=below:\small{1}] {}; \node[vertex][fill] (n2) at (2,0) [label=below:\small{2}] {}; \node[vertex] (n3) at (4,0) [label=below:\small{3}] {}; \node[vertex][fill] (n4) at (6, 0) [label=below:\small{4}] {}; \node[vertex] (n5) at (8,0) [label=below:\small{5}] {}; \node[vertex] (n6) at (10,0) [label=below:\small{6}] {}; \draw (n2) arc (0:180:1cm); \draw (n4) arc (0:180:1cm); \draw (n6) arc (0:180:1cm); \draw (n1) arc (180:360:4cm); \draw (n2) arc (180:360:4cm); \draw (n3) arc (180:360:1cm); \node[vertex][fill=white] (n3) at (4,0) {}; \node[vertex][fill=white] (n5) at (8,0) {}; \node[vertex][fill=white] (n6) at (10,0) {}; \end{tikzpicture}\] \end{example} \begin{defn} \label{signrhopi} If $\pi$ is an odd-even pairing and $\rho$ is a black-white pairing, define \[ \text{sign}(\pi, \rho) = (-1)^{\# \text{nodes}/2} (-1)^{\# \text{ components in } \pi \cup \rho} \text{sign}_{OE}(\pi) \text{sign}_{BW}(\rho). \] \end{defn} \begin{defn} \label{Bdefn} Define the matrix $\mathcal{B}_{2}$ which has rows indexed by planar pairings and columns indexed by black-white pairings by \[(\mathcal{B}_2)_{\pi, \rho} = \text{sign}( \pi, \rho) 2^{\# \text{ components in } \pi \cup \rho }. \] \end{defn} Let $M$ be the matrix from Corollary~\ref{corlem31} and let $D$ be the vector indexed by balanced sets $S$ with entries $D_{S} = \dfrac{Z^{D}(G \setminus S)Z^{D}(G \setminus S^c)}{(Z^D(G))^2}$ (see Lemma~\ref{lemma32gen}). Following Kenyon and Wilson, we will show that $$M^{T}D = \mathcal{B}_{2} Y'$$ (Lemma~\ref{lem:mylem35}). This result is nontrivial, requiring several lemmas, but once it is established it is nearly immediate that $$M^{T}M P = \mathcal{B}_{2} Y',$$ where $P$ is the vector indexed by planar pairings $\pi$ with entries $\widetilde{\Pr}(\pi)$ (Theorem~\ref{thm36}). 
Kenyon and Wilson proved that $M^{T}M$ is invertible (\cite[Theorem 3.3]{KW2009}), so we conclude the section by defining $\mathcal{Q}^{(DD)}$ as $(M^{T}M)^{-1} \mathcal{B}_{2}$. \begin{lemma}[analogue of Lemma 3.5 from \cite{KW2006}] \label{lem:mylem35} $M^{T}D = \mathcal{B}_2 Y'$. \end{lemma} In the proof of \cite[Lemma 3.5]{KW2006}, Kenyon and Wilson use the fact that if the nodes of $G$ are all either black and odd or white and even and $\pi$ and $\rho$ are odd-even pairings, then there are $2^{\text{\# components in } \pi \cup \rho}$ balanced sets $S$ such that $\pi$ and $\rho$ do not connect $S$ to $S^c$ (for each component, either put all of its nodes in $S$ or all of its nodes in $S^c$). Recall from Section~\ref{sec:lem31} that $T \subseteq {\bf N}$ is the set of nodes that are odd and white or even and black; under Kenyon and Wilson's assumptions, $T = \emptyset$. It turns out that after removing the requirement that the nodes be black and odd or white and even, if $\pi$ is an odd-even pairing and $\rho$ is a black-white pairing there are still $2^{\text{\# components in } \pi \cup \rho}$ sets $S$ such that $\rho$ does not connect $S$ to $S^c$ and $\pi$ does not connect $S \triangle T$ to $(S \triangle T)^c$. \begin{lemma} \label{alglemma} Let $\pi$ be an odd-even pairing and let $\rho$ be a black-white pairing. For each component of $\pi \cup \rho$ there are exactly two ways to put the nodes in this component into $S$ and $S^c$ so that $\rho$ does not connect $S$ to $S^c$ and $\pi$ does not connect $S \triangle T$ to $(S \triangle T)^c$. \end{lemma} \begin{proof} We start by placing an initial node $a$ into $S$ or $S^c$, and then apply the algorithm below until all nodes in the component have been placed into $S$ or $S^c$. \\ \noindent {\bf {\underline{Algorithm}}} \\ \noindent Step 1 \begin{itemize} \item[(a)] If $a \in S \cap T^c$ or $a \in S^c \cap T$: \begin{itemize} \item[(i)] If $\pi(a) \in T$: \begin{itemize} \item[] Put $\pi(a)$ in $S^c$. 
\end{itemize} \item[(ii)] Else if $\pi(a) \notin T$: \begin{itemize} \item[] Put $\pi(a)$ in $S$. \end{itemize} \end{itemize} \item[(b)] Else if $a \in S \cap T$ or $a \in S^c \cap T^c$: \begin{itemize} \item[(i)] If $\pi(a) \in T$: \begin{itemize} \item[] Put $\pi(a)$ in $S$. \end{itemize} \item[(ii)] Else if $\pi(a) \notin T$: \begin{itemize} \item[] Put $\pi(a)$ in $S^c$. \end{itemize} \end{itemize} \item[] Go to Step 2 with $a := \pi(a)$. \end{itemize} \noindent { Step 2} \begin{itemize} \item[] If $a \in S$: \begin{itemize} \item[] Put $\rho(a)$ in $S$. \end{itemize} \item[] Else if $a \in S^c$: \begin{itemize} \item[] Put $\rho(a)$ in $S^{c}$. \end{itemize} \item[] Go to Step 1 with $a := \rho(a)$. \end{itemize} \noindent {\bf Claim.} The set $S$ described in the algorithm is well-defined and balanced. \begin{proof} We will prove this claim by induction on the number of nodes in a component of $\pi \cup \rho$. \noindent {\em {Base Cases.}} First note that in the case where the nodes alternate between black and white, $T = \emptyset$ or $T = {\bf N}$ so the algorithm reduces to putting all of the nodes in a component in $S$ or all of the nodes of a component in $S^{c}$, so $S$ is well-defined. Since in this case both pairings are black-white, $S$ is balanced as well. If there are two nodes in a component, since $\rho$ is a black-white pairing one of the nodes is black and the other is white, so by the previous comment there is nothing to show. If there are four nodes in a component, since $\rho$ is a black-white pairing two nodes must be black and two nodes must be white. By symmetry, it is enough to consider when nodes 1 and 2 are black and nodes 3 and 4 are white. There are two odd-even pairings: $((1, 2), (3, 4))$ and $((1, 4), (3, 2))$ and two black-white pairings: $((1, 4), (3, 2))$ and $((1, 3), (2, 4))$. \begin{figure} \caption{The diagram of $\pi \cup \rho$ when $\pi = ((1, 2), (3, 4))$ and $\rho = ((1, 4), (2, 3))$. The nodes in $T$ are underlined. 
If we start the algorithm by putting $1 \in S$, we get $S = \{1, 4\}$.} \label{basecase} \end{figure} For example, when $\pi = ((1, 2), (3, 4))$ and $\rho = ((1, 4), (2, 3))$ (see Figure~\ref{basecase}), we start by putting node 1 in $S$. (We could also start by putting node 1 in $S^{c}$.) Then we run the algorithm: \\ Step 1. Since $1 \notin T$ and $\pi(1) = 2 \in T$, we put $2 \in S^{c}$. \\ Step 2. Since $2 \in S^{c}$ we put $\rho(2) = 3 \in S^{c}$. \\ Step 1. Since $3 \in S^{c}$, $3 \in T$ and $4 \notin T$, we put $4 \in S$. \\ So we get $S = \{1, 4\}$, which is balanced. To check that $S$ is well-defined, it suffices to show that if we continue the algorithm for one more step, we do not get a contradiction. If we apply Step 2 starting at node 4, we get that we should put $\rho(4) = 1$ in $S$, as desired. In the table below are the results of applying the algorithm for each possible combination of odd-even pairings $\pi$ and black-white pairings $\rho$ that results in a component of size 4. When $\pi = \rho = ((1, 4), (3, 2))$, there are two components each of size 2, so this is omitted from the table. \begin{center} \begin{tabular}{c | c | c | c | c | c} $\pi$ & $\rho$ & $S$ & start & end & one more step \\ \hline $((1, 2), (3, 4))$ & $((1, 4), (2, 3))$ & \{1, 4\} & $1 \in S$ & $4 \in S$ & $1 \in S$ \\ $((1, 2), (3, 4))$ & $((1, 3), (2, 4))$ & \{1, 3\} & $1 \in S$ & $3 \in S$ & $1 \in S$ \\ $((1, 4), (3, 2))$ & $((1, 3), (2, 4))$ & \{1, 2, 3, 4\} & $1 \in S$ & $3 \in S$ & $1 \in S$ \end{tabular} \end{center} In each case, $S$ is balanced, and continuing the algorithm for one more step does not create a contradiction.\\ \begin{figure} \caption{Illustration of case 1. Shown left is the odd-even pairing $\pi$ (top) and the black-white pairing $\rho$ (bottom). On the right we have replaced $\rho$ with $\tilde{\rho}$.} \label{case1} \end{figure} Now suppose that a component of $\pi \cup \rho$ has $2n$ nodes, where $2n > 4$. 
Assume that if a component has fewer than $2n$ nodes, the set $S$ is well-defined and balanced. Let ${\bf N}$ denote the set of nodes in this component. There are two cases to consider based on whether or not $\pi |_{\bf N}$ has a black-white pair.\\ \noindent {\bf Case 1.} (Illustrated in Figure \ref{case1}). Assume $\pi |_{\bf N}$ has at least one black-white pair $(a, \pi(a))$. Since $\rho$ is a black-white pairing, $\rho(a)$ and $\rho(\pi(a))$ are opposite color. Consider the black-white pairing $\tilde{\rho}$ on ${\bf{N}} - \{a, \pi(a) \}$ obtained from $\rho$ by removing the pairs $(a, \rho(a))$ and $(\pi(a), \rho(\pi(a)))$ and adding the pair $(\rho(a), \rho(\pi(a)))$. Let $\tilde{\pi} = \pi |_{{\bf N} - \{a, \pi(a) \}}$. Now $\tilde{\pi} \cup \tilde{\rho}$ is a single component with $2n-2$ nodes. Start the algorithm by putting $\rho(\pi(a)) \in S$. By the induction hypothesis, the set $S$ produced by the algorithm is well-defined and balanced. Note that the fact that $S$ is well-defined means that $\rho(a) \in S$. Considering the original component of $\pi \cup \rho$, when we start the algorithm at $\rho(\pi(a))$ it proceeds identically as it did with $\tilde{\pi} \cup \tilde{\rho}$ until we reach the node $\rho(a)$. Since $\rho(a) \in S$, applying Step 2 of the algorithm we add $a$ to $S$. (Note that we are guaranteed to be on Step 2 here by the fact that $\rho(a)$ is paired with $\rho(\pi(a))$ in $\tilde{\rho}$, and the algorithm starts with Step 1.) Since $\pi$ is odd-even, black-white pairs of $\pi$ have the property that either both nodes are in $T$ or both are not in $T$. So after the next step of the algorithm (Step 1) we add $\pi(a)$ to $S$. Since we added $a$ and $\pi(a)$ to $S$, $S$ is still balanced. Since $\pi(a) \in S$, continuing the algorithm for one more step would put $\rho(\pi(a)) \in S$, which is consistent. \\ \begin{figure} \caption{Illustration of case 2. 
Shown left is the odd-even pairing $\pi$ (top) and the black-white pairing $\rho$ (bottom). On the right we have replaced $\rho$ with $\tilde{\rho}$.} \label{case2a} \end{figure} \noindent {\bf Case 2.} (Illustrated in Figure~\ref{case2a}). If $\pi |_{\bf N}$ does not have a black-white pair, then consider a white pair of $\pi |_{\bf N}$: $(a, \pi(a))$. Let $b = \rho(a)$. Since $a$ is white, $\rho(a)$ must be black, and $(b, \pi(b))$ is a black pair of $\pi |_{\bf N}$ by the assumption that $\pi |_{\bf N}$ does not have a black-white pair. Consider the black-white pairing $\tilde{\rho}$ on ${\bf{N}} - \{a, \pi(a), b, \pi(b)\}$ obtained from $\rho$ by removing the pairs $(a, b), (\pi(a), \rho(\pi(a))),$ and $(\pi(b), \rho(\pi(b)))$ and adding the pair $(\rho(\pi(a)), \rho(\pi(b)))$. Let $\tilde{\pi} = \pi |_{{\bf N} - \{a, \pi(a), b, \pi(b) \}}$. Now $\tilde{\pi} \cup \tilde{\rho}$ is a single component with $2n-4$ nodes. Start the algorithm by putting $\rho(\pi(a)) \in S$. By the induction hypothesis, the set $S$ produced by the algorithm is well-defined and balanced. Note that the fact that $S$ is well-defined means that $\rho(\pi(b)) \in S$. Considering the original component of $\pi \cup \rho$, when we start the algorithm by putting $\rho(\pi(a)) \in S$ it proceeds identically as it did with $\tilde{\pi} \cup \tilde{\rho}$ until we reach the node $\rho(\pi(b))$. Since $\rho(\pi(b)) \in S$, applying Step 2 of the algorithm we add $\pi(b)$ to $S$. Since $\pi$ is odd-even, exactly one of $\{b, \pi(b)\}$ is in $T$. This means that after applying Step 1 we put $b \in S^{c}$. Then we put $a \in S^{c}$ (since $\rho(a) = b$) and $\pi(a) \in S$ (since exactly one of $\{a, \pi(a)\}$ is in $T$). Since we added $\pi(b)$ and $\pi(a)$ to $S$, $S$ is still balanced. Since $\pi(a) \in S$, continuing the algorithm for one more step puts $\rho(\pi(a)) \in S$, which is consistent. 
\end{proof} \noindent {\bf Claim.} After applying this algorithm, $\rho$ does not connect $S$ to $S^c$ and $\pi$ does not connect $S \triangle T$ to $(S \triangle T)^c$. \begin{proof} By Step 2, for each node $a$, $a$ and $\rho(a)$ will either both be in $S$ or $S^{c}$, so $\rho$ does not connect $S$ to $S^c$. To show that $a$ and $\pi(a)$ are either both in $S \triangle T$ or both in $(S \triangle T)^c$, there are several cases to consider. \begin{itemize} \item If $a$ and $\pi(a)$ are both not in $T$, then they are both placed into $S$ by Step 1(a)(ii) or both placed into $S^{c}$ by Step 1(b)(ii). In the first case, $a$ and $\pi(a)$ are both in $S \triangle T$, and in the second case $a$ and $\pi(a)$ are both in $(S \triangle T)^c$. \item If $a \in T$ and $\pi(a) \notin T$, then one of $a, \pi(a)$ is placed into $S$ and one is placed into $S^{c}$ by Step 1(a)(ii) or Step 1(b)(ii). If $a$ is placed into $S$ and $\pi(a)$ is placed into $S^{c}$, then $a$ and $\pi(a)$ are both in $(S \triangle T)^c$. The other case is similar. \item If $a \in T$ and $\pi(a) \in T$, then they are both placed into $S^{c}$ by Step 1(a)(i) or both placed into $S$ by Step 1(b)(i). \item If $a \notin T$ and $\pi(a) \in T$, then one is placed in $S$ and one is placed in $S^{c}$ by Step 1(a)(i) or Step 1(b)(i). \end{itemize} \end{proof} We have shown that the algorithm produces a well-defined balanced set $S$ with the desired properties. We conclude that for each component of $\pi \cup \rho$ there are exactly two ways to put the nodes in this component into $S$ and $S^c$ so that $\rho$ does not connect $S$ to $S^c$ and $\pi$ does not connect $S \triangle T$ to $(S \triangle T)^c$. \end{proof} We need two more facts to prove Lemma~\ref{lem:mylem35}. \begin{lemma} \label{lem:signSdefn2} Let $S$ be a balanced subset of nodes and let $\text{sign}(S)$ be defined as in Lemma \ref{lemma32gen}. 
Then \begin{equation*} \text{sign}(S) = (-1)^{\frac{\# \text{nodes}}{2}} (-1)^{\# \text{ comp in } \pi \cup \rho} \text{sign}_{OE}(\pi) \text{sign}_{BW}(\rho), \end{equation*} where $\pi$ is an odd-even pairing such that $\pi$ does not connect $S \triangle T$ to $(S \triangle T)^c$ and $\rho$ is a black-white pairing such that $\rho$ does not connect $S$ to $S^c$. \end{lemma} The proof of Lemma~\ref{lem:signSdefn2} is lengthy and technical, so we postpone it to Section~\ref{sec:signSformula} for ease of exposition. The following is an immediate consequence of this lemma. \begin{cor} \label{samesignlemma} Let $\pi$ be an odd-even pairing and let $\rho$ be a black-white pairing. If $S_1$ and $S_2$ are balanced subsets of nodes such that $\pi$ does not connect $S_i \triangle T$ to $(S_i \triangle T)^c$ and $\rho$ does not connect $S_i$ to $S_i^c$ for $i = 1, 2$, then $\text{sign}(S_1) = \text{sign}(S_2)$. \end{cor} To see that Corollary~\ref{samesignlemma} follows from Lemma~\ref{lem:signSdefn2}, observe that if $\pi$ and $\rho$ satisfy the hypotheses of Lemma~\ref{lem:signSdefn2} for $S_1$ and $S_2$, then $S_1$ and $S_2$ must have the same sign, because all of the quantities on the right hand side of the equation in Lemma~\ref{lem:signSdefn2} depend only on $\pi$ and $\rho$. \begin{proof}[Proof of Lemma \ref{lem:mylem35}] Recall from Lemma \ref{lemma32gen} that \begin{equation*} D_{S} = \dfrac{Z^{D}(G \setminus S) Z^{D}(G \setminus S^{c})}{(Z^{D}(G))^{2}} = \text{sign}_{\cons}({\bf N}) \text{sign}(S) \det \left[(1_{i, j \in S} + 1_{i, j \notin S}) \times \text{sign}(i, j) Y_{i, j} \right]^{i = b_1,b_2, \ldots, b_n}_{j = w_1,w_2, \ldots, w_n}, \end{equation*} where $b_1, b_2, \ldots, b_n$ are the black nodes listed in ascending order and $w_1, w_2, \ldots, w_n$ are the white nodes listed in ascending order. 
When we expand the determinant, we get \[D_{S} = \text{sign}_{\cons}({\bf N}) \text{sign}(S) \sum_{ \substack{ \text{ BW pairings $\rho$: } \\ \text{ $\rho$ does not connect } \\ \text{ $S$ to $S^{c}$ } } } \text{sign}_{BW}(\rho) \prod\limits_{(i, j) \in \rho } \text{sign}(i, j)Y_{i, j}. \] By Lemma \ref{lemma34}, \[D_{S} = \text{sign}_{\cons}({\bf N}) \text{sign}(S) \sum_{ \substack{ \text{ BW pairings $\rho$: } \\ \text{ $\rho$ does not connect } \\ \text{ $S$ to $S^{c}$ } } } \text{sign}_{\cons}({\bf N}) (-1)^{\text{\# crosses of $\rho$} } \prod\limits_{(i, j) \in \rho } Y_{i, j}, \] and thus by definition, \begin{equation} D_{S} = \text{sign}(S) \sum_{ \substack{ \text{ BW pairings $\rho$: } \\ \text{ $\rho$ does not connect } \\ \text{ $S$ to $S^{c}$ } } } Y'_{\rho}. \end{equation} Let $\pi$ be a planar pairing and let $M$ be the matrix from Corollary \ref{corlem31}. The $\pi$th row of $M^{T}D$ is \[ \sum_{ \substack{ S \subseteq \{1, 2, \ldots, 2n\} \\ \pi \text{ does not connect} \\ S \triangle T \text{ to } (S \triangle T)^c} } D_S. \] We see that \begin{eqnarray*} \sum_{ \substack{ S \subseteq \{1, 2, \ldots, 2n\}: \\ \pi \text{ does not connect} \\ S \triangle T \text{ to } (S \triangle T)^c} } D_S &= & \sum_{ \substack{ S \subseteq \{1, 2, \ldots, 2n\}: \\ \pi \text{ does not connect} \\ S \triangle T \text{ to } (S \triangle T)^c} } \text{sign}(S) \sum_{ \substack{ \text{BW pairings } \rho: \\ \rho \text{ does not connect} \\ S \text{ to }S^{c} }} Y'_{\rho} \\ & = & \sum_{\text{BW pairings } \rho } \sum_{\substack{ S\text{: }\rho \text{ does not } \\ \text{ connect } S \text{ to }S^{c} \text{ and } \\ \pi \text{ does not connect} \\ S \triangle T \text{ to } (S \triangle T)^c} } \text{sign}(S)Y'_{\rho}. 
\end{eqnarray*} By Lemma \ref{alglemma} and Corollary \ref{samesignlemma}, \[ \sum_{\text{BW pairings } \rho } \sum_{\substack{ S\text{: }\rho \text{ does not } \\ \text{ connect } S \text{ to }S^{c} \text{ and } \\ \pi \text{ does not connect} \\ S \triangle T \text{ to } (S \triangle T)^c} } \text{sign}(S)Y'_{\rho} = \sum_{\text{BW pairings } \rho } \text{sign}( \pi, \rho ) 2^{ \# \text{ comp in } \pi \cup \rho } Y'_{\rho} . \] Since this sum is the $\pi$th row of $\mathcal{B}_{2} Y'$, we have proven the claim. \end{proof} \begin{thm}[analogue of Theorem 3.6 from \cite{KW2006}] \label{thm36} $ M^T M P = \mathcal{B}_{2} Y'$ \end{thm} \begin{proof} Noting that $\widetilde{\Pr}(\pi) = \Pr(\pi) Z^{DD}(G)/(Z^D(G))^2$, we see that by Corollary~\ref{corlem31}, $MP = D$. Then, applying Lemma~\ref{lem:mylem35} we have $M^T M P = M^T D = \mathcal{B}_{2} Y'$. \end{proof} It remains to show that $M^T M$ is invertible. In fact, $M^{T}M$ is equal to the {\em meander matrix} $\mathcal{M}_{q}$ evaluated at $q = 2$. \begin{lemma}\cite[Lemma 3.3]{KW2006} \label{lem3.3} Let $M$ be the matrix from Lemma \ref{lem3.1}. Then $M^{T}M = \mathcal{M}_{2}$, where $\mathcal{M}_{2}$ is a matrix with rows and columns indexed by planar pairings, with entries $$(\mathcal{M}_{2})_{\sigma, \tau} = 2^{\# \text{ comp in } \sigma \cup \tau}$$ \end{lemma} Since the only difference between the matrix from Lemma \ref{lem3.1} and the matrix from Corollary \ref{corlem31} is the ordering of the rows, Lemma~\ref{lem3.3} applies to the matrix $M$ from Corollary \ref{corlem31} as well. \begin{defn} \label{matrixdefn} Since $\mathcal{M}_{2}$ is invertible (see \cite{meanders}), define $$\mathcal{Q}^{(DD)} = \mathcal{M}_{2}^{-1} \mathcal{B}_{2}$$ \end{defn} Since $P = \mathcal{Q}^{(DD)} Y'$, $\mathcal{Q}^{(DD)}$ is the matrix of the $Y'$ polynomials: for a given planar pairing $\pi$, the $\pi$th row of $\mathcal{Q}^{(DD)}$ gives the polynomial $\widetilde{\Pr}(\pi)$. 
That is, \begin{equation*} \widetilde{\Pr}(\sigma) = \sum_{\text{black-white pairings } \rho} \mathcal{Q}^{(DD)}_{\sigma, \rho} Y'_{\rho}. \end{equation*} Our next aim is to prove that $\mathcal{Q}^{(DD)}$ is integer-valued. To this end, we will show that we can compute the columns combinatorially using a {\em transformation rule} from Kenyon and Wilson's study of {\em groves} \cite{KW2006}. \subsection{Groves} \label{sec:groves} \begin{defn}\cite{KW2006} If $G$ is a finite edge-weighted planar graph embedded in the plane with a set of nodes, a {\em grove} is a spanning acyclic subgraph of $G$ such that each component tree contains at least one node. The weight of a grove is the product of the weights of the edges it contains. \end{defn} \begin{wrapfigure}{r}{.3\textwidth} \centering \begin{tikzpicture}[scale=.5] \def7{7} \node at (-0.5,0) {$1$}; \node at (3,-0.5) {$2$}; \node at (6,-0.5) {$3$}; \node at (7.5,2) {$4$}; \node at (7, 7.5) {$5$}; \node at (4, 7.5) {$6$}; \node at (1, 7.5) {$7$}; \node at (-.5,5) {$8$}; \draw[line width = .25mm] (0, 0) -- (1, 0); \draw[line width = .25mm] (2, 0) -- (2, 1); \draw[line width = .25mm] (1, 1) -- (0, 1); \draw[line width = .25mm] (1, 3) -- (1, 4); \draw[line width = .25mm] (0, 2)-- (1, 2); \draw[line width = .25mm] (0, 5) -- (1, 5); \draw[line width = .25mm] (0, 3) -- (0, 4); \draw[line width = .25mm] (0, 3) -- (1, 3); \draw[line width = .25mm] (1, 6) -- (1, 7); \draw[line width = .25mm] (2, 6) -- (2, 7); \draw[line width = .25mm] (0, 6) -- (0, 7); \draw[line width = .25mm] (0, 7) -- (1, 7); \draw[line width = .25mm] (2, 2) -- (2, 1); \draw[line width = .25mm] (2, 2) -- (2, 3); \draw[line width = .25mm] (2, 3) -- (2, 4); \draw[line width = .25mm] (2, 4) -- (2, 5); \draw[line width = .25mm] (3, 3) -- (3, 4); \draw[line width = .25mm] (1, 0) -- (2, 0); \draw[line width = .25mm] (2, 1) -- (1, 1); \draw[line width = .25mm] (0, 1) -- (0, 2); \draw[line width = .25mm] (1, 2) -- (1, 3); \draw[line width = .25mm] (1, 4) -- (1, 5); 
\draw[line width = .25mm] (1, 7) -- (2, 7); \draw[line width = .25mm] (3, 7) -- (4, 7); \draw[line width = .25mm] (3, 7) -- (4, 7); \draw[line width = .25mm] (3, 6) -- (3, 7); \draw[line width = .25mm] (6, 6) -- (6, 7); \draw[line width = .25mm] (5, 6) -- (6, 6); \draw[line width = .25mm] (5, 5) -- (6, 5); \draw[line width = .25mm] (3,2) -- (3, 3); \draw[line width = .25mm] (5, 7) -- (6, 7); \draw[line width = .25mm] (4, 7) -- (5, 7); \draw[line width = .25mm] (2, 7) -- (3, 7); \draw[line width = .25mm] (4, 6) -- (5, 6); \draw[line width = .25mm] (4, 5) -- (5, 5); \draw[line width = .25mm] (6, 6) -- (6, 5); \draw[line width = .25mm] (3, 5) -- (3, 6); \draw[line width = .25mm] (3, 0) -- (3, 1); \draw[line width = .25mm] (3, 2) -- (4, 2); \draw[line width = .25mm] (4,3)-- (4, 4); \draw[line width = .25mm] (5, 4) -- (6, 4); \draw[line width = .25mm] (7, 4) -- (7,5); \draw[line width = .25mm] (7, 6) -- (7,7); \draw[line width = .25mm] (3, 1) -- (3, 2); \draw[line width = .25mm] (4, 2) -- (4,3); \draw[line width = .25mm] (4, 4) -- (5, 4); \draw[line width = .25mm] (6, 4) -- (7, 4); \draw[line width = .25mm] (7, 5) -- (7,6); \draw[line width = .25mm] (6, 0) -- (7, 0); \draw[line width = .25mm] (7, 1) -- (6, 1); \draw[line width = .25mm] (6, 2) -- (6, 3); \draw[line width = .25mm] (7, 3) -- (7, 2); \draw[line width = .25mm] (5, 2) -- (5, 3); \draw[line width = .25mm] (5, 2) -- (5, 1); \draw[line width = .25mm] (5, 0) -- (5, 1); \draw[line width = .25mm] (4, 1) -- (4, 0); \draw[line width = .25mm] (6, 1) -- (6, 2); \draw[line width = .25mm] (6, 3)-- (7, 3); \draw[line width = .25mm] (4, 0) -- (5, 0); \draw[line width = .25mm] (5, 0) -- (6, 0); \draw[line width = .25mm] (7, 3) -- (7, 4); \foreach \x [count = \n] in {0, 2, 4, 6}{ \foreach \y in {0, 2, 4, 6}{ \filldraw[fill=black, draw=black] (\x,\y) circle (0.15cm); \filldraw[fill=white, draw=black] (\x+1,\y) circle (0.15cm); } } \foreach \x [count = \n] in {1, 3, 5, 7}{ \foreach \y in {1, 3, 5, 7}{ \filldraw[fill=black, 
draw=black] (\x,\y) circle (0.15cm); \filldraw[fill=white, draw=black] (\x-1,\y) circle (0.15cm); } } \end{tikzpicture} \caption{A grove of a grid graph with $8$ nodes. The partition of the nodes is $\{ \{1, 8\}, \{2, 4, 5\}, \{3\}, \{6, 7\} \}$. } \label{fig:grove} \end{wrapfigure} The connected components of a grove partition the nodes into a planar partition. If $\sigma$ is a planar partition of $1, 2, \ldots, n$, let $\Pr(\sigma)$ be the probability that a random grove of $G$ partitions the nodes according to $\sigma$. Kenyon and Wilson showed that $\dddot{ \Pr}(\sigma) := \dfrac{ \Pr(\sigma) }{ \Pr( 1|2| \cdots |n)}$ is an integer-coefficient homogeneous polynomial in the variables $L_{i, j}$\footnotemark~\cite[Theorem 1.2]{KW2006}. \footnotetext{When $G$ is viewed as a resistor network with conductances equal to the edge weights, $L_{i, j}$ is the current that would flow into node $j$ if node $i$ were set to one volt and all other nodes were set to zero volts \cite[Appendix A]{KW2006}.} For example, the normalized probability $\dddot{ \text{Pr}}(\sigma)$ that a random grove on four nodes partitions the nodes according to $1 | 234$ is $\dddot{ \text{Pr}}(\sigma) = L_{2, 3} L_{3, 4} + L_{2, 3} L_{2, 4} + L_{2, 4} L_{3, 4} + L_{1, 3} L_{2, 4}.$ (See \cite[Section 1.2]{KW2006}). Each monomial in the polynomial $\dddot{ \text{Pr}}(\sigma)$ is of the form $L_{\tau} = \sum\limits_{F} \prod\limits_{\{i, j\} \in F} L_{i, j}$. The sum is over spanning forests $F$ of the complete graph $K_n$ for which the trees of $F$ span the parts of $\tau$ and the product is over edges $\{i, j\}$ of the forest $F$. To compute these polynomials, Kenyon and Wilson define a matrix $\mathcal{P}^{(t)}$ with rows indexed by planar partitions and columns indexed by all partitions and show how to compute the columns of this matrix combinatorially. The $\tau$th column of $\mathcal{P}^{(t)}$ is computed by writing the partition $\tau$ as a linear combination of planar partitions. 
So if $\tau$ is planar, then $\mathcal{P}^{(t)}_{\tau, \tau} = 1$ and $\mathcal{P}^{(t)}_{\sigma, \tau} = 0$ for all $\sigma \neq \tau$. If $\tau$ is nonplanar, the rule is a generalization of the rule for four nodes: \begin{equation} \label{eqn:4noderule} 13|24 \rightarrow 1 | 234 + 2 | 134 + 3 | 124 + 4 | 123 - 12|34 - 14|23 \end{equation} This rule tells us, for example, that $\mathcal{P}^{(t)}_{12|34, 13|24} = -1$. In general, if a partition is nonplanar, then there will exist nodes $a < b < c < d$ such that $a$ and $c$ belong to one part, and $b$ and $d$ belong to another part. In Kenyon and Wilson's transformation rule, 1, 2, 3, and 4 in equation (\ref{eqn:4noderule}) are replaced with parts $A, B, C$ and $D$, which contain the nodes $a, b, c$ and $d$, respectively. \begin{trule}\cite[Rule 1]{KW2006} \label{kwrule1} Arbitrarily subdivide the part containing $a$ and $c$ into two sets $A$ and $C$ such that $a \in A$ and $c \in C$, and similarly subdivide the part containing $b$ and $d$ into $B \ni b$ and $D \ni d$. Let the remaining parts of the partition be denoted by ``rest.'' Then the transformation rule is $$AC|BD|\text{rest} \to A|BCD|\text{rest} + B|ACD|\text{rest} + C|ABD|\text{rest} + D|ABC|\text{rest} - AB|CD|\text{rest} - AD|BC|\text{rest}$$ \end{trule} \begin{rem} \label{rem:rule1error} If we arbitrarily subdivide the part containing $a$ and $c$ into two sets $A$ and $C$ such that $a \in A$ and $c \in C$, and similarly for the part containing $b$ and $d$, it is possible to repeat Rule~\ref{kwrule1} indefinitely without ever obtaining a linear combination of planar partitions. For example, consider the partition $1235|46$. One crossing is $a = 1$, $b = 4$, $c = 5$, $d = 6$. If we choose $A = \{1, 2, 3\}$, $B = \{4\}, C = \{5\},$ and $D = \{6\}$, then after applying Rule~\ref{kwrule1}, all of the partitions are planar. 
But if we choose $A = \{1, 2\}$, $B = \{4\}$, $C = \{3, 5\}$, and $D = \{6\}$ then after applying Rule~\ref{kwrule1} we get $$12|3456 + 4|12356 + 35|1246 + 6|12345 - 124|356 - 126|345$$ which includes nonplanar partitions. For example, the partition $124|356$ has crossing $a = 1$, $b = 3$, $c= 4$, $d= 6$. If we choose $A = \{1, 2\}$, $B = \{3, 5\}$, $C = \{4\}$, and $D = \{6\}$ then after applying Rule~\ref{kwrule1} to $124|356$ we get $$12|3456 + 35|1246 + 4|12356 + 6|12345 - 1235|46 - 126|345.$$ So after applying Rule~\ref{kwrule1} twice, all partitions cancel except for the partition $1235|46$, which is the partition we started with. We could continue this process indefinitely. \end{rem} Remark~\ref{rem:rule1error} motivates the following modification of Rule~\ref{kwrule1}. \begin{trule} \label{hrule} Subdivide the part containing $a$ and $c$ into two sets $A$ and $C$ such that $A$ contains all the items in this part less than $b$, and $C$ contains all other items. Similarly, subdivide the part containing $b$ and $d$ into two sets $B$ and $D$ so that $B$ contains all items in this part less than $c$, and $D$ contains all other items. Then the transformation rule is $$AC|BD|\text{rest} \to A|BCD|\text{rest} + B|ACD|\text{rest} + C|ABD|\text{rest} + D|ABC|\text{rest} - AB|CD|\text{rest} - AD|BC|\text{rest}$$ \end{trule} Applying Rule~\ref{hrule} repeatedly will result in a linear combination of planar partitions. We have now presented all the definitions needed to state Kenyon and Wilson's main result for groves. 
\begin{thm}\cite[Theorem 1.2]{KW2006} \label{thm:KWgrovethm} Any partition $\tau$ may be transformed into a formal linear combination of planar partitions by repeated application of Rule~\ref{hrule}\footnotemark, and the resulting linear combination does not depend on the choices made when applying Rule~\ref{hrule}, so that we may write $$\tau \to \sum_{\text{ planar partitions } \sigma} \mathcal{P}_{\sigma, \tau}^{(t)} \sigma.$$ For any planar partition $\sigma$, the same coefficients $\mathcal{P}_{\sigma, \tau}^{(t)}$ satisfy the equation $$\dddot{ \Pr}(\sigma) = \dfrac{ \Pr(\sigma) }{ \Pr( 1|2| 3| \cdots |n)} = \sum_{\text{ partitions } \tau} \mathcal{P}_{\sigma, \tau}^{(t)} L_{\tau}$$ for bipartite edge-weighted planar graphs. \end{thm} \footnotetext{In \cite[Theorem 1.2]{KW2006}, Rule~\ref{kwrule1} is used in the theorem statement, but for the reasons stated in Remark~\ref{rem:rule1error}, we have changed it to Rule~\ref{hrule}.} \subsection{Proof that $\mathcal{Q}^{(DD)}$ is integer-valued} \label{sec:firstmajorproof} We will complete the proof of Theorem~\ref{thm:thm1} by showing that we can use the transformation rule introduced in the previous section to compute the columns of the matrix $\mathcal{Q}^{(DD)}$. \begin{rem} \label{defn:myrule1} For pairings, both Rule~\ref{kwrule1} and Rule~\ref{hrule} become the following: If a pairing $\rho$ is nonplanar, then there will exist items $a < b < c < d$ such that $a$ and $c$ are paired, and $b$ and $d$ are paired. Then the transformation rule is \begin{equation} \label{eqn:myrule1} ac|bd|\text{rest} \to - ab|cd|\text{rest} - ad|bc|\text{rest}. \end{equation} \end{rem} \begin{trule} \label{myrule2} For a black-white pairing $\rho$, repeatedly apply (\ref{eqn:myrule1}) until we have written $\rho$ as a linear combination of planar pairings. Then multiply each planar pairing $\sigma$ in this sum by $\text{sign}_{OE}(\sigma)\text{sign}_{BW}(\rho)$. 
\end{trule} The fact that Rule~\ref{myrule2} is well-defined follows from Theorem \ref{thm:KWgrovethm}. Proving that Rule~\ref{myrule2} computes the columns of $\mathcal{Q}^{(DD)}$ will prove that the matrix $\mathcal{Q}^{(DD)}$ is integer-valued and gives us the desired theorem, which is stated in full below. \begin{customthm}{\ref{thm:thm1}} Any black-white pairing $\rho$ can be transformed into a formal linear combination of planar pairings by repeated application of Rule~\ref{myrule2}, and the resulting linear combination does not depend on the choices we made when applying Rule~\ref{myrule2}, so that we may write $$\rho \to \sum\limits_{\text{planar pairings } \sigma} \mathcal{Q}^{(DD)}_{\sigma, \rho} \sigma.$$ For any planar pairing $\sigma$, these same coefficients $\mathcal{Q}^{(DD)}_{\sigma, \rho}$ satisfy the equation $$\widetilde{\Pr}(\sigma) := \dfrac{Z^{DD}_{\sigma}(G, {\bf N})}{(Z^D(G))^2} = \sum_{\text{black-white pairings } \rho} \mathcal{Q}^{(DD)}_{\sigma, \rho} Y'_{\rho}.$$ \end{customthm} \begin{rem} The fact that the resulting linear combination does not depend on the choices we made when applying Rule~\ref{myrule2} is an immediate consequence of Theorem~\ref{thm:KWgrovethm}. \end{rem} The proof of Theorem~\ref{thm:thm1} requires two additional lemmas. \begin{lemma} \label{lem:componentlemma} Let $\pi$ be a pairing and let $\rho$ be a pairing with nodes $a < b < c < d$ that form a crossing in $\rho$. Let $\rho_1$ be the pairing obtained from $\rho$ by replacing the pairs $(a, c)$ and $(b, d)$ with $(a, b)$ and $(c, d)$ and let $\rho_2$ be the pairing obtained from $\rho$ by replacing the pairs $(a, c)$ and $(b, d)$ with $(a, d)$ and $(b, c)$. 
Then either \begin{enumerate} \item[(1)] $\pi \cup \rho$ has one more component than both $\pi \cup \rho_1$ and $\pi \cup \rho_2$, \item[(2)] $\pi \cup \rho_1$ has one more component than $\pi \cup \rho$, and $\pi \cup \rho_2$ and $\pi \cup \rho$ have the same number of components, or \item[(3)] $\pi \cup \rho_2$ has one more component than $\pi \cup \rho$, and $\pi \cup \rho_1$ and $\pi \cup \rho$ have the same number of components. \end{enumerate} \end{lemma} \begin{proof} Observe that either $a, b, c,$ and $d$ are all in the same component of $\pi \cup \rho$ or $a$ and $c$ are in the same component and $b$ and $d$ are in a different component. If $a$ and $c$ are in the same component and $b$ and $d$ are in a different component, then pairing $a$ with $b$ and $c$ with $d$ merges these two components. Similarly, pairing $a$ with $d$ and $b$ with $c$ merges these two components. If $a, b, c$ and $d$ are in the same component, then we consider the following path in $\pi \cup \rho$: \begin{equation} \label{eqn:path} c - a - \pi(a) - \rho(\pi(a)) - \cdots \end{equation} This path reaches $b$ or $d$ before it reaches $c$ since by assumption $a, b, c, d$ are all in the same component. If it reaches $b$ before $d$, then in $\rho_1$, $a$ and $b$ are in a different component than $c$ and $d$. This is because path (\ref{eqn:path}) is replaced with $$b - a - \pi(a) - \rho(\pi(a)) - \cdots - b,$$ so $\pi \cup \rho_1$ has one more component than $\pi \cup \rho$. In $\rho_2$, $a, b, c,$ and $d$ are all in the same component, because path (\ref{eqn:path}) is replaced with $$d - a - \pi(a) - \rho(\pi(a)) - \cdots - b - c,$$ so $\pi \cup \rho_2$ and $\pi \cup \rho$ have the same number of components. If the path reaches $d$ before $b$, then in $\rho_2$, $a$ and $d$ are in a different component than $b$ and $c$, so $\pi \cup \rho_2$ has one more component than $\pi \cup \rho$. 
In $\rho_1$, $a, b, c,$ and $d$ are in the same component, so $\pi \cup \rho_1$ and $\pi \cup \rho$ have the same number of components. \end{proof} \begin{lemma} \label{lem:decompintoplanar} Let $\rho$ be a pairing (not necessarily black-white). Then for any planar pairing $\pi$, \begin{equation} \label{eqn:decompintoplanar} \text{sign}_{OE}(\pi) (-1)^{ C_{\rho}} (-1)^{\# \text{nodes}/2} 2^{C_{\rho}} = \sum_{\text{ planar pairings } \sigma} \mathcal{P}_{\sigma, \rho}^{(t)} \text{sign}_{OE}( \sigma)2^{C_{\sigma} }. \end{equation} Here, $C_{\rho}$ denotes the number of components in $ \pi \cup \rho$ and $C_{\sigma}$ denotes the number of components in $ \pi \cup \sigma$. \end{lemma} \begin{proof} We will prove the claim by induction on the number of crossings in $\rho$. \noindent {\em Base Case.} When $\rho$ has 0 crossings, equation (\hyperref[eqn:decompintoplanar]{\ref{eqn:decompintoplanar}}) becomes \[ \text{sign}_{OE}(\pi) (-1)^{ C_{\rho}} (-1)^{\# \text{nodes}/2} 2^{ C_{\rho}} = \mathcal{P}_{\rho, \rho}^{(t)} \text{sign}_{OE}( \rho)2^{ C_{\rho}},\] which is equivalent to \begin{equation} \label{eqn:decompintoplanarbasecase} \text{sign}_{OE}(\pi) (-1)^{C_{\rho}} (-1)^{\# \text{nodes}/2} \text{sign}_{OE}( \rho) = 1 . \end{equation} First suppose $\rho = \pi$. Since $(-1)^{\# \text{ comp in } \pi \cup \pi} = (-1)^{\# \text{nodes}/2}$, equation~(\ref{eqn:decompintoplanarbasecase}) holds. We can obtain any planar pairing from any other planar pairing by a sequence of moves, where each move consists of swapping the locations of two nodes of the same parity. So we will show that when $\rho$ is a planar pairing, $x$ and $y$ are two nodes of the same parity, and $\rho'$ is the pairing obtained from $\rho$ by swapping the locations of $x$ and $y$, replacing $\rho$ with $\rho'$ does not change the left hand side of equation~(\ref{eqn:decompintoplanarbasecase}). 
Since $\text{sign}_{OE}(\rho) = - \text{sign}_{OE}(\rho')$, we must show that $(-1)^{\# \text{ comp in } \pi \cup \rho} = - (-1)^{\# \text{ comp in } \pi \cup \rho'}$. If $x$ and $\rho(x)$ are in a different component than $y$ and $\rho(y)$ in $\pi \cup \rho$, then $\pi \cup \rho'$ has one fewer component than $\pi \cup \rho$. If $x, \rho(x), y,$ and $\rho(y)$ are all in the same component in $\pi \cup \rho$, then without loss of generality assume that $x$ and $y$ are both even, so $\rho(x)$ and $\rho(y)$ are both odd, and consider the following path in $\pi \cup \rho$: $$\rho(x) - x- \pi(x) - \rho(\pi(x)) - \cdots.$$ Since $\rho$ and $\pi$ are both odd-even, segments $n \!\frown\! \rho(n)$ go from an odd node to an even node. Since $\rho(y)$ is odd and $y$ is even, this means that we must reach the node $\rho(y)$ before the node $y$. Therefore we have the path $$\rho(x) - x - \pi(x) - \rho(\pi(x)) - \cdots - \rho(y) - y - \cdots$$ When we replace the pairs $(x, \rho(x)), (y, \rho(y))$ with $(x, \rho(y))$ and $(y, \rho(x))$, this path is replaced with $$\rho(y) - x - \pi(x) - \rho(\pi(x)) - \cdots - \rho(y)$$ so $(x, \rho(y))$ and $(y, \rho(x))$ are in different components of $\pi \cup \rho'$. We conclude that equation (\ref{eqn:decompintoplanarbasecase}) holds for all planar pairings $\rho$. Now assume that equation (\ref{eqn:decompintoplanar}) holds for pairings $\rho$ with $\leq k$ crossings. Let $\rho$ be a pairing with $k+1$ crossings. Let $a < b < c < d$ be nodes that form a crossing in $\rho$. Let $\rho_1$ be the pairing obtained by replacing the pairs $(a, c)$ and $(b, d)$ with $(a, b)$ and $(c, d)$ and let $\rho_2$ be the pairing obtained by replacing the pairs $(a, c)$ and $(b, d)$ with $(a, d)$ and $(b, c)$. We claim that both $\rho_1$ and $\rho_2$ have fewer than $k+1$ crossings. 
Observe that if a chord connecting two nodes $n_1$ and $n_2$ crosses the chord connecting $a$ and $b$ in $\rho_1$, it also crosses the chord connecting $a$ and $c$ or the chord connecting $b$ and $d$ in $\rho$. Similarly, if a chord connecting two nodes crosses the chord connecting $c$ and $d$ in $\rho_1$, it also crosses the chord connecting $a$ and $c$ or the chord connecting $b$ and $d$ in $\rho$. It follows that $\rho_1$ has at least one fewer crossing than $\rho$. A similar argument shows that $\rho_2$ has at least one fewer crossing than $\rho$. By the induction hypothesis, \[ \text{sign}_{OE}(\pi) (-1)^{C_{\rho_{1}}} (-1)^{\# \text{nodes}/2} 2^{C_{\rho_{1}}} = \sum_{\text{ planar pairings } \sigma} \mathcal{P}_{\sigma, \rho_1}^{(t)} \text{sign}_{OE}( \sigma)2^{C_{\sigma}} \] and \[ \text{sign}_{OE}(\pi) (-1)^{ C_{\rho_{2}} } (-1)^{\# \text{nodes}/2} 2^{ C_{\rho_{2}} } = \sum_{\text{ planar pairings } \sigma} \mathcal{P}_{\sigma, \rho_2}^{(t)} \text{sign}_{OE}( \sigma)2^{ C_{\sigma} }.\] By the transformation rule (\ref{eqn:myrule1}), $$\mathcal{P}_{\sigma, \rho_1}^{(t)} + \mathcal{P}_{\sigma, \rho_2}^{(t)} = -\mathcal{P}_{\sigma, \rho}^{(t)}$$ so we have \[ \sum_{\substack{ \text{ planar} \\ \text{pairings } \sigma }} \mathcal{P}_{\sigma, \rho}^{(t)} \text{sign}_{OE}( \sigma)2^{C_{\sigma} } = - \text{sign}_{OE}(\pi)(-1)^{\# \text{nodes}/2} \left( (-1)^{C_{\rho_{1}}} 2^{C_{\rho_{1}} } + (-1)^{ C_{\rho_{2}} } 2^{ C_{\rho_{2}} } \right). \] By Lemma \ref{lem:componentlemma} there are three cases to consider: \begin{enumerate} \item[(1)] $\pi \cup \rho$ has one more component than both $\pi \cup \rho_1$ and $\pi \cup \rho_2$, \item[(2)] $\pi \cup \rho_1$ has one more component than $\pi \cup \rho$, and $\pi \cup \rho_2$ and $\pi \cup \rho$ have the same number of components, and \item[(3)] $\pi \cup \rho_2$ has one more component than $\pi \cup \rho$, and $\pi \cup \rho_1$ and $\pi \cup \rho$ have the same number of components. 
\end{enumerate} \noindent{\bf Case (1).} Since $C_{\rho_{i}} - C_{\rho} = -1$ for $i = 1, 2$, \begin{eqnarray*} (-1)^{C_{\rho_{1}} } 2^{ C_{\rho_{1}} } + (-1)^{C_{\rho_{2}} } 2^{C_{\rho_{2}} } &= & -(-1)^{ C_{\rho} }\cdot \frac{1}{2} \cdot 2^{ C_{\rho} } + -(-1)^{ C_{\rho} }\cdot \frac{1}{2} \cdot 2^{C_{\rho}} \\ & = & (-1)^{C_{\rho} } 2^{C_{\rho}} \left(-\frac{1}{2} - \frac{1}{2} \right)\\ & = & -(-1)^{C_{\rho}} 2^{C_{\rho}}. \end{eqnarray*} \noindent{\bf Cases (2) and (3).} We will only include the proof for case (2), since case (3) is completely analogous. Since $C_{\rho_{1}} - C_{\rho} = 1$ and $C_{\rho_{2}} - C_{\rho} = 0$, \begin{eqnarray*} (-1)^{ C_{\rho_{1}} } 2^{ C_{\rho_{1}} } + (-1)^{ C_{\rho_{2}}} 2^{ C_{\rho_{2}} } & = & -(-1)^{ C_{\rho} } 2 \cdot 2^{ C_{\rho}} + (-1)^{ C_{ \rho}} 2^{ C_{ \rho}} \\ & = & (-1)^{C_{ \rho} } 2^{ C_{\rho}} (-2 + 1)\\ & = & -(-1)^{ C_{ \rho}} 2^{ C_{ \rho}} . \end{eqnarray*} So in all cases, \begin{eqnarray*} - \text{sign}_{OE}(\pi)(-1)^{\# \text{nodes}/2} \left( (-1)^{ C_{ \rho_{1}} } 2^{ C_{ \rho_{1}} } + (-1)^{ C_{ \rho_{2}} } 2^{C_{ \rho_{2}} } \right) = \text{sign}_{OE}(\pi)(-1)^{\# \text{nodes}/2} (-1)^{C_{ \rho}} 2^{C_{ \rho}}, \end{eqnarray*} and thus $$\text{sign}_{OE}(\pi) (-1)^{ C_{\rho} } (-1)^{\# \text{nodes}/2} 2^{ C_{\rho} } = \sum_{\text{ planar pairings } \sigma} \mathcal{P}_{\sigma, \rho}^{(t)} \text{sign}_{OE}( \sigma)2^{ C_{\sigma} }.$$ \end{proof} \begin{proof}[Proof of Theorem \ref{thm:thm1}] Let $\widetilde{\mathcal{Q}}$ be the matrix obtained by the procedure from Rule~\ref{myrule2}, so the $(\sigma, \rho)$th entry of $\widetilde{\mathcal{Q}}$ is the product of $\text{sign}_{OE}(\sigma)\text{sign}_{BW}(\rho)$ with the coefficient of $\sigma$ when $\rho$ is written as a linear combination of planar pairings using (\ref{eqn:myrule1}). That is, \begin{equation} \label{eqn:Qtilde} \widetilde{\mathcal{Q}}_{\sigma, \rho} = \text{sign}_{OE}(\sigma) \text{sign}_{BW}(\rho) \mathcal{P}_{\sigma, \rho}^{(t)}. 
\end{equation} We will show that $$\mathcal{M}_{2} \widetilde{\mathcal{Q}} {\bf e}_{i} = \mathcal{B}_{2} {\bf e}_{i}$$ for all $i$. This will show that $\mathcal{M}_{2} \widetilde{\mathcal{Q}} = \mathcal{M}_{2} \mathcal{Q}^{(DD)}$, which proves the theorem since $\mathcal{M}_{2}$ is invertible. Let $\rho$ be a black-white pairing. Recall from Definition \hyperref[Bdefn]{\ref{Bdefn}} that $(\mathcal{B}_2)_{\pi, \rho} = \text{sign}(\pi, \rho) 2^{C_{ \rho} }$. Then by equation (\ref{eqn:Qtilde}), to show that $\mathcal{M}_{2} \widetilde{\mathcal{Q}} {\bf e}_{i} = \mathcal{B}_{2} {\bf e}_{i}$, we need to show that for each planar pairing $\pi$, $$\text{sign}(\pi, \rho) 2^{C_{\rho}} = \sum_{\text{ planar pairings } \sigma} \mathcal{P}_{\sigma, \rho}^{(t)} \text{sign}_{OE}( \sigma) \text{sign}_{BW}( \rho) 2^{C_{\sigma}}. $$ By Definition \ref{signrhopi}, $$\text{sign}(\pi, \rho) = (-1)^{\# \text{nodes}/2} (-1)^{C_{\rho}} \text{sign}_{OE}(\pi) \text{sign}_{BW} (\rho).$$ Applying Lemma~\ref{lem:decompintoplanar} completes the proof. \end{proof} \subsection{Another characterization of $\text{sign}(S)$} \label{sec:signSformula} In this section, we prove Lemma \ref{lem:signSdefn2}, which was key in establishing Lemma~\ref{lem:mylem35}.\\ \noindent {\bf Lemma \ref{lem:signSdefn2}.} Let $S$ be a balanced subset of nodes and let $\text{sign}(S)$ be defined as in Lemma \ref{lemma32gen}. Then \begin{equation} \label{eqn:signSdefn2} \text{sign}(S) = (-1)^{\# \text{nodes}/2} (-1)^{\# \text{ comp in } \pi \cup \rho} \text{sign}_{OE}(\pi) \text{sign}_{BW}(\rho) \end{equation} where $\pi$ is an odd-even pairing such that $\pi$ does not connect $S \triangle T$ to $(S \triangle T)^c$ and $\rho$ is a black-white pairing such that $\rho$ does not connect $S$ to $S^c$. 
\\ Proving Lemma~\ref{lem:signSdefn2} requires \begin{itemize} \item[(1)] proving that such pairings $\pi$ and $\rho$ always exist, \item[(2)] proving that equation (\ref{eqn:signSdefn2}) is well-defined, and \item[(3)] proving that equation (\ref{eqn:signSdefn2}) holds. \end{itemize} We will postpone the proof of (1) because the fact that such pairings $\pi$ and $\rho$ always exist will follow quickly from the proofs of (2) and (3). \subsubsection{Proof that equation (\ref{eqn:signSdefn2}) is well-defined} The strategy of the proof is to define local moves that allow us to get from a pair $(\pi_1, \rho_1)$ such that $\pi_1$ does not connect $S \triangle T$ to $(S \triangle T)^c$ and $\rho_1$ does not connect $S$ to $S^c$ to any other pair $(\pi_2, \rho_2)$ with this property, and to show that these moves do not change the right hand side of equation~(\ref{eqn:signSdefn2}). Specifically, we will define two types of local moves. First, we define moves that modify $\pi$ by swapping the locations of two nodes of the same parity under certain conditions but leave $\rho$ fixed, called moves of type $A_{OE}$. Next, we define moves that modify $\rho$ by swapping the locations of two nodes of the same color under similar conditions but leave $\pi$ fixed, called moves of type $A_{BW}$. In order to describe the conditions under which we can swap the locations of two nodes, we need the following definition. \begin{defn} We call a pair of nodes $(a, \eta(a))$ a {\em transition pair} if exactly one of the nodes $a, \eta(a)$ is in $T$. \end{defn} \begin{rem} If $a$ and $b$ are two nodes in the same component of $\pi \cup \rho$, there are two paths from $a$ to $b$. Since the algorithm in Lemma~\ref{alglemma} is well-defined, the parity of the number of transition pairs is independent of the path. \end{rem} \begin{defn} \label{defn:AOE} Suppose $\pi$ is an odd-even pairing and $\rho$ is a black-white pairing. Let $a$ and $b$ be two nodes of the same parity. 
If \begin{itemize} \item $a$ and $b$ are in different components, \item $a$ and $b$ are the same color and a path from $a$ to $b$ contains an even number of transition pairs, or \item $a$ and $b$ are different colors and a path from $a$ to $b$ contains an odd number of transition pairs, \end{itemize} let $\pi'$ be the pairing obtained from $\pi$ by swapping the locations of $a$ and $b$ in $\pi$. We say that $(\pi', \rho)$ and $(\pi, \rho)$ differ by a {\em move of type $A_{OE}$}. See Figure~\ref{fig:typeAOE} for an example. \end{defn} \begin{figure} \caption{{\em Left}: an example of a move of type $A_{OE}$ (Definition~\ref{defn:AOE}). {\em Right}: an example of a move of type $A_{BW}$ (Definition~\ref{defn:ABW}).} \label{fig:typeAOE} \end{figure} \begin{defn} \label{defn:ABW} Let $\pi$ be an odd-even pairing and let $\rho$ be a black-white pairing. Suppose $a$ and $b$ are the same color and either $a$ and $b$ are in different components, or a path in $\pi \cup \rho$ from $a$ to $b$ contains an even number of transition pairs. Suppose we swap the locations of $a$ and $b$ in $\rho$ to obtain the pairing $\rho'$. Then we say that $(\pi, \rho')$ and $(\pi, \rho)$ differ by a {\em move of type $A_{BW}$}. See Figure~\ref{fig:typeAOE} for an example. \end{defn} \begin{lemma} \label{AOElemma} Let $\pi, \pi'$ be odd-even pairings and let $\rho$ be a black-white pairing such that $( \pi, \rho)$ and $(\pi', \rho)$ differ by a move of type $A_{OE}$. Then the number of components in $\pi \cup \rho$ and the number of components in $\pi' \cup \rho$ differ by one. \end{lemma} \begin{proof} If $a$ and $b$ are in different components of $\pi \cup \rho$, swapping the locations of $a$ and $b$ in $\pi$ merges these two components, so the number of components decreases by one. If $a$ and $b$ are in the same component, without loss of generality assume that node $a$ is white. Consider the following path from $a$ to $b$, which starts by traversing the edge connecting $a$ to $\pi(a)$: $$a - \pi(a) - \cdots - b.$$ We claim that we always reach $b$ before $\pi(b)$. 
This follows from the observation that because $\rho$ is black-white and $\pi$ is odd-even, a path in $\pi \cup \rho$ alternates between black and white nodes unless a pair $(d, \pi(d))$ in the path is a transition pair. So since our path starts at a white node by traversing the edge in $\pi$, if we consider an edge $d \!\frown\! \pi(d)$ of the path, $d$ is white and $\pi(d)$ is black if and only if we traverse this edge after passing through an even number of transition pairs. So, if we were to reach $\pi(b)$ before $b$, $b$ is black if and only if there are an even number of transition pairs between $a$ and $b$, a contradiction since $a$ is white. It follows that we must reach $b$ before $\pi(b)$. Thus we have the following path in $\pi \cup \rho$: $$a - \pi(a) - \cdots - b - \pi(b)$$ When we replace the pairs $(a, \pi(a))$ and $(b, \pi(b))$ in $\pi$ with $(a, \pi(b))$ and $(b, \pi(a))$ to obtain $\pi'$ the middle portion of the path above $\pi(a) - \cdots - b$ becomes a new component, so the number of components increases by one. \end{proof} \begin{cor} \label{AOEcor} A move of type $A_{OE}$ does not change the right hand side of equation (\ref{eqn:signSdefn2}). \end{cor} \begin{proof} If $(\pi, \rho)$ and $(\pi', \rho)$ differ by a move of type $A_{OE}$, then $(-1)^{\# \text{ comp in } \pi \cup \rho} = -(-1)^{\# \text{ comp in } \pi' \cup \rho}$ by Lemma~\ref{AOElemma} and $\text{sign}_{OE}(\pi) = -\text{sign}_{OE}(\pi')$, so replacing $\pi$ with $\pi'$ does not change the right hand side of equation (\ref{eqn:signSdefn2}). \end{proof} \begin{cor} \label{ABWcor} A move of type $A_{BW}$ does not change the right hand side of equation (\ref{eqn:signSdefn2}). \end{cor} \begin{proof} The proof that a move of type $A_{BW}$ changes the number of components in $\pi \cup \rho$ by one is analogous to the proof of Lemma \ref{AOElemma}. The claim follows as it did in the proof of Corollary~\ref{AOEcor}. 
\end{proof} \begin{proof}[Proof that equation (\ref{eqn:signSdefn2}) is well-defined] By Corollaries \ref{AOEcor} and \ref{ABWcor}, moves of type $A_{OE}$ and type $A_{BW}$ do not change the right hand side of equation (\ref{eqn:signSdefn2}). So to prove that the formula for $\text{sign}(S)$ is well-defined, it suffices to show that these two types of moves are enough to get from a pair $(\pi_1, \rho_1)$ such that $\pi_1$ does not connect $S \triangle T$ to $(S \triangle T)^c$ and $\rho_1$ does not connect $S$ to $S^c$ to any other pair $(\pi_2, \rho_2)$ with this property. We can get from any pairing of nodes in $S$ to any other pairing of nodes in $S$ using moves of type $A_{BW}$ because type $A_{BW}$ moves allow us to exchange any nodes of the same color in $S$. By the same reasoning, we can get from any pairing of nodes in $S^c$ to any other pairing of nodes in $S^c$. So, if $\rho$ and $\rho'$ are two pairings that both do not connect $S$ to $S^c$, then we can get from $\rho$ to $\rho'$ using a sequence of moves of type $A_{BW}$. Similarly, we can get from any odd-even pairing of nodes in $S \triangle T$ to any other odd-even pairing of nodes in $S \triangle T$ by swapping nodes of the same parity in $S \triangle T$. We can also get from any odd-even pairing of nodes in $(S \triangle T)^c$ to any other odd-even pairing of nodes in $(S \triangle T)^c$. So, if $\pi$ and $\pi'$ are two odd-even pairings that both do not connect $S \triangle T$ to $(S \triangle T)^c$, then we can get from $\pi$ to $\pi'$ using a sequence of moves of type $A_{OE}$. We have thus shown if we have two pairs of pairings $(\pi_1, \rho_1)$ and $(\pi_2, \rho_2)$ such that $\pi_i$ is odd-even and does not connect $S \triangle T$ to $(S \triangle T)^c$ and $\rho_i$ is black-white and does not connect $S$ to $S^c$, that the right hand side of equation (\ref{eqn:signSdefn2}) is unchanged when we replace $(\pi_1, \rho_1)$ with $(\pi_2, \rho_2)$. 
\end{proof} \subsubsection{Proof that equation (\ref{eqn:signSdefn2}) holds} \label{sec:eqnholds} First assume that $S$ is a balanced set of size $2j$ such that there is a planar black-white pairing $\rho$ that does not connect $S$ to $S^c$. Although it may not be obvious that such a set always exists, recall from Lemma \ref{firstlemma34} that regardless of the node coloring of {\bf N}, there exists a planar black-white pairing $\rho$ of ${\bf N}$. So we choose $S$ to be $2j$ of the arcs of $\rho$. Then by definition, $$\text{sign}(S) = (-1)^{\# \text{ crosses of } \rho} = 1.$$ Let $\pi = \rho$. Since $\pi$ is odd-even and black-white, for all pairs in $\pi$, either both nodes of the pair are in $T$ or both are not in $T$, so $\pi$ does not connect $S \triangle T$ to $(S \triangle T)^c$. Since $\pi = \rho$, $(-1)^{\# \text{nodes}/2}= (-1)^{\# \text{ comp in } \pi \cup \rho}$. Also, $\text{sign}_{OE}(\pi) = \text{sign}_{BW}(\rho)$ by Lemma~\ref{lem:OEandBWsignsold}, so equation (\ref{eqn:signSdefn2}) holds. We can obtain any balanced set of size $2j$ from $S$ by making a sequence of the following types of replacements: \begin{itemize} \item[(1)] Replace $x \in S$ with $x+1 \in S^c$, where $(x, x+1)$ is a couple of consecutive nodes of the same color. (Or replace $x+1 \in S$ with $x \in S^c$). \item[(2)] Replace $x \in S$ with $y \in S^c$, where $x < y$ are the same color and all $\ell$ nodes in the interval $[x+1, x+2, \ldots, y-1]$ are the opposite color of $x$ and $y$ ($\ell \geq 1$). (Or replace $y \in S$ with $x \in S^c$). \end{itemize} Therefore it suffices to show the following. Assume we're given a balanced set $S$, an odd-even pairing $\pi$ that does not connect $S \triangle T$ to $(S \triangle T)^c$, and a black-white pairing $\rho$ that does not connect $S$ to $S^c$ such that $\rho |_{S}$ and $\rho |_{S^c}$ are planar. 
After making either of the above two types of replacements to obtain $S'$, we can construct an odd-even pairing $\pi'$ that does not connect $S' \triangle T$ to $(S' \triangle T)^c$ and a black-white pairing $\rho'$ that does not connect $S'$ to $S'^c$ such that $\rho' |_{S'}$ and $\rho' |_{S'^c}$ are planar. After replacing $S$, $\pi$, $\rho$ in equation (\ref{eqn:signSdefn2}) with $S'$, $\pi'$ and $\rho'$, equation (\ref{eqn:signSdefn2}) still holds. This requires several lemmas. \begin{lemma} \label{lem:pix=y} Let $S$ be a balanced subset of nodes. Let $x$ and $y$ be two nodes of the same color and opposite parity with $x < y$ such that $x \in S$ and $y \notin S$. Let $\rho$ be a black-white pairing such that $\rho$ does not connect $S$ to $S^c$ and let $\pi$ be an odd-even pairing such that $\pi$ does not connect $S \triangle T$ to $(S \triangle T)^c$. Let $S' = S \setminus \{x\} \cup \{y\}$ and let $\rho'$ be the pairing obtained by swapping the locations of $x$ and $y$ in $\rho$. Then \begin{itemize} \item[(a)] if $\pi(x) = y$, \begin{itemize} \item[(i)]$\pi$ does not connect $S' \triangle T$ to $(S' \triangle T)^c$, and \item[(ii)] when $\rho$ is replaced with $\rho'$, the right hand side of equation (\ref{eqn:signSdefn2}) changes sign. \end{itemize} \item[(b)] if $\pi(x) \neq y$, let $\pi'$ be the pairing obtained from $\pi$ by pairing $x$ with $y$, $\pi(x)$ with $\pi(y)$, and leaving the remaining pairs the same. Then \begin{itemize} \item[(i)] $\pi'$ does not connect $S' \triangle T$ to $(S' \triangle T)^c$. \item[(ii)] when $\rho$ is replaced with $\rho'$ and $\pi$ is replaced with $\pi'$, the right hand side of equation (\ref{eqn:signSdefn2}) changes sign. \end{itemize} \end{itemize} \end{lemma} \begin{proof} We will first prove part (a). The fact that $\pi$ does not connect $S' \triangle T$ to $(S' \triangle T)^c$ follows from the observation that since $\pi(x) = y$, both $x$ and $y$ are in $S \triangle T$ or both are in $(S \triangle T)^c$. 
If both $x, y$ are in $S \triangle T$ then since we assumed $x \in S$ and $y \notin S$, $y$ must be in $T$, so both $x, y$ are in $(S' \triangle T)^c$. So $\pi$ does not connect $S' \triangle T$ to $(S' \triangle T)^c$. Since we obtained $\rho'$ from $\rho$ by swapping the locations of $x$ and $y$, $\text{sign}_{BW}(\rho') = - \text{sign}_{BW}(\rho)$. The number of components in $\pi \cup \rho$ is the same as the number of components in $\pi \cup \rho'$ because when we replace $\rho$ with $\rho'$ the path $\pi(\rho(x)) - \rho(x) - x - y - \rho(y)$ is replaced with $\pi(\rho(x)) - \rho(x) - y - x - \rho(y)$. So the right hand side of equation (\ref{eqn:signSdefn2}) changes sign. Next, we prove part (b). The proof of (i) relies on the observation that since $x$ and $y$ are the same color but opposite parity, exactly one of the nodes $x, y$ is in $T$. This implies that $x$ and $y$ are both in $S \triangle T$ or both in $(S \triangle T)^c$ and that $x$ and $y$ are both in $S' \triangle T$ or both in $(S' \triangle T)^c$. Since $x$ and $y$ are both in $S \triangle T$ or both in $(S \triangle T)^c$, $\pi(x)$ and $\pi(y)$ are both in $S \triangle T$ or both in $(S \triangle T)^c$. Since neither $\pi(x)$ nor $\pi(y)$ is $x$ or $y$, $\pi(x)$ and $\pi(y)$ are both in $S' \triangle T$ or both in $(S' \triangle T)^c$. We conclude that $\pi'$ does not connect $S' \triangle T$ to $(S' \triangle T)^c$. For the proof of (ii), first note that pairing $x$ with $y$ and $\pi(x)$ with $\pi(y)$ is the same as swapping the locations of $y$ and $\pi(x)$. It follows that $\text{sign}_{OE}(\pi') = - \text{sign}_{OE}(\pi)$, and since $\text{sign}_{BW}(\rho') = - \text{sign}_{BW}(\rho)$, it remains to show that the number of components in $\pi' \cup \rho'$ and the number of components in $\pi \cup \rho$ differ by one. By letting $a = \pi(x)$ and $b = y$ in Definition~\ref{defn:AOE}, we see that $(\pi', \rho)$ and $(\pi, \rho)$ differ by a move of type $A_{OE}$. 
If $\pi(x)$ and $y$ are in different components, this is clear, since $\pi(x)$ and $y$ have the same parity. If $\pi(x)$ and $y$ are in the same component, we must show that they are the same color if and only if there are an even number of transition pairs between them. This is because \begin{itemize} \item $y$ and $x$ are the same color \item a path from $y$ to $x$ contains an odd number of transition pairs (since $x \in S$ and $y \notin S$) \item $x$ and $\pi(x)$ are the same color if and only if $(x, \pi(x))$ is a transition pair \end{itemize} So, by Lemma \ref{AOElemma}, the number of components in $\pi' \cup \rho$ and the number of components in $\pi \cup \rho$ differ by one. Then, since $\pi'(x) = y$, by the proof of part (a), the number of components in $\pi' \cup \rho'$ is the same as the number of components in $\pi' \cup \rho$. We conclude that when $\rho$ is replaced with $\rho'$ and $\pi$ is replaced with $\pi'$ the right hand side of equation (\ref{eqn:signSdefn2}) changes sign. \end{proof} \begin{lemma} \label{lem:planarwhenrestricted} Let $S \subseteq {\bf N}$ be a balanced set. Let $x, y$ be nodes of the same color such that $x \in S$, $y \in S^c$, $x < y$ and all $\ell$ nodes in the interval $[x+1, x+2, \ldots, y-1]$ are the opposite color of $x$ and $y$ ($\ell \geq 1$). Let $\rho$ be a black-white pairing such that $\rho$ does not connect $S$ to $S^c$ and $\rho |_{S}$ and $\rho |_{S^c}$ are planar. \begin{itemize} \item[(1)] If $\rho(x)$ is not in the interval $[x+1, \ldots, y-1]$ and there is a node in this interval that is in $S$, let $k$ be the smallest integer such that $x+k$ is in $S$ and let $\rho'$ be the pairing obtained from $\rho$ by replacing the pairs $(x, \rho(x))$ and $(x+k, \rho(x+k))$ with the pairs $(x, x+k)$ and $(\rho(x), \rho(x+k))$. Then $\rho' |_{S}$ and $\rho' |_{S^c}$ are planar. Also, replacing $\rho$ with $\rho'$ does not change the right hand side of equation (\ref{eqn:signSdefn2}). 
\item[(2)] If $\rho(y)$ is not in the interval $[x+1, \ldots, y-1]$ and there is a node in this interval that is in $S^c$, let $k$ be the smallest integer such that $y - k$ is in $S^c$ and let $\rho'$ be the pairing obtained from $\rho$ by replacing the pairs $(y, \rho(y))$ and $(y-k, \rho(y-k))$ with the pairs $(y,y - k)$ and $(\rho(y), \rho(y-k))$. Then $\rho' |_{S}$ and $\rho' |_{S^c}$ are planar. Also, replacing $\rho$ with $\rho'$ does not change the right hand side of equation (\ref{eqn:signSdefn2}). \end{itemize} \end{lemma} \begin{proof} Since the proofs of (1) and (2) are completely analogous, we only prove (1). We first show that $\rho'|_{S}$ is planar. Since we chose the smallest integer $k$ such that $x+k$ is in $S$, there are no chords connecting two nodes in $S$ that cross the chord $x \!\frown\! (x+k)$. We need to check that there are no chords connecting two nodes in $S$ that cross the chord $\rho(x) \!\frown\! \rho(x+k)$. If there was such a crossing, that means that there is a node $a \in S$ such that one of the following holds: \begin{itemize} \item[(1)] $a < \rho(x+k) < \rho(a) < \rho(x)$, \item[(2)] $\rho(x+k) < a < \rho(x) < \rho(a)$, \item[(3)] $a < \rho(x) < \rho(a) < \rho(x+k)$, or \item[(4)] $\rho(x) < a < \rho(x+k) < \rho(a)$. \end{itemize} We use the facts that if $a > x$ then $a > x+k$ (since otherwise $a \in S^c$, a contradiction) or, similarly, if $\rho(a) > x$ then $\rho(a) > x+k$, to show that if the inequalities in (1), (2), (3), or (4) hold, then $\rho |_{S}$ is not planar. For example, in case (1), if $a > x$ then $a > x+k$. So we have $$x+k < a < \rho(x+k) < \rho(a),$$ which contradicts that $\rho |_{S}$ is planar. If $a < x$ then there are two cases. If $\rho(a) < x$, we have $a < \rho(x+k) < \rho(a) < x+k.$ If instead $\rho(a) > x$, we have $a < x < \rho(a) < \rho(x).$ In both cases, we have a contradiction. 
In case (2), if $a > x$, then we have $x < a < \rho(x) < \rho(a).$ If $a < x$ and $\rho(a) < x$, then $a < \rho(x) < \rho(a) <x.$ If $a < x$ and $\rho(a) > x$, then $\rho(x+k) < a < x+k < \rho(a).$ In all cases, we have a contradiction. Case (3) is similar to case (2), and case (4) is similar to case (1). We conclude that $\rho' |_{S}$ is planar. Since $\rho |_{S^c}$ was planar and the nodes $x, x+k, \rho(x), \rho(x+k)$ are all in $S$, $\rho' |_{S^c}$ is also planar. Next, we observe that the number of components in $\pi \cup \rho$ and the number of components in $\pi \cup \rho'$ differ by 1. This is because to obtain the pairing $\rho'$ from $\rho$, we swapped the locations of $x$ and $\rho(x+k)$. Since $x$ and $\rho(x+k)$ are both in $S$ and both the same color, $(\pi, \rho')$ and $(\pi, \rho)$ differ by a move of type $A_{BW}$. So by Corollary \ref{ABWcor}, the number of components in $\pi \cup \rho$ and the number of components in $\pi \cup \rho'$ differ by 1. Since $\text{sign}(\rho') = - \text{sign}(\rho)$, replacing $\rho$ with $\rho'$ does not change the right hand side of equation (\ref{eqn:signSdefn2}). \end{proof} The following useful observation is immediate from the definitions. \begin{rem} \label{lem:swapab} Let $\sigma$ be a pairing such that $x$ and $y$ are two nodes that are not paired in $\sigma$, and let $\sigma'$ be the pairing obtained by swapping the locations of $x$ and $y$ in $\sigma$. Suppose $S$ is a balanced subset of nodes such that $x \in S$ and $y \in S^c$. Let $S' = (S \setminus \{x \} ) \cup \{y \}$. If $\sigma$ does not connect $S$ to $S^c$, then $\sigma'$ does not connect $S'$ to $S'^c$. 
\end{rem} \noindent {\em{Proof that equation (\ref{eqn:signSdefn2}) holds.}} Throughout this proof, we assume that we are given a balanced set $S$, an odd-even pairing $\pi$ that does not connect $S \triangle T$ to $(S \triangle T)^c$, and a black-white pairing $\rho$ that does not connect $S$ to $S^c$ and is planar when restricted to $S$ and when restricted to $S^c$. Recall from the beginning of Section~\ref{sec:eqnholds} that we are considering two types of replacements that we can make to $S$ to obtain $S'$: (1) replacing $x \in S$ with $x+1 \in S^c$, where $(x, x+1)$ is a couple of consecutive nodes of the same color, and (2) replacing $x \in S$ with $y \in S^c$, where $x < y$ are the same color and all $\ell$ nodes appearing between $x$ and $y$ are the opposite color of $x$ and $y$ for some $\ell \geq 1$. For both types of replacements, we will construct a black-white pairing $\rho'$ that does not connect $S'$ to $S'^c$ such that $\rho' |_{S'}$ and $\rho' |_{S'^c}$ are planar and an odd-even pairing $\pi'$ that does not connect $S' \triangle T$ to $(S' \triangle T)^c$. We will show that after replacing $S$, $\pi$, $\rho$ in equation (\ref{eqn:signSdefn2}) with $S'$, $\pi'$ and $\rho'$, equation (\ref{eqn:signSdefn2}) still holds. \\ \noindent {\bf (1) Replace $x \in S$ with $x+1$ $\in S^c$. } \\ Suppose we replace $x \in S$ with $x +1 \in S^c$ to obtain $S'$. There are two cases to consider based on whether or not $\pi(x) = x+1$. In both cases, we let $\rho'$ be the pairing obtained by swapping the locations of $x$ and $x+1$ in $\rho$. By Remark \ref{lem:swapab}, $\rho'$ is a black-white pairing that does not connect $S'$ to $S'^c$. 
Also note that since $\rho |_{S}$ and $\rho |_{S^c}$ are planar, $\rho' |_{S'}$ and $\rho' |_{S'^c}$ are planar.\\ \noindent {\bf Case 1.} If $\pi(x) = x+1$, $\pi$ does not connect $S' \triangle T$ to $(S' \triangle T)^c$ and when we replace $\rho$ with $\rho'$, the right hand side of equation (\ref{eqn:signSdefn2}) changes sign by Lemma \ref{lem:pix=y}. Since we swapped the locations of $x$ and $x+1$ in $\rho$ to obtain $\rho'$, $(-1)^{\text{\# of crosses of } \rho} =-(-1)^{\text{\# of crosses of } \rho'}$. So equation (\ref{eqn:signSdefn2}) holds. \\ \noindent {\bf Case 2.} If $\pi(x) \neq x+1$, let $\pi'$ be the pairing obtained from $\pi$ by pairing $x$ with $x+1$, $\pi(x)$ with $\pi(x+1)$, and leaving the remaining pairs the same. By Lemma \ref{lem:pix=y}, $\pi'$ does not connect $S' \triangle T$ to $(S' \triangle T)^c$ and when we replace $\pi$ with $\pi'$ and $\rho$ with $\rho'$, the right hand side of equation (\ref{eqn:signSdefn2}) changes sign. As in Case 1, $(-1)^{\text{\# of crosses of } \rho} =-(-1)^{\text{\# of crosses of } \rho'}$, so equation (\ref{eqn:signSdefn2}) holds. \\ \noindent {\bf (2) Replace $x$ with $y$, where $x < y$ are the same color and all $\ell$ nodes in the interval $[x+1, x+2, \ldots, y-1]$ are the opposite color of $x$ and $y$ ($\ell \geq 1$). }\\ Suppose we replace $x \in S$ with $y \in S^c$ to obtain $S'$. There are several cases to consider based on whether $x$ and $y$ are paired with nodes in the interval $[x+1, x+2, \ldots, y-1]$. \\ \noindent {\bf Case 1.} We first consider the case when both $x$ and $y$ are paired with a node in the interval $[x+1, x+2, \ldots, y-1]$. \\ \noindent {\em Construction of $\rho'$}. Let $\rho^{(1)}$ be the pairing obtained by swapping the locations of $x$ and $y$. By Remark \ref{lem:swapab}, $\rho^{(1)}$ does not connect $S'$ to $S'^c$. We observe that if $\ell > 2$, at least one of $\rho^{(1)} |_{S'}$, $\rho^{(1)} |_{S'^c}$ is not planar. 
To see this, observe that since $\rho |_{S}$ and $\rho |_{S^c}$ are planar, the nodes in the interval $[x+1, \ldots, \rho(x)-1]$ are in $S^c$ and the nodes in the interval $[\rho(y)+1, \ldots, y-1]$ are in $S$ (see Figure~\ref{fig:bothpaired}). \begin{wrapfigure}{r}{.3\textwidth} \begin{tikzpicture}[decoration={brace, mirror, raise=4pt}] \draw (1*60:2.4) node {$\rho(x)$}; \draw (0*60:2.4) node {$x$}; \draw (-4*60:2.4) node {$\rho(y)$}; \draw (-30-3*60:2.4) node {$y$}; \draw (9*15:2.3) node {\small{$S$}}; \node[shape=circle,fill=black, scale=0.5] (1) at (0*30:2) {}; \draw (1) arc (0:360:2); \node[shape=circle,fill=white, scale=0.4] at (0*15:2) {}; \node[shape=circle,fill=black, scale=0.5] (2) at (1*15:2) {}; \node[shape=circle,fill=black, scale=0.5] (3) at (2*15:2) {}; \node[shape=circle,fill=black, scale=0.5] (4) at (3*15:2) {}; \node[shape=circle,fill=black, scale=0.5] (5) at (4*15:2) {}; \node[shape=circle,fill=black, scale=0.5] (6) at (5*15:2) {}; \node[shape=circle,fill=black, scale=0.5] (7) at (6*15:2) {}; \node[shape=circle,fill=black, scale=0.5] (0) at (7*15:2) {}; \node[shape=circle,fill=black, scale=0.5] (8) at (8*15:2) {}; \node[shape=circle,fill=black, scale=0.5] (9) at (9*15:2) {}; \node[shape=circle,fill=black, scale=0.5] (10) at (10*15:2) {}; \node[shape=circle,fill=white, scale=0.4] at (10*15:2) {}; \draw (1) -- (5); \draw (10) -- (8); \draw[decorate] (2) -- (4) node [black,midway,xshift=0.5cm,yshift = 0.25cm] {\small $\in S^c$}; \end{tikzpicture} \caption{A possible configuration of the nodes in Case 1.} \label{fig:bothpaired} \end{wrapfigure} Suppose towards a contradiction that $\rho^{(1)} |_{S'}$ and $\rho^{(1)} |_{S'^c}$ are planar. Since $\rho^{(1)} |_{S'^c}$ is planar, all nodes in the interval $[x+1, \ldots, \rho(y)-1]$ are in $S'$. This means that either \begin{itemize} \item[(1)] $\rho(y) = x+1$, or \item[(2)] $\rho(x) = x+1$ and $\rho(y) = x+2$. 
\end{itemize} If (1) holds, there is at least one node in the interval $[\rho(y) + 1, \ldots, y-1]$ other than $\rho(x)$. By the observation in the previous paragraph, this node is in $S'$. If it is in the interval $[\rho(x) + 1, \ldots, y-1]$ its chord crosses the $\rho(x) \!\frown\! y$ chord, contradicting the assumption that $\rho^{(1)}|_{S'}$ is planar. If it is in the interval $[x+1, \ldots, \rho(x)-1]$ it crossed the $\rho(x) \!\frown\! x$ chord, contradicting the planarity of $\rho|_{S}$. If (2) holds, there is at least one node in the interval $[\rho(y)+1, \ldots, y-1]$, this node is in $S'$, and its chord crosses the $\rho(x) \!\frown\! y$ chord, contradicting the assumption that $\rho^{(1)} |_{S'}$ is planar. Observe that since $\rho$ pairs $x$ and $y$ with nodes in the interval $[x+1, x+2, \ldots, y-1]$, any crossings in $\rho^{(1)} |_{S'}$ must involve nodes in the interval $[x+1, x+2, \ldots, y-1]$. We claim that we can undo the crossings in $\rho^{(1)} |_{S'}$ one at a time without changing the right hand side of equation (\ref{eqn:signSdefn2}). To prove the claim, we will describe a procedure for constructing $\rho^{(m+1)}$ from $\rho^{(m)}$ so that $\rho^{(m+1)} |_{S'}$ has one fewer crossing than $\rho^{(m)} |_{S'}$. \\ \begin{procedure} \label{procedure} (Illustrated in Figure~\ref{fig:iterative}). Choose the smallest node $i_m \in S'$ greater than $\rho^{(m)}(y)$ such that $i_m \!\frown\! \rho^{(m)}(i_m)$ crosses the chord $y \!\frown\! \rho^{(m)}(y)$. Note that $\rho^{(m)}(i_m) = \rho(i_m)$ for all $m$ and $\rho^{(1)}(y) = \rho(x)$. Since $i_m$ and $\rho^{(m)}(y)$ are the same color and both in $S'$, we can swap the locations of $i_m$ and $\rho^{(m)}(y)$ in $\rho^{(m)}$ to obtain $\rho^{(m+1)}$, and this is a move of type $A_{BW}$. By Corollary \ref{ABWcor}, replacing $\rho^{(m)}$ with $\rho^{(m+1)}$ does not change the right hand side of equation (\ref{eqn:signSdefn2}). 
We claim that $\rho^{(m+1)} |_{S'}$ has one fewer crossing than $\rho^{(m)} |_{S'}$. First observe that since all nodes between $x$ and $y$ are the same color, any chord that crosses the chord $i_m \!\frown\! y$ must have also crossed the chord $y\!\frown\!\rho^{(m)}(y)$. So we just need to check that pairing $\rho^{(m)}(y)$ with $\rho(i_m)$ did not create any crossings. If a black-white chord $a \!\frown\! \rho(a)$ with $a, \rho(a) \in S'$ crosses $\rho^{(m)}(y) \!\frown\! \rho(i_m)$, then if either one of $a, \rho(a)$ is in the interval $[\rho^{(m)}(y) + 1, \ldots, i_m-1]$, it would have crossed $y \!\frown\!\rho^{(m)}(y)$, contradicting the assumption that $i_m$ is the node in $S'$ closest to $\rho^{(m)}(y)$ that crossed $y\!\frown\!\rho^{(m)}(y)$. So both $a, \rho(a)$ are outside the interval $[\rho^{(m)}(y) + 1, \ldots, i_m-1]$, meaning $a \!\frown\! \rho(a)$ crosses $\rho^{(m)}(y) \!\frown\! \rho(i_m)$ if and only if it crosses $i_m \!\frown\! \rho(i_m)$. \\ \end{procedure} Note that if $\rho^{(m)}$ does not connect $S'$ to $S'^c$, then $\rho^{(m+1)}$ does not connect $S'$ to $S'^c$. We repeat this procedure until we have a pairing $\rho^{(n)}$ such that $\rho^{(n)}|_{S'}$ is planar. \begin{figure} \caption{Illustration of the procedure for undoing the crossings in $\rho^{(1)}$.} \label{fig:iterative} \end{figure} Similarly, we can undo the crossings in $\rho^{(n)} |_{S'^c}$ one at a time without changing the right hand side of equation (\ref{eqn:signSdefn2}). The resulting pairing is $\rho'$. \\ \noindent {\em Construction of $\pi'$ and analysis of equation (\ref{eqn:signSdefn2})}. We break into subcases based on the parity of $\ell$. \noindent {\bf Case 1a.} $\ell$ is odd \noindent {\em Analysis of LHS of (\ref{eqn:signSdefn2})}. Since $x$ and $y$ are both paired with black nodes in the interval $[x+1, x+2, \ldots, y-1]$, $(-1)^{\# \text{ crosses of } \rho^{(1)}} = - (-1)^{\# \text{ crosses of } \rho}$ by Lemma \ref{crossinglemma3}. 
We will show that when we undo crossings to obtain $\rho'$ as described, we apply Procedure~\ref{procedure} an odd number of times. Recall that every node between $x$ and $\rho(x)$ is in $S^c$ and every node between $\rho(y)$ and $y$ is in $S$. It follows that $\rho(x) < \rho(y)$ or $\rho(x) = \rho(y) +1$. Putting these facts together, we see that every node in $S'^c \cap \{x+1, \ldots, y-1\}$ crosses the $x \!\frown\! \rho(y)$ chord, and every node in $S' \cap \{x+1, \ldots, y-1\}$ crosses the $y \!\frown\! \rho(x)$ chord. Since there are an odd number of nodes in $\{x+1, \ldots, y-1\} \setminus \{ \rho(x), \rho(y) \}$, we must apply Procedure~\ref{procedure} an odd number of times. We conclude that $(-1)^{\# \text{ crosses of } \rho'} = (-1)^{\# \text{ crosses of } \rho}$. \noindent {\em Construction of $\pi'$ and analysis of RHS of (\ref{eqn:signSdefn2})}. Since $\ell$ is odd, $x$ and $y$ are the same parity, so we let $\pi'$ be the pairing obtained by swapping the locations of $x$ and $y$. We claim that $\pi'$ does not connect $S' \triangle T$ to $(S' \triangle T)^c$. Since $x$ and $y$ are the same parity and the same color, either both of $x, y$ are in $T$ or neither $x$ nor $y$ are in $T$. Since $x, y$ are either both in $T$ or both not in $T$, exactly one of $x, y$ is in $S \triangle T$. So by Remark \ref{lem:swapab}, $\pi'$ does not connect $S' \triangle T$ to $(S' \triangle T)^c$. Also, $\pi' \cup \rho^{(1)}$ has the same number of components as $\pi \cup \rho$ because when we replace $\pi$ with $\pi'$ and $\rho$ with $\rho^{(1)}$, the path $\cdots - \pi(x) - x - \rho(x) - \cdots$ in $\pi \cup \rho$ is replaced with $\cdots -\pi(y) - x - \rho(y) - \cdots$ in $\pi' \cup \rho^{(1)}$ and the path $\cdots - \pi(y) - y - \rho(y) - \cdots$ in $\pi \cup \rho$ is replaced with $\cdots -\pi(x) - y - \rho(x) - \cdots$. 
Since we applied Procedure~\ref{procedure} an odd number of times and each application of Procedure~\ref{procedure} is a move of type $A_{BW}$, by Lemma~\ref{AOElemma}, $$ (-1)^{\# \text{ comp in } \pi' \cup \rho'} = -(-1)^{\# \text{ comp in } \pi' \cup \rho^{(1)}} = - (-1)^{\# \text{ comp in } \pi \cup \rho}.$$ Since $\text{sign}_{BW}(\rho^{(1)}) = -\text{sign}_{BW}(\rho)$ and $\text{sign}_{BW}(\rho^{(m+1)}) =- \text{sign}_{BW}(\rho^{(m)})$, $\text{sign}_{BW}(\rho') = \text{sign}_{BW}(\rho)$. Finally, since $\text{sign}_{OE}(\pi') = - \text{sign}_{OE}(\pi)$, we conclude that equation (\ref{eqn:signSdefn2}) holds when $\pi$ is replaced with $\pi'$ and $\rho$ is replaced with $\rho'$. \noindent {\bf Case 1b.} $\ell$ is even \noindent {\em Analysis of LHS of (\ref{eqn:signSdefn2})}. As in Case 1a, $(-1)^{\# \text{ crosses of } \rho^{(1)}} = - (-1)^{\# \text{ crosses of } \rho}$. If $\ell = 2$ then we let $\rho' = \rho^{(1)}$ and both $\rho' |_{S'}$ and $\rho' |_{S'^c}$ are planar. If $\ell > 2$, then we will show that when we undo crossings in $\rho^{(1)}$ to obtain $\rho'$ we apply Procedure~\ref{procedure} an even number of times. The reasoning is analogous to the $\ell$ is odd case: the claim follows from the fact that there are an even number of nodes in $\{x + 1, \ldots, y-1\} \setminus \{ \rho(x), \rho(y) \}$. We conclude that $(-1)^{\# \text{ crosses of } \rho'} = - (-1)^{\# \text{ crosses of } \rho}$. \noindent {\em Construction of $\pi'$ and analysis of RHS of (\ref{eqn:signSdefn2})}. We break into cases based on whether $\pi(x) = y$ or $\pi(x) \neq y$. If $\pi(x) = y$, we let $\pi' = \pi$. If $\pi(x) \neq y$, we let $\pi'$ be the pairing obtained from $\pi$ by pairing $x$ with $y$, $\pi(x)$ with $\pi(y)$, and leaving the remaining pairs the same. 
In both cases $\pi'$ does not connect $S' \triangle T$ to $(S' \triangle T)^c$ and $\text{sign}_{OE}(\pi) (-1)^{\# \text{ comp in } \pi \cup \rho} = \text{sign}_{OE}(\pi') (-1)^{\# \text{ comp in } \pi' \cup \rho^{(1)}}$ by Lemma \ref{lem:pix=y}. Since we applied Procedure~\ref{procedure} an even number of times and each application of Procedure~\ref{procedure} is a move of type $A_{BW}$, by Lemma~\ref{AOElemma}, $ (-1)^{\# \text{ comp in } \pi' \cup \rho^{(1)}} = (-1)^{\# \text{ comp in } \pi' \cup \rho'}$. Finally, since $\text{sign}_{BW}(\rho^{(1)}) = -\text{sign}_{BW}(\rho)$ and $\text{sign}_{BW}(\rho^{(m+1)}) =- \text{sign}_{BW}(\rho^{(m)})$, $\text{sign}_{BW}(\rho') = -\text{sign}_{BW}(\rho)$. We conclude that when $\rho$ is replaced with $\rho'$ and $\pi$ is replaced with $\pi'$, the right hand side of equation (\ref{eqn:signSdefn2}) changes sign. Thus equation (\ref{eqn:signSdefn2}) holds. \\ \noindent {\bf Case 2.} We next consider the case where exactly one of $x$ or $y$ is paired with a black node in the interval $[x+1, x+2, \ldots, y-1]$. Without loss of generality, suppose that $x$ is the node that is paired with a black node in the interval $[x+1, \ldots, y-1]$. There are two subcases to consider. \noindent {\bf Case 2a.} If one of the $\ell$ nodes between $x$ and $y$ is in $S^c$, then let $k$ be the smallest integer such that $y-k$ is in $S^c$ and let $\rho'$ be the pairing obtained by pairing $y$ with $y-k$ and $\rho(y)$ with $\rho(y-k)$. By Lemma \ref{lem:planarwhenrestricted}, $\rho' |_{S}$ and $\rho' |_{S^c}$ are planar, and replacing $\rho$ with $\rho'$ does not change the right hand side of equation (\ref{eqn:signSdefn2}). To show that replacing $\rho$ with $\rho'$ does not change the left hand side of equation (\ref{eqn:signSdefn2}), we must show that $(-1)^{\# \text{ crosses of } \rho'} = (-1)^{\# \text{ crosses of } \rho}$. 
This follows from the observations that: \begin{itemize} \item since $\rho |_{S^c}$ is planar, the chords $(y-k) \!\frown\! \rho(y-k)$ and $y \!\frown\! \rho(y)$ do not cross, and \item a chord $a \!\frown\! \rho(a)$ crosses exactly one of $(y-k) \!\frown\! \rho(y-k)$, $y \!\frown\! \rho(y)$ if and only if it crosses exactly one of $\rho(y-k) \!\frown\! \rho(y)$, $(y-k) \!\frown\! y$. \end{itemize} Thus we have reduced Case 2a to Case 1, where both $x$ and $y$ are paired with nodes in the interval $[x+1, x+2, \ldots, y-1]$. \noindent {\bf Case 2b.} If all of the $\ell$ nodes between $x$ and $y$ are in $S$ (this includes the case where the only node between $x$ and $y$ is $x+1$), then since $\rho |_{S}$ is planar, $x$ is paired with $x+1$. When we swap the locations of $x$ and $y$ to obtain $\rho^{(1)}$, $\rho^{(1)}|_{S'^c}$ is planar but $\rho^{(1)}|_{S'}$ is not planar. In fact, every node between $x+1$ and $y$ is in $S$ (and therefore in $S'$) and crosses the $y\!\frown\! (x+1)$ chord. As in Case 1, we obtain $\rho'$ by applying Procedure~\ref{procedure} to undo the crossings in $\rho^{(1)}|_{S'}$, and this does not change the right hand side of equation (\ref{eqn:signSdefn2}). We break into cases based on whether $\ell$ is odd or $\ell$ is even before constructing $\pi'$. \noindent {\bf Case 2bi.} $\ell$ is odd Since exactly one of $x$ and $y$ is paired with a node in the interval $[x+1, x+2, \ldots, y-1]$, $(-1)^{\# \text{ crosses of } \rho^{(1)}} = (-1)^{\# \text{ crosses of } \rho}$ by Lemma \ref{crossinglemma3}. We claim that when we undo crossings to obtain $\rho'$, there are an even number of crossings to undo. This is because every node between $x+1$ and $y$ crosses the $(x+1)\!\frown\! y$ chord, and since $\ell$ is odd there are an even number of such nodes. So $(-1)^{\# \text{ crosses of } \rho'} = (-1)^{\# \text{ crosses of } \rho}$. We let $\pi'$ be the pairing obtained by swapping the locations of $x$ and $y$. 
By the type of arguments used in Case 1, $\pi'$ does not connect $S' \triangle T$ to $(S' \triangle T)^c$, $(-1)^{\# \text{ comp in }\pi' \cup \rho'} = (-1)^{\# \text{ comp in } \pi \cup \rho }$, and $\text{sign}_{BW}(\rho') = -\text{sign}_{BW}(\rho)$. We conclude that equation (\ref{eqn:signSdefn2}) holds when $\pi$ is replaced with $\pi'$ and $\rho$ is replaced with $\rho'$. \noindent {\bf Case 2bii.} $\ell$ is even As in Case 2bi, $(-1)^{\# \text{ crosses of } \rho^{(1)}} =(-1)^{\# \text{ crosses of } \rho }$. When we undo crossings to obtain $\rho'$, there are an odd number of crossings to undo, so $(-1)^{\# \text{ crosses of } \rho'} = - (-1)^{\# \text{ crosses of } \rho }$. We break into cases based on whether $\pi(x) = y$ or $\pi(x) \neq y$. If $\pi(x) = y$, we let $\pi' = \pi$. If $\pi(x) \neq y$, we let $\pi'$ be the pairing obtained from $\pi$ by pairing $x$ with $y$, $\pi(x)$ with $\pi(y)$, and leaving the remaining pairs the same. In both cases $\pi'$ does not connect $S' \triangle T$ to $(S' \triangle T)^c$, and $\text{sign}_{OE}(\pi) (-1)^{\# \text{ comp in } \pi \cup \rho} = \text{sign}_{OE}(\pi') (-1)^{\# \text{ comp in } \pi' \cup \rho^{(1)}}$. By the type of arguments used in Case 1, $ (-1)^{\# \text{ comp in } \pi' \cup \rho^{(1)}} = -(-1)^{\# \text{ comp in } \pi' \cup \rho'}$ and $\text{sign}_{BW}(\rho') = \text{sign}_{BW}(\rho)$. We conclude that when $\rho$ is replaced with $\rho'$ and $\pi$ is replaced with $\pi'$, the right hand side of equation (\ref{eqn:signSdefn2}) changes sign. Thus equation (\ref{eqn:signSdefn2}) holds. \\ \noindent {\bf Case 3.} Finally, we observe that we can reduce the case where neither $x$ nor $y$ is paired with a black node in the interval $[x+1, x+2, \ldots, y-1]$ to the case where exactly one of $x$ or $y$ is paired with a black node in the interval $[x+1, x+2, \ldots, y-1]$. First assume that at least one of the $\ell$ nodes between $x$ and $y$ is in $S$. 
Choose the smallest integer $k$ such that $x+k$ is in $S$. Let $\rho'$ be the pairing that pairs $x$ with $x+k$ and $\rho(x)$ with $\rho(x+k)$. By Lemma \ref{lem:planarwhenrestricted}, $\rho' |_{S}$ and $\rho' |_{S^c}$ are planar, and replacing $\rho$ with $\rho'$ does not change the right hand side of equation (\ref{eqn:signSdefn2}). The argument that $(-1)^{\# \text{ crosses of } \rho'} = (-1)^{\# \text{ crosses of } \rho}$ is the same as the argument in Case 2a. Finally, if all of the $\ell$ nodes between $x$ and $y$ are in $S^c$, pair $y$ with $x+ \ell$. The argument then proceeds identically. \subsubsection{Proof that $(\pi, \rho)$ exists} We conclude by proving the existence of an odd-even pairing $\pi$ and a black-white pairing $\rho$ such that $\pi$ does not connect $S \triangle T$ to $(S \triangle T)^c$ and $\rho$ does not connect $S$ to $S^c$. Recall that at the beginning of Section~\ref{sec:eqnholds} we showed that for all $j$ there is a balanced set $S$ of size $2j$ with a planar black-white pairing $\rho$ that does not connect $S$ to $S^c$, and by choosing $\pi = \rho$ we also have an odd-even pairing $\pi$ that does not connect $S \triangle T$ to $(S \triangle T)^c$. We also showed that any balanced set of size $2j$ can be obtained from $S$ by making a sequence of replacements of types (1) and (2) discussed in the beginning of Section~\ref{sec:eqnholds}. Furthermore, we showed that given an odd-even pairing $\pi$ and a black-white pairing $\rho$ such that $\pi$ does not connect $S \triangle T$ to $(S \triangle T)^c$ and $\rho$ does not connect $S$ to $S^c$, and a set $S'$ obtained from $S$ by making a replacement of the form (1) or (2), we can modify $\pi$ and $\rho$ to obtain $\pi'$ and $\rho'$ so that $\pi'$ does not connect $S' \triangle T$ to $(S' \triangle T)^c$ and $\rho'$ does not connect $S'$ to $S'^c$. 
We conclude that for each balanced subset $S$, there is an odd-even pairing $\pi$ and a black-white pairing $\rho$ with the desired properties. \section{A recurrence for tripartite double-dimer configurations} \subsection{Kenyon and Wilson's determinant formula} In this section we prove our analogue of Kenyon and Wilson's determinant formula for tripartite pairings. Recall the statement of their theorem from Section \ref{sec:KWwork}: \begin{customthm}{\ref{thm:kw61}}\cite[Theorem 6.1]{KW2009} Suppose that the nodes are contiguously colored red, green, and blue (a color may occur zero times), and that $\sigma$ is the (unique) planar pairing in which like colors are not paired together. We have $$ \widehat{\Pr}(\sigma) = \text{sign}_{OE}(\sigma) \det [1_{i, j \text{ RGB-colored differently } } X_{i, j} ]^{i = 1, 3, \ldots, 2n-1}_{j =2, 4 \ldots, 2n}.$$ \end{customthm} Kenyon and Wilson proved Theorem~\ref{thm:kw61} by combining two key results. The first is from their study of groves (see Section~\ref{sec:groves}). Recall that Kenyon and Wilson showed in Theorem~\ref{thm:KWgrovethm} that $\dddot{ \Pr}(\sigma)$ is an integer-coefficient homogeneous polynomial in the variables $L_{i, j}$. Furthermore, they showed that when $\sigma$ is a partition that is a tripartite pairing, the grove polynomial $\dddot{ \Pr}(\sigma)$ can be expressed as a Pfaffian whose entries are $L_{i, j}$ or 0. \begin{thm}\cite[Theorem 3.1]{KW2009} \label{KWthm31} Let $\sigma$ be the tripartite pairing partition defined by circularly contiguous sets of nodes $R, G,$ and $B$, where $|R|, |G|,$ and $|B|$ satisfy the triangle inequality. Then $$\dddot{ \Pr }(\sigma) = \text{Pf} \begin{pmatrix} 0 & L_{R, G} & L_{R, B} \\ -L_{G, R} & 0 & L_{G, B} \\ -L_{B, R} & -L_{B, G} & 0 \end{pmatrix}$$ where $L$ is the matrix with entries $L_{i, j}$ whose rows and columns are indexed by the nodes, and $L_{R,G}$ is the submatrix of $L$ whose rows are the red nodes and columns are the green nodes. 
\end{thm} The second result they needed is a theorem which allows one to compute the double-dimer polynomials $\widehat{\Pr}(\sigma)$ using the grove polynomials. \begin{thm}\cite[Theorem 4.2]{KW2006} \label{thm42} If a planar partition $\sigma$ only contains pairs and we make the following substitutions to the grove partition polynomial $\dddot{ \Pr}(\sigma)$: $$L_{i, j} \to \begin{cases} 0, & \text{ if } i \text{ and } j \text{ have the same parity, } \\ (-1)^{(|i-j| -1)/2} X_{i, j}, & \text{ otherwise,} \end{cases}$$ then the result is $\text{sign}_{OE}(\sigma)$ times the double-dimer pairing polynomial $\widehat{ \Pr }(\sigma)$, when we interpret $\sigma$ as a pairing. \end{thm} We prove Theorem~\ref{thm61} (our version of Theorem~\ref{thm:kw61}) similarly. We can use Theorem~\ref{KWthm31} as stated, but we need the following analogue of Theorem~\ref{thm42}: \begin{thm} \label{mythm42} If a planar partition $\sigma$ only contains pairs and we make the following substitutions to the grove partition polynomial $\dddot{ \Pr}(\sigma)$: $$L_{i, j} \to \begin{cases} 0, & \text{ if } i \text{ and } j \text{ are the same color,} \\ \text{sign}(i, j) Y_{i, j}, & \text{ otherwise,} \end{cases}$$ then the result is $\text{sign}_{\cons}({\bf N}) \text{sign}_{OE}(\sigma) \widetilde{\Pr }(\sigma).$ \end{thm} \begin{proof} In Theorem~\ref{thm:thm1}, we established that $$\widetilde{\Pr}(\sigma) := \dfrac{Z^{DD}_{\sigma}(G, {\bf N})}{(Z^D(G))^2} = \sum_{\text{black-white pairings } \rho} \mathcal{Q}^{(DD)}_{\sigma, \rho} Y'_{\rho}.$$ In the proof of Theorem~\ref{thm:thm1}, we showed $\mathcal{Q}^{(DD)}_{\sigma, \rho} = \text{sign}_{OE}(\sigma) \text{sign}_{BW}(\rho) \mathcal{P}_{\sigma, \rho}^{(t)}$ (see equation~(\ref{eqn:Qtilde})). This connects the polynomials $\widetilde{\Pr}(\sigma)$ to the grove polynomials $\dddot{ \Pr}(\sigma)$, since $\dddot{ \Pr}(\sigma) = \sum\limits_{\text{ partitions } \tau} \mathcal{P}_{\sigma, \tau}^{(t)} L_{\tau}$ by Theorem~\ref{thm:KWgrovethm}. 
Specifically, in the case where $\sigma$ is a pair, we have $$\dddot{ \Pr}(\sigma) = \text{sign}_{OE}(\sigma) \sum\limits_{\text{ pairs } \rho} \text{sign}_{BW}(\rho) \mathcal{Q}^{(DD)}_{\sigma, \rho} L_{\rho}.$$ Observe the sum is over all pairs $\rho$ rather than all partitions. This is because by Rule~\ref{hrule}, when we express a partition as a linear combination of planar partitions, any singleton parts of that partition show up in each planar partition with nonzero coefficient. Also observe that when we apply Rule~\ref{hrule} to a partition, each of the resulting partitions contains the same number of parts as the original partition. It follows that if $\sigma$ is a pairing, and $\mathcal{P}_{\sigma, \rho}^{(t)} \neq 0$ for some partition $\rho$, then $\rho$ is also a pairing. Finally, we recall that $Y'_{\rho} = (-1)^{\text{\# crosses of }\rho} \prod\limits_{i \text{ black} } Y_{i,\rho(i)}$ and by Lemma \ref{lemma34}, \begin{equation*} \text{sign}_{\cons}({\bf N}) \text{sign}_{BW}(\rho) \prod\limits_{(i, j) \in \rho} \text{sign}(i, j) = (-1)^{\# \text{ crosses of } \rho}. \end{equation*} The theorem follows. \end{proof} The remainder of this section will be devoted to proving the following theorem. \begin{customthm}{\ref{thm61}} Suppose that the nodes are contiguously colored red, green, and blue (a color may occur zero times), and that $\sigma$ is the (unique) planar pairing in which like colors are not paired together. We have $$\widetilde{\Pr}(\sigma)= \text{sign}_{OE}(\sigma) \det [1_{i, j \text{ RGB-colored differently } } Y_{i, j} ]^{i = b_1, b_2, \ldots, b_{n}}_{j = w_1, w_2, \ldots, w_{n} },$$ where $b_1 < b_2 < \ldots < b_n$ are the black nodes listed in increasing order and $w_1 < w_2 < \ldots < w_n$ are the white nodes listed in increasing order. \end{customthm} While our proof of Theorem~\ref{thm61} is very similar to Kenyon and Wilson's proof of Theorem~\ref{thm:kw61}, we do require the following technical lemma. 
\begin{lemma} \label{lem:lifesaver} Let ${\bf N}$ be a set of $2n$ nodes and let $(n_1, n_1 +1), \ldots, (n_{2k}, n_{2k} + 1)$ be a complete list of couples of consecutive nodes of the same color. Define $(-1)^{i > j}$ to be $-1$ if $i > j$, and $1$ otherwise, and let \[M = [(-1)^{i > j} \text{sign}(i, j) Y_{i, j} ]^{i = b_1, b_2, \ldots, b_{n}}_{j = w_1, w_2, \ldots, w_{n} }, \] where $b_1 < b_2 < \cdots < b_n$ are the black nodes listed in increasing order and $w_1 < w_2 < \cdots < w_n$ are the white nodes listed in increasing order. Then $M$ is a block matrix where within each block, the signs of the entries are staggered in a checkerboard pattern. Furthermore, let $t$ be the total number of rows and columns of $M$ that we need to multiply by $-1$ to obtain a matrix with entries whose signs are staggered in a checkerboard pattern where the upper left entry is positive. If node 1 is black, $$(-1)^{t} = \text{sign}_{\cons}({\bf N}) (-1)^{\sum\limits_{i=1}^{2k} \lfloor \frac{n_i}{2} \rfloor} $$ and if node 1 is white, $$(-1)^{t} = (-1)^{n} \text{sign}_{\cons}({\bf N}) (-1)^{\sum\limits_{i=1}^{2k} \lfloor \frac{n_i}{2} \rfloor}.$$ \end{lemma} \begin{proof} We will first prove the claim that $M$ is a block matrix where within each block, the signs of the entries are staggered in a checkerboard pattern. \noindent \begin{minipage}{.75\textwidth} \hspace{10pt} We begin with an example. Suppose we have 20 nodes colored as shown right. Then there are four couples of consecutive nodes of the same color: $(4, 5), (8, 9), (13, 14)$, and $(17, 18)$ and $M$ is the matrix shown below. We see that the blocks of $M$ correspond to consecutive nodes of the same color. More precisely, the last column in a block corresponds to a white node that precedes at least two consecutive black nodes. The first column in the next block corresponds to the first white node after these consecutive black nodes. 
Similarly, the nodes corresponding to the last row in a block and the first row in the next block are separated by at least two consecutive white nodes. \end{minipage} \hspace{.1cm} \begin{minipage}{.2\textwidth} \begin{center} \begin{tikzpicture}[scale=.65] \draw (0,0) circle (2); \foreach \x in {1,2,...,20} { \node[shape=circle,fill=black, scale=0.5,label={{((\x-1)*360/20)+90}:\small{\x}}] (n\x) at ({((\x-1)*360/20)+90}:2) {}; }; \foreach \x in {2, 4, 5, 7, 10, 12, 15, 17, 18, 20} { \node[shape=circle,fill=white, scale=0.4] (n\x) at ({((\x-1)*360/20)+90}:2) {}; }; \end{tikzpicture} \end{center} \end{minipage} $$ \left(\begin{array}{cc cc | cc | cc cc} Y_{1, 2} & -Y_{1, 4} & Y_{1, 5} & -Y_{1, 7} & -Y_{1, 10} & Y_{1, 12} & Y_{1, 15} & -Y_{1, 17} & Y_{1, 18} & -Y_{1, 20} \\ -Y_{3, 2} & Y_{3, 4} & -Y_{3, 5} & Y_{3, 7} & Y_{3, 10} & -Y_{3, 12} & -Y_{3, 15} & Y_{3, 17} & -Y_{3, 18} & Y_{3, 20} \\ \hline -Y_{6, 2} & Y_{6, 4} & -Y_{6, 5} & Y_{6, 7} & Y_{6, 10} & -Y_{6, 12} & -Y_{6, 15} & Y_{6, 17} & -Y_{6, 18} & Y_{6, 20} \\ Y_{8, 2} & -Y_{8, 4} & Y_{8, 5} & -Y_{8, 7} & -Y_{8, 10} & Y_{8, 12} & Y_{8, 15} & -Y_{8, 17} & Y_{8, 18} & -Y_{8, 20} \\ -Y_{9, 2} & Y_{9, 4} & -Y_{9, 5} & Y_{9, 7} & Y_{9, 10} & -Y_{9, 12} & -Y_{9, 15} & Y_{9, 17} & -Y_{9, 18} & Y_{9, 20} \\ Y_{11, 2} & -Y_{11, 4} & Y_{11, 5} & -Y_{11, 7} & -Y_{11, 10} & Y_{11, 12} & Y_{11, 15} & -Y_{11, 17} & Y_{11, 18} & -Y_{11, 20} \\ -Y_{13, 2} & Y_{13, 4} & -Y_{13, 5} & Y_{13, 7} & Y_{13, 10} & -Y_{13, 12} & -Y_{13, 15} & Y_{13, 17} & -Y_{13, 18} & Y_{13, 20} \\ Y_{14, 2} & -Y_{14, 4} & Y_{14, 5} & -Y_{14, 7} & -Y_{14, 10} & Y_{14, 12} & Y_{14, 15} & -Y_{14, 17} & Y_{14, 18} & -Y_{14, 20} \\ -Y_{16, 2} & Y_{16, 4} & -Y_{16, 5} & Y_{16, 7} & Y_{16, 10} & -Y_{16, 12} & -Y_{16, 15} & Y_{16, 17} & -Y_{16, 18} & Y_{16, 20} \\ \hline -Y_{19, 2} & Y_{19, 4} & -Y_{19, 5} & Y_{19, 7} & Y_{19, 10} & -Y_{19, 12} & -Y_{19, 15} & Y_{19, 17} & -Y_{19, 18} & Y_{19, 20} \end{array}\right).$$ Since in the matrix above, row $i$ 
does not correspond to node $i$, we introduce the following notation. We define the map $B: \{1, 2, \ldots, n\} \to \{b_1, \ldots, b_n\}$ by letting $B(i)$ be the node corresponding to row $i$. Similarly, we define $W: \{1, 2, \ldots, n\} \to \{w_1, \ldots, w_n\}$ by letting $W(j)$ be the node corresponding to column $j$. In the example above, $B(4) = 8$ and $W(8) = 17$. We will show that $M$ has the form $$ \kbordermatrix{ & W(j) < s_1 & s_1 < W(j) < s_2 & \cdots & s_{k-1} < W(j) < s_k \\ B(i) < u_1 & A_{1, 1} &A _{1, 2} & \cdots & A_{1, k} \\ u_1 < B(i) < u_2& A_{2, 1} & A_{2, 2} & \cdots & A_{2, k} \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ u_{k-1} < B(i) < u_k & A_{k, 1} & A_{k, 2} & \cdots & A_{k, k} \\ },$$ where in each block $A_{i, j}$, the signs of the entries are staggered in a checkerboard pattern. Note that a block could be empty. We first show that within a block, rows $i$ and $i+1$ have opposite sign. There are two cases to consider: \begin{itemize} \item[(1)] $B(i+1) - B(i) =2$, and \item[(2)] $B(i+1) - B(i) = 1$. \end{itemize} These are the only cases because if $B(i+1) - B(i) > 2$, then there is at least one couple of consecutive white nodes between $B(i)$ and $B(i+1)$, so rows $i$ and $i+1$ are in different blocks. In case (1), there is not a couple of consecutive nodes of the same color between $B(i)$ and $B(i+1)$, so $a_{B(i), w} = a_{B(i+1), w}$ for all $w$. It follows immediately from the definition $\text{sign}(b,w) = (-1)^\frac{|b-w| + a_{b, w} -1}{2}$ that $\text{sign}(B(i+1), w) = -\text{sign}(B(i), w)$ unless $B(i) < w < B(i+1)$. But if $B(i) < w < B(i+1)$, the sign $(-1)^{b > w}$ flips. So in case (1), rows $i$ and $i+1$ have opposite sign. In case (2), $(B(i), B(i+1))$ is a couple of consecutive black nodes, so $|a_{B(i+1), w} - a_{B(i), w}| = 1$. 
If $B(i+1) > w$, $$\text{sign}(B(i+1), w) = (-1)^{\frac{B(i+1) - w+ a_{B(i+1), w} -1}{2}} = (-1)^{\frac{B(i) +1 - w+ a_{B(i), w} +1 -1}{2}} = - \text{sign}(B(i), w).$$ The case where $B(i+1) < w$ is completely analogous. We conclude that within a block, rows $i$ and $i+1$ have opposite sign. The proof that within a block columns $j$ and $j+1$ have opposite sign is identical. So, within each block, the signs of the entries are staggered in a checkerboard pattern.\\ Since $M$ is a block matrix where the signs of each block are staggered in a checkerboard pattern, we can always choose rows and columns to multiply by $-1$ so that the signs of the matrix entries are staggered in a checkerboard pattern and the upper left entry is positive. Let $t$ be the total number of rows and columns we need to multiply by $-1$. We claim that if node 1 is black, $(-1)^{t} = \text{sign}_{\cons}({\bf N}) (-1)^{\sum\limits_{i=1}^{2k} \lfloor \frac{n_i}{2} \rfloor} $ and if node 1 is white, $(-1)^{t} = (-1)^{n} \text{sign}_{\cons}({\bf N}) (-1)^{\sum\limits_{i=1}^{2k} \lfloor \frac{n_i}{2} \rfloor}.$ We will prove the claim by induction on $n$, where $2n$ is the total number of nodes. The base case is when there are 4 nodes. In this case, $(-1)^{n} = 1$. We check all possible node colorings in the table below. 
\begin{center} \begin{tabular}{ |c | c | c | c | c |} \hline & & & & \\[-1em] black nodes & $M$ & $t$ & $\text{sign}_{\cons}({\bf N})$ & $(-1)^{\sum \lfloor \frac{n_i}{2} \rfloor}$ \\ \hline 1, 2 &$ \left(\begin{array}{ c c } -Y_{1, 3} & Y_{1, 4} \\ Y_{2, 3} & -Y_{2, 4} \end{array} \right)$& 2 & $-1$ & $-1$ \\ \hline 1, 3 &$ \left(\begin{array}{ c c } Y_{1, 2} & -Y_{1, 4} \\ -Y_{3, 2} & Y_{3, 4} \end{array} \right)$& 0 & 1 & 1 \\ \hline 1, 4 &$ \left(\begin{array}{ c c } Y_{1, 2} & -Y_{1, 3} \\ Y_{4, 2} & -Y_{4, 3} \end{array} \right)$& 1 & 1 & $-1$ \\ \hline 3, 4 &$ \left(\begin{array}{ c c } Y_{3, 1} & -Y_{3,2} \\ -Y_{4, 1} & Y_{4, 2} \end{array} \right)$& 0& $-1$ & $-1$ \\ \hline 2, 4 &$ \left(\begin{array}{ c c } -Y_{2, 1} & Y_{2, 3} \\ Y_{4, 1} & -Y_{4, 3} \end{array} \right)$& 2& 1 & 1 \\ \hline 2, 3 &$ \left(\begin{array}{ c c } -Y_{2, 1} & -Y_{2,4} \\ Y_{3, 1} & Y_{3, 4} \end{array} \right)$& 1& 1 & $-1$ \\ \hline \end{tabular} \end{center} Now assume the claim holds when there are $2n-2$ nodes and suppose that $|{\bf N}| = 2n$. Choose the largest nodes $x, x+1$ such that $x, x+1$ are different colors. Let ${\bf N'} = \{1, \ldots, 2n-2 \}$. Define $\psi: {\bf N} - \{x, x+1 \} \to {\bf N'}$ by \begin{equation*} \psi(\ell) = \begin{cases} \ell &\mbox{ if }\ell < x \\ \ell - 2 &\mbox{ if }\ell > x+1 \ \end{cases}. \end{equation*} That is, $\psi$ defines a relabeling of the nodes of ${\bf N} - \{x, x+1 \}$ so that node 1 is labeled 1, $\ldots,$ node $x-1$ is labeled $x-1$, node $x+2$ is labeled $x,\ldots,$ node $2n$ is labeled $2n - 2$. Recall that $(n_1, n_1 +1), \ldots, (n_{2k}, n_{2k} + 1)$ is a complete list of couples of consecutive nodes of the same color in ${\bf N}$. Let $(n'_1, n'_1 +1), \ldots, (n'_{2j}, n'_{2j} + 1)$ be a complete list of couples of consecutive nodes of the same color in ${\bf N'}$. Let $M'$ denote the matrix corresponding to ${\bf N'}$. 
Let $t'$ denote the total number of rows and columns we need to multiply by $-1$ to get a matrix $M'_{(1)}$ with entries whose signs are staggered in a checkerboard pattern so that the upper left entry is positive. By the induction hypothesis, $$(-1)^{t'} = \text{sign}_{\cons}({\bf N'}) (-1)^{\sum\limits_{i=1}^{2j} \lfloor \frac{n'_i}{2} \rfloor}$$ if node 1 is black and $$(-1)^{t'} = (-1)^{n-1} \text{sign}_{\cons}({\bf N'}) (-1)^{\sum\limits_{i=1}^{2j} \lfloor \frac{n'_i}{2} \rfloor}$$ if node 1 is white. There are several cases to consider based on whether or not ${\bf N}$ and ${\bf N'}$ have the same number of couples of consecutive nodes of the same color. In each case, we will assume that node $2n$ is white. When $2n$ is black, the argument is completely analogous. Each case will involve two steps: \begin{enumerate} \item[(i)] comparing $\text{sign}_{\cons}({\bf N'})$ to $\text{sign}_{\cons}({\bf N})$ and $(-1)^{\sum \lfloor \frac{n'_i}{2} \rfloor}$ to $(-1)^{\sum \lfloor \frac{n_i}{2} \rfloor}$, and \item[(ii)] comparing $t$ to $t'$. \end{enumerate} \begin{figure} \caption{Shown left is an example of a possible node coloring ${\bf N} \label{fig:signlemmaex1} \end{figure} \noindent {\bf Case 1.} In the first case, ${\bf N'}$ has the same number of couples of consecutive nodes of the same color as ${\bf N}$. There are two ways this can occur: $x+1 < 2n$, or $x+1 = 2n$. \\ \noindent {\bf Case 1(a).} $x+1 < 2n$ We first assume that node 1 is black. \noindent{\bf (i) Comparing $\text{sign}_{\cons}({\bf N})$ to $\text{sign}_{\cons}({\bf N'})$ and $(-1)^{\sum \lfloor \frac{n'_i}{2} \rfloor}$ to $(-1)^{\sum \lfloor \frac{n_i}{2} \rfloor}$.} Since ${\bf N'}$ has the same number of couples of consecutive nodes as ${\bf N}$, $\text{sign}_{\cons}({\bf N}) = \text{sign}_{\cons}({\bf N'})$. 
Since we assumed that $x$ and $x+1$ are the largest nodes such that $x$ and $x+1$ are different colors and ${\bf N'}$ has the same number of couples of consecutive nodes as ${\bf N}$, node $x-1$ and all nodes in the interval $[x+1, \ldots, 2n]$ are white. Since each node in the interval $[x+1, \ldots, 2n]$ of ${\bf N}$ is white, each node in the interval $[x-1, \ldots, 2n-2]$ of ${\bf N'}$ is white, and node 1 is black in both ${\bf N}$ and ${\bf N'}$, we have \[ (-1)^{\sum\limits_{i=1}^{2k} \lfloor \frac{n'_i}{2} \rfloor} = (-1)^{2n-x-1} (-1)^{\sum\limits_{i=1}^{2k} \lfloor \frac{n_i}{2} \rfloor} .\] We conclude that \begin{equation} \label{eqn:comparison} \text{sign}_{\cons}({\bf N'}) (-1)^{\sum\limits_{i=1}^{2j} \lfloor \frac{n'_i}{2} \rfloor} = (-1)^{2n-x-1} \text{sign}_{\cons}({\bf N}) (-1)^{\sum\limits_{i=1}^{2j} \lfloor \frac{n_i}{2} \rfloor}. \end{equation} \noindent {\bf (ii) Comparing $t$ to $t'$.} Comparing the parity of $t$ and $t'$ is a multi-step process. Recall that we obtained $M'_{(1)}$ from $M'$ by multiplying $t'$ rows and columns of $M'$ by $-1$. We start by returning the nodes of $M'_{(1)}$ to their original labels to obtain $M'_{(2)}$. Then, we add the row and column corresponding to nodes $x$ and $x+1$ to $M'_{(2)}$ to get $M'_{(3)}$. Finally, we let $\widetilde{M}$ be the matrix obtained from $M$ by doing all the row and column multiplications we did to $M'$ to get $M'_{(1)}$, and consider the relationship between $\widetilde{M}$ and $M'_{(3)}$. Previously we defined the map $B: \{1, 2, \ldots, n\} \to \{b_1, \ldots, b_n\}$ by letting $B(i)$ be the node corresponding to row $i$ of $M$ and we defined $W: \{1, 2, \ldots, n\} \to \{w_1, \ldots, w_n\}$ by letting $W(j)$ be the node corresponding to column $j$ of $M$. It will be convenient to let $R:= B^{-1}$ and $C:= W^{-1}$, so for example $R(6)$ is the row corresponding to the black node $6$, and $C(7)$ is the column corresponding to the white node 7.
We define $B'$, $W'$, $R'$ and $C'$ analogously for $M'$. Because this portion of the proof is long, we will illustrate the main ideas with an example. Let $G$ be a graph with 8 nodes where nodes $1, 3, 4$ and $6$ are colored black (see Figure~\ref{fig:signlemmaex1}). In this example, $x = 6$. So ${\bf N'} = \{1, 2, 3, 4, 5, 6\}$ where nodes $1, 3,$ and $4$ are black. This means that $$M' = \left( \begin{array}{ c c c } Y_{1, 2} & Y_{1, 5} & - Y_{1, 6} \\ -Y_{3, 2} & - Y_{3, 5} & Y_{3, 6} \\ Y_{4, 2} & Y_{4, 5} & - Y_{4, 6} \\ \end{array} \right).$$ To obtain $$M'_{(1)} = \left( \begin{array}{ c c c } Y_{1, 2} & -Y_{1, 5} & Y_{1, 6} \\ -Y_{3, 2} & Y_{3, 5} & -Y_{3, 6} \\ Y_{4, 2} & -Y_{4, 5} & Y_{4, 6} \\ \end{array} \right)$$ we multiply the second and third columns of $M'$ by $-1$, so $t' = 2$. In general, to get from $M'$ to $M'_{(1)}$, either we multiply all of the columns in a block or none of the columns in a block, because within each block, the signs of the entries are staggered in a checkerboard pattern. The same is true for the rows. \noindent {\bf Return the nodes of $M'_{(1)}$ to their original labels.} Next, we return the nodes to their original labels (equivalently, we apply the map $\psi^{-1}$) to get $M'_{(2)}$. Note that the only entries that are affected are the entries in the columns corresponding to nodes $\psi(x+2), \ldots, \psi(2n)$. In the example, we return node 6 to its original label of 8, resulting in the matrix $$M'_{(2)} = \left( \begin{array}{ c c c } Y_{1, 2} & -Y_{1, 5} & Y_{1, 8} \\ -Y_{3, 2} & Y_{3, 5} & -Y_{3, 8} \\ Y_{4, 2} & -Y_{4, 5} & Y_{4, 8} \\ \end{array} \right).$$ \noindent {\bf Add the row and column corresponding to nodes $x$ and $x+1$ to $M'_{(2)}$.} Now, add to $M'_{(2)}$ the column corresponding to node $x+1$ (i.e. the column with entries $(-1)^{i > x+1} \text{sign}(i, x+1) Y_{i, x+1}$) in between the columns corresponding to nodes $x-1$ and $x+2$. Also add the row corresponding to node $x$ as the last row.
Change the sign of the entries in the new column in the rows of $M'$ that we multiplied by $-1$. Similarly, change the sign of the entries of the new row in the columns that we multiplied by $-1$. Call the resulting matrix $M'_{(3)}$. In the example, we get $$M'_{(3)} = \left( \begin{array}{ c c c c} Y_{1, 2} & -Y_{1, 5} & -Y_{1, 7} &Y_{1, 8} \\ -Y_{3, 2} & Y_{3, 5} & Y_{3, 7} & -Y_{3, 8} \\ Y_{4, 2} & -Y_{4, 5} & -Y_{4, 7} & Y_{4, 8} \\ -Y_{6, 2} & Y_{6, 5} & Y_{6, 7} & -Y_{6, 8} \\ \end{array} \right),$$ where note that we changed the sign of entries $Y_{6, \psi^{-1}(5)} = Y_{6, 5}$ and $Y_{6, \psi^{-1}(6)} = Y_{6, 8} $ because we multiplied the columns of $M'$ corresponding to nodes 5 and 6 by $-1$. Since we changed the signs of entries in the row $R(x)$ and the column $C(x+1)$ as described above, $M'_{(3)}$ is a block matrix with checkerboard blocks with the following additional properties: \begin{itemize} \item[(1)] All columns strictly to the left of column $C(x+1)$ and all rows strictly above row $R(x)$ are in the same block. \item[(2)] The $j$th entry of $C(x-1)$ and $C(x+2)$ have opposite sign because they were adjacent in $M'$, which is checkerboard. \item[(3)] All columns strictly to the right of $C(x+1)$ are in the same block(s). \item[(4)] $C(x+1)$ is either in same block as $C(x+2)$ or in the same block as $C(x-1)$. \item[(5)] $R(x)$ is either in the same block as all other rows, or in its own block. \end{itemize} \noindent {\bf Compare $\widetilde{M}$ to $M'_{(3)}$ and conclusion.} Observe that if $i< x$ and $j > x + 1$, then \begin{equation} \label{eqn:signsoff} \text{sign}( \psi(i), \psi(j) ) = (-1)^{ ( \psi(j) - \psi(i) + a_{\psi(i), \psi(j)} -1 )/2 } = (-1)^{( j - i - 2 + a_{i, j} -1 )/2} \\ = -\text{sign}(i, j), \end{equation} so the entries in the columns $C(x+2), \ldots, C(2n)$ are opposite in sign in $M$ compared to the entries in columns $C'(\psi(x+2)), \ldots, C'(\psi(2n))$ in $M'$. 
Returning to our example, we see that \[ M = \left( \begin{array}{ c c c c } Y_{1, 2} & Y_{1, 5} & - Y_{1, 7} & Y_{1, 8} \\ -Y_{3, 2} & - Y_{3, 5} & Y_{3, 7} & - Y_{3, 8} \\ Y_{4, 2} & Y_{4, 5} & - Y_{4, 7} & Y_{4, 8} \\ - Y_{6, 2} & - Y_{6, 5} & Y_{6, 7} & -Y_{6, 8} \\ \end{array} \right) \text{ and } M' = \left( \begin{array}{ c c c } Y_{1, 2} & Y_{1, 5} & - Y_{1, 6} \\ -Y_{3, 2} & - Y_{3, 5} & Y_{3, 6} \\ Y_{4, 2} & Y_{4, 5} & - Y_{4, 6} \\ \end{array} \right), \] so indeed each entry in column $C(8) = 4$ has sign opposite of the corresponding entry of column $C(6) = 3$. Now let $\widetilde{M}$ be the matrix $M$ obtained by doing all of the $t'$ row and column multiplications we did to $M'$ to obtain $M'_{(1)}$. In our example, \[ \widetilde{M} = \left( \begin{array}{ c c c c } Y_{1, 2} & -Y_{1, 5} & - Y_{1, 7} & - Y_{1, 8} \\ -Y_{3, 2} & Y_{3, 5} & Y_{3, 7} & Y_{3, 8} \\ Y_{4, 2} & -Y_{4, 5} & - Y_{4, 7} & - Y_{4, 8} \\ - Y_{6, 2} & Y_{6, 5} & Y_{6, 7} & Y_{6, 8} \\ \end{array} \right).\] Since we changed the signs of entries in the row $R(x)$ and the column $C(x+1)$ as described in the previous step, by equation~(\ref{eqn:signsoff}) $\widetilde{M}$ is identical to $M'_{(3)}$ except for the columns $C(x+2), \ldots, C(2n)$. Combining this fact with observations (1), (2), and (3) about $M'_{(3)}$ above, we conclude that that $\widetilde{M}$ is checkerboard except possibly for the row $R(x)$ and column $C(x+1)$. Both the entries in $R(x)$ and the entries in $C(x+1)$ alternate in signs, so it remains to determine whether or not we need to multiply $R(x)$ and/or $C(x+1)$ by $-1$. Since $(-1)^{x > x+1} = 1$ and $\text{sign}(x+1, x) = 1$, the entry $(R(x), C(x+1))$ of $\widetilde{M}$ is positive. 
Also, since in a matrix with checkerboard entries, the entry $(R(x), C(2n))$ has positive sign, and all nodes $x+1, \ldots, 2n$ are white, the entry $(R(x), C(x+1))$ of the final checkerboard matrix we get after multiplying $R(x)$ and/or $C(x+1)$ by $-1$ has positive sign if and only if $x$ is odd. This means that $x$ is odd if and only if we must multiply both $R(x)$ and $C(x+1)$ by $-1$ or neither by $-1$ to achieve a checkerboard matrix. We conclude that $x$ is odd if and only if the parity of $t$ is the same as the parity of $t'$. Since $x$ is odd if and only if $(-1)^{2n-x-1} = 1$, by equation (\ref{eqn:comparison}), $t$ has the same parity as $$\text{sign}_{\cons}({\bf N}) (-1)^{\sum\limits_{i=1}^{2j} \lfloor \frac{n_i}{2} \rfloor},$$ as desired. When node 1 is white, the argument is very similar, but we have \[ (-1)^{\sum\limits_{i=1}^{2j} \lfloor \frac{n'_i}{2} \rfloor} = (-1)^{2n-x} (-1)^{\sum\limits_{i=1}^{2j} \lfloor \frac{n_i}{2} \rfloor} \] since $(2n, 1)$ is a couple of consecutive nodes of the same color. It follows that \begin{equation*} (-1)^{n-1} \text{sign}_{\cons}({\bf N'}) (-1)^{\sum\limits_{i=1}^{2j} \lfloor \frac{n'_i}{2} \rfloor} = (-1)^{n} (-1)^{2n-x-1} \text{sign}_{\cons}({\bf N}) (-1)^{\sum\limits_{i=1}^{2j} \lfloor \frac{n_i}{2} \rfloor}. \end{equation*} The rest of the argument is identical. \\ \noindent {\bf Case 1(b).} $x+1 = 2n$. If ${\bf N'}$ has the same number of couples of consecutive nodes as ${\bf N}$ and $x+1 = 2n$, there are two possibilities: either nodes $2n-2$ and $1$ are black, or nodes $2n-2$ and $1$ are white. We first assume that node $1$ is black.
\noindent{\bf (i) Comparing $\text{sign}_{\cons}({\bf N})$ to $\text{sign}_{\cons}({\bf N'})$ and $(-1)^{\sum \lfloor \frac{n'_i}{2} \rfloor}$ to $(-1)^{\sum \lfloor \frac{n_i}{2} \rfloor}$.} Since ${\bf N'}$ has the same number of couples of consecutive nodes as ${\bf N}$ and $x+1 = 2n$, $\text{sign}_{\cons}({\bf N}) = \text{sign}_{\cons}({\bf N'})$ and $$(-1)^{\sum\limits_{i=1}^{2k} \lfloor \frac{n'_i}{2} \rfloor} = (-1)^{\sum\limits_{i=1}^{2k} \lfloor \frac{n_i}{2} \rfloor},$$ so $$(-1)^{\sum\limits_{i=1}^{2k} \lfloor \frac{n_i}{2} \rfloor} \text{sign}_{\cons}({\bf N}) =(-1)^{\sum\limits_{i=1}^{2k} \lfloor \frac{n'_i}{2} \rfloor} \text{sign}_{\cons}({\bf N'}).$$ \noindent {\bf (ii) Comparing $t$ to $t'$.} In this case $\psi$ is the identity map, so $M'_{(1)} = M'_{(2)}$. We obtain $M'_{(3)}$ as described in Case 1(a). By the same reasoning as in Case 1(a), all columns to the left of column $C(2n)$ and all rows above row $R(2n-1)$ are in the same block. $C(2n)$ is either in the same block as the other columns or in its own block, and similarly for row $R(2n-1)$. Let $\widetilde{M}$ be the matrix $M$ obtained by doing all of the $t'$ multiplications we did to $M'$ to obtain $M'_{(3)}$. Since $\psi$ is the identity map, $\widetilde{M} = M'_{(3)}$. It remains to determine whether or not we need to multiply $R(2n-1)$ and/or $C(2n)$ by $-1$. Since $(-1)^{2n-1 > 2n} = 1$ and $\text{sign}(2n-1, 2n) = 1$, we need to multiply both $C(2n)$ and $R(2n-1)$ or neither in order for $\widetilde{M}$ to be checkerboard. So $t$ has the same parity as $t'$ and therefore the same parity as $$(-1)^{\sum\limits_{i=1}^{2k} \lfloor \frac{n_i}{2} \rfloor} \text{sign}_{\cons}({\bf N}).$$ This proves the claim when node 1 is black. 
If node 1 is white, the only difference is that $$(-1)^{\sum\limits_{i=1}^{2k} \lfloor \frac{n'_i}{2} \rfloor} = -(-1)^{\sum\limits_{i=1}^{2k} \lfloor \frac{n_i}{2} \rfloor} $$ because node $2n$ was the first in a couple of consecutive white nodes in ${\bf N}$, and in ${\bf N'}$, node $2n-2$ is the first in a couple of consecutive white nodes. It follows that $$(-1)^{n-1} (-1)^{\sum\limits_{i=1}^{2k} \lfloor \frac{n'_i}{2} \rfloor} \text{sign}_{\cons}({\bf N'}) =(-1)^{n} (-1)^{\sum\limits_{i=1}^{2k} \lfloor \frac{n_i}{2} \rfloor} \text{sign}_{\cons}({\bf N}).$$ The rest of the proof is the same. \\ \begin{figure} \caption{Shown left is an example of a possible node coloring ${\bf N} \label{fig:signlemmaex2} \end{figure} \noindent {\bf Case 2.} In the second case, ${\bf N'}$ has two fewer couples of consecutive nodes of the same color compared to ${\bf N}$. Again, there are two ways this can occur: $x+1 < 2n$, or $x+1 = 2n$. \\ \noindent {\bf Case 2(a).} $x+1 < 2n$ We first assume that node 1 is black. As in Case 1(a), we illustrate the main ideas with an example. Let $G$ be a graph with $8$ nodes where nodes $1, 2, 5$ and $6$ are colored black (see Figure~\ref{fig:signlemmaex2}). In this example, $x = 6$, so ${\bf N'} = \{1, 2, 3, 4, 5, 6\}$ where nodes $1, 2,$ and $5$ are black. \noindent{\bf (i) Comparing $\text{sign}_{\cons}({\bf N})$ to $\text{sign}_{\cons}({\bf N'})$ and $(-1)^{\sum \lfloor \frac{n'_i}{2} \rfloor}$ to $(-1)^{\sum \lfloor \frac{n_i}{2} \rfloor}$.} Since we assumed ${\bf N'}$ has two fewer couples of consecutive nodes of the same color compared to ${\bf N}$, nodes $x-1$ and $x$ are both black. Recall that $(s_i, s_i + 1)$ denotes a couple of consecutive black nodes of the same color and $(u_i, u_i + 1)$ denotes a couple of consecutive white nodes of the same color. 
By our assumptions, we have $$\cdots < s_k < u_{k - (2n - x - 2)} < \cdots < u_{k-1} < u_k.$$ When we remove nodes $x$ and $x+1$, there is a one-to-one correspondence between the inversions with respect to the node coloring of ${\bf N}$ and the inversions with respect to the node coloring of ${\bf N'}$ except for the inversions in {\bf N} of the form $(s_k, u_i)$ for $k - (2n-x - 2) \leq i \leq k$. Thus we have that $$\text{sign}_{\cons}({\bf N}) = (-1)^{2n - x -1} \text{sign}_{\cons}({\bf N'}).$$ In our example, in ${\bf N}$ we have $s_1 < u_1 < s_2 < u_2$ and in ${\bf N'}$ we have $s'_1 < u'_1$. There is one fewer inversion in ${\bf N'}$ compared to ${\bf N}$ and so $\text{sign}_{\cons}({\bf N}) = - \text{sign}_{\cons}({\bf N'})$. Next we compare $(-1)^{\sum\limits_{i=1}^{2k-2} \lfloor \frac{n'_i}{2} \rfloor}$ to $(-1)^{\sum\limits_{i=1}^{2k} \lfloor \frac{n_i}{2} \rfloor}$. In our example, nodes $5$ and $7$ (nodes $x-1$ and $x+1$, respectively) are in $\{n_1, \ldots, n_{2k}\}$. In ${\bf N'}$, the black node $\psi(5) =5$ is adjacent to the white node $\psi(8)=6$ and the couple of consecutive white nodes $(7, 8)$ is not replaced by a new couple of consecutive nodes, so $$(-1)^{\sum\limits_{i=1}^{2k} \lfloor \frac{n_i}{2} \rfloor} = (-1)^{\lfloor \frac{1}{2} \rfloor} (-1)^{\lfloor \frac{3}{2} \rfloor} (-1)^{\lfloor \frac{5}{2} \rfloor} (-1)^{\lfloor \frac{7}{2} \rfloor} = 1$$ while $(-1)^{\sum\limits_{i=1}^{2k-2} \lfloor \frac{n'_i}{2} \rfloor} = (-1)^{\lfloor \frac{1}{2} \rfloor} (-1)^{\lfloor \frac{3}{2} \rfloor} = -1$. In general, since the nodes $x+1, \ldots, 2n$ are white and $x-1$ is black, the node $x-1$ and all nodes in the interval $[x+1, \ldots, 2n-1]$ are equal to $n_i$ for some $i$. In ${\bf N'}$, all nodes in the interval $[\psi(x+2), \ldots, \psi(2n-1)]$ are equal to $n'_i$ for some $i$. 
From the observations that \begin{itemize} \item to obtain ${\bf N'}$ we deleted nodes $x$ and $x+1$ from ${\bf N}$, \item $\psi(x-1)$ is adjacent to the white node $\psi(x+2)$ in ${\bf N'}$, and \item $\psi(y) = y - 2$ for $y > x+1$, \end{itemize} we get $$(-1)^{\sum\limits_{i=1}^{2k-2} \lfloor \frac{n'_i}{2} \rfloor} = (-1)^{2n - x - 2} (-1)^{ \lfloor \frac{x-1}{2} \rfloor} (-1)^{ \lfloor \frac{x+1}{2} \rfloor} (-1)^{ \sum\limits_{i=1}^{2k} \lfloor \frac{n_i}{2} \rfloor}.$$ It follows that $$(-1)^{\sum\limits_{i=1}^{2k-2} \lfloor \frac{n'_i}{2} \rfloor} = (-1)^{x+1} (-1)^{ \sum\limits_{i=1}^{2k} \lfloor \frac{n_i}{2} \rfloor},$$ so we conclude that $$\text{sign}_{\cons}({\bf N'}) (-1)^{\sum\limits_{i=1}^{2k-2} \lfloor \frac{n'_i}{2} \rfloor} = \text{sign}_{\cons}({\bf N}) (-1)^{ \sum\limits_{i=1}^{2k} \lfloor \frac{n_i}{2} \rfloor}.$$ \noindent {\bf (ii) Comparing $t$ to $t'$.} This portion of the proof is similar in structure to part (ii) of Case 1(a). In our example, $$M' = \left( \begin{array}{ c c c } -Y_{1, 3} & Y_{1, 4} & -Y_{1, 6} \\ Y_{2, 3} & - Y_{2, 4} & Y_{2, 6} \\ Y_{5, 3} & -Y_{5, 4} & Y_{5, 6} \\ \end{array} \right).$$ We multiply all three columns and the last row of $M'$ by $-1$ to obtain $M'_{(1)}$ and return node 6 to its original label of 8 to obtain $M'_{(2)}$, so \[ M'_{(1)} = \left( \begin{array}{ c c c } Y_{1, 3} & -Y_{1, 4} & Y_{1, 6} \\ -Y_{2, 3} & Y_{2, 4} & -Y_{2, 6} \\ Y_{5, 3} & -Y_{5, 4} & Y_{5, 6} \\ \end{array} \right) \text{ and } M'_{(2)} = \left( \begin{array}{ c c c } Y_{1, 3} & -Y_{1, 4} & Y_{1, 8} \\ -Y_{2, 3} & Y_{2, 4} & -Y_{2, 8} \\ Y_{5, 3} & -Y_{5, 4} & Y_{5, 8} \\ \end{array} \right). \] \noindent {\bf Add the column and row corresponding to nodes $x+1$ and $x$ to $M'_{(2)}$.} Now, add the column corresponding to node $x+1$ immediately to the left of the column corresponding to the node $x+2$ in $M'_{(2)}$. Also add the row corresponding to node $x$ as the last row. 
Change the sign of the entries in the new column in rows $R(a)$ if $R'(\psi(a))$ was a row we multiplied by $-1$. Similarly, change the sign of the entries in the new row in columns $C(b)$ if $C'(\psi(b))$ was a column we multiplied by $-1$, and call the resulting matrix $M'_{(3)}$, which is a block matrix with checkerboard blocks with properties (1) and (3)-(5) from Case 1(a). Property (2) has to be slightly modified: \begin{itemize} \item[(2)] The $j$th entry of the first column to the left of $C(x+1)$ and the $j$th entry of $C(x+2)$ have opposite sign because they were adjacent in $M'$, which is checkerboard. \end{itemize} In our example, \[ M'_{(3)} = \left( \begin{array}{ c c c c } Y_{1, 3} & -Y_{1, 4} & Y_{1, 7} & Y_{1, 8} \\ -Y_{2, 3} & Y_{2, 4} & - Y_{2, 7} & -Y_{2, 8} \\ Y_{5, 3} & -Y_{5, 4} & Y_{5, 7} & Y_{5, 8} \\ Y_{6, 3} & -Y_{6, 4} & Y_{6, 7} & Y_{6, 8} \\ \end{array} \right) . \] \noindent{\bf Compare $\widetilde{M}$ to the entries of $M'_{(3)}$ and conclusion.} Observe that if $i< x$ and $j > x + 1$ then \begin{equation*} \text{sign}( \psi(i), \psi(j) ) = (-1)^{ ( \psi(j) - \psi(i) + a_{\psi(i), \psi(j)} -1 )/2 } = (-1)^{( j-2 - i + a_{i, j} - 2 -1 )/2} \\ = \text{sign}(i, j), \end{equation*} so unlike in Case 1(a), the entries in the columns $C(x+2), \ldots, C(2n)$ are the same sign in $M$ as the entries in columns $C'(\psi(x+2)), \ldots, C'(\psi(2n))$ in $M'$. Returning to our example, we see that the entries in column $C(8) = 4$ have the same signs as the entries in column $C(6) = 3$, as \[M = \left( \begin{array}{ c c c c } -Y_{1, 3} & Y_{1, 4} & Y_{1, 7} & -Y_{1, 8} \\ Y_{2, 3} & - Y_{2, 4} & -Y_{2, 7} & Y_{2, 8} \\ Y_{5, 3} & -Y_{5, 4} & - Y_{5, 7} & Y_{5, 8} \\ - Y_{6, 3} & Y_{6, 4} & Y_{6, 7} & -Y_{6, 8} \\ \end{array} \right) \text{ and } M' = \left( \begin{array}{ c c c } -Y_{1, 3} & Y_{1, 4} & -Y_{1, 6} \\ Y_{2, 3} & - Y_{2, 4} & Y_{2, 6} \\ Y_{5, 3} & -Y_{5, 4} & Y_{5, 6} \\ \end{array} \right). 
\] Let $\widetilde{M}$ be the matrix $M$ obtained by doing all of the $t'$ multiplications we did to $M'$ to obtain $M'_{(3)}$. We see that $\widetilde{M} = M'_{(3)}$, so $\widetilde{M}$ is checkerboard except for the columns $C(x+2), \ldots, C(2n)$ and also possibly the row $R(x)$ and/or the column $C(x+1)$. There are two cases to consider. In the first case, $C(x+1)$ is not in the same block as the first column to its left, so we need to multiply $C(x+1)$ by $-1$. Then, since $C(x+1)$ is in the same block as $C(x+2), \ldots, C(2n)$, we need to multiply the remaining $2n-x-1$ columns by $-1$ as well. So we have done $t' + 2n - x$ total multiplications. It remains to consider whether or not we need to multiply row $R(x)$ by $-1$. Recall from Case 1(a) that after we are finished multiplying rows and columns and have obtained a checkerboard matrix, the entry $(R(x), C(x+1))$ must have positive sign if and only if $x$ is odd. Since $(-1)^{x > x+1} = 1$, $\text{sign}(x+1, x) = 1$, and we multiplied $C(x+1)$ by $-1$, we multiply $R(x)$ by $-1$ if and only if $x$ is odd. Therefore if $x$ is odd, we have done $t' + 2n - x + 1$ multiplications, and if $x$ is even, we have done $t' + 2n - x$ multiplications. We have thus shown that $t$ has the same parity as $t'$. If $C(x+1)$ is in the same block as the first column to its left, we do not need to multiply $C(x+1)$ by $-1$ but we still need to multiply the remaining $2n-x-1$ columns by $-1$. So we have done $t' + 2n - x -1$ total multiplications. Since we did not multiply $C(x+1)$ by $-1$, we multiply $R(x)$ by $-1$ if and only if $x$ is even. Therefore, if $x$ is even, we have done $t' + 2n - x$ total multiplications and if $x$ is odd we have done $t' + 2n - x - 1$ multiplications. Again, $t$ has the same parity as $t'$. In both cases, $t$ has the same parity as $\text{sign}_{\cons}({\bf N}) (-1)^{ \sum\limits_{i=1}^{2k} \lfloor \frac{n_i}{2} \rfloor},$ which completes the proof when node 1 is black.
When node 1 is white, we have $$\cdots < s_k < u_{k - (2n - x - 1)} < \cdots < u_{k-1} < u_k$$ but $(s_k, u_k)$ is not an inversion since node 1 is white. So we still have $\text{sign}_{\cons}({\bf N}) = (-1)^{2n - x-1} \text{sign}_{\cons}({\bf N'})$. Since $(2n, 1)$ is a couple of consecutive white nodes, $$(-1)^{\sum\limits_{i=1}^{2k-2} \lfloor \frac{n'_i}{2} \rfloor} = (-1)^{2n - x - 1} (-1)^{ \lfloor \frac{x-1}{2} \rfloor} (-1)^{ \lfloor \frac{x+1}{2} \rfloor} (-1)^{ \sum\limits_{i=1}^{2k} \lfloor \frac{n_i}{2} \rfloor}.$$ It follows that $$(-1)^{\sum\limits_{i=1}^{2k-2} \lfloor \frac{n'_i}{2} \rfloor} = (-1)^{x} (-1)^{ \sum\limits_{i=1}^{2k} \lfloor \frac{n_i}{2} \rfloor},$$ so we conclude that $$(-1)^{n-1} \text{sign}_{\cons}({\bf N'}) (-1)^{\sum\limits_{i=1}^{2k-2} \lfloor \frac{n'_i}{2} \rfloor} =-(-1)^{n} (-1)^{2n-1} \text{sign}_{\cons}({\bf N}) (-1)^{ \sum\limits_{i=1}^{2k} \lfloor \frac{n_i}{2} \rfloor} = (-1)^{n}\text{sign}_{\cons}({\bf N}) (-1)^{ \sum\limits_{i=1}^{2k} \lfloor \frac{n_i}{2} \rfloor}. $$ The rest of the argument is the same. \\ \noindent {\bf Case 2(b).} $x+1 = 2n$. If ${\bf N'}$ has two fewer couples of consecutive nodes of the same color compared to ${\bf N}$ and $x+1 = 2n$, it must be the case that nodes $2n-1$ and $2n-2$ are both black and node 1 is white. \noindent{\bf (i) Comparing $\text{sign}_{\cons}({\bf N})$ to $\text{sign}_{\cons}({\bf N'})$ and $(-1)^{\sum \lfloor \frac{n'_i}{2} \rfloor}$ to $(-1)^{\sum \lfloor \frac{n_i}{2} \rfloor}$.} Removing nodes $2n$ and $2n-1$ does not remove any inversions with respect to the node coloring of ${\bf N}$ (recall that $(s_k, u_k)$ is not an inversion when node $1$ is white). Thus $\text{sign}_{\cons}({\bf N}) = \text{sign}_{\cons}({\bf N'})$.
Next observe that $$(-1)^{\sum\limits_{i=1}^{2k-2} \lfloor \frac{n'_i}{2} \rfloor} = (-1)^{\lfloor \frac{2n}{2} \rfloor} (-1)^{\lfloor \frac{2n-2}{2} \rfloor} (-1)^{\sum\limits_{i=1}^{2k} \lfloor \frac{n_i}{2} \rfloor} = - (-1)^{\sum\limits_{i=1}^{2k} \lfloor \frac{n_i}{2} \rfloor} . $$ Since node 1 is white, we have \begin{equation*} (-1)^{n-1} \text{sign}_{\cons}({\bf N'}) (-1)^{\sum\limits_{i=1}^{2k-2} \lfloor \frac{n'_i}{2} \rfloor} = (-1)^{n} \text{sign}_{\cons}({\bf N}) (-1)^{\sum\limits_{i=1}^{2k} \lfloor \frac{n_i}{2} \rfloor}. \end{equation*} \noindent {\bf (ii) Comparing $t$ to $t'$.} This argument is identical to (ii) in Case 1(b), and we conclude that $t$ has the same parity as $t'$, and therefore the same parity as $(-1)^{n} \text{sign}_{\cons}({\bf N}) (-1)^{\sum\limits_{i=1}^{2k} \lfloor \frac{n_i}{2} \rfloor}$. \end{proof} Now that we have established Lemma~\ref{lem:lifesaver}, the proof of Theorem~\ref{thm61} is very similar to Kenyon and Wilson's proof of Theorem~\ref{thm:kw61}. \begin{proof}[Proof of Theorem~\ref{thm61}]{\color{white} a} \noindent \begin{minipage}{.65\textwidth} \hspace{10pt} Without loss of generality\footnotemark, we may assume that when we list the nodes in counterclockwise order starting with the red ones, they are in the order $1, 2, \ldots, 2n$. Combining Theorems \ref{mythm42} and \ref{KWthm31} immediately gives a Pfaffian formula for the double-dimer model. For example, let $G$ be a graph with eight nodes where nodes 1, 3, 4, and 6 are black. Assume the nodes are colored red, green and blue as shown to the right, so $\sigma = ((1, 8), (3, 4), (5, 6), (7, 2))$. 
Then by Theorem~\ref{KWthm31}, \end{minipage} \begin{minipage}{.3\textwidth} \begin{center} \begin{tikzpicture}[scale=.65] \draw (0,0) circle (2); \foreach \x in {1,2,...,8} { \node[shape=circle,fill=black, scale=0.5,label={{((\x-1)*360/8)+90}:\color{red}{\small{\x}}}] (n\x) at ({((\x-1)*360/8)+90}:2) {}; }; \foreach \x in {1,2,3} { \node[shape=circle,fill=black, scale=0.5,label={{((\x-1)*360/8)+90}:\color{red}{\small{\x}}}] (n\x) at ({((\x-1)*360/8)+90}:2) {}; }; \foreach \x in {4, 5} { \node[shape=circle,fill=black, scale=0.5,label={{((\x-1)*360/8)+90}:\color{green}{\small{\x}}}] (n\x) at ({((\x-1)*360/8)+90}:2) {}; }; \foreach \x in {6, 7, 8} { \node[shape=circle,fill=black, scale=0.5,label={{((\x-1)*360/8)+90}:\color{blue}{\small{\x}}}] (n\x) at ({((\x-1)*360/8)+90}:2) {}; }; \foreach \x in {2, 5, 7, 8} { \node[shape=circle,fill=white, scale=0.4] (n\x) at ({((\x-1)*360/8)+90}:2) {}; }; \foreach \x/\y in {1/8, 3/4, 5/6, 7/2} { \draw (n\x) -- (n\y);}; \end{tikzpicture} \end{center} \end{minipage} \begin{equation} \label{eqn:Pfex} \dddot{\Pr }(18 | 34 | 56 | 72) = \text{Pf} \begin{pmatrix} 0 & 0 & 0 & L_{1, 4} & L_{1, 5} & L_{1, 6} & L_{1, 7} & L_{1, 8} \\ 0 & 0 & 0 & L_{2, 4} & L_{2, 5} & L_{2, 6} & L_{2, 7} & L_{2, 8} \\ 0 & 0 & 0 & L_{3, 4} & L_{3, 5} & L_{3, 6} & L_{3, 7} & L_{3, 8} \\ -L_{4, 1} & -L_{4, 2} & -L_{4, 3} & 0 & 0 & L_{4, 6} & L_{4, 7} & L_{4, 8} \\ -L_{5, 1} & -L_{5, 2} & -L_{5, 3} & 0 & 0 & L_{5, 6} & L_{5, 7} & L_{5, 8} \\ -L_{6, 1} & -L_{6, 2} & -L_{6, 3} & -L_{6, 4} & -L_{6, 5} & 0 & 0& 0\\ -L_{7, 1} & -L_{7, 2} & -L_{7, 3} & -L_{7, 4} & -L_{7, 5} & 0 & 0& 0\\ -L_{8, 1} & -L_{8, 2} & -L_{8, 3} & -L_{8, 4} & -L_{8, 5} & 0 & 0& 0\\ \end{pmatrix}. \end{equation} So making the substitution in Theorem~\ref{mythm42} expresses $\widetilde{\Pr }(18 | 34 | 56 | 72)$ as a Pfaffian, up to a global sign. Presently, we explain how we can obtain a determinant formula from this Pfaffian formula. 
We make the substitution $L_{i, j} \to 0$ when $i$ and $j$ are both black or both white and we reorder the rows and columns so the black nodes are listed first. In the example, the above matrix becomes \[ \begin{pmatrix} 0 & 0 & 0 & 0 & 0 & L_{1, 5} & L_{1, 7} & L_{1, 8} \\ 0 & 0 & 0 & 0 & 0 & L_{3, 5} & L_{3, 7} & L_{3, 8} \\ 0 & 0 & 0 & 0 & -L_{4, 2} & 0 & L_{4, 7} & L_{4, 8} \\ 0 & 0 & 0 & 0 & -L_{6, 2} & -L_{6, 5} & 0 & 0 \\ 0 & 0& L_{2, 4} &L_{2, 6} & 0& 0 & 0& 0\\ -L_{5, 1} & -L_{5, 3} & 0 & L_{5, 6} & 0& 0 & 0& 0\\ -L_{7, 1} & -L_{7, 3} & -L_{7, 4} & 0 & 0& 0 & 0& 0\\ -L_{8, 1} & -L_{8, 3} & -L_{8, 4} & 0 & 0& 0 & 0& 0\\ \end{pmatrix}. \] \footnotetext{We can renumber the nodes while preserving their cyclic order without changing the global sign of the Pfaffian in Theorem~\ref{KWthm31}. This is because if we move the last row and column to be the first row and column, the sign of the Pfaffian changes. But since the entries above the diagonal must be non-negative, we negate the new first row and column and the Pfaffian changes sign again.} Simultaneous swaps of two different rows and corresponding columns changes the sign of the Pfaffian. Assuming the graph has $2k$ couples of consecutive nodes of the same color, we claim that the number of swaps needed so that the black nodes are listed first has the same parity as $$ \dfrac{n(n-1)}{2} + \sum\limits_{i=1}^{2k} \left\lfloor \frac{n_i}{2} \right\rfloor,$$ if node 1 is black. If node 1 is white, the number of swaps needed has the same parity as $$ \dfrac{n(n+1)}{2} + \sum\limits_{i=1}^{2k} \left\lfloor \frac{n_i}{2} \right\rfloor.$$ To prove this, we will first show that the number of node swaps needed to get from a node coloring with $2k$ couples of consecutive nodes of the same color to a node coloring that alternates black and white has the same parity as $\sum\limits_{i=1}^{2k} \left\lfloor \frac{n_i}{2} \right\rfloor.$ We will prove this by induction on $k$. 
When $k = 0$, $0$ swaps are needed, so the claim holds trivially. Assume the claim holds when ${\bf N}$ has $2(k-1)$ couples of consecutive nodes of the same color and suppose we have a set of nodes that has $2k$ couples of consecutive nodes of the same color. Let $h$ be the smallest integer so that $n_{h-1}$ and $n_h$ are different colors. Then $n_{h-1}$ and $n_h$ are the same parity and there are an even number of nodes in the interval $[n_{h-1} +1, \ldots, n_{h}]$, which alternate in color. If we swap $n_h$ with $n_{h} -1$, $n_{h} - 2$ with $n_{h} -3$ $,\ldots,$ $n_{h-1} + 2$ with $n_{h-1} +1$, we will have done $\frac{n_{h} - n_{h-1}}{2}$ swaps and we will have a node coloring with $2(k-1)$ couples of consecutive nodes of the same color. If $n_h$ and $n_{h-1}$ are both even then $\frac{n_{h} - n_{h-1}}{2}$ clearly has the same parity as $\left\lfloor \frac{ n_{h} }{2} \right\rfloor + \left\lfloor \frac{ n_{h-1} }{2} \right\rfloor$. If $n_h$ and $n_{h-1}$ are both odd then by writing $\frac{n_{h} - n_{h-1}}{2} = \frac{n_{h} -1 - (n_{h-1} -1)}{2}$ we see that $\frac{n_{h} - n_{h-1}}{2}$ and $\left\lfloor \frac{ n_{h} }{2} \right\rfloor + \left\lfloor \frac{ n_{h-1} }{2} \right\rfloor$ have the same parity. By the induction hypothesis, the number of swaps needed to get to a node coloring that alternates black and white has the same parity as $$\sum\limits_{\substack{ 1 \leq i \leq 2k \\ i \neq h, h-1} } \left\lfloor \frac{n_i}{2} \right\rfloor.$$ The claim follows. Assume node 1 is black. If there are no couples of consecutive nodes of the same color, the number of swaps needed to put the black nodes first is \[ 1 + 2 + 3 + \cdots + (n-1) = \dfrac{n(n-1)}{2} \] because the third node requires 1 swap, the fifth node requires 2 swaps, the seventh node requires 3 swaps$,\ldots,$ and the $(2n-1)$st node requires $n-1$ swaps. 
So if there are $2k$ couples of consecutive nodes of the same color, since $\sum\limits_{i=1}^{2k} \left\lfloor \frac{n_i}{2} \right\rfloor$ node swaps are needed to get to a node coloring that alternates black and white, the number of swaps needed so that the black nodes are listed first has the same parity as $$ \dfrac{n(n-1)}{2} + \sum\limits_{i=1}^{2k} \left\lfloor \frac{n_i}{2} \right\rfloor.$$ If node 1 is white, the number of swaps needed to put the black nodes first is \[ 1 + 2 + 3 + \cdots + n = \dfrac{n(n+1)}{2} \] because the second node requires 1 swap, the fourth node requires 2 swaps$,\ldots,$ and the $(2n)$th node requires $n$ swaps. So the number of swaps needed so that the black nodes are listed first has the same parity as $$ \dfrac{n(n+1)}{2} + \sum\limits_{i=1}^{2k} \left\lfloor \frac{n_i}{2} \right\rfloor.$$ Next, observe that after the rows and columns have been sorted, the matrix has the form \[ \begin{pmatrix} 0 & \pm L_{B, W} \\ \mp L_{W, B} & 0 \end{pmatrix} \] where $B$ represents the black nodes, $W$ the white nodes, and the signs of the entries in $\pm L_{B, W}$ are $+$ if the black node has a smaller label than the white node and $-$ otherwise. The Pfaffian of this matrix is the determinant of the upper right submatrix times $(-1)^{\frac{n(n-1)}{2}}$. To summarize, after making the substitution $L_{i, j} \to 0$ when $i$ and $j$ are both black or both white and sorting the rows and columns so the black nodes are listed first, \[ \text{Pf} \begin{pmatrix} 0 & L_{R, G} & L_{R, B} \\ -L_{G, R} & 0 & L_{G, B} \\ -L_{B, R} & -L_{B, G} & 0 \end{pmatrix} = (-1)^{\sum\limits_{i=1}^{2k} \lfloor \frac{n_i}{2} \rfloor} \det \begin{pmatrix} L_{B, W} \end{pmatrix}, \] when node 1 is black. When node 1 is white, \[ \text{Pf} \begin{pmatrix} 0 & L_{R, G} & L_{R, B} \\ -L_{G, R} & 0 & L_{G, B} \\ -L_{B, R} & -L_{B, G} & 0 \end{pmatrix} = (-1)^{n} (-1)^{\sum\limits_{i=1}^{2k} \lfloor \frac{n_i}{2} \rfloor} \det \begin{pmatrix} L_{B, W} \end{pmatrix}. 
\] In the example, after this substitution and reordering, the Pfaffian of matrix (\ref{eqn:Pfex}) is equal to \[ (-1)^{1 + 3} \det \begin{pmatrix} 0 & L_{1, 5} & L_{1, 7} & L_{1, 8} \\ 0 & L_{3, 5} & L_{3, 7} & L_{3, 8} \\ -L_{4, 2} & 0 & L_{4, 7} & L_{4, 8} \\ -L_{6, 2} & -L_{6, 5} & 0 & 0 \\ \end{pmatrix} \] because $n_1 = 3$ and $n_2 = 7$. Next we do the substitution $L_{i, j} \to \text{sign}(i, j) Y_{i, j}$. The result is the matrix $$M = [1_{i, j \text{ RGB-colored differently}} (-1)^{i > j} \text{sign}(i, j) Y_{i, j} ]^{i = b_1, b_2, \ldots, b_{n}}_{j = w_1, w_2, \ldots, w_{n} }$$ where $b_1 < b_2 < \cdots < b_n$ are the black nodes listed in increasing order and $w_1 < w_2 < \cdots < w_n$ are the white nodes listed in increasing order. By Theorem~\ref{mythm42}, $$\widetilde{\Pr}(\sigma) = \text{sign}_{OE}(\sigma) \text{sign}_{\cons}({\bf N}) (-1)^{\sum\limits_{i=1}^{2k} \lfloor \frac{n_i}{2} \rfloor} \det(M)$$ if node 1 is black and $$\widetilde{\Pr}(\sigma) = \text{sign}_{OE}(\sigma) \text{sign}_{\cons}({\bf N}) (-1)^{n} (-1)^{\sum\limits_{i=1}^{2k} \lfloor \frac{n_i}{2} \rfloor} \det(M)$$ if node 1 is white. By Lemma~\ref{lem:lifesaver}, $M$ is a block matrix where within each block, the signs of the entries are staggered in a checkerboard pattern. Next, we multiply rows and columns of $M$ by $-1$ so that the signs of the matrix entries are staggered in a checkerboard pattern and the upper left entry is positive. Call the resulting matrix $\widetilde{M}$. By Lemma \ref{lem:lifesaver}, $$\widetilde{\Pr}(\sigma) = \text{sign}_{OE}(\sigma) \det(\widetilde{M})$$ regardless of whether node 1 is black or white. Then, if we multiply every other row by $-1$ and every other column by $-1$, the signs of all matrix entries are positive and the determinant is unchanged. 
We conclude that $$\widetilde{\Pr}(\sigma)= \text{sign}_{OE}(\sigma) \det [1_{i, j \text{ RGB-colored differently } } Y_{i, j} ]^{i = b_1, b_2, \ldots, b_{n}}_{j = w_1, w_2, \ldots, w_{n} }.$$ Returning to our example, we find that \[\widetilde{ \Pr }(18 | 34 | 56 |72 ) = \text{sign}_{OE}(18|34|56|72) \det \begin{pmatrix} 0 & Y_{1, 5} & Y_{1, 7} & Y_{1, 8} \\ 0 & Y_{3, 5} & Y_{3, 7} & Y_{3, 8} \\ Y_{4, 2} & 0 & Y_{4, 7} & Y_{4, 8} \\ Y_{6, 2} & Y_{6, 5} & 0 & 0 \\ \end{pmatrix} = -\det \begin{pmatrix} 0 & Y_{1, 5} & Y_{1, 7} & Y_{1, 8} \\ 0 & Y_{3, 5} & Y_{3, 7} & Y_{3, 8} \\ Y_{4, 2} & 0 & Y_{4, 7} & Y_{4, 8} \\ Y_{6, 2} & Y_{6, 5} & 0 & 0 \\ \end{pmatrix}. \] \end{proof} \subsection{Proof of Theorem~\ref{thm:cond}} Now that we have established Theorem~\ref{thm61}, Theorem~\ref{thm:cond} follows from the proof method described in Section~\ref{sec:proofsketch}. \begin{customthm}{\ref{thm:cond}} Let $G = (V_1, V_2, E)$ be a finite edge-weighted planar bipartite graph with a set of nodes {\bf N}. Divide the nodes into three circularly contiguous sets $R$, $G$, and $B$ such that $|R|, |G|,$ and $|B|$ satisfy the triangle inequality and let $\sigma$ be the corresponding tripartite pairing. 
If $x, w \in V_1$ and $y, v \in V_2$ then \begin{eqnarray*} & & \text{sign}_{OE}(\sigma) \text{sign}_{OE}(\sigma'_{xywv})Z^{DD}_{\sigma}(G, {\bf N}) Z^{DD}_{\sigma_{xywv}}(G, {\bf N} - \{x, y, w, v\}) \hspace{.4cm}\\ &=& \text{sign}_{OE}(\sigma'_{xy}) \text{sign}_{OE}(\sigma'_{wv}) Z^{DD}_{\sigma_{xy}}(G, {\bf N} - \{x, y\}) Z^{DD}_{\sigma_{wv}}(G, {\bf N} - \{w, v\}) \\ && - \text{sign}_{OE}(\sigma'_{xv}) \text{sign}_{OE}(\sigma'_{wy}) Z^{DD}_{\sigma_{xv}}(G, {\bf N} - \{x, v\}) Z^{DD}_{\sigma_{wy}}(G, {\bf N} - \{w, y\}) \end{eqnarray*} where for $i, j \in \{x, y, w, v\}$, $\sigma_{ij}$ is the unique planar pairing on ${\bf N} - \{i, j\}$ in which like colors are not paired together, and $\sigma_{ij}'$ is the pairing after the node set ${\bf N} - \{i, j\}$ has been relabeled so that the nodes are numbered consecutively. \end{customthm} \begin{proof} First we assume that all pairings in the theorem statement exist. Let $$M = [1_{i ,j \text{ RGB-colored differently } } Y_{i, j} ]^{i = b_1, b_2, \ldots, b_n}_{j = w_1, w_2, \ldots, w_n}$$ and let $r_x$ and $r_w$ denote the rows corresponding to nodes $x$ and $w$, respectively (so we are assuming that $x$ and $w$ are colored black). We first move the columns corresponding to $y$ and $v$ (i.e. the columns with entries $Y_{i, y}$ and $Y_{i, v}$, respectively) to the columns $r_x$ and $r_w$. We observe that we can do this without exchanging the column with entries $Y_{i, y}$ with the column with entries $Y_{i,v}$. For example, if $r_x < c_y < r_w < c_v$, we swap column $c_y$ with column $c_y - 1$, then column $c_y -1$ with column $c_y-2, \ldots,$ and column $r_{x} + 1$ with column $r_x$. Next, we swap column $c_v$ with column $c_v - 1, \ldots,$ and column $r_{w} + 1$ with column $r_w$. If instead $c_y < c_v < r_x < r_w$, we swap column $c_v$ with column $c_v +1, \ldots,$ and column $r_{w} - 1$ with column $r_w$ before swapping $c_y$ with column $c_y +1, \ldots,$ and column $r_{x} - 1$ with column $r_x$. 
Without loss of generality we assume that we move the column with entries $Y_{i, y}$ to column $r_x$ and the column with entries $Y_{i, v}$ to column $r_w$ to obtain the matrix $\widetilde{M}$. Let $s_{y}$ denote the number of column swaps we make in the process of moving the column with entries $Y_{i, y}$. Let $s_{v}$ denote the number of column swaps we make in the process of moving the column with entries $Y_{i, v}$. Note that $s_{y}$ and $s_v$ are well-defined up to parity. Note also that after making these swaps, the columns are still in ascending order, aside from the columns with entries $Y_{i, y}$ and $Y_{i, v}$. By the Desnanot-Jacobi identity, \begin{equation} \label{eqn:condpf} \det(\widetilde{M}) \det(\widetilde{M}^{r_{x}, r_{w}}_{r_{x}, r_{w}}) = \det (\widetilde{M}_{r_{x}}^{r_{x}}) \det(\widetilde{M}_{r_{w}}^{r_{w}}) - \det(\widetilde{M}^{r_{x}}_{r_{w}}) \det(\widetilde{M}^{r_{w}}_{r_{x}}), \end{equation} where recall that $M_{s}^{t}$ is the matrix $M$ with row $s$ and column $t$ removed. We apply Theorem \ref{thm61} to each term in equation (\ref{eqn:condpf}). First consider $\det (\widetilde{M}_{r_{x}}^{r_{x}})$. In order to apply Theorem~\ref{thm61} we must reorder the columns. Since we have removed the column $r_x$ which had entries $Y_{i, y}$, $s_{v}$ column swaps will put the columns in the correct (ascending) order. This follows from the previous observation that we moved the columns corresponding to $y$ and $v$ without exchanging the column with entries $Y_{i,y}$ with the column with entries $Y_{i,v}$. We must also relabel the nodes ${\bf N} - \{x, y \}$ so that they are numbered consecutively. Recall that $\sigma_{xy}$ denotes the unique planar pairing of ${\bf N} - \{x, y \}$ in which like colors are not paired together. When we relabel ${\bf N} - \{x, y \}$ we relabel $\sigma_{xy}$ as well. Call the resulting node set ${\bf N'}$ and the resulting pairing $\sigma'_{xy}$. 
Then by Theorem~\ref{thm61}, $$\det (\widetilde{M}_{r_{x}}^{r_{x}}) = (-1)^{s_{v}} \text{sign}_{OE}(\sigma'_{xy}) \dfrac{ Z^{DD}_{\sigma'_{xy}}(G, {\bf N'} )}{ (Z^{D}(G))^2 }, $$ and thus $$\det (\widetilde{M}_{r_{x}}^{r_{x}}) =(-1)^{s_{v}} \text{sign}_{OE}(\sigma'_{xy}) \dfrac{ Z^{DD}_{\sigma_{xy}}(G, {\bf N} - \{x, y \})}{ (Z^{D}(G))^2 }. $$ Similarly, we have \begin{equation} \label{eqn:RHS} \begin{split} \det (\widetilde{M}_{r_{w}}^{r_{w}}) &= (-1)^{s_{y}} \text{sign}_{OE}(\sigma'_{wv}) \dfrac{ Z^{DD}_{\sigma_{wv}}(G, {\bf N} - \{w, v \})}{ (Z^{D}(G))^2 }, \\ \det (\widetilde{M}_{r_{x}}^{r_{w}}) &= (-1)^{s_{y}} \text{sign}_{OE}(\sigma'_{xv}) \dfrac{ Z^{DD}_{\sigma_{xv}}(G, {\bf N} - \{x, v \})}{ (Z^{D}(G))^2 }, \text{ and} \\ \det (\widetilde{M}_{r_{w}}^{r_{x}}) &= (-1)^{s_{v}} \text{sign}_{OE}(\sigma'_{yw}) \dfrac{ Z^{DD}_{\sigma_{yw}}(G, {\bf N} - \{y, w \})}{ (Z^{D}(G))^2 }. \end{split} \end{equation} It follows that the right hand side of equation (\ref{eqn:condpf}) is \begin{eqnarray*} & & (-1)^{s_{y}} (-1)^{s_{v}} \bigg{(} \text{sign}_{OE}(\sigma'_{xy}) \text{sign}_{OE}(\sigma'_{wv})\dfrac{ Z^{DD}_{\sigma_{xy}}(G, {\bf N} - \{x, y \})}{ (Z^{D}(G))^2 } \dfrac{ Z^{DD}_{\sigma_{wv}}(G, {\bf N} - \{w, v \})}{ (Z^{D}(G))^2 }\\ & & - \text{sign}_{OE}(\sigma'_{xv}) \text{sign}_{OE}(\sigma'_{yw})\dfrac{Z^{DD}_{\sigma_{xv}}(G, {\bf N} - \{x, v \})}{ (Z^{D}(G))^2 } \dfrac{ Z^{DD}_{\sigma_{yw}}(G, {\bf N} - \{y, w \})}{ (Z^{D}(G))^2 } \bigg{). } \end{eqnarray*} Applying Theorem~\ref{thm61} to the left hand side of equation (\ref{eqn:condpf}), we have \begin{equation} \label{eqn:LHS} \begin{split} \det(\widetilde{M}) &= (-1)^{s_{y}} (-1)^{s_{v}} \text{sign}_{OE}(\sigma) \dfrac{Z^{DD}_{\sigma}(G, {\bf N})}{ (Z^{D}(G))^2 }, \text{ and} \\ \det(\widetilde{M}^{r_{x}, r_{w}}_{r_{x}, r_{w}}) &= \text{sign}_{OE}(\sigma'_{xywv}) \dfrac{ Z^{DD}_{\sigma_{xywv}}(G, {\bf N} - \{x, y, w, v\})}{ (Z^{D}(G))^2 }. 
\end{split} \end{equation} We conclude that \begin{eqnarray*} & & \text{sign}_{OE}(\sigma) \text{sign}_{OE}(\sigma'_{xywv})Z^{DD}_{\sigma}(G, {\bf N}) Z^{DD}_{\sigma_{xywv}}(G, {\bf N} - \{x, y, w, v\}) \hspace{.4cm}\\ &=& \text{sign}_{OE}(\sigma'_{xy}) \text{sign}_{OE}(\sigma'_{wv}) Z^{DD}_{\sigma_{xy}}(G, {\bf N} - \{x, y\}) Z^{DD}_{\sigma_{wv}}(G, {\bf N} - \{w, v\}) \\ && - \text{sign}_{OE}(\sigma'_{xv}) \text{sign}_{OE}(\sigma'_{wy}) Z^{DD}_{\sigma_{xv}}(G, {\bf N} - \{x, v\}) Z^{DD}_{\sigma_{wy}}(G, {\bf N} - \{w, y\}). \end{eqnarray*} It is not necessarily the case that the pairings $\sigma_{xy},\sigma_{wv}, \sigma_{xv}, \sigma_{wy}$, and $\sigma_{xywv}$ all exist. First consider the case where one of the pairings $\sigma_{xy},\sigma_{wv}, \sigma_{xv}, \sigma_{wy}$ does not exist. Without loss of generality, assume that $\sigma_{xy}$ does not exist. This means that the number of nodes of different colors in ${\bf N} - \{x, y\}$ do not satisfy the triangle inequality. Then $\det(\widetilde{M}_{r_{x}}^{r_{x}}) = 0$ since every black-white pairing contains an $RGB$-monochromatic pair. There are two possibilities in this case: either the theorem statement holds trivially, or \begin{equation} \label{eqn:reallyspecialcase} Z^{DD}_{\sigma}(G, {\bf N}) Z^{DD}_{\sigma_{xywv}}(G, {\bf N} - \{x, y, w, v\}) = Z^{DD}_{\sigma_{xv}}(G, {\bf N} - \{x, v\}) Z^{DD}_{\sigma_{wy}}(G, {\bf N} - \{w, y\}). \end{equation} Since the numbers of nodes of different colors in ${\bf N} - \{x, y\}$ do not satisfy the triangle inequality, without loss of generality, we may assume there are more red nodes than the combined number of blue and green nodes in ${\bf N} - \{x, y\}$. Since we assumed that in ${\bf N}$, $|R|, |G|,$ and $|B|$ satisfy the triangle inequality, it follows that $|R| = |G| + |B|$ in ${\bf N}$. 
Assuming without loss of generality that when we list the nodes in counterclockwise order starting with the red ones, they are in the order $1, 2, \ldots, 2n$, this means $\sigma$ is the pairing $((1, 2n), (2, 2n-1), \ldots, (n, n+1))$. It must be the case that $x$ and $y$ are both green or blue, so if either $w$ or $v$ is green, then $\sigma_{xywv}$ does not exist and either $\sigma_{xv}$ or $\sigma_{wv}$ does not exist, so the equality holds trivially. If both $w$ and $v$ are red, then $\sigma_{xv}$, $\sigma_{wv}$ and $\sigma_{xywv}$ all exist. In this case, the rest of the proof proceeds as above and we have \begin{eqnarray*} & & \text{sign}_{OE}(\sigma) \text{sign}_{OE}(\sigma'_{xywv})Z^{DD}_{\sigma}(G, {\bf N}) Z^{DD}_{\sigma_{xywv}}(G, {\bf N} - \{x, y, w, v\}) \\ &=& - \text{sign}_{OE}(\sigma'_{xv}) \text{sign}_{OE}(\sigma'_{wy}) Z^{DD}_{\sigma_{xv}}(G, {\bf N} - \{x, v\}) Z^{DD}_{\sigma_{wy}}(G, {\bf N} - \{w, y\}). \end{eqnarray*} Recall that inversions in a planar pairing correspond to nestings (see Remark~\ref{rem:inversionsarenestings}). Because $\sigma$ is the pairing $((1, 2n), (2, 2n-1), \ldots, (n, n+1))$, $\text{sign}(\sigma'_{xv}) = \text{sign}(\sigma'_{wy})$ and $\text{sign}(\sigma'_{xywv}) = \text{sign}(\sigma) \cdot (-1)^{n-1} \cdot (-1)^{n-2}$. Equation (\ref{eqn:reallyspecialcase}) follows. If $\sigma_{xywv}$ does not exist, then this means that the numbers of nodes of different colors in ${\bf N} - \{x, y, w, v\}$ do not satisfy the triangle inequality. Without loss of generality, we may assume there are more red nodes than the combined number of blue and green nodes in ${\bf N} - \{x, y, w, v\}$. By the same reasoning as above, $\det(\widetilde{M}_{r_{x}, r_{w}}^{r_{x}, r_{w}}) = 0$. If any one of $x, y, w,$ or $v$ is red, then the equation holds trivially. If all of $x, y, w$, and $v$ are green or blue, then in the original node set ${\bf N}$, $|R| +2= |G| + |B|$. 
So each of the pairings $\sigma'_{xy}, \sigma'_{wv}, \sigma'_{xv}$, and $\sigma'_{wy}$ is $((1, 2n-2), (2, 2n-1), \ldots)$. Then we have \begin{eqnarray*} & & \text{sign}_{OE}(\sigma'_{xy}) \text{sign}_{OE}(\sigma'_{wv}) Z^{DD}_{\sigma_{xy}}(G, {\bf N} - \{x, y\}) Z^{DD}_{\sigma_{wv}}(G, {\bf N} - \{w, v\}) \\ &= & \text{sign}_{OE}(\sigma'_{xv}) \text{sign}_{OE}(\sigma'_{wy}) Z^{DD}_{\sigma_{xv}}(G, {\bf N} - \{x, v\}) Z^{DD}_{\sigma_{wy}}(G, {\bf N} - \{w, y\}). \end{eqnarray*} Since inversions in a planar pairing correspond to nestings, all pairings have the same sign. So in this case, \[ Z^{DD}_{\sigma_{xy}}(G, {\bf N} - \{x, y\}) Z^{DD}_{\sigma_{wv}}(G, {\bf N} - \{w, v\}) = Z^{DD}_{\sigma_{xv}}(G, {\bf N} - \{x, v\}) Z^{DD}_{\sigma_{wy}}(G, {\bf N} - \{w, y\}) . \] \end{proof} \begin{rem} \label{rem:caseanalysis} To simplify the expression in Theorem~\ref{thm:cond}, it suffices to know the RGB-coloring of the nodes $x, y, w$, $v$. Without loss of generality, assume that when we list the nodes in counterclockwise order starting with the red ones, they are in the order $1, 2, \ldots, 2n$. Let $|RG(\sigma)|$ be the number of red-green pairs in $\sigma$. Define $|GB(\sigma)|$ and $|RB(\sigma)|$ similarly. Assume that $|RG(\sigma)|, |GB(\sigma)|,$ and $|RB(\sigma)|$ are all nonzero. If the set of nodes $\{x, y\}$ contains one red node and one blue node, then $\sigma_{xy}$ has one fewer red-blue pair than $\sigma$, but the number of red-green and green-blue pairs is the same (see Figure~\ref{fig:specialcase}). By Remark~\ref{rem:inversionsarenestings}, to determine the relationship between $\text{sign}_{OE}(\sigma)$ and $\text{sign}_{OE}(\sigma'_{xy})$ it suffices to count the number of nestings in the diagram of $\sigma$ that involve a red-blue pair $(n_r, n_b)$. There is one nesting for each red-blue pair other than $(n_r, n_b)$, one nesting for each red-green pair, and one nesting for each green-blue pair (see Figure~\ref{fig:specialcase1}). 
Therefore, $$\text{sign}(\sigma'_{xy}) = \text{sign}_{OE}(\sigma) \cdot (-1)^{|RG(\sigma)|} \cdot (-1)^{|GB(\sigma)|} \cdot (-1)^{|RB(\sigma)|-1}. $$ \begin{figure}\label{fig:specialcase} \end{figure} If the set of nodes $\{x, y\}$ contains one red node and one green node, then $\sigma_{xy}$ has one fewer red-green pair than $\sigma$, but the number of red-blue and green-blue pairs is the same. So we count the number of nestings in the diagram of $\sigma$ that involve a red-green pair $(n_r, n_g)$. There is one nesting for each red-green pair other than $(n_r, n_g)$, and one nesting for each red-blue pair. Therefore, $$\text{sign}_{OE}(\sigma'_{xy}) = \text{sign}_{OE}(\sigma) \cdot (-1)^{|RB(\sigma)|} \cdot (-1)^{|RG(\sigma)| - 1}.$$ Similarly, if the set of nodes $\{x, y\}$ contains one green node and one blue node, then $$\text{sign}_{OE}(\sigma'_{xy}) = \text{sign}_{OE}(\sigma) \cdot (-1)^{|RB(\sigma)|} \cdot (-1)^{|GB(\sigma)| - 1}.$$ \begin{figure}\label{fig:specialcase1} \end{figure} If both $x$ and $y$ are green nodes, then $\sigma_{xy}$ has one fewer red-green pair, one fewer green-blue pair, and one more red-blue pair, as shown in Figure \ref{fig:specialcase}. Removing a red-green pair from $\sigma$ removes $|RB(\sigma)| + |RG(\sigma)| -1$ nestings. Then, removing a green-blue pair removes $|RB(\sigma)| + |GB(\sigma)| -1$ nestings. After these pairs have been removed, adding a red-blue pair results in $|RB(\sigma)| + |GB(\sigma)|-1 + |RG(\sigma)|-1$ additional nestings. Therefore, $$ \text{sign}_{OE}(\sigma'_{xy}) = \text{sign}_{OE}(\sigma) \cdot (-1)^{|RB(\sigma)|}.$$ If both $x$ and $y$ are red nodes, $\sigma_{xy}$ has one fewer red-blue pair, one fewer red-green pair, and one more green-blue pair. Removing a red-blue pair from $\sigma$ removes $|RG(\sigma)| + |GB(\sigma)| + |RB(\sigma)| -1$ nestings. Then, removing a red-green pair removes $|RB(\sigma)| -1 + |RG(\sigma)| -1$ nestings. 
After these pairs have been removed, adding a green-blue pair results in $|RB(\sigma)| -1 + |GB(\sigma)|$ additional nestings. Thus $$ \text{sign}_{OE}(\sigma'_{xy}) = \text{sign}_{OE}(\sigma) \cdot (-1)^{|RB(\sigma)|}.$$ Similarly, if both $x$ and $y$ are blue, $$ \text{sign}_{OE}(\sigma'_{xy}) = \text{sign}_{OE}(\sigma) \cdot (-1)^{|RB(\sigma)|}.$$ \end{rem} If we assume that the nodes $x, y, w, v$ alternate black and white and the set $\{x,y,w,v\}$ contains at least one node of each RGB color, we can use Remark~\ref{rem:caseanalysis} to show that all the signs in Theorem~\ref{thm:cond} are positive. \begin{customthm}{\ref{cor:cond}} Divide the nodes into three circularly contiguous sets $R$, $G$, and $B$ such that $|R|, |G|$ and $|B|$ satisfy the triangle inequality and let $\sigma$ be the corresponding tripartite pairing. Let $x, y, w, v$ be nodes appearing in a cyclic order such that the set $\{x,y,w,v\}$ contains at least one node of each RGB color. If $x$ and $w$ are both black and $y$ and $v$ are both white, then \begin{eqnarray*} Z^{DD}_{\sigma}(G, {\bf N}) Z^{DD}_{\sigma_{xywv}}(G, {\bf N} - \{x, y, w, v\}) &=& Z^{DD}_{\sigma_{xy}}(G, {\bf N} - \{x, y\}) Z^{DD}_{\sigma_{wv}}(G, {\bf N} - \{w, v\}) \\ & & + Z^{DD}_{\sigma_{xv}}(G, {\bf N} - \{x, v\}) Z^{DD}_{\sigma_{wy}}(G, {\bf N} - \{w, y\}). \end{eqnarray*} \normalsize \end{customthm} \begin{proof} Without loss of generality, assume that when we list the nodes in counterclockwise order starting with the red ones, they are in the order $1, 2, \ldots, 2n$. Assume also that one of the nodes $x, y, w, v$ is red, two are green, and one is blue. The other cases are very similar. By the assumption that the nodes are in cyclic order, there are two possibilities\footnotemark : \footnotetext{The assumption that the nodes $x, y, w, v$ are in cyclic order is required. Otherwise, it would be possible for $x$ to be red, $y$ to be green, $w$ to be blue, and $v$ to be green. 
In this case, the sets $\{x, y\}$ and $\{x, v\}$ consist of one red node and one green node, and the sets $\{w, v\}$ and $\{y, w\}$ consist of one green node and one blue node.} \begin{itemize} \item[(i)] One of the sets $\{x, y\}, \{w, v\}$ consists of a red node and a green node and the other consists of a green node and a blue node. Also, one of the sets $\{x, v\},\{y, w\}$ consists of one red node and one blue node, and the other consists of two green nodes. \item[(ii)] One of the sets $\{x, v\},\{y, w\}$ consists of a red node and a green node and the other consists of a green node and a blue node. Also, one of the sets $\{x, y\}, \{w, v\}$ consists of one red node and one blue node, and the other consists of two green nodes. \end{itemize} We only prove case (i), as case (ii) is essentially the same. By Remark~\ref{rem:caseanalysis}, \begin{eqnarray*} \text{sign}_{OE}(\sigma'_{xy}) \text{sign}_{OE}(\sigma'_{wv}) &=& (-1)^{|RB(\sigma)|} (-1)^{|RG(\sigma)| -1} (-1)^{|RB(\sigma)|} (-1)^{|GB(\sigma)| -1} \\ &=&(-1)^{|RG(\sigma)|}(-1)^{|GB(\sigma)|}, \text{ and} \\ \text{sign}_{OE}(\sigma'_{xv}) \text{sign}_{OE}(\sigma'_{wy}) &=& (-1)^{|RG(\sigma)|} (-1)^{|GB(\sigma)|} (-1)^{|RB(\sigma)|-1} (-1)^{|RB(\sigma)|} \\ &=&- (-1)^{|RG(\sigma)|}(-1)^{|GB(\sigma)|}. \end{eqnarray*} Since we can obtain $\sigma_{xywv}$ by first removing the nodes $x, y$, and then removing the nodes $w, v$, $\text{sign}_{OE}(\sigma'_{xywv}) = \text{sign}_{OE}(\sigma'_{xy}) \text{sign}_{OE}(\sigma'_{wv}) $. Thus by Theorem~\ref{thm:cond}, \begin{eqnarray*} Z^{DD}_{\sigma}(G, {\bf N}) Z^{DD}_{\sigma_{xywv}}(G, {\bf N} - \{x, y, w, v\}) &=& Z^{DD}_{\sigma_{xy}}(G, {\bf N} - \{x, y\}) Z^{DD}_{\sigma_{wv}}(G, {\bf N} - \{w, v\}) \\ &&+ Z^{DD}_{\sigma_{xv}}(G, {\bf N} - \{x, v\}) Z^{DD}_{\sigma_{wy}}(G, {\bf N} - \{w, y\}) . \end{eqnarray*} \normalsize \end{proof} \end{document}
\begin{document} \titlerunning{Log-related portfolios under random horizon} \title{Log-optimal and num\'eraire portfolios for market models stopped at a random time} \begin{abstract} This paper focuses on num\'eraire portfolio and log-optimal portfolio (portfolio with finite expected utility that maximizes the expected logarithm utility from terminal wealth), when a market model $(S,\mathbb F)$ --specified by its assets' price $S$ and its flow of information $\mathbb F$-- is stopped at a random time $\tau$. This setting covers the areas of credit risk and life insurance, where $\tau$ represents the default time and the death time respectively. Thus, the progressive enlargement of $\mathbb F$ with $\tau$, denoted by $\mathbb G$, sounds tailor-fit for modelling the new flow of information that incorporates both $\mathbb F$ and $\tau$. For the resulting stopped model $(S^{\tau},\mathbb G)$, we study the two portfolios in different manners, and describe their computations in terms of the $\mathbb F$-observable parameters of the pair $(S, \tau)$. As existence of num\'eraire portfolios for $(S, \mathbb F)$ and $(S^{\tau},\mathbb G)$ is well understood due to \cite{ACDJ1,ACDJ3,CD1,ChoulliDengMa,KardarasKaratzas}, herein we characterize num\'eraire portfolio of $(S^{\tau},\mathbb G)$ in various manners and we single out the types of risks borne by $\tau$ that really affect the portfolio. In contrast to num\'eraire portfolio, both existence and characterization of log-optimal portfolio of $(S^{\tau},\mathbb G)$ pose serious challenges. Among these we mention the following. a) What are the conditions on $\tau$ (preferably in terms of information theoretic concepts) that fully characterize the existence of log-optimal portfolio of $(S^{\tau},\mathbb G)$ when that of $(S,\mathbb F)$ already exists? 
b) What are the factors that fully determine {\it the increment in maximum expected logarithmic utility from terminal wealth} for the two models $(S^{\tau},\mathbb G)$ and $(S,\mathbb F)$, and how to quantify them? This problem arises naturally, given that the investor endowed with the flow $\mathbb G$ possesses an informational advantage, as she can see $\tau$ occurring, but also faces various risks. Besides answering deeply these problems and other related challenges, our paper proposes in its details another modelling method for random times and shows their positive and negative impacts on log-optimal portfolio. \end{abstract} \section{Introduction} This paper addresses two portfolios that are intimately related to the logarithmic utility. These portfolios are known in the literature as num\'eraire and log-optimal portfolios, that we start by defining below for the sake of full precision. To this end, we denote by $W^{\theta}$ the wealth process of the portfolio $\theta$. \begin{definition}\label{NP/LogOP} Let $(X, \mathbb H, Q)$ be a market model, where $X$ is the assets' price process, $\mathbb H$ is a filtration, and $Q$ is a probability measure. 
Consider a fixed investment horizon $T\in(0,+\infty)$, and a portfolio $\theta^*$.\\ {\rm{(a)}} $\theta^*$ is {\it num\'eraire portfolio} for $(X, \mathbb H, Q)$ if $W^{\theta^*}>0$ and \begin{eqnarray}\label{NP} {{W^{\theta}}\over{W^{\theta^*}}}\ \mbox{is a supermartingale under $(\mathbb H, Q)$, for any portfolio $\theta$ with $W^{\theta}\geq 0$}.\hskip 0.5cm \end{eqnarray} {\rm{(b)}} $\theta^*$ is called {\it log-optimal portfolio} for $(X, \mathbb H, Q)$ if $\theta^*\in \Theta(X,\mathbb H, Q)$ and \begin{eqnarray} u_T(X,\mathbb H, Q):=\sup_{\theta\in\Theta}E_Q\left[\ln(W^{\theta}_T)\right]= E_Q\left[\ln(W^{\theta^*}_T)\right],\label{LogInfinite}\end{eqnarray} where $E_Q[.]$ is the expectation under $Q$, and $\Theta:=\Theta(X,\mathbb H, Q)$ is given by \begin{eqnarray}\label{AdmissibleSet0} \hskip -0.6cm \Theta(X,\mathbb H, Q):=\left\{\mbox{portfolio}\ \theta\ \Big|\ W^{\theta}> 0\quad \mbox{and}\quad E_Q\left[\vert \ln(W^{\theta}_T)\vert \right]<+\infty\right\}.\end{eqnarray} \end{definition} The problem of maximization of expected logarithm-utility from terminal wealth, defined in (\ref{LogInfinite})-(\ref{AdmissibleSet0}), received a lot of attention in the literature, even though it is a particular case of the utility maximization theory problem. This latter problem is addressed at various levels of generality, and for further details about it we refer to \cite{CSW,Karatzas,KW99,KZ,merton71,merton73} and the references therein. \\ The num\'eraire portfolio was introduced --up to our knowledge-- in \cite{Long}, where $W^{\theta}/W^{\theta^*}$ is required to be a martingale, while Definition \ref{NP/LogOP}-(a) goes back to \cite[Definition 4.1]{Becherer}, who remarked that the martingality requirement for $W^{\theta}/W^{\theta^*}$ is too stringent to obtain a general existence result.
Then these works were extended and investigated extensively in different directions in \cite{Becherer,ChoulliDengMa,ChristensenLarsen2007,HulleySchweizer,KardarasKaratzas} and the references therein. In \cite{Becherer,ChristensenLarsen2007,HulleySchweizer,GollKallsen}, it was proved that under the no-free-lunch-with-vanishing-risk assumption (NFLVR hereafter) and/or $\sup_{\theta\in\Theta}E\left[\ln(W^{\theta}_T)\right]<+\infty$, the two portfolios (log-optimal and num\'eraire) coincide. Using the change of probability technique, a deep and precise connection between the two portfolios is established in \cite{ChoulliDengMa} under no assumption. Furthermore, very recently in \cite{ChoulliYansori2} and under no assumption at all, this connection is elaborated without changing the probability, and the explicit computation of log-optimal portfolio and other related properties were also developed. This latter work, to which we also refer for more detailed discussions about the literature on these portfolios, definitely seals these questions about the two portfolios, and is vital for our current paper as it requires no assumption. \subsection{What are our objectives and what does the literature say about them?} In this paper, we consider an initial market model represented by the pair $(S,\mathbb F)$, where $S$ represents the discounted stock prices for $d$-stocks, and $\mathbb F$ is the ``public" information that is available to all agents. To this initial market model, we add a random time $\tau$ that might not be seen through $\mathbb F$ when it occurs (mathematically speaking $\tau$ might not be an $\mathbb F$-stopping time). In this context, we adopt the progressive enlargement of filtration to model the larger information that includes both $\mathbb F$ and $\tau$.
The obtained new informational system, that we denote by $(S^{\tau},\mathbb G)$, allows us to keep in mind credit risk theory and life insurance as potential applications of our results, besides the general financial setting of markets with random horizons. For this informational market, our ultimate goal lies in measuring the impact of $\tau$ on num\'eraire and log-optimal portfolios, no matter what is the model for $(S,\mathbb F)$ and no matter how it is related to $\tau$ that is an arbitrary random time with positive ``survival probability" (i.e. Az\'ema supermartingale). Our setting falls into the broad topic of {\it portfolio problem under asymmetries of information}. The mathematical literature on information modelling proposes only two cases of incorporating the extra information depending on whether this information is added at the beginning of the investment interval or progressively over time. The first case corresponds mathematically to the initial enlargement of filtration and is known in the finance and mathematical finance literatures as {\it the insider trading} setting. For this insider framework, log-optimal portfolio is extensively studied and we refer the reader to \cite{amendingerimkellerschweizer98,ADImkeller,AImkeller,JImkellerKN,GrorudPontier,pikovskykaratzas96,kohatsusulem06} and the references therein to cite a few. Most of this literature focuses on two intimately related questions on log-optimal portfolio for the model $(S,{\mathbb G}^*)$, where ${\mathbb G}^*$ is the initial enlargement of $\mathbb F$ with a random variable $L$ that represents the extra knowledge.
In fact, under some assumption on the pair $(L, \mathbb F)$, frequently called Jacod's assumption, the existence of log-optimal portfolio and the evaluation of the {\bf increment of expected logarithm-utility from terminal wealth} (denoted hereafter by IEU$_{log}(S,{\mathbb G}^*, \mathbb F)$) for both models $(S,{\mathbb G}^*)$ and $(S,\mathbb F)$ represent the core contribution of these papers, where it is proven that \begin{eqnarray}\label{InsiderFormula} \mbox{IEU}_{log}(S,{\mathbb G}^*, \mathbb F):=u_T(S, {\mathbb G}^*)-u_T(S, \mathbb F)=\mbox{relative entropy}(P\big| Q^*). \end{eqnarray} Hence, in this insider setting, log-optimal portfolio for $(S,{\mathbb G}^*)$ exists if and only if $P$ has a finite entropy with respect to $Q^*$, a precise probability measure associated to $L$ that is explicitly described. In particular, the quantity $\mbox{IEU}_{log}(S,{\mathbb G}^*, \mathbb F)$ is always a true gain due to the advantage of knowing fully $L$ by the investor endowed with the flow $\mathbb G^*$. The formula (\ref{InsiderFormula}) was initially derived in \cite{pikovskykaratzas96} for the Brownian filtration, and it was extended to models driven by general continuous local martingales in \cite{amendingerimkellerschweizer98}, where the authors connect this formula with Shannon entropy of $L$ for some models. The Shannon concept was exploited deeply in \cite{ADImkeller} afterwards, where the authors show its important role in measuring the impact of inside-information on log-optimal portfolio. The second case of information modelling, which suggests to add the extra information over time as it occurs, leads to the progressive enlargement of filtration, and is tailor-fit to our current financial setting in contrast to the initial enlargement.
Our economic and financial problem that deals with how a random horizon will impact an investment (in particular num\'eraire and log-optimal portfolios) can be traced back to Fisher \cite{fisher1931}. Since then, the problem has been addressed in the economic literature by focusing on discrete market models and the impact of the distribution of $\tau$ only, see \cite{Hakansson,Yaari1965} and the references therein. Thus, our paper seems to be the {\it first of its kind} in virtue of the general setting for $(S, \mathbb F, \tau)$ and both the qualitative and quantitative results obtained. Below, we highlight the intuitive ideas that led to these results. Given that a log-optimal portfolio is a num\'eraire portfolio, see \cite{ChoulliYansori2} and the references therein, we start by addressing num\'eraire portfolio of $(S^{\tau},\mathbb G)$. Thanks to \cite{ChoulliDengMa,KardarasKaratzas} that connects the existence of num\'eraire portfolio to the concept of No-Unbounded-Profit-with-bounded-risk (NUPBR hereafter), and the recent works \cite{ACDJ1,ACDJ3,CD1} on NUPBR for the stopped model $(S^{\tau},\mathbb G)$, the problem of existence of num\'eraire portfolio for $(S^{\tau},\mathbb G)$ is completely understood. Thus, herein, we focus on describing this num\'eraire portfolio in terms of the $\mathbb F$-observable data and processes, and mainly single out \begin{eqnarray}\label{Q1} \mbox{which types of risks borne by $\tau$ affect num\'eraire portfolio.}\hskip 0.65cm\end{eqnarray} For log-optimal portfolio of $(S^{\tau}, \mathbb G)$, the situation is more challenging, and the problem of its existence is the first obstacle. To address this, we appeal to the explicit description of the set of deflators for $(S^{\tau}, \mathbb G)$, recently developed together with its application to NFLVR in \cite{ChoulliYansori1}, and answer the following.
\begin{eqnarray}\label{Q2} \mbox{For which models of $(S, \tau)$, log-optimal portfolio of $(S^{\tau}, \mathbb G)$ exists?}\end{eqnarray} It is worth mentioning that this existence question is much deeper and more general than the corresponding one addressed in the insider setting. Indeed, in our framework, there is no hope for (\ref{InsiderFormula}) to hold in its current form, and only a practical answer to (\ref{Q2}) will allow us to answer the question below. \begin{eqnarray}\label{Q3} &&\mbox{What {\it informational condition} on $\tau$ characterizes the existence }\nonumber\\ &&\mbox{ of log-optimal portfolio for $(S^{\tau}, \mathbb G)$ if $(S,\mathbb F)$ has log-optimal portfolio?}\hskip 0.75cm\end{eqnarray} For our case of random horizon, the {\it increment of expected logarithm-utility} between $(S^{\tau}, \mathbb G)$ and $(S,\mathbb F)$, that we denote by IEU$_{log}(S, \tau, \mathbb F)$, is defined by \begin{eqnarray}\label{Delta(S, Tau)} \mbox{IEU}_{log}(S,\tau, \mathbb F):=\Delta_T(S, \tau, \mathbb F):=u_T(S^{\tau}, \mathbb G)-u_T(S, \mathbb F),\end{eqnarray} and is affected by many factors, and hence we address the question of \begin{eqnarray}\label{Q5} \mbox{which factors explain how sensitive IEU$_{log}(S,\tau,\mathbb F)$ is to $\tau$}?\end{eqnarray} To answer this question, we prefer to study the much deeper question below, that deals with the explicit computation of log-optimal portfolio. \begin{eqnarray}\label{Q4} \mbox{How can log-optimal portfolio of $(S^{\tau},\mathbb G)$ be described using $\mathbb F$ only?}\hskip 0.75cm\end{eqnarray} \subsection{What are our achievements?} Our mathematical and financial achievements are numerous and highly novel in both conceptual and methodological aspects. In fact, we answer all the aforementioned questions above (i.e.
(\ref{Q1}), (\ref {Q2}),(\ref{Q3}), (\ref{Q5}), (\ref{Q4})) and other related problems in a very detailed and deep analysis and in various manners. In fact, we describe log-optimal portfolio, the structures of its associated log-optimal deflator, and num\'eraire portfolio for $(S^{\tau},\mathbb G)$ in different manners. As a result, we prove that the random horizon induces randomness in agent's preference (or the agent's impatience as called in Fisher \cite{fisher1931}). This connects the random horizon issue to random utilities that appeared in economics within the {\it random utility model theory} due to the psychometric literature that gave empirical evidence for stochastic choice behaviour. For details about this theme, we refer to \cite{Clark96,Cohen80,Mcfadden90,Suppes89} and for applications we refer to \cite{ChoulliMaMorlais,ChoulliMa,Ma,MusielaZariphoupoulou,KZ}.\\ Our results show that both portfolios (num\'eraire and log-optimal) are affected by the correlation between $S$ and $\tau$ only, and this correlation is explicitly parametrized using $\mathbb F$-adapted processes. We prove that $\Delta_T(S,\tau,\mathbb F)$, defined in (\ref{Delta(S, Tau)}), depends on four factors. These factors, that we quantify explicitly, are ``the cost-of-leaving-earlier", ``the information-premium", which is due to the advantage of knowing the occurring of $\tau$ when it happens, ``the correlation" between num\'eraire portfolio of $(S,\mathbb F)$ and $\tau$, and ``the correlation-risk" between $\tau$ and $S$. These factors explain how complex is the impact of a random horizon on portfolio compared to the impact of an {\it inside-information}. This paper contains five sections including the current one. Section \ref{section2} presents the mathematical and the financial model besides the corresponding required notation and some preliminaries that state some existing results that are important herein. 
Section \ref{section3} addresses num\'eraire portfolio for $(S^{\tau},\mathbb G)$, while Section \ref{section4} focuses on the existence of log-optimal portfolio and the duality. Section \ref{section5} describes explicitly both num\'eraire and log-optimal portfolios using the $\mathbb F$-predictable characteristics of the model, and discusses its financial applications and consequences. The paper contains an appendix where some proofs are relegated and some technical (new and existing) results are detailed. \section{The mathematical model and preliminaries}\label{section2} Throughout the paper, by ${\mathbb H}$ we denote an arbitrary filtration that satisfies the usual conditions of completeness and right continuity. For any process $X$, the $\mathbb H$-optional projection and dual $\mathbb H$-optional projection of $X$, when they exist, will be denoted by $^{o,\mathbb H}X$ and $X^{o,\mathbb H}$ respectively. Similarly, we denote by $^{p,\mathbb H}X$ and $X^{p,\mathbb H}$ the $\mathbb H$-predictable projection and dual predictable projection of $X$ when they exist. The set ${\cal M}(\mathbb H, Q)$ denotes the set of all $\mathbb H$-martingales under $Q$, while ${\cal A}(\mathbb H, Q)$ denotes the set of all optional processes with integrable variation under $Q$. When there is no risk of confusion, we simply omit the probability for the sake of simplifying notation. For an $\mathbb H$-semimartingale $X$, by $L(X,\mathbb H)$ we denote the set of $\mathbb H$-predictable processes that are $X$-integrable in the semimartingale sense. For $\varphi\in L(X,\mathbb H)$, the resulting integral of $\varphi$ with respect to $X$ is denoted by $\varphi\bigcdot X$. For an $\mathbb H$-local martingale $M$, we denote by $L^1_{loc}(M,\mathbb H)$ the set of $\mathbb H$-predictable processes $\varphi$ that are $M$-integrable and for which the resulting integral $\varphi\bigcdot M$ is an $\mathbb H$-local martingale.
If ${\cal C}(\mathbb H)$ is the set of processes that are adapted to $\mathbb H$, then ${\cal C}_{loc}(\mathbb H)$ is the set of processes, $X$, for which there exists a sequence of $\mathbb H$-stopping times, $(T_n)_{n\geq 1}$, that increases to infinity and $X^{T_n}$ belongs to ${\cal C}(\mathbb H)$, for each $n\geq 1$. For any $\mathbb H$-semimartingale, $L$, we denote by ${\cal E}(L)$ the Dol\'eans-Dade (stochastic) exponential; it is the unique solution to the stochastic differential equation $dX=X_{-}dL,\quad X_0=1,$ given by $$ {\cal E}_t(L)=\exp(L_t-{1\over{2}}\langle L^c\rangle_t)\prod_{0<s\leq t}(1+\Delta L_s)e^{-\Delta L_s}.$$ {\bf How our financial model is parametrized?} Our model starts with a filtered probability space $\left(\Omega, {\cal F}, \mathbb F,P\right)$. Here the filtration $\mathbb F:=({\cal F}_t)_{t\geq 0}$, which represents the ``public" flow of information available to all agents over time, satisfies the usual conditions of right continuity and completeness. On this stochastic basis, we suppose given a $d$-dimensional $\mathbb F$-semimartingale, $S$, that models the discounted price process of $d$ risky assets. In addition to this initial market model $(S, \mathbb F)$, we consider a random time $\tau$, that might represent the death time of an agent or the default time of a firm, and hence it might not be an $\mathbb F$-stopping time in general. To this random time, we associate the non-decreasing process $D$ and the filtration $\mathbb G:=({\cal G}_t)_{t\geq 0}$ given by \begin{equation}\label{processD} D:=I_{\Rbrack\tau,+\infty\Rbrack},\ \ \ \ {\cal G}_t:={\cal G}^0_{t+}\ \ \mbox{where} \ \ {\cal G}_t^0:={\cal F}_t\vee\sigma\left(D_s,\ s\leq t\right). \end{equation} It is clear that $\mathbb G$ makes $\tau$ a stopping time. In fact, it is the smallest filtration, satisfying the usual conditions, that makes $\tau$ a stopping time and contains $\mathbb F$. It is the progressive enlargement of $\mathbb F$ with $\tau$.
Besides $D$ and $\mathbb G$, other $\mathbb F$-adapted processes intimately related to $\tau$ play central roles in our analysis. Among these are the following survival probabilities, also called Az\'ema supermartingales in the literature, given by \begin{eqnarray}\label{GGtilde} G_t :=^{o,\mathbb F}(I_{\Rbrack0,\tau\Rbrack})_t= P(\tau > t | {\cal F}_t) \ \mbox{ and } \ \widetilde{G}_t :=^{o,\mathbb F}(I_{\Rbrack0,\tau\Lbrack})_t= P(\tau \ge t | {\cal F}_t),\hskip 0.5cm\end{eqnarray} while the process \begin{equation} \label{processm} m := G + D^{o,\mathbb F}, \end{equation} is an $\mathbb F$-martingale. Then thanks to \cite{ACJ} and \cite{ChoulliDavelooseVanmaele}, we claim the following. \begin{theorem} The following assertions hold.\\ {\rm{(a)}} For any $M\in{\cal M}_{loc}(\mathbb F)$, the process \begin{equation} \label{processMhat} {\cal T}(M) := M^\tau -{\widetilde{G}}^{-1} I_{\Lbrack 0,\tau\Lbrack} \bigcdot [M,m] + I_{\Lbrack 0,\tau\Lbrack} \bigcdot \Big(\sum \Delta M I_{\{\widetilde G=0<G_{-}\}}\Big)^{p,\mathbb F},\end{equation} is a $\mathbb G$-local martingale.\\ {\rm{(b)}} We always have \begin{equation} \label{processNG} N^{\mathbb G}:=D - \widetilde{G}^{-1} I_{\Lbrack 0,\tau\Lbrack} \bigcdot D^{o,\mathbb F}\in{\cal M}(\mathbb G)\cap{\cal A}(\mathbb G), \end{equation} and $H\bigcdot N^{\mathbb G}\in {\cal M}_{loc}(\mathbb G)\cap{\cal A}_{loc}(\mathbb G)$ for any $H$ belonging to \begin{equation} \label{SpaceLNG} {\mathcal{I}}^o_{loc}(N^{\mathbb G},\mathbb G) := \Big\{K\in \mathcal{O}(\mathbb F)\ \ \big|\quad \vert{K}\vert G{\widetilde G}^{-1} I_{\{\widetilde{G}>0\}}\bigcdot D\in{\cal A}^+_{loc}(\mathbb G)\Big\}.
\end{equation} \end{theorem} For $p\in [1,+\infty)$ and a $\sigma$-algebra ${\cal H}$ on $\Omega\times [0,+\infty[$, we define $L^p_{loc}\left({\cal H}, P\otimes D\right)$ as the set of all processes $X$ for which there exists a sequence of $\mathbb F$-stopping times $(T_n)_{n\geq 1}$ that increases to infinity almost surely and $X^{T_n}$ belongs to $L^p\left({\cal H}, P\otimes D\right)$ given by \begin{equation}\label{L1(PandD)Local} L^p\left({\cal H}, P\otimes D\right):=\left\{ X\ {\cal H}\mbox{-measurable}\big|\ \mathbb E[\vert X_{\tau}\vert^p I_{\{\tau<+\infty\}}]<+\infty\right\}.\end{equation} The explicit description of the set of all deflators for $(S^{\tau},\mathbb G)$ is vital for our analysis of log-optimal and num\'eraire portfolios undertaken in the coming sections. Thus, we start by recalling the mathematical definition of deflators. \begin{definition}\label{DeflatorDefinition} Let $X$ be an $\mathbb H$-semimartingale and $Z$ be a process.\\ We call $Z$ a deflator for $(X,\mathbb H)$ if $Z>0$ and $Z{\cal E}(\varphi\bigcdot X)$ is an $\mathbb H$-supermartingale, for any $\varphi\in L(X, \mathbb H)$ such that $\varphi\Delta X\geq -1$. \\ The set of all deflators for $(X,\mathbb H)$ will be denoted by ${\cal D}(X,\mathbb H)$. \end{definition} Throughout the paper, the following subset of ${\cal D}(X,\mathbb H)$ will be very useful \begin{eqnarray} {\cal D}_{log}(X,\mathbb H)&&:=\Bigl\{Z\in {\cal D}(X,\mathbb H)\ \big| E[-\ln(Z_T)]<+\infty\Bigr\}.\label{DeflatorsLOG} \end{eqnarray} The following is borrowed from \cite{ChoulliYansori1}, and parametrizes explicitly ${\cal D}(S^{\tau}, \mathbb G)$. \begin{theorem}\label{GeneralDeflators} Suppose $G > 0$, and let ${\cal T}(\cdot)$ be the operator defined in (\ref{processMhat}). Then the following assertions hold.\\ {\rm{(a)}} $Z^{\mathbb G}$ is a deflator for $(S^{\tau}, \mathbb G)$ (i.e.
$Z^{\mathbb G}\in {\cal D}(S^{\tau}, \mathbb G)$) if and only if there exists a unique $\left(Z^{\mathbb F}, \varphi^{(o)}, \varphi^{(pr)}\right)$ such that $Z^{\mathbb F}\in{\cal D}(S, \mathbb F)$, $(\varphi^{(o)},\varphi^{(pr)})$ belongs to ${\cal I}^o_{loc}(N^{\mathbb G},\mathbb G)\times L^1_{loc}({\rm{Prog}}(\mathbb F),P\otimes D)$, \begin{eqnarray} &&\varphi^{(pr)}>-1,\quad -{\widetilde G}/ G<\varphi^{(o)},\ \varphi^{(o)}(\widetilde G -G)<{\widetilde G},\ P\otimes D\mbox{-a.e.,} \label{ineqMultiGeneral1}\\ &&\mbox{and}\quad Z^{\mathbb G}={{(Z^{\mathbb F})^{\tau}}\over{{\cal E}(G_{-}^{-1}\bigcdot m)^{\tau}}}{\cal E}(\varphi^{(o)}\bigcdot N^{\mathbb G}){\cal E}(\varphi^{(pr)}\bigcdot D).\label{repKGMultiGEneral}\end{eqnarray} {\rm{(b)}} For any $K\in {\cal M}_{0,loc}(\mathbb F)$, we always have \begin{eqnarray*} {{{\cal E}(K)^{\tau}/{\cal E}(G_{-}^{-1}\bigcdot m)^{\tau}}}={\cal E}\Bigl({\cal T}(K-G_{-}^{-1}\bigcdot m)\Bigr)\in {\cal M}_{loc}(\mathbb G).\end{eqnarray*} {\rm{(c)}} The process $1/{\cal E}(G_{-}^{-1}\bigcdot m)^{\tau}$ is a $\mathbb G$-martingale, and for any $T\in (0,+\infty)$ we denote by ${\widetilde Q}_T$ the probability measure given by \begin{eqnarray}\label{Qtilde} d{\widetilde Q}_T:=\Bigl({\cal E}_{T\wedge\tau}\left(G_{-}^{-1}\bigcdot m\right)\Bigr)^{-1}dP.\end{eqnarray} \end{theorem} \section{Num\'eraire portfolio under random horizon}\label{section3} This section addresses the impact of $\tau$ on the num\'eraire portfolio. To this end, we start by giving a mathematical sense to Definition \ref{NP/LogOP} as follows. \begin{definition}\label{Math4Definition1.1} Let $(X,\mathbb H, Q)$ be a market model, where $\mathbb H$ is a filtration, $Q$ is a probability measure, and $X$ is an $\mathbb H$-semimartingale under $Q$. \\ {\rm{(a)}} A portfolio $\theta$ is a predictable process that is $X$-integrable (i.e. $\theta\in L(X, Q,\mathbb H)$).
A wealth process $W^{\theta}$ associated to the pair $(\theta, x)$ of a portfolio and an initial capital (i.e. $x>0$) is given by \begin{eqnarray}\label{WealthProcess} W^{\theta}:=x+\theta\bigcdot X.\end{eqnarray} {\rm{(b)}} Let $\theta$ be a portfolio and $x>0$ be an initial capital. If the wealth process for the pair $(\theta, x)$ satisfies $W^{\theta}>0$ and $W^{\theta}_{-}>0$, then the process \begin{eqnarray}\label{PrtfolioRate} \varphi^{(\theta)}:={{\theta/W^{\theta}_{-}}}\quad \mbox{ is called {\it portfolio rate}}.\end{eqnarray} \end{definition} \begin{remark}\label{Remark4Definitions} {\rm{(a)}} It is important to remark that the portfolio rate $\varphi^{(\theta)}$ does not depend on the initial capital $x$ and it depends on the portfolio $\theta$ only. Furthermore, the triplet $(\theta, x, \varphi^{(\theta)})$ also satisfies $W^{\theta}=x{\cal E}(\varphi^{(\theta)}\bigcdot X)$.\\ {\rm{(b)}} If ${\cal D}(X,\mathbb H)\not=\emptyset$, then for any pair $(\theta, x)$ with $W^{\theta}>0$ we also have $W^{\theta}_{-}>0$ and the portfolio rate exists. Thus, in this case, there is no loss of generality in assuming $x=1$ when addressing the problem (\ref{LogInfinite}). \\ {\rm{(c)}} By comparing Definitions \ref{DeflatorDefinition} and \ref{NP/LogOP}, it is clear that, if the num\'eraire portfolio rate $\widetilde\phi$ for $(X,\mathbb H)$ exists, then ${\widetilde Z}:=1/{\cal E}(\widetilde\phi\bigcdot X)$ belongs to ${\cal D}(X,\mathbb H)$.\end{remark} Below, we elaborate the principal result of this section. \begin{theorem}\label{NumeraireGeneral} Suppose that $G>0$.
Then the following assertions hold.\\ {\rm{(a)}} The num\'eraire portfolio for $(S,\mathbb F)$ exists if and only if the num\'eraire portfolio for $(S^{\tau}, \mathbb G)$ exists also.\\ {\rm{(b)}} If $\widetilde\varphi$ is the num\'eraire portfolio rate for $(S,\mathbb F)$, then ${\widetilde\varphi}I_{\Lbrack0,\tau\Lbrack}$ is the num\'eraire portfolio rate for $(S^{T\wedge\tau},\mathbb G, {\widetilde Q}_T)$, for any $T\in (0,+\infty)$, where ${\widetilde Q}_T$ is given by (\ref{Qtilde}).\\ {\rm{(c)}} If ${\widetilde\varphi}^{\mathbb G}$ is the num\'eraire portfolio rate for $(S^{\tau},\mathbb G)$, then $^{p,\mathbb F}({\widetilde\varphi}^{\mathbb G}I_{\Lbrack0,\tau\Lbrack})/G_{-}$ is the num\'eraire portfolio rate for $(S^{\sigma}, \mathbb F, {\widehat Q}_{\sigma})$, for any $\mathbb F$-stopping time $\sigma$ such that ${\cal E}(G_{-}^{-1}\bigcdot m)^{\sigma}$ is a martingale, where \begin{eqnarray}\label{Qsigma} d{\widehat Q}_{\sigma}:={\cal E}_{\sigma}(G_{-}^{-1}\bigcdot m)dP.\end{eqnarray} \end{theorem} Herein, we discuss some of the ingredients of the theorem and importantly its meaning and contributions, while its proof will be given afterwards. In virtue of \cite[Proposition 3.4 and Theorem 4.2]{ChoulliYansori1} (applied to the constant process $S\equiv 1$), the process ${\cal E}(-G_{-}^{-1}\bigcdot {\cal T}(m))=1/{\cal E}(G_{-}^{-1}\bigcdot m)^{\tau}$ is a $\mathbb G$-martingale, and hence ${\widetilde Q}_T$ is a well defined probability for any $T\in (0,+\infty)$. Thanks to a combination of \cite[Theorem 2.8]{ChoulliDengMa} (see also \cite{KardarasKaratzas}) with \cite[Theorem 2.15]{ACDJ1} and \cite[Theorem 2.4 or 2.7]{ACDJ3}, it is clear that under the condition $G>0$, the proof of assertion (a) follows immediately.
Hence, the principal contribution of our theorem lies in describing precisely and explicitly how num\'eraire portfolio for $(S^{\tau}, Q^{\mathbb G}, \mathbb G)$ can be obtained from num\'eraire portfolio of $(S, Q^{\mathbb F}, \mathbb F)$ and vice-versa, where $Q^{\mathbb G}$ and $Q^{\mathbb F}$ are probabilities on ${\cal G}_T$ and ${\cal F}_T$ respectively, that somehow quantify stochastically the {\it correlated risks} borne by $\tau$. \begin{proof}{\it of Theorem \ref{NumeraireGeneral}.} In virtue of the above discussion, this proof deals with assertions (b) and (c) only, and it will be given in two parts.\\ {\bf Part 1.} Here, we prove assertion (b). Suppose that num\'eraire portfolio for $(S,\mathbb F)$ exists, and denote by $\widetilde\varphi$ its num\'eraire portfolio rate. Thus, for any $\varphi\in {\cal L}(S, \mathbb F)\cap L(S, \mathbb F)$, the process \begin{eqnarray*} X:={{{\cal E}(\varphi\bigcdot S)/{\cal E}(\widetilde\varphi\bigcdot S)}}\quad\mbox{is an $\mathbb F$-supermartingale}.\end{eqnarray*} Hence, in virtue of Proposition \ref{Hzero4Log(Z)}-(a), there exist unique $M\in {\cal M}_{loc}(\mathbb F)$ and a nondecreasing and $\mathbb F$-predictable process $V$ such that $X={\cal E}(M)\exp(-V)$. Therefore, due to Theorem \ref{GeneralDeflators}-(b) (see also \cite[Proposition 3.4]{ChoulliYansori1}) that states that ${\cal E}(M)^{\tau}/{\cal E}(G_{-}^{-1}\bigcdot m)^{\tau}$ is a $\mathbb G$-local martingale, we deduce that $X^{\tau}/{\cal E}(G_{-}^{-1}\bigcdot m)^{\tau}$ is a $\mathbb G$-supermartingale, or equivalently $X^{\tau\wedge T}$ is a $\mathbb G$-supermartingale under ${\widetilde Q}_T$ given in (\ref{Qtilde}), for any $T\in (0,+\infty)$. As $\varphi$ spans the set $ {\cal L}(S, \mathbb F)\cap L(S, \mathbb F)$, the proof of assertion (b) follows from Lemma \ref{PortfolioGtoF}-(b) and (c).\\ {\bf Part 2.} This part proves assertion (c).
Suppose that num\'eraire portfolio for $(S^{\tau},\mathbb G)$ exists, and denote by $\widetilde\varphi^{\mathbb G}$ its portfolio rate. Then put \begin{eqnarray*} \varphi^{\mathbb F}:=^{p,\mathbb F}(\widetilde\varphi^{\mathbb G} I_{\Lbrack0,\tau\Lbrack})/G_{-},\end{eqnarray*} and remark that $\varphi^{\mathbb F}=\widetilde\varphi^{\mathbb G} $ on $\Lbrack0,\tau\Lbrack$, and $\varphi^{\mathbb F}\in {\cal L}(S,\mathbb F)\cap L(S,\mathbb F)$ due to Lemma \ref{PortfolioGtoF}-(b) and (c). Then for any $\varphi\in {\cal L}(S,\mathbb F)\cap L(S,\mathbb F)$, the process ${\cal E}(\varphi\bigcdot S)^{\tau}/{\cal E}(\varphi^{\mathbb F}\bigcdot S)^{\tau}$ is a $\mathbb G$-supermartingale. As a result, for an $\mathbb F$-stopping time $\sigma$ such that ${\cal E}(G_{-}^{-1}\bigcdot m)^{\sigma}$ is martingale and for any $0\leq s\leq t\leq\sigma$, we have \begin{eqnarray*} E\left[{{{\cal E}_{t\wedge\tau}(\varphi\bigcdot S)}\over{{\cal E}_{t\wedge\tau}(\varphi^{\mathbb F}\bigcdot S)}}\Big|\ {\cal G}_s\right]I_{\{\tau>s\}}\leq {{{\cal E}_{s}(\varphi\bigcdot S)}\over{{\cal E}_{s}(\varphi^{\mathbb F}\bigcdot S)}}I_{\{\tau>s\}}. \end{eqnarray*} By taking conditional expectation with respect to ${\cal F}_s$ on both sides, we obtain \begin{eqnarray}\label{Inequality200} E\left[{{{\cal E}_{t\wedge\tau}(\varphi\bigcdot S)}\over{{\cal E}_{t\wedge\tau}(\varphi^{\mathbb F}\bigcdot S)}}I_{\{\tau>s\}}\Big|\ {\cal F}_s\right]\leq G_s{{{\cal E}_{s}(\varphi\bigcdot S)}\over{{\cal E}_{s}(\varphi^{\mathbb F}\bigcdot S)}}. 
\end{eqnarray} It is clear that we always have \begin{eqnarray*} {{{\cal E}_{t\wedge\tau}(\varphi\bigcdot S)}\over{{\cal E}_{t\wedge\tau}(\varphi^{\mathbb F}\bigcdot S)}}I_{\{\tau>s\}} &&={{{\cal E}_{t}(\varphi\bigcdot S)}\over{{\cal E}_{t}(\varphi^{\mathbb F}\bigcdot S)}}I_{\{\tau>t\}}+\int_s^t{{{\cal E}_{u}(\varphi\bigcdot S)}\over{{\cal E}_{u}(\varphi^{\mathbb F}\bigcdot S)}}dD_u.\end{eqnarray*} By inserting this and $G=G_0{\cal E}(-{\widetilde G}^{-1}\bigcdot D^{o,\mathbb F}){\cal E}(G_{-}^{-1}\bigcdot m)$ in (\ref{Inequality200}), we derive \begin{eqnarray*} &&E_{{\widehat Q}_{\sigma}}\left[{{{\cal E}_{t}(\varphi\bigcdot S)}\over{{\cal E}_{t}(\varphi^{\mathbb F}\bigcdot S)}}{\cal E}_t(-{1\over{\widetilde G}}\bigcdot D^{o,\mathbb F})+\int_s^t {{{\cal E}_{u}(\varphi\bigcdot S)}\over{{\cal E}_{u}(\varphi^{\mathbb F}\bigcdot S)}}{\cal E}_u(-{1\over{\widetilde G}}\bigcdot D^{o,\mathbb F}){{dD^{o,\mathbb F}_u}\over{G_u}}\Big|\ {\cal F}_s\right]\\ &&\leq {{{\cal E}_{s}(\varphi\bigcdot S)}\over{{\cal E}_{s}(\varphi^{\mathbb F}\bigcdot S)}}{\cal E}_s(-{\widetilde G}^{-1}\bigcdot D^{o,\mathbb F}). \end{eqnarray*} Then put $X_u:= {\cal E}_{u\wedge\sigma}(\varphi\bigcdot S)/{\cal E}_{u\wedge\sigma}(\varphi^{\mathbb F}\bigcdot S)$ for $u\geq 0$, and deduce that the above inequality is equivalent to the fact that \begin{eqnarray*} X{\cal E}(-{1\over{\widetilde G}}\bigcdot D^{o,\mathbb F})^{\sigma}+\left({{X}\over{G}}{\cal E}_u(-{1\over{\widetilde G}}\bigcdot D^{o,\mathbb F})\bigcdot D^{o,\mathbb F}\right)^{\sigma}\ \mbox{is an $(\mathbb F, \widehat Q_{\sigma})$-supermartingale.}\end{eqnarray*} By combining ${\cal E}(-{\widetilde G}^{-1}\bigcdot D^{o,\mathbb F})/G={\cal E}_{-}(-{\widetilde G}^{-1}\bigcdot D^{o,\mathbb F})/{\widetilde G}$ with the integration by parts formula, we deduce that the above fact is equivalent to ${\cal E}_{-}(-{\widetilde G}^{-1}\bigcdot D^{o,\mathbb F})\bigcdot X$ being an $\mathbb F$-supermartingale under $ \widehat Q_{\sigma}$.
Therefore, we conclude that $X$ is a nonnegative $\mathbb F$-local supermartingale under $ \widehat Q_{\sigma}$, and hence assertion (c) follows immediately. This ends the proof of the theorem.\qed \end{proof} \begin{corollary}\label{ParticularCases} The following assertions hold.\\ {\rm{(a)}} Suppose ${\cal E}(G_{-}^{-1}\bigcdot m)$ is a martingale. Then the num\'eraire portfolio for $(S^{\tau},\mathbb G)$ exists if and only if for any $T\in (0,+\infty)$, the num\'eraire portfolio for $(S^T,\mathbb F, {\widehat Q}_T)$ exists, where ${\widehat Q}_T$ is given by (\ref{Qsigma}). Furthermore, both portfolios coincide on $\Lbrack0,\tau\wedge T\Lbrack$. \\ {\rm{(b)}} Suppose $\tau$ is a pseudo-stopping time (i.e. $E[M_{\tau}]=E[M_0]$ for any bounded $\mathbb F$-martingale). Then num\'eraire portfolio for $(S^{\tau},\mathbb G)$ exists if and only if num\'eraire portfolio for $(S,\mathbb F)$ exists, and both portfolios coincide on $\Lbrack0,\tau\Lbrack$. \end{corollary} \begin{proof} Assertion (a) is a direct consequence of Theorem \ref{NumeraireGeneral}, while assertion (b) follows from combining assertion (a) and the fact that when $\tau$ is a pseudo-stopping time then $m\equiv m_0$ and hence ${\cal E}(G_{-}^{-1}\bigcdot m)=1$ and ${\widehat Q}_T=P$ on ${\cal F}_T$ for any $T\in(0,+\infty)$. This ends the proof of the corollary.\qed \end{proof} \section{Log-optimal portfolio for $(S^{\tau},\mathbb G)$: Existence and duality}\label{section4} This section quantifies the impact of $\tau$ on the existence of log-optimal portfolio and hence answers (\ref{Q2}), and it is not technical at all. In virtue of \cite[Theorem 1]{ChoulliYansori2}, see Theorem \ref{LemmaCrucial} in the appendix, the practical and easy manner to deal with log-optimal portfolio is to look at the dual set, i.e.
the set ${\cal D}_{log}(S^{\tau},\mathbb G)$ given by (\ref{DeflatorsLOG}), or equivalently to address the dual minimization problem \begin{eqnarray}\label{dualproblem} \min_{Z\in {\cal D}_{log}(S^{\tau},\mathbb G)}E\left[-\ln(Z_T)\right]. \end{eqnarray} We start by defining Hellinger processes, previously defined in \cite{ChoulliStricker2005,ChoulliStricker2007}. These processes appear naturally in quantifying the information borne in $\tau$. \begin{definition}\label{Hellinger} Let $N$ be an $\mathbb H$-local martingale such that $1+\Delta N>0$.\\ 1) We call a Hellinger process of order zero for $N$, denoted by $h^{(0)}(N,\mathbb H)$, the process $ h^{(0)}(N,\mathbb H):=\left( H^{(0)}(N,\mathbb H)\right)^{p,\mathbb H}$ when this projection exists, where \begin{eqnarray}\label{HellingerLog} H^{(0)}(N,\mathbb H):={1\over{2}}\langle N^c\rangle^{\mathbb H}+\sum\left(\Delta N-\ln(1+\Delta N)\right).\end{eqnarray} 2) We call an entropy-Hellinger process for $N$, denoted by $h^{(E)}(N,\mathbb H)$, the process $h^{(E)}(N,\mathbb H):=\left( H^{(E)}(N,\mathbb H) \right)^{p,\mathbb H}$ when this projection exists, where \begin{eqnarray}\label{HellingerExpo} H^{(E)}(N,\mathbb H):={1\over{2}}\langle N^c\rangle^{\mathbb H}+\sum\left((1+\Delta N)\ln(1+\Delta N)-\Delta N\right).\end{eqnarray} 3) Let $Q^1$ and $Q^2$ be two probabilities such that $Q^2\ll Q^1$ and $T\in (0,+\infty)$. If $Q^i_T:=Q^i\big|_{{\cal H}_T}$ denote the restriction of $Q^i$ on ${\cal H}_T$ ($i=1,2$), then \begin{eqnarray}\label{entropy} {\cal H}_{\mathbb H}(Q^1_T\big| Q^2_T):=E_{Q^2}\left[{{dQ^1_T}\over{dQ^2_T}}\ln\left({{dQ^1_T}\over{dQ^2_T}}\right)\right].\end{eqnarray} \end{definition} Now, we are in the stage of stating one of our principal results of this section. \begin{theorem}\label{LogOPexistence} Suppose $G>0$. Then the following assertions are equivalent.\\ {\rm{(a)}} Log-optimal portfolio for $(S^{\tau},\mathbb G)$ exists, and $\widetilde\varphi^{\mathbb G}$ denotes its portfolio rate. 
\\ {\rm{(b)}} There exist $K\in{\cal M}_{loc}(\mathbb F)$ and a nondecreasing and $\mathbb F$-predictable process $V$ such that $K_0=V_0=0$, ${\cal E}(K)\exp(-V)\in {\cal D}(S,\mathbb F)$, and \begin{eqnarray*} E\left[\left({\widetilde G}\bigcdot (V+H^{(0)}(K,P))\right)_T+(G_{-}\bigcdot h^{(E)}(G_{-}^{-1}\bigcdot m,P))_T-\langle K,m\rangle^{\mathbb F}_T\right]<+\infty.\end{eqnarray*} {\rm{(c)}} The dual problem (\ref{dualproblem}) admits a unique solution, i.e. there exists a unique ${\widetilde Z}^{\mathbb G}\in {\cal D}_{log}(S^{\tau},\mathbb G)$ such that \begin{eqnarray*}\label{MinimizationLog} \min_{Z\in {\cal D}(S^{\tau},\mathbb G)}E\Bigl[-\ln(Z_T)\Bigr]= E\Bigl[-\ln({\widetilde Z}^{\mathbb G}_T)\Bigr]. \end{eqnarray*} {\rm{(d)}} There exists ${\widetilde Z}^{\mathbb F}\in {\cal D}(S, \mathbb F)$ such that $({\widetilde Z}^{\mathbb F})^{\tau}/{\cal E}(G_{-}^{-1}\bigcdot m)^{\tau}\in {\cal D}_{log}(S^{\tau},\mathbb G)$ and \begin{eqnarray}\label{MinimiDualEquivLog} \inf_{Z^{\mathbb G}\in {\cal D}(S^{\tau},\mathbb G)}E\Bigl[-\ln(Z^{\mathbb G}_T)\Bigr]=E\Bigl[-\ln({\widetilde Z}^{\mathbb F}_{T\wedge{\tau}}/{\cal E}_{\tau\wedge T}(G_{-}^{-1}\bigcdot m))\Bigr]. \end{eqnarray} Furthermore, when the triplet $( \widetilde\varphi^{\mathbb G}, {\widetilde Z}^{\mathbb G}, {\widetilde Z}^{\mathbb F})$ exists, it satisfies the following \begin{eqnarray}\label{Relationship} {\cal E}( \widetilde\varphi^{\mathbb G}\bigcdot S^{\tau})={1\over{{\widetilde Z}^{\mathbb G}}}={{{\cal E}(G_{-}^{-1}\bigcdot m)^{\tau}}\over{({\widetilde Z}^{\mathbb F})^{\tau}}}.\end{eqnarray} \end{theorem} The importance of this theorem resides in assertions (b) and (d) and the second equality in (\ref{Relationship}). Assertion (b) gives us a practical way to check the existence of log-optimal portfolio for $(S^{\tau},\mathbb G)$, while assertion (d) and the second equality in (\ref{Relationship}) describe the structures of the optimal deflator dual to log-optimal portfolio. 
In fact, in virtue of \cite{ChoulliYansori1} --see also Theorem \ref{GeneralDeflators}--, any deflator $Z^{\mathbb G}$ for $(S^{\tau},\mathbb G)$ is the product of three orthogonal positive local martingales that are deflators for three orthogonal risks respectively, and is represented by a triplet of $\mathbb F$-observable processes $(Z^{\mathbb F}, \varphi^{(0)}, \varphi^{(pr)})$. Assertion (d) claims that the optimal deflator does not bear ``pure default'' risks at all, and its triplet characterization takes the form of $({\widetilde Z}^{\mathbb F}, 0,0)$. \begin{proof}{\it of Theorem \ref{LogOPexistence}} The proof of (a)$\Longleftrightarrow$ (c) is a direct application of Theorem \ref{LemmaCrucial} for the model $(X,\mathbb H)=(S^{\tau}, \mathbb G)$ (see the equivalence (c) $\Longleftrightarrow$ (d) of this latter theorem), while (d)$\Longrightarrow$ (c) is obvious. Thus, the rest of the proof focuses on the remaining equivalences and implications. To this end, we start by deriving some important and useful remarks that we summarize in the following lemma.
\begin{lemma}\label{Lemma4proof} The following assertions hold.\\ {\rm{(i)}} For any $Z^{\mathbb G}\in {\cal D}_{log}(S^{\tau},\mathbb G)$, there exists $Z^{\mathbb F}\in {\cal D}(S,\mathbb F)$ such that \begin{eqnarray}\label{DominationPrincipal} E\left[ -\ln( Z^{\mathbb G}_T)\right]\geq E\Bigl[-\ln\left({{Z^{\mathbb F}_{T\wedge\tau}/{\cal E}_{T\wedge\tau}(G_{-}^{-1}\bigcdot m)}}\right)\Bigr].\end{eqnarray} {\rm{(ii)}} We always have \begin{eqnarray}\label{MinimizationReduction} \inf_{ Z^{\mathbb G}\in{\cal D}(S^{\tau},\mathbb G)} E\left[ -\ln( Z^{\mathbb G}_T)\right]= \inf_{ Z^{\mathbb F}\in{\cal D}(S,\mathbb F)}E\Bigl[-\ln\left({{Z^{\mathbb F}_{T\wedge\tau}/{\cal E}_{T\wedge\tau}(G_{-}^{-1}\bigcdot m)}}\right)\Bigr].\end{eqnarray} {\rm{(iii)}} If $Z^{\mathbb F}\in {\cal D}(S,\mathbb F)$ such that $(Z^{\mathbb F})^{\tau}/{\cal E}(G_{-}^{-1}\bigcdot m)^{\tau}\in {\cal D}_{log}(S^{\tau},\mathbb G)$, there exist $K\in {\cal M}_{0,loc}(\mathbb F)$ and a nondecreasing and $\mathbb F$-predictable process $V$ such that $V_0=K_0=0$, $Z^{\mathbb F}:={\cal E}(K)\exp(-V)$, and \begin{eqnarray}\label{Equality100} &&E\left[-\ln\left({{Z^{\mathbb F}_{T\wedge\tau}}\over{{\cal E}_{T\wedge\tau}(G_{-}^{-1}\bigcdot m)}}\right)\right]=\\ &&E\left[(G_{-}\bigcdot V+{\widetilde G}\bigcdot H^{(0)}(K,P))_T-\langle K, m\rangle^{\mathbb F}_T+G_{-}\bigcdot h^{(E)}(G_{-}^{-1}\bigcdot m,P)_T\right].\nonumber\end{eqnarray} \end{lemma} Then the proof of (c)$\Longrightarrow$ (d) follows from combining (\ref{MinimizationReduction}) with assertion (i) of the lemma applied to ${\widetilde Z}^{\mathbb G}$. Therefore, the proof of the theorem will end as soon as we prove (c)$\Longleftrightarrow$ (b). To this end, due to Theorem \ref{LemmaCrucial}, assertion (c) is equivalent to ${\cal D}_{log}(S^{\tau},\mathbb G)\not=\emptyset$.
Thanks to (\ref{MinimizationReduction}) again, this latter claim holds if and only if there exists $Z^{\mathbb F}\in {\cal D}(S, \mathbb F)$ such that $(Z^{\mathbb F})^{\tau}/{\cal E}(G_{-}^{-1}\bigcdot m)^{\tau}\in {\cal D}_{log}(S^{\tau},\mathbb G)$. Thus, the equivalence (c)$\Longleftrightarrow$ (b) follows immediately from combining these facts with Lemma \ref{Lemma4proof}-(iii). Therefore, the rest of this proof focuses on proving Lemma \ref{Lemma4proof} in two parts.\\ {\bf Part 1.} Here we prove assertions (i) and (ii) of Lemma \ref{Lemma4proof}. On the one hand, for any $Z^{\mathbb G}\in{\cal D}(S^{\tau},\mathbb G)$, we apply Theorem \ref{GeneralDeflators} and deduce the existence of a triplet $\left(Z^{\mathbb F}, \varphi^{(0)}, \varphi^{(pr)}\right)$ that belongs to ${\cal D}(S, \mathbb F)\times {\cal I}^o_{loc}(N^{\mathbb G},\mathbb G)\times L^1_{loc}({\rm{Prog}}(\mathbb F),P\otimes D)$ and satisfies $$ \varphi^{(pr)}>-1,\quad P\otimes D\mbox{-a.e.},\quad -{{\widetilde G}\over{ G}}<\varphi^{(0)}<{{\widetilde G}\over{\widetilde G -G}},\quad\quad\quad P\otimes D^{o,\mathbb F}\mbox{-a.e.} $$ and \begin{eqnarray}\label{ZG2ZF} Z^{\mathbb G}={{(Z^{\mathbb F})^{\tau}}\over{{\cal E}(G_{-}^{-1}\bigcdot m)^{\tau}}}{\cal E}(\varphi^{(0)}\bigcdot N^{\mathbb G}){\cal E}(\varphi^{(pr)}\bigcdot D).\end{eqnarray} This implies that $$ -\ln( Z^{\mathbb G})=-\ln({{(Z^{\mathbb F})^{\tau}/{\cal E}(G_{-}^{-1}\bigcdot m)^{\tau}}})-\ln( {\cal E}(\varphi^{(0)}\bigcdot N^{\mathbb G}))-\ln({\cal E}(\varphi^{(pr)}\bigcdot D)).$$ Hence, thanks to Proposition \ref{Hzero4Log(Z)}-(c), we deduce that $Z^{\mathbb G}\in {\cal D}_{log}(S^{\tau},\mathbb G)$ if and only if $(Z^{\mathbb F})^{\tau}/{\cal E}(G_{-}^{-1}\bigcdot m)^{\tau}$ belongs to ${\cal D}_{log}(S^{\tau},\mathbb G)$ and $-\ln\left({\cal E}(\varphi^{(0)}\bigcdot N^{\mathbb G})\right)$ and $-\ln\left({\cal E}(\varphi^{(pr)}\bigcdot D)\right)$ are uniformly integrable $\mathbb G$-submartingales, and
(\ref{DominationPrincipal}) follows immediately. This proves assertion (i) of the lemma. On the other hand, thanks to Theorem \ref{GeneralDeflators}-(c), the process $Z^{\tau}/{\cal E}(G_{-}^{-1}\bigcdot m)^{\tau}$ always belongs to ${\cal D}(S^{\tau},\mathbb G)$ as soon as $Z\in {\cal D}(S,\mathbb F)$, and hence $$\inf_{Z^{\mathbb G}\in {\cal D}(S^{\tau},\mathbb G)}E\Bigl[-\ln(Z^{\mathbb G}_T)\Bigr]\leq \inf_{Z\in {\cal D}(S,\mathbb F)}E\Bigl[-\ln(Z_{T\wedge{\tau}}/{\cal E}_{\tau\wedge T}(G_{-}^{-1}\bigcdot m))\Bigr].$$ Therefore, by combining this latter inequality with assertion (i) of the lemma, we conclude that (\ref{MinimizationReduction}) always holds, and assertion (ii) is proved. \\ {\bf Part 2.} Here, we prove assertion (iii) of the lemma. To this end, we consider $Z^{\mathbb F}\in {\cal D}(S,\mathbb F)$. Thus, thanks to Proposition \ref{Hzero4Log(Z)}-(a), we deduce the existence of $K\in {\cal M}_{0,loc}(\mathbb F)$ and a nondecreasing and $\mathbb F$-predictable process $V$ such that $V_0=K_0=0$ and $Z^{\mathbb F}:={\cal E}(K)\exp(-V)$.
Therefore, we derive \begin{eqnarray}\label{Optimization1} &&-\ln\left({{(Z^{\mathbb F})^{\tau}}\over{{\cal E}(G_{-}^{-1}\bigcdot m)^{\tau}}}\right)=-\ln((Z^{\mathbb F})^{\tau})+\ln( {\cal E}(G_{-}^{-1}\bigcdot m)^{\tau})\nonumber\\ &&={\mathbb G}\mbox{-local martingale}-{{I_{\Lbrack0,\tau\Lbrack}}\over{G_{-}}}\bigcdot \langle K, m\rangle^{\mathbb F}+H^{(0)}(K,P)^{\tau}\nonumber\\ &&+V^{\tau}+{{I_{\Lbrack0,\tau\Lbrack}}\over{G_{-}^2}}\bigcdot \langle m\rangle^{\mathbb F}-H^{(0)}(G_{-}^{-1}\bigcdot m,P)^{\tau}.\end{eqnarray} Remark that the two processes $I_{\Rbrack0,\tau\Lbrack}G_{-}^{-2}\bigcdot \langle m\rangle^{\mathbb F}$ and $H^{(0)}(G_{-}^{-1}\bigcdot m,P)^{\tau}$ have variations that are $\mathbb F$-locally integrable and \begin{eqnarray}\label{HellingerE} &&\left({{I_{\Lbrack0,\tau\Lbrack}}\over{G_{-}^2}}\bigcdot \langle m\rangle^{\mathbb F}-H^{(0)}(G_{-}^{-1}\bigcdot m,P)^{\tau}\right)^{p,\mathbb F}\nonumber\\ &&={1\over{G_{-}}}\bigcdot \langle m\rangle^{\mathbb F}-\left({\widetilde G}\bigcdot H^{(0)}({1\over{G_{-}}}\bigcdot m,P)\right)^{p,\mathbb F}\nonumber\\ &&={1\over{2G_{-}}}\bigcdot \langle m^c\rangle^{\mathbb F}+G_{-}\bigcdot \left(\sum ({{\Delta m}\over{G_{-}}})^2\right)^{p,\mathbb F}-\left(\sum {\widetilde G}({{\Delta m}\over{G_{-}}}-\ln(1+{{\Delta m}\over{G_{-}}}))\right)^{p,\mathbb F}\nonumber\\ &&={1\over{2G_{-}}}\bigcdot \langle m^c\rangle^{\mathbb F}+\left(\sum G_{-}\left((1+{{\Delta m}\over{G_{-}}})\ln(1+{{\Delta m}\over{G_{-}}}) -{{\Delta m}\over{G_{-}}}\right)\right)^{p,\mathbb F}\nonumber\\ &&=G_{-}\bigcdot h^{(E)}(G_{-}^{-1}\bigcdot m,P). \end{eqnarray} Thus, by combining this with (\ref{Optimization1}), Proposition \ref{Hzero4Log(Z)}-(b), and the easy fact that $U\in {\cal A}^+(\mathbb G)$ iff $U^{p,\mathbb F}\in {\cal A}^+(\mathbb F)$ for any nondecreasing process $U$, the equality (\ref{Equality100}) follows immediately. This ends the proof of the lemma.
\qed \end{proof} At the practical level, one starts with an initial model $(S,\mathbb F)$ admitting log-optimal portfolio, and tries to describe models of $\tau$ allowing $(S^{\tau},\mathbb G)$ to admit log-optimal portfolio. This boils down to the question (\ref{Q3}), and the answer to this follows from Theorem \ref{LogOPexistence} and is given by the following. \begin{theorem}\label{thoeremAppl1} Suppose $G>0$, and log-optimal portfolio for $(S,\mathbb F)$ exists. Then log-optimal portfolio for $(S^{\tau},\mathbb G)$ exists if and only if \begin{eqnarray}\label{EntropyCondition} {\cal H}_{\mathbb G}(P_T\big| {\widetilde Q}_T)=E\left[ (G_{-}\bigcdot h^{(E)}(G_{-}^{-1}\bigcdot m,P))_T\right]<+\infty. \end{eqnarray} Here $h^{(E)}(N,P)$, for any $N\in {\cal M}_{0,loc}(\mathbb F)$ with $1+\Delta N\geq 0$, and the entropy ${\cal H}_{\mathbb G}(P_T\big| {\widetilde Q}_T)$ are given by Definition \ref{Hellinger}, and ${\widetilde Q}_T$ is defined in (\ref{Qtilde}). \end{theorem} \begin{proof} Due to Theorem \ref{LemmaCrucial} and Proposition \ref{Hzero4Log(Z)}-(b), $(S,\mathbb F)$ admits log-optimal portfolio iff there exist $K\in {\cal M}_{loc}(\mathbb F)$ and a nondecreasing and $\mathbb F$-predictable process $V$ such that $K_0=V_0=0$, $Z:={\cal E}(K)\exp(-V)\in {\cal D}(S,\mathbb F)$, and $$E[-\ln(Z_T)]=E[V_T+H^{(0)}(K,P)_T]<+\infty.$$ Thus, due to Lemma \ref{H0toH1martingales}, we conclude that $\sqrt{[K,K]}$ is an integrable process ( or equivalently $K$ is a martingale such that $\displaystyle\sup_{0\leq t\leq T}\vert K_t\vert\in L^1(P)$), and hence the process $\langle K,m\rangle^{\mathbb F}$ has integrable variation as $m$ is a BMO $\mathbb F$-martingale. I.e. 
there exists a positive constant $C>0$ such that for any $\mathbb F$-stopping times $\sigma_1$ and $\sigma_2$ such that $\sigma_1\leq \sigma_2$, we have \begin{eqnarray*} (\Delta m_{\sigma_1})^2+ E\left[[m,m]_{\sigma_2}-[m,m]_{\sigma_1}\big|{\cal F}_{\sigma_1}\right]\leq C\quad P\mbox{-a.s.}\end{eqnarray*} Therefore, the process $G_{-}\bigcdot V+({\widetilde G}\bigcdot H^{(0)}(K,P))^{p,\mathbb F}-\langle K,m\rangle^{\mathbb F}$ belongs to ${\cal A}(\mathbb F)$. Thus, in virtue of Theorem \ref{LogOPexistence}, log-optimal portfolio for $(S^{\tau},\mathbb G)$ exists if and only if the second condition in (\ref{EntropyCondition}) holds. Thus, the proof of the theorem follows immediately from the following two equalities \begin{eqnarray}\label{equa401} {\cal H}_{\mathbb G}\left(P_T\big|{\widetilde Q}_T\right)&&=E\left[\ln({\cal E}_{T\wedge\tau}( {1\over{G_{-}}}\bigcdot m))\right]=E\left[G_{-}\bigcdot h^{(E)}({1\over{G_{-}}}\bigcdot m, \mathbb F)_T\right].\hskip 0.85cm \end{eqnarray} The last equality is a direct result of (\ref{HellingerE}). This proves the theorem.\qed \end{proof} In the remaining part of this section, we {\it naturally} connect log-optimal portfolio of $(S^{\tau},\mathbb G)$ with log-optimal portfolio of $(S,\mathbb F)$ under adequate change of probability, and with the optimal portfolio for the economic model $(S,\mathbb F, \widetilde{U})$ where $\widetilde U$ is a random field utility that will be specified. To this end, we define the set of admissible portfolios for $\widetilde U$ as follows \begin{eqnarray}\label{AdmissibleSet4Utilde} \hskip -0.70cm{\cal A}_{adm}(S, \widetilde U):=\left\{\theta\in L(S, \mathbb F)\Bigg|\begin{array}{lll}1+\theta\bigcdot S>0,\\ E\left[\max(0,-{\widetilde U}(T, 1+(\theta\bigcdot S)_T))\right]<+\infty\end{array}\right\}.
\end{eqnarray} \begin{theorem}\label{theorem3.8}Suppose $G>0$ and \begin{eqnarray}\label{FiniteEntropy} E\Bigl[{\cal E}_T(G_{-}^{-1}\bigcdot m)\ln\left({\cal E}_T(G_{-}^{-1}\bigcdot m)\right)\Bigr]<+\infty.\end{eqnarray} Then the following assertions are equivalent and sufficient for the existence of log-optimal portfolio for the model $(S^{\tau},\mathbb G)$.\\ {\rm{(a)}} $({\widetilde U}, S, \mathbb F)$ admits an optimal portfolio, with ${\widetilde U}(t,x):={\cal E}_t(G_{-}^{-1}\bigcdot m)\ln(x)$ for $ x>0$, i.e. there exists $\theta^*\in {\cal A}_{adm}(S, \widetilde U)$ such that \begin{eqnarray}\label{Utilde} \max_{\theta\in{\cal A}_{adm}(S,\widetilde U)} E\left[{\widetilde U}(T, 1+(\theta\bigcdot S)_T)\right]= E\left[{\widetilde U}(T, 1+(\theta^*\bigcdot S)_T)\right].\end{eqnarray} {\rm{(b)}} Log-optimal portfolio for $(S, {\widehat Q}, \mathbb F)$ exists, where $d{\widehat Q}:={\cal E}_T(G_{-}^{-1}\bigcdot m)dP$.\\ Furthermore, the three portfolios coincide on $\Lbrack0,\tau\Lbrack$ when they exist. \end{theorem} \begin{proof} Remark that, by combining \cite[Proposition 3.6]{ChoulliStricker2005} or \cite[Theorem 2.9]{ChoulliStricker2006} and $G_{-}=G_0{\cal E}_{-}(-{\widetilde G}^{-1}\bigcdot D^{o,\mathbb F}){\cal E}_{-}(G_{-}^{-1}\bigcdot m)\leq {\cal E}_{-}(G_{-}^{-1}\bigcdot m)$, we derive \begin{eqnarray*} E\Bigl[{\cal E}_T(G_{-}^{-1}\bigcdot m)\ln\left({\cal E}_T(G_{-}^{-1}\bigcdot m)\right)\Bigr]&&=E\left[\int_0^T{\cal E}_{s-}(G_{-}^{-1}\bigcdot m) dh^{(E)}_s(G_{-}^{-1}\bigcdot m, P)\right]\\ &&\geq E\left[\left(G_{-}\bigcdot h^{(E)}(G_{-}^{-1}\bigcdot m, P)\right)_T\right]. \end{eqnarray*} Thus, if (\ref{FiniteEntropy}) holds, then the condition (\ref{EntropyCondition}) holds, ${\cal E}(G_{-}^{-1}\bigcdot m)^T$ becomes a uniformly integrable martingale, and $d{\widehat Q}:={\cal E}_T(G_{-}^{-1}\bigcdot m)dP$ is a well defined probability measure with finite entropy.
Therefore, on the one hand, direct calculations show that ${\cal A}_{adm}(S,\widetilde U)=\Theta(S, \mathbb F, {\widehat Q})$. On the other hand, for any $\theta\in{\cal A}_{adm}(S,\widetilde U)$, \begin{eqnarray*} E\left[{\widetilde U}(T, 1+(\theta\bigcdot S)_T)\right]=E^{{\widehat Q}}\left[\ln( 1+(\theta\bigcdot S)_T)\right].\end{eqnarray*} This proves the equivalence between assertions (a) and (b). The rest of this proof focuses on proving that these assertions imply the existence of log-optimal portfolio for $(S^{\tau}, \mathbb G)$. To this end, we suppose that log-optimal portfolio of $(S, \mathbb F, {\widehat Q})$ exists. Thanks to Theorem \ref{LemmaCrucial}, this fact is equivalent to the existence of $Z:={\cal E}(K)e^{-V}\in {\cal D}(S, \mathbb F)$, where $K\in {\cal M}_{loc}(\mathbb F)$ and $V$ is a nondecreasing and $\mathbb F$-predictable process such that $Z/ {\cal E}(G_{-}^{-1}\bigcdot m)\in {\cal D}_{log}(S,\mathbb F, {\widehat Q})$. Due to (\ref{FiniteEntropy}), $K-\langle K, G_{-}^{-1}\bigcdot m\rangle^{\mathbb F}\in {\cal M}_{loc}(\mathbb F, {\widehat Q})$ and direct It\^o calculations, we derive \begin{eqnarray*} &&E^{\widehat Q}\left[-\ln\left(Z_T/ {\cal E}_T(G_{-}^{-1}\bigcdot m)\right)\right]\\ &&= E^{\widehat Q}\left[-\ln\left(Z_T\right)\right]+E^{\widehat Q}\left[\ln\left( {\cal E}_T(G_{-}^{-1}\bigcdot m)\right)\right]\\ &&=G_0^{-1}E\left[\int_0^T {\cal E}_{s-}(G^{-1}\bigcdot D^{o,\mathbb F})^{-1}d U_s\right]+E\left[{\cal E}_T(G_{-}^{-1}\bigcdot m)\ln\left({\cal E}_T(G_{-}^{-1}\bigcdot m)\right)\right],\\ &&\geq G_0^{-1}E[U_T] +E\Bigl[{\cal E}_T(G_{-}^{-1}\bigcdot m)\ln\left({\cal E}_T(G_{-}^{-1}\bigcdot m)\right)\Bigr], \end{eqnarray*} where $U:=G_{-}\bigcdot V+{\widetilde G}\bigcdot H^{(0)}(K, P)-\langle K, m\rangle^{\mathbb F}$.
This proves that, under (\ref{FiniteEntropy}), the condition $E^{\widehat Q}\left[-\ln\left(Z_T/ {\cal E}_T(G_{-}^{-1}\bigcdot m)\right)\right]<+\infty$ implies that $E[U_T]<+\infty$, which is equivalent to the existence of log-optimal portfolio for $(S^{\tau}, \mathbb G)$ in virtue of Theorem \ref{LogOPexistence}. This ends the proof of the theorem.\qed \end{proof} \begin{corollary}\label{pseudo} Suppose $G>0$, and log-optimal portfolio for $(S,\mathbb F)$ exists. Then the following conditions are all sufficient for the existence of log-optimal portfolio of $(S^{\tau},\mathbb G)$. Furthermore, both portfolios coincide on $\Lbrack0,\tau\Lbrack$.\\ {\rm{(a)}} $\tau$ is a pseudo-stopping time.\\ {\rm{(b)}} $\tau$ is independent of $\mathbb F$.\\ {\rm{(c)}} Every $\mathbb F$-martingale is a $\mathbb G$-local martingale (i.e. immersion holds). \end{corollary} \begin{proof} On the one hand, it is easy to remark that assertion (a) is implied by either assertions (b) or (c). On the other hand, when assertion (a) holds, the existence of log-optimal portfolio for $(S^{\tau},\mathbb G)$ is a direct application of Theorem \ref{theorem3.8} and the fact that $m\equiv m_0$ --as this is a characterization for $\tau$ being a pseudo-stopping time due to \cite{Nik2005}--, which implies (\ref{FiniteEntropy}). This proves the corollary. \qed \end{proof} \section{ Log-optimal portfolio for $(S^{\tau},\mathbb G)$: Description and applications}\label{section5} This section addresses the explicit computation of num\'eraire and log-optimal portfolios for $(S^{\tau},\mathbb G)$ in terms of $\mathbb F$-observable data of the model, and this will answer completely (\ref{Q4}). In virtue of \cite{ChoulliYansori2}, see also Theorem \ref{LemmaCrucial}, this will be possible only by using the {\it predictable characteristics} of semimartingales which are technical but powerful statistical tools for parametrization. 
Thus, this section starts by recalling these characteristics for a general model $( X,\mathbb H)$ defined on the complete probability space $(\Omega, {\cal F}, P)$, where $\mathbb H$ is a filtration satisfying the usual conditions of completeness and right continuity, and $X$ is an $\mathbb H$-semimartingale. We denote $$\label{sigmaFields} \widetilde {\cal O}(\mathbb H):={\cal O}(\mathbb H)\otimes {\cal B}({\mathbb R}^d),\ \ \ \ \ \widetilde{\cal P}(\mathbb H):= {\cal P}(\mathbb H)\otimes {\cal B}({\mathbb R}^d), $$ where ${\cal B}({\mathbb R}^d)$ is the Borel $\sigma$-field on ${\mathbb R}^d$, and ${\cal O}(\mathbb H)$ and ${\cal P}(\mathbb H)$ are the $\mathbb H$-optional and $\mathbb H$-predictable $\sigma$-fields respectively, so that $\widetilde {\cal O}(\mathbb H)$ and $\widetilde{\cal P}(\mathbb H)$ are $\sigma$-fields on $\Omega\times[0,+\infty)\times{\mathbb R}^d$. With a c\`adl\`ag $\mathbb H$-adapted process $X$, we associate the optional random measure $\mu_X$ defined by \begin{eqnarray*}\label{mesuresauts} \mu_X(dt,dx):=\sum_{u>0} I_{\{\Delta X_u \neq 0\}}\delta_{(u,\Delta X_u)}(dt,dx)\,.\end{eqnarray*} For a product-measurable functional $W\geq 0$ on $\Omega\times \mathbb R_+\times{\mathbb R}^d$, we denote $W\star\mu_X$ (or sometimes, with abuse of notation, $W(x)\star\mu_X$) the process \begin{eqnarray*}\label{Wstarmu} (W\star\mu_X)_t:=\int_0^t \int_{{\mathbb R}^d\setminus\{0\}} W(u,x)\mu_X(du,dx)=\sum_{0<u\leq t} W(u,\Delta X_u) I_{\{ \Delta X_u\not=0\}}.\end{eqnarray*} On $\Omega\times\mathbb R_+\times{\mathbb R}^d$, we define the measure $M^P_{\mu_X}:=P\otimes\mu_X$ by $$M^P_{\mu_X}\left(W\right):=\int W dM^P_{\mu_X}:=E\left[(W\star\mu_X)_\infty\right],$$ (when the expectation is well defined).
The \emph{conditional expectation} given $ \widetilde{\cal P}$ of a product-measurable functional $W$, denoted by $M^P_{\mu_X}(W\big|\widetilde{\cal P})$, is the unique $ \widetilde{\cal P}$-measurable functional $\widetilde W$ satisfying $ E\left[(W I_{\Sigma}\star\mu_X)_\infty \right]=E\left[({\widetilde W} I_{\Sigma}\star\mu_X)_\infty \right]$ for any $\Sigma$ belonging to $\widetilde{\cal P}.$ For the reader's convenience, we recall {\it the canonical decomposition} of $X$ (for more related details, we refer the reader to \cite[Theorem 2.34, Section II.2]{JS03}) \begin{equation}\label{modelXbis} X=X_0+X^c+h\star(\mu_X-\nu_X)+b^X\bigcdot A^X+(x-h)\star\mu_X,\end{equation} where $h$, defined as $h(x):=xI_{\{ \vert x\vert\leq 1\}}$, is the truncation function, and $h\star(\mu_X-\nu_X)$ is the unique pure jump $\mathbb H$-local martingale with jumps given by $h(\Delta X)I_{\{\Delta X\not=0\}}-\ ^{p,\mathbb H}(h(\Delta X)I_{\{\Delta X\not=0\}})$. For the matrix $C^X$ with entries $C^{ij}:=\langle X^{c,i}, X^{c,j}\rangle $, and $\nu_X$, we can find a version satisfying $$ C^X=c^X\bigcdot A^X,\ \nu_X(d t,\ d x)=d A_t^XF_t^X(d x),\ F_t^X(\{0\})=0,\ \displaystyle{\int} (\vert x\vert^2\wedge 1)F_t^X(d x)\leq 1. $$ Here $A^X$ is increasing and predictable, $b^X$ and $c^X$ are predictable processes, $F_t^X(d x)$ is a predictable kernel, $b_t^X(\omega)$ is a vector in ${\mathbb R}^d$ and $c_t^X(\omega)$ is a symmetric $d\times d$-matrix, for all $(\omega,\ t)\in\Omega\times \mathbb R_+$. For $W\geq 0$ and $\widetilde{\cal P}$-measurable, we put \begin{eqnarray}\label{What} {\widehat W}_t:=\int W(t,x)\nu_X(\{t\}, dx),\quad a_t:=\widehat{1}_t=\nu_X(\{t\},\mathbb R^d). \end{eqnarray} The quadruplet \begin{eqnarray}\label{PCharac4X} (b^X,c^X, F^X, A^X)\ \mbox{are the predictable characteristics of}\ (X,\mathbb H).\end{eqnarray} In the rest of the paper, except the appendix, $(X, \mathbb H)\in\{(S, \mathbb F),(S^{\tau},\mathbb G)\}$.
Thus, throughout the rest of the paper, for the sake of simplicity, the random measure $\mu_S$ associated with the jumps of $S$ will be denoted by $\mu$, while $S^c$ denotes the continuous $\mathbb F$-local martingale part of $S$, and the quadruplet \begin{eqnarray*}\label{FpredictCharac} \left(b,c,F, A\right)\ \mbox{are the predictable characteristics of}\ (S,\mathbb F).\end{eqnarray*} Or equivalently {\it the canonical decomposition} of $S$ (see Theorem 2.34, Section II.2 of \cite{JS03} for details) is given by \begin{equation}\label{modelSbis} S=S_0+S^c+h\star(\mu-\nu)+b\bigcdot A+(x-h)\star\mu,\quad h(x):=xI_{\{ \vert x\vert\leq 1\}}.\end{equation} Throughout the rest of this section, we consider Jacod's decomposition for the $\mathbb F$-martingale $G_{-}^{-1}\bigcdot m$, see Theorem \ref{tmgviacharacteristics} for details, \begin{eqnarray}\label{Jacod4m} G_{-}^{-1}\bigcdot {m}&&= \beta \bigcdot S^c +U^{(m)} \star (\mu - \nu) + g_m\star \mu + m^{\perp},\\ && U^{(m)}:=f_m-1+{{\widehat{f_m}-a}\over{1-a}}I_{\{a<1\}}.\nonumber\end{eqnarray} We end this subsection by defining the space ${\cal L}(S,\mathbb F)$ and the function ${\cal K}_{log}$, that will be used throughout the paper, as follows. \begin{eqnarray} {\cal L}(S,\mathbb F)&&:=\Bigl\{\theta\in{\cal P}(\mathbb F)\ \big|\ 1+x^{tr}\theta_t(\omega)>0\quad P\otimes dA\otimes F\mbox{-a.e.}\Bigr\},\label{SelL(F)}\\ {\cal K}_{log}(y)&&:={{ -y}\over{1 +y }} +\ln(1 +y)\quad \mbox{for any}\quad y>-1.\label{Kfunction} \end{eqnarray} The rest of this section is divided into three subsections. The first subsection states the main results, while their proofs will be detailed in the second subsection. The last subsection illustrates the main results on the case when the initial model $(S,\mathbb F)$ follows a jump-diffusion model.
\subsection{Main results and their applications and financial interpretations}\label{Subsection4Results} This subsection describes explicitly log-optimal portfolio for $(S^{\tau},\mathbb G)$ using the model's data seen through $\mathbb F$ only, and discusses their applications afterwards. \begin{theorem}\label{optimalportfoliogen} Suppose $G>0$, and let ${\cal K}_{log}$ be the function given by (\ref{Kfunction}). Then the following three assertions are equivalent.\\ {\rm{(a)}} Log-optimal portfolio for $(S^{\tau}, \mathbb G)$, denoted by ${\widetilde\theta}^{\mathbb G}$, exists. \\ {\rm{(b)}} There exists $\widetilde\varphi\in {\cal L}(S,\mathbb F)$ such that, for any $\theta\in{\cal L}(S,\mathbb F)$, the following hold \begin{eqnarray} &&\hskip -0.55cm(\theta-\widetilde\varphi)^{tr}(b +c(\beta-\widetilde\varphi)) + \int(\theta-\widetilde\varphi)^{tr}\left({{f_m(x) }\over{1 + \widetilde\varphi^{tr} x }} x-h(x)\right) F(dx)\leq 0,\label{Cond4ptimalityG}\\ &&\hskip -0.55cmE\left[(G_{-}\bigcdot \widetilde V)_T+(G_{-}\widetilde\varphi^{tr}c\widetilde\varphi\bigcdot A)_T+(G_{-}{\cal K}_{log}(\widetilde\varphi^{tr}x) f^{(m)}\star\nu)_T\right]<+\infty,\label{Cond4integrabilityG}\\ &&\hskip -0.55cm\widetilde{V}:=\left(\widetilde\varphi^{tr} b +\widetilde\varphi^{tr}c(\beta-\widetilde\varphi)\right)\bigcdot A + \left({{f_m(x)\widetilde\varphi^{tr}x}\over{1 + \widetilde\varphi^{tr} x }} -\widetilde\varphi^{tr}h(x)\right)\star\nu.\label{VtildeG} \end{eqnarray} {\rm{(c)}} Num\'eraire portfolio for $(S^{\tau},\mathbb G)$ exists, and its rate $\widetilde\varphi^{\mathbb G}$ satisfies (\ref{Cond4integrabilityG}). 
\\ Furthermore, the processes ${\widetilde\theta}^{\mathbb G}$, $\widetilde{\varphi}$, $\widetilde\varphi^{\mathbb G}$, ${\widetilde Z}^{\mathbb G}$ solution to (\ref{dualproblem}), and ${\widetilde Z}^{\mathbb F}\in {\cal D}(S,\mathbb F)$ described via (\ref{MinimiDualEquivLog}) are related to each other by the following \begin{eqnarray} &&{\widetilde\theta}^{\mathbb G}\left(1+({\widetilde\theta}^{\mathbb G}\bigcdot S^{\tau})_{-}\right)^{-1}=\widetilde{\varphi}= \widetilde\varphi^{\mathbb G}\quad \mbox{on}\quad \Lbrack0,\tau\Lbrack,\label{LogOPrate4G}\\ &&\hskip -0.45cm {1\over{{\cal E}(\widetilde\varphi\bigcdot S)^{\tau}}}={\widetilde Z}^{\mathbb G}:={\cal E}({\widetilde K}^{\mathbb G}){\cal E}(-\widetilde{V}^{\tau})={{{\cal E}({\widetilde K}^{\mathbb F})^{\tau}{\cal E}(-\widetilde{V}^{\tau})}\over{{\cal E}(G_{-}^{-1}\bigcdot m)^{\tau}}}=:{{({\widetilde Z}^{\mathbb F})^{\tau}}\over{{\cal E}(G_{-}^{-1}\bigcdot m)^{\tau}}} ,\label{OptimalDualLoggeneral}\\ &&{\widetilde K}^{\mathbb F}:=-\widetilde\varphi\bigcdot S^c+ {{\widetilde\Gamma}\over{G_{-}}}\bigcdot m -{{{\widetilde\Gamma}(\widetilde\varphi^{tr}x) f_m}\over{1+\widetilde\varphi^{tr}x}}\star(\mu-\nu)-{{{\widetilde\Gamma}(\widetilde\varphi^{tr}x)g_m}\over{1+\widetilde\varphi^{tr}x}}\star\mu.\hskip 1cm\label{K-F}\\ &&{\widetilde K}^{\mathbb G}:=-{\widetilde\varphi}\bigcdot {\cal T}(S^c)+{{-{\widetilde\Gamma}{\widetilde\varphi}^{tr}x}\over{1+{\widetilde\varphi}^{tr}x}}I_{\Lbrack0,\tau\Lbrack}\star(\mu-f_m\cdot\nu)\label{K-G} \\ &&\hskip -0.55cm\widetilde\Gamma:=\left(1-\widehat{f_m}+\widehat{f_m f^{(op)}}\right)^{-1},\quad f^{(op)}(x):=\left(1+\widetilde\varphi^{tr}x\right)^{-1}.\label{GammaTilde}\end{eqnarray} \end{theorem} Thanks to \cite[Theorems 2.17 and 2.20]{ChoulliDavelooseVanmaele}, the uncertainty in $\tau$ bears three types of orthogonal risks.
These risks are the correlation risks whose generator is the $\mathbb F$-martingale $m$ (or equivalently ${\cal E}(G_{-}^{-1}\bigcdot m)$), the pure default risk of type one generated by $N^{\mathbb G}$ defined in (\ref{processNG}), and the second type of pure default risk that takes the form of $k\bigcdot D$ where $k$ spans the space $L^1_{loc}(\widetilde\Omega, \mbox{Prog}(\mathbb F), P\otimes D)$ and satisfies $E[k_{\tau}I_{\{\tau<+\infty\}}\big|{\cal F}_{\tau}]=0$ $P$-a.s..\\ In virtue of Theorems \ref{NumeraireGeneral} and \ref{LogOPexistence}, it is clear that the correlation risk is the only risk that impacts num\'eraire and log-optimal portfolios. Theorem \ref{optimalportfoliogen} gives a more ``chirurgical'' and deep description of the impact of $\tau$ on the log-optimal portfolio. To be more precise in our analysis, we appeal to (\ref{Jacod4m}), and write \begin{eqnarray}\label{Decomposition4m} {\cal E}(G_{-}^{-1}\bigcdot m)=\underbrace{{\cal E}\Biggl(\beta\bigcdot S^c+U^{(m)}\star(\mu-\nu)\Biggr)}_{\mbox{$S$-correlation source}}\times\underbrace{{\cal E}\Biggl( (g_m/f_m)\star\mu+m^{\perp}\Biggr)}_{\mbox{$S$-non-correlation source}}.\hskip 0.5cm \end{eqnarray} This decomposes the source of correlation risks between $\tau$ and $\mathbb F$ into the product of $S$-correlation and $S$-non-correlation risk generators, which leads naturally to the following definition. \begin{definition} Let $X$ be an $\mathbb F$-semimartingale.\\ $\tau$ is ``{\it non-correlated}" to $X$ if $[m, M^X]\in {\cal M}_{loc}(\mathbb F)$ for any $M^X\in {\cal M}_{loc}(\mathbb F)$ generated by $X$. I.e. it is of the form of $M^X=\varphi\bigcdot X^c+W\star(\mu^X-\nu^X)$, for $\mathbb F$-predictable $\varphi$ and $W$ fulfilling the integrability conditions of Theorem \ref{tmgviacharacteristics}. \end{definition} This defines a large class of random times that covers the class of pseudo-stopping times, and it possesses similar ``nice" properties as them.
In fact, it is easy to prove that $\tau$ is an $\mathbb F$-pseudo-stopping time if and only if it is non-correlated to any bounded $\mathbb F$-martingale. Furthermore, in virtue of (\ref{Jacod4m}), it is clear that $\tau$ is non-correlated to $S$ if and only if $(\beta,f_m)\equiv (0,1)$ $P\otimes A\otimes F$-a.e.. Thus, being non-correlated to $S$ is a much weaker assumption than being a pseudo-stopping time. Furthermore, our notion of ``non-correlation'' in Definition \ref{Decomposition4m} allows us to go deeper than Corollaries \ref{ParticularCases} and \ref{pseudo} in describing a larger class of random times $\tau$ that do not affect num\'eraire and log-optimal portfolios when passing from $(S, \mathbb F)$ to $(S^{\tau},\mathbb G)$. This is the aim of the following. \begin{proposition}\label{Corollary5.3} Suppose ${\cal D}(S,\mathbb F)\not=\emptyset$, and let $T\in (0,+\infty)$. Then the following assertions hold.\\ {\rm{(a)}} If $\tau$ is non-correlated to $S$, then num\'eraire portfolios for $(S^{\tau},\mathbb G)$ and $(S, \mathbb F)$ coincide on $\Rbrack0,\tau\Lbrack$.\\ {\rm{(b)}} Suppose that $\tau$ is non-correlated to $S$. If log-optimal portfolio for $(S,\mathbb F)$ exists, then log-optimal portfolio for $(S^{\tau\wedge T}, \mathbb G)$ does exist also, and both portfolios coincide on $\Rbrack0,T\wedge\tau\Lbrack$.\\ {\rm{(c)}} Suppose that $S$ is continuous and denote by $\widetilde\lambda$ the num\'eraire portfolio rate for $(S,\mathbb F)$.
Then the following properties hold.\\ {\rm{(c.1)}} Num\'eraire portfolio rate for $(S^{\tau},\mathbb G)$ --denoted by $\widetilde\varphi$-- exists and is given by $\widetilde\varphi=\widetilde\lambda+\beta$.\\ {\rm{(c.2)}} $\widetilde\varphi$ is log-optimal portfolio rate for $(S^{T\wedge\tau},\mathbb G)$ if and only if $$E\left[\int_0^T G_{s-}{\widetilde\varphi}_s^{tr}c_s{\widetilde\varphi}_s dA_s\right]<+\infty.$$ {\rm{(c.3)}} If $E\left[\int_0^T{\widetilde\lambda}_s^{tr}c_s{\widetilde\lambda}_s dA_s\right]<+\infty$, then log-optimal portfolio rate for $(S^{T\wedge\tau},\mathbb G)$ exists if and only if $$E\left[\int_0^T {1\over{G_{s-}}}d\langle m^c, m^c\rangle_s\right]=E\left[\int_0^T G_{s-}{\beta}_s^{tr}c_s{\beta}_s dA_s\right]<+\infty.$$ \end{proposition} We relegate the proof of this corollary to Subsection \ref{Subsection4Proofs}, for the sake of simple exposition, while herein we focus on discussing the meaning of its results and its connection to the literature. Both assertions (a) and (b) explain that the only risk that affects the structures and/or the existence of num\'eraire and log-optimal portfolios is the risk generated by the correlation between $\tau$ and $S$. Assertion (c) considers the case of continuous $S$, which is considered in the majority of (or all) the insider information literature, see \cite{amendingerimkellerschweizer98,ADImkeller,AImkeller,JImkellerKN,GrorudPontier,pikovskykaratzas96,kohatsusulem06}. Thus, assertion (c) shows what those results of the insider-information framework using {\it information drifts} become in our progressive setting.\\ The converse of assertion (b) is not true in general. In other words, in some cases, $\tau$ might have a positive impact on $(S, \mathbb F)$ instead.
Indeed, for model $(S, \mathbb F)$ admitting num\'eraire portfolio with rate $\widetilde\lambda$ which is {\it locally} a log-optimal portfolio but {\it not globally}, we can always find models for $\tau$ such that $(S^{T\wedge\tau}, \mathbb G)$ admits log-optimal portfolio, as soon as the probability space $(\Omega, {\cal F}, P)$ is rich enough. This claim can be also viewed as a complement result to Theorem \ref{thoeremAppl1}, as it addresses a case when log-optimal portfolio for $(S,\mathbb F)$ might fail to exist. \begin{theorem}\label{ConverseTheorem} Suppose that $S$ is quasi-left-continuous (i.e. it does not jump on predictable stopping times) and $(\Omega, {\cal F}, P)$ supports an exponentially distributed random variable $\xi$ independent of ${\cal F}_{\infty}:=\sigma(\cup_{s\geq 0}{\cal F}_s)$ and $E[\xi]=1$. \\ If $\left({\cal D}_{log}(S, \mathbb F)\right)_{loc}\not=\emptyset$, then there exists a random time $\tau$ having a positive Az\'ema supermartingale and $(S^{\tau}, \mathbb G)$ admits log-optimal portfolio. Furthermore, this portfolio coincides with num\'eraire portfolio of $(S,\mathbb F)$ on $\Lbrack0,\tau\Lbrack$. \end{theorem} It is clear that when num\'eraire portfolio rate $\widetilde\lambda$ exists (see Theorem \ref{LemmaCrucial}), \begin {eqnarray}\label{Psi-tilde} \widetilde\Psi:=\left({\widetilde\lambda}^{tr}b-{1\over{2}}{\widetilde\lambda}^{tr}c\widetilde\lambda\right)\bigcdot A +\left(\ln(1 + \widetilde\lambda^{tr} x) -{\widetilde\lambda}^{tr}h\right)\star\nu \end{eqnarray} is a well defined, predictable, and nondecreasing process with values in $[0,+\infty]$. Due to Theorem \ref{LemmaCrucial}, the assertions below are equivalent to $\left({\cal D}_{log}(S, \mathbb F)\right)_{loc}\not=\emptyset$:\\ i) Log-optimal portfolio for $(S,\mathbb F)$ exists locally (see \cite{ChoulliDengMa} for this concept). 
This means that there exists a sequence of stopping times $(T_n)_n$ that increases to infinity such that each $(S^{T_n},\mathbb F)$ admits log-optimal portfolio.\\ ii) Num\'eraire portfolio exists with rate $\widetilde\lambda$ and the nondecreasing process ${\widetilde\Psi}$ defined by (\ref{Psi-tilde}) has finite values (i.e. ${\widetilde\Psi}_t<+\infty$ $P$-a.s. for all $t\in[0,+\infty)$).\\ We think that the claim of Theorem \ref{ConverseTheorem} remains valid under the weaker assumption ${\cal D}(S, \mathbb F)\not=\emptyset$. I.e. we believe that the following {\bf conjecture} holds:\\ {\it If $(S,\mathbb F)$ admits num\'eraire portfolio and $(\Omega, {\cal F}, P)$ is rich enough as in Theorem \ref{ConverseTheorem}, then there exists $\tau$ such that $(S^{\tau},\mathbb G)$ admits log-optimal portfolio}. \begin{proof}{\it of Theorem \ref{ConverseTheorem}} Consider a nonnegative and $\mathbb F$-predictable process $\Phi$ such that $\Phi_0=0$ and the process $\mbox{ess}\sup_{0<s\leq \cdot}\Phi_s$ has finite values, and put \begin{eqnarray} \tau:=\inf\left\{t\geq 0\ \Bigg|\quad{\Phi}_t\geq \xi\right\}\quad\mbox{and}\quad {\widetilde\Phi}_t:=\mbox{ess}\sup_{0<s\leq t}\Phi_s.\label{tauDefinition} \end{eqnarray} Then thanks to the independence between ${\cal F}_{\infty}$ and $\xi$, we calculate the triplet $(G, \widetilde G, G_{-})$ associated to $\tau$ as follows. \begin{eqnarray*} G_t&&:=P(\tau>t\big|{\cal F}_t)=P(\xi>{\widetilde\Phi}_t\big|{\cal F}_t)=\exp(-{\widetilde\Phi}_t)>0,\\ {\widetilde G}_t&&:=P(\tau\geq t\big|{\cal F}_t)=P(\xi\geq {\widetilde\Phi}_{t-}\big|{\cal F}_t)=\exp(-{\widetilde\Phi}_{t-})=G_{t-} .\end{eqnarray*} This proves that, in our current case, we have the immersion case, as $G$ is a nonincreasing process.
Hence $m\equiv m_0$, and by applying Theorem \ref{optimalportfoliogen} to this case and letting $T$ go to $+\infty$, we deduce that num\'eraire portfolio for $(S^{\tau}, \mathbb G)$ exists and its rate coincides with $\widetilde\lambda$ on $\Lbrack0,\tau\Lbrack$. Thus, in order to complete the proof of the theorem we need to prove that $\widetilde\lambda$ fulfills (\ref{Cond4integrabilityG}). To this end, in virtue of the above discussion (paragraph after the theorem), we remark that ${\widetilde\Psi}\in {\cal A}^+_{loc}.$ Hence, by choosing $\Phi={\widetilde\Psi}$ we deduce that $\widetilde\Phi={\widetilde\Psi}$ and due to $f_m\equiv 1$, $\beta\equiv 0$ and the continuity of $A$ --which is implied by the quasi-left-continuity of $S$--, the process $\widetilde V$ of (\ref{VtildeG}) satisfies $\widetilde V+{1\over{2}}\widetilde\lambda^{tr}c{\widetilde\lambda}\bigcdot A+{\cal K}_{log}(\widetilde\lambda^{tr}x)\star\nu={\widetilde\Psi}$ and \begin{eqnarray*} E\left[\int_0^{\infty} G_{s-}d{\widetilde\Psi}_s\right]=E\left[\int_0^{\infty} {\cal E}_{s-}(-{\widetilde\Psi})d{\widetilde\Psi}_s\right]=E\left[1-{\cal E}_{\infty}(- {\widetilde\Psi})\right]\leq 1. \end{eqnarray*} This proves that $\widetilde\lambda I_{\Lbrack0,\tau\Lbrack}$ is the log-optimal portfolio rate for $(S^{\tau},\mathbb G)$ and the proof of the theorem is completed.\qed \end{proof} The following theorem answers (\ref{Q5}) by evaluating $u_T(S^{\tau}, \mathbb G)-u_T(S, \mathbb F)$ and singling out its important parts that we analyze afterwards. \begin{theorem}\label{Difference4u}Suppose $G>0$, the log-optimal portfolio rate $\widetilde\lambda$ for $(S, \mathbb F)$ exists, and (\ref{EntropyCondition}) holds.
Then there exists $\widetilde\varphi\in {\cal L}(S, \mathbb F)$ satisfying (\ref{Cond4ptimalityG}), and \begin{eqnarray} &&\Delta_T(S,\tau,\mathbb F):=u_T(S^{\tau}, \mathbb G)-u_T(S, \mathbb F)\nonumber\\ &&=-\underbrace{E\left[-({\widetilde G}\bigcdot {\widetilde{\cal H}}(\mathbb G))_T+({\widetilde G}\bigcdot {\widetilde{\cal H}}(\mathbb F))_T+\langle {\widetilde K}^{\mathbb F}- {\widetilde L}^{\mathbb F}, m\rangle^{\mathbb F}_T\right]}_{\textcolor{blue} ox{correlation-risk}}\label{Delta(S,tau)1}\hskip 1cm\\ \nonumber\\ &&-\underbrace{E\left[\left((1-\widetilde G)\bigcdot {\widetilde{\cal H}}(\mathbb F)\right)_T\right]}_{\textcolor{blue} ox{cost-of-leaving-earlier}}-\underbrace{E\left[\langle {\widetilde L}^{\mathbb F},m\rangle^{\mathbb F}_T\right]}_{\textcolor{blue} ox{NP($\mathbb F$)-correlation}}+\underbrace{ {\cal H}_{\mathbb G}\left(P\big|{\widetilde Q}_T\right)}_{\textcolor{blue} ox{information-premium}},\nonumber\\ &&=-\underbrace{E\left[\left((1-\widetilde G)\bigcdot {\widetilde{\cal H}}(\mathbb F)\right)_T\right]}_{\textcolor{blue} ox{cost-of-leaving-earlier}}-\underbrace{E\left[\langle {\widetilde L}^{\mathbb F},m\rangle^{\mathbb F}_T\right]}_{\textcolor{blue} ox{NP($\mathbb F$)-correl.}}+\underbrace{E\left[\int_0^T{\widetilde {\cal R}}_tG_{t-}dA_t\right]}_{\textcolor{blue} ox{num\'eraire-change-premium}}\label{Delta(S,tau)2}\hskip 0.25cm \end{eqnarray} Furthermore, the ``correlation-risk", the ``cost-of-leaving-earlier", the ``information-premium", and the ``num\'eraire-change-premium" are nonnegative quantities.\\ Here, ${\widetilde Q}_T$, $\widetilde V$ and $ {\widetilde K}^{\mathbb F}$ are given by (\ref{Qtilde}), (\ref{VtildeG}) and (\ref{K-F}) respectively, and the processes ${\widetilde {\cal R}}$, ${\widetilde{\cal H}}(\mathbb G)$ and ${\widetilde{\cal H}}(\mathbb F)$ are given by \begin{eqnarray} {\widetilde {\cal R}}_t&&:=({\widetilde\varphi}_t-{\widetilde\lambda}_t)^{tr}b_t+{\widetilde\varphi}^{tr}_t 
c_t(\beta_t-{1\over{2}}{\widetilde\varphi}_t)-{\widetilde\lambda}^{tr}_t c_t(\beta_t-{1\over{2}}{\widetilde\lambda}_t) \label{NumeraireChanegPremuim}\\ &&+\int \left(f_m(t,x)\ln((1+{\widetilde\varphi}^{tr}_t x)(1+{\widetilde\lambda}^{tr}_t x)^{-1})-({\widetilde\varphi}_t-{\widetilde\lambda}_t)^{tr} h(x)\right)F_t(dx) \nonumber\\ {\widetilde{\cal H}}(\mathbb G)&&:=\widetilde V+\sum(-\Delta{\widetilde V}-\ln(1-\Delta{\widetilde V}))+H^{(0)}({\widetilde K}^{\mathbb F},P),\label{Htilde(G)}\\ {\widetilde{\cal H}}(\mathbb F)&&:={\widetilde V}^{\mathbb F}+\sum(-\Delta{\widetilde V}^{\mathbb F}-\ln(1-\Delta{\widetilde V}^{\mathbb F}))+H^{(0)}({\widetilde L}^{\mathbb F},P),\label{Htilde(F)} \end{eqnarray} where ${\widetilde L}^{\mathbb F}$ and ${\widetilde V}^{\mathbb F}$ are defined by \begin{eqnarray} &&{\widetilde L}^{\mathbb F}:=-{\widetilde\lambda}\bigcdot S^c-{{{\widetilde\Xi}{\widetilde\lambda}^{tr}x}\over{1+{\widetilde\lambda}^{tr}x}}\star(\mu-\nu),\ {\widetilde\Xi}_t^{-1}:=1-a_t+\int{{\nu(\{t\},dx)}\over{1+{\widetilde\lambda_t}^{tr}x}},\hskip 1cm\label{Ltilde}\\ &&{\widetilde V}^{\mathbb F}:=\left({\widetilde\lambda}^{tr}b-{1\over{2}}{\widetilde\lambda}^{tr}c\widetilde\lambda\right)\bigcdot A +\left({{\widetilde\lambda^{tr} x}\over{1 + \widetilde\lambda^{tr} x}} -{\widetilde\lambda}^{tr}h\right)\star\nu.\label{Utilde} \end{eqnarray} \end{theorem} Our theorem measures the quantity IEU$_{log}(S,\tau,\mathbb F)$, denoted by $\Delta_T(S,\tau,\mathbb F)$ and defined in (\ref{Delta(S, Tau)}), in different manners by quantifying deeply the various factors that directly affect log-optimal portfolio when passing from $(S, \mathbb F)$ to the stopped model $(S^{\tau}, \mathbb G)$. Two of these factors were {\it intuitively} known and understood, while herein we quantify them with a sharpe precision using the model's data that is observable in $\mathbb F$ no matter how general the model $(S, \mathbb F, \tau)$ is. 
In fact, we know intuitively that the agent endowed with the flow $\mathbb G$ has the information advantage of seeing the occurrence of $\tau$, and this should {\it naturally} generate some premium for her, which we quantified and called the ``information-premium''. Simultaneously with this advantageous information, our $\mathbb G$-agent might face two types of risks due to the stochasticity of $\tau$. The first risk comes from the length of the random horizon, which might lead to the cost-of-leaving-earlier, as the horizon $T\wedge\tau$ is in general shorter than $T$ with positive probability. This fact is also well known for the case when $\tau$ is an $\mathbb F$-stopping time, a fortiori when it is a fixed/deterministic horizon, due to the myopic feature of the logarithmic utility. The second risk, that our $\mathbb G$-investor might face, is essentially intrinsic to the correlation between $\tau$ and $S$. We baptize this risk as the ``correlation-risk'' and we precisely quantify it in terms of the $\mathbb F$-observable model's data. This third factor appears naturally in our analysis and was not intuitively clear, in contrast to the two previous factors. \\ The fourth factor, which appears in both expressions (\ref{Delta(S,tau)1}) and (\ref{Delta(S,tau)2}), represents the correlation between $\tau$ and the num\'eraire portfolio of $(S, \mathbb F)$.
We naturally quantify this latter factor with the expression $E[\langle {\widetilde L}^{\mathbb F}, m\rangle^{\mathbb F}_T]$, as $ {\widetilde L}^{\mathbb F}$ is the main randomness-source of the optimal-deflator dual to num\'eraire portfolio.\\ By comparing (\ref{Delta(S,tau)1}) and (\ref{Delta(S,tau)2}), we obviously deduce that \begin{eqnarray*} \textcolor{blue} ox{\it num\'eraire-change-premium}= \left(\textcolor{blue} ox{\it information-premium}\right)-\left(\textcolor{blue} ox{\it correlation-risk}\right)\geq 0.\end{eqnarray*} This shows that the ``correlation-risk" liability does not exceed the ``information-premium", and hence it diminishes the informational advantage of our $\mathbb G$-investor without entailing a loss. This ``num\'eraire-change-premium" is null if and only if both num\'eraire portfolios $\widetilde\lambda$ and $\widetilde\varphi$ are equal in some sense. This fact and further discussions on these factors are summarized in the following.\\ \begin{theorem}\label{Proposition4DifferenceU} Suppose that assumptions of Theorem \ref{Difference4u} hold. Then the following assertions hold.\\ {\rm{(a)}} The correlation-risk is null and both num\'eraire portfolios for $(S,\mathbb F)$ and $(S^{\tau},\mathbb G)$ coincide on $\Rbrack0,\tau\Lbrack$ if and only if the information-premium is null if and only if $\tau$ is a pseudo-stopping time (i.e. $E[M_{\tau}]=E[M_0]$ for any bounded $\mathbb F$-martingale). In this case, the NP$(\mathbb F)$-correlation is also null, and \begin{eqnarray}\label{Case1} \Delta_T(S,\tau,\mathbb F)=-\left(\textcolor{blue} ox{cost-of-leaving-earlier}\right)\leq 0.\end{eqnarray} {\rm{(b)}} Suppose that $\tau$ is non-correlated to $S$. Then (\ref{Case1}) holds, the correlation-risk coincide with the information-premium, and the NP$(\mathbb F)$-correlation is null. 
{\rm{(c)}} The ``num\'eraire-change-premium" is null iff the num\'eraire portfolio rates $\widetilde\lambda$ and ${\widetilde\varphi}^{\mathbb G}$, for $(S, \mathbb F)$ and $(S^{\tau},\mathbb G)$ respectively, coincide on $\Lbrack0,\tau\Lbrack$, i.e. $\widetilde\lambda\bigcdot S^{\tau}={\widetilde\varphi}^{\mathbb G}\bigcdot S^{\tau}$ or on $\Lbrack0,\tau\Lbrack$ $P\otimes A\otimes F$-a.e. $c\widetilde\lambda=c{\widetilde\varphi}^{\mathbb G}$, $b^{tr}\widetilde\lambda= b^{tr}{\widetilde\varphi}^{\mathbb G} $ and $x^{tr}\widetilde\lambda=x^{tr}{\widetilde\varphi}^{\mathbb G}$. \\ {\rm{(d)}} If $S$ is a local martingale, then \begin{eqnarray}\label{Case3} \Delta_T(S,\tau,\mathbb F)=\textcolor{blue} ox{num\'eraire-change-premium}\geq 0.\end{eqnarray} {\rm{(e)}} Suppose $S$ is continuous. Then the pair $(\widetilde\lambda, \widetilde\varphi)$ defined in Theorem \ref{Difference4u} satisfies $\widetilde\lambda+\beta=\widetilde\varphi$ and the following hold. \begin{eqnarray} &&\textcolor{blue} ox{information-premium}=E\left[\int_0^T{{G_{s-}}\over{2}}\beta^{tr}_s c_s\beta_s dA_s\right]+\textcolor{blue} ox{correlation-risk},\hskip 0.75cm\label{Case4}\\ &&\textcolor{blue} ox{correlation-risk}=E\left[\int_0^T G_{s-}dh^{(E)}_s(m^{\perp},\mathbb F)\right],\label{Case5}\\ &&\textcolor{blue} ox{cost-of-leaving-earlier}=E\left[\int_0^T{{1-G_{s-}}\over{2}}{\widetilde\lambda}^{tr}_s c_s {\widetilde\lambda}_s dA_s\right].\label{Case6} \end{eqnarray} \end{theorem} \subsection{Proofs of Theorems \ref{optimalportfoliogen}, \ref{Difference4u} and \ref{Proposition4DifferenceU} and Proposition \ref{Corollary5.3}}\label{Subsection4Proofs} This subsection focuses on proving the results of the previous subsection, and is divided into four sub-subsections. 
\subsubsection{Proof of Theorem \ref{optimalportfoliogen}} To prove this theorem, we start by deriving the predictable characteristics for $(S^{\tau},\mathbb G)$, that we denote by $(b^{\mathbb G}, c^{\mathbb G}, F^{\mathbb G}, A^{\mathbb G})$, as follows. \begin{eqnarray}\label{Charac4G} && b^{\mathbb G}:=b+c\beta+\int h(x)(f_m(x)-1)F(dx),\ \mu^{\mathbb G}:=I_{\Lbrack0,\tau\Lbrack}\star\mu,\ c^{\mathbb G}:=c\nonumber\\ && d\nu^{\mathbb G}:=I_{\Lbrack0,\tau\Lbrack}f_{m}d\nu,\quad F^{\mathbb G}(dx):=I_{\Lbrack0,\tau\Lbrack}f_{m}(x)F(dx),\quad A^{\mathbb G}:=A^{\tau}. \end{eqnarray} Thus, by directly applying Theorem \ref{LemmaCrucial} to the model $(S^{\tau},\mathbb G)$, we deduce the equivalences between the existence of the log-optimal portfolio $\widetilde\theta^{\mathbb G}$ for the model, the existence of $\varphi\in {\cal L}(S^{\tau},\mathbb G)$ satisfying \begin{eqnarray} &&(\theta-\varphi)^{tr}(b^{\mathbb G}-c^{\mathbb G}\varphi)+ \int \left( {{(\theta-\varphi)^{tr}x}\over{1+{\varphi}^{tr}x}}-(\theta-\varphi)^{tr}h(x)\right)F^{\mathbb G}(dx)\leq 0,\label{C3forStau}\hskip 1cm\\ &&E\left[ V^{\mathbb G}_T+{1\over{2}}(\varphi^{tr}c^{\mathbb G}\varphi\bigcdot A^{\mathbb G})_T+({\cal K}_{log}(\varphi^{tr}x)\star \nu^{\mathbb G})_T\right]<+\infty,\label{C1forStau}\\ && V^{\mathbb G}:=\Big\vert \varphi^{tr}b^{\mathbb G}-\varphi^{tr}c^{\mathbb G}\varphi+\int \left[{{\varphi^{tr}x}\over{1+\varphi^{tr}x}}-\varphi^{tr}h(x)\right] F^{\mathbb G}(dx)\Big\vert\bigcdot A^{\mathbb G}\label{VG} \end{eqnarray} for any bounded $\theta\in {\cal L}(S^{\tau},\mathbb G)$, and the existence of num\'eraire portfolio rate $\widetilde\varphi^{\mathbb G}$ satisfying (\ref{C3forStau}). 
Furthermore, \begin{eqnarray} &&{\widetilde Z}^{\mathbb G}={\cal E}({\widetilde\varphi}^{\mathbb G}\bigcdot S^{\tau})^{-1}\in {\cal D}_{log}(S^{\tau}, \mathbb G),\quad \varphi=\varphi^{\mathbb G},\label{Ztilde1}\\ &&{\widetilde Z}^{\mathbb G}={\cal E}({\widetilde K}^{\mathbb G}){\cal E}(-\widetilde V^{\tau}),\ {\widetilde K}^{\mathbb G}:=-\varphi\bigcdot S^{c,\mathbb G}+{{-{\widetilde\Gamma}^{\mathbb G}\varphi^{tr}x}\over{1+\varphi^{tr}x}}\star(\mu^{\tau}-\nu^{\mathbb G}),\label{Ztilde2}\\ &&{\widetilde\Gamma}^{\mathbb G}_t:=\left(1+\int (f^{(op,\mathbb G)}_t(x)-1)\nu^{\mathbb G}(\{t\},dx)\right)^{-1}, f^{(op, \mathbb G)}_t(x):={1\over{1+\varphi^{tr}_t x}},\hskip 1cm\label{GammaG} \end{eqnarray} Thanks to Lemma \ref{PortfolioGtoF}, we deduce the existence of $\widetilde\varphi\in {\cal L}(S,\mathbb F)\cap L(S,\mathbb F)$ and \begin{eqnarray}\label{PhiEquality} \varphi I_{\Lbrack0,\tau\Lbrack}=\widetilde\varphi I_{\Lbrack0,\tau\Lbrack},\quad P\otimes A\textcolor{blue} ox{-a.e.}.\end{eqnarray} Therefore, by inserting this and (\ref{Charac4G}) in (\ref{VG}) and (\ref{GammaG}), we conclude that \begin{eqnarray}\label{Equality4Gamma} V^{\mathbb G}={\widetilde V}^{\tau},\quad {\widetilde\Gamma}^{\mathbb G}I_{\Lbrack0,\tau\Lbrack}={\widetilde\Gamma}I_{\Lbrack0,\tau\Lbrack},\quad f^{(op,\mathbb G)}I_{\Lbrack0,\tau\Lbrack}=f^{(op)}I_{\Lbrack0,\tau\Lbrack},\end{eqnarray} where $ \widetilde\Gamma$ and $f^{(op)}$are given by (\ref{GammaTilde}). Thus, by inserting (\ref{PhiEquality}), (\ref{Equality4Gamma}) and (\ref{Charac4G}) in (\ref{C3forStau}) and (\ref{C1forStau}) and using Lemma \ref{PortfolioGtoF}-(d), we deduce that both (\ref{Cond4ptimalityG}) and (\ref{Cond4integrabilityG}) hold for $\widetilde\varphi$. Hence, (\ref{LogOPrate4G}), and the equivalence between assertions (a), (b) and (c) follow immediately. 
By inserting (\ref{PhiEquality}) in (\ref{Ztilde1}), we obtain the first equality in (\ref{OptimalDualLoggeneral}), while the rest of the proof focuses in proving the second equality of (\ref{OptimalDualLoggeneral}). To this end, thanks to Theorem \ref{GeneralDeflators}-(b), we use (\ref{Ztilde2}) after inserting (\ref{PhiEquality}) and (\ref{Equality4Gamma}), and we look for ${\widetilde K}^{\mathbb F}\in {\cal M}_{0,loc}(\mathbb F)$ such that \begin{eqnarray}\label{fromKG2KF} -\widetilde\varphi\bigcdot S^{c,\mathbb G}+{{-{\widetilde\Gamma}\widetilde\varphi^{tr}x}\over{1+\widetilde\varphi^{tr}x}}\star(\mu^{\tau}-\nu^{\mathbb G})={\cal T}\left({\widetilde K}^{\mathbb F}-{1\over{G_{-}}}\bigcdot m\right). \end{eqnarray} By combining the fact that two local martingales are equal if and only if their continuous martingale parts are equal and their jumps are also equal, the fact that ${\cal T}(S^c)=S^{(c,\mathbb G)}$ defined in (\ref{Charac4G}), and the fact that \begin{eqnarray*} \Delta {\cal T}(X)={{G_{-}}\over{\widetilde G}}\Delta X I_{\Lbrack0,\tau\Lbrack}\quad\textcolor{blue} ox{and}\ \Delta {\widetilde K}^{\mathbb G}=\left({{{\widetilde\Gamma}}\over{1+{\widetilde\varphi}^{tr}\Delta S}}-1\right)I_{\Lbrack0,\tau\Lbrack},\end{eqnarray*} we deduce that (\ref{fromKG2KF}) is equivalent to \begin{eqnarray} \hskip -0.55cm({\widetilde K}^{\mathbb F})^c=-\widetilde\varphi\bigcdot S^{c}+{1\over{G_{-}}}\bigcdot m^c,\ {{\widetilde\Gamma}\over{1+\widetilde\varphi^{tr}\Delta S}}I_{\Lbrack0,\tau\Lbrack}={{G_{-}}\over{\widetilde G}}\left(\Delta {\widetilde K}^{\mathbb F}+1\right)I_{\Lbrack0,\tau\Lbrack}.\label{fromKG2KFbis} \end{eqnarray} By taking the $\mathbb F$-optional projection on both sides above, we get \begin{eqnarray*} \Delta{\widetilde K}^{\mathbb F}={{{\widetilde G}\widetilde\Gamma}\over{G_{-}(1+\widetilde\varphi^{tr}\Delta S)}}-1. 
\end{eqnarray*} Thanks to ${\widetilde G}=G_{-}(f_m(\Delta S)+g_m(\Delta S))$ on $(\Delta S\not=0)$ and $\Delta m={\widetilde G}-G_{-}$, the above equality is equivalent to \begin{eqnarray}\label{DeltaKF} \Delta{\widetilde K}^{\mathbb F}=-{{{\widetilde\Gamma}g_m(\Delta S)\widetilde\varphi^{tr}\Delta S}\over{1+\widetilde\varphi^{tr}\Delta S}}-{{{\widetilde\Gamma}f_m(\Delta S)\widetilde\varphi^{tr}\Delta S}\over{1+\widetilde\varphi^{tr}\Delta S}}+{{{\Delta m}{\widetilde\Gamma}}\over{G_{-}}}+{\widetilde\Gamma}-1. \end{eqnarray} Remark that it is not difficult to check that the two processes \begin{eqnarray*} {{{\widetilde\Gamma}g_m\widetilde\varphi^{tr}x}\over{1+\widetilde\varphi^{tr}x}}\star\mu\quad \mbox{and}\quad-{{{\widetilde\Gamma}f_m\widetilde\varphi^{tr}x}\over{1+\widetilde\varphi^{tr}x}}\star(\mu-\nu)\end{eqnarray*} are well defined $\mathbb F$-local martingales (see Theorem \ref{tmgviacharacteristics} for the conditions that guarantee their existence), and \begin{eqnarray*} && {{{\Delta m}{\widetilde\Gamma}}\over{G_{-}}}=\Delta\left({\widetilde\Gamma}G_{-}^{-1}\bigcdot m\right),\ -{{{\widetilde\Gamma}g_m(\Delta S)\widetilde\varphi^{tr}\Delta S}\over{1+\widetilde\varphi^{tr}\Delta S}}=\Delta\left(-{{{\widetilde\Gamma}g_m\widetilde\varphi^{tr}x}\over{1+\widetilde\varphi^{tr}x}}\star\mu\right),\\ &&-{{{\widetilde\Gamma}f_m(\Delta S)\widetilde\varphi^{tr}\Delta S}\over{1+\widetilde\varphi^{tr}\Delta S}}+{\widetilde\Gamma}-1=\Delta\left(-{{{\widetilde\Gamma}f_m\widetilde\varphi^{tr}x}\over{1+\widetilde\varphi^{tr}x}}\star(\mu-\nu)\right).\end{eqnarray*} Thus, by combining these facts with $\left({\widetilde\Gamma}G_{-}^{-1}\bigcdot m\right)^c=G_{-}^{-1}\bigcdot m^c$, the first equality in (\ref{fromKG2KFbis}) and (\ref{DeltaKF}), we deduce that ${\widetilde K}^{\mathbb F}$ is given by (\ref{K-F}), and the proof of the theorem is completed.
\qed \subsubsection{Proof of Proposition \ref{Corollary5.3}.} Remark that $\tau$ is non-correlated to $S$ if and only if \begin{eqnarray}\label{Beta0F1} c\beta=0\quad dP\otimes dA\mbox{-a.e.}\quad\mbox{and}\quad f_m(t,x)=1\quad P\otimes dA\otimes F\mbox{-a.e.}\end{eqnarray} 1) Here we prove assertion (a). Thanks to Theorem \ref{LemmaCrucial} and to the assumption ${\cal D}(S,\mathbb F)\not=\emptyset$, the two num\'eraire portfolios with rates $\widetilde\varphi$ and $\widetilde\lambda$, for $(S^{\tau},\mathbb G)$ and $(S, \mathbb F)$ respectively, solve the following inequality-equation \begin{eqnarray}\label{OpmisationInequality} (\theta-{\widetilde\theta})^{tr}(b-c{\widetilde\theta})+\int (\theta-\widetilde\theta)^{tr}({{x}\over{1+{\widetilde\theta}^{tr}x}}-h(x))F(dx)\leq 0,\ \theta\in {\cal L}(S,\mathbb F). \hskip 0.75cm\end{eqnarray} Thus, in virtue of the lemma below, whose proof is relegated to Appendix \ref{Section4Corollary5.3}, \begin{lemma}\label{Lemma4Uniqueness}There is at most one $\widetilde\theta\in {\cal L}(S,\mathbb F)$ satisfying (\ref{OpmisationInequality}). Herein, $\psi$ and $\varphi$ --elements of ${\cal L}(S,\mathbb F)$-- are said to be equal if $\psi(1+\vert\psi\vert)^{-1}\bigcdot S=\varphi(1+\vert\varphi\vert)^{-1}\bigcdot S$. \end{lemma} we deduce that both portfolios coincide on $\Lbrack0,\tau\Lbrack$.
This ends the proof of assertion (a) of the proposition.\\ 2) In virtue of assertion (a) of the proposition, (\ref{Beta0F1}), and Theorem \ref{optimalportfoliogen}-(c), we claim that the log-optimal portfolio for $(S^{\tau},\mathbb G)$ exists if and only if the num\'eraire portfolio rate $\widetilde\lambda$ satisfies \begin{eqnarray*} E\left[G_{-}\bigcdot {\widetilde V}^{\mathbb F}_T+G_{-}{\widetilde\lambda}^{tr}c{\widetilde\lambda}\bigcdot A_T+G_{-}{\cal K}_{log}({\widetilde\lambda}^{tr}x)\star\nu_T\right]<+\infty, \end{eqnarray*} where $ {\widetilde V}^{\mathbb F}$ is given by \begin{eqnarray*} {\widetilde V}^{\mathbb F}:=\left({\widetilde\lambda}^{tr}b-{\widetilde\lambda}^{tr}c\widetilde\lambda\right)\bigcdot A +\left({{{\widetilde\lambda}^{tr}x}\over{1 + \widetilde\lambda^{tr} x }} -{\widetilde\lambda}^{tr}h\right)\star\nu.\end{eqnarray*} Therefore, due to $G_{-}\leq 1$, the above condition is implied by the existence of log-optimal portfolio for $(S, \mathbb F)$, see Theorem \ref{LemmaCrucial} applied to the model $(S,\mathbb F)$. This ends the proof of assertion (b).\\ 3) Suppose that $S$ is continuous. Thus, both inequalities-equations for $\widetilde\varphi$ and $\widetilde\lambda$, given by (\ref{Cond4ptimalityG}) and (\ref{C6forX}) applied to $(S,\mathbb F)$ respectively, become \begin{eqnarray*} (\theta-\widetilde\varphi)^{tr}(b+c(\beta-\widetilde\varphi))\leq 0,\quad (\theta-\widetilde\lambda)^{tr}(b-c\widetilde\lambda)\leq 0,\quad\forall\ \theta\in {\cal L}(S,\mathbb F).\end{eqnarray*}By combining this with the fact that any $\mathbb F$-predictable process belongs to ${\cal L}(S,\mathbb F)$, we conclude that $\widetilde\varphi=\beta+\widetilde\lambda$, and the proof of assertion (c.1) is completed. To prove assertion (c.2), it is enough to remark that assertion (c.1) implies that $\widetilde V\equiv 0$ and hence (\ref{Cond4integrabilityG}) becomes $E\left[\int_0^T G_{s-}{\widetilde\varphi}^{tr}_sc_s{\widetilde\varphi}_s dA_s\right]<+\infty$. 
Assertion (c.3) follows immediately from combining assertions (c.1) and (c.2). This ends the proof of the proposition.\qed \subsubsection{Proof of Theorem \ref{Difference4u}} By direct calculations (see also \cite{ChoulliStricker2007} for similar details), for any $\mathbb H$-local martingale $X$ such that $\Delta X>-1$, we have \begin{eqnarray} -\ln({\cal E}(X))=-X +H^{(0)}(X, \mathbb H).\label{Hellinger4X}\end{eqnarray} {\bf Part 1.} Here we prove the equality (\ref{Delta(S,tau)2}). To this end, by applying Theorem \ref{LemmaCrucial} to the model $(S,\mathbb F)$ and using Proposition \ref{Hzero4Log(Z)}, we derive \begin{eqnarray} &&u_T(S, \mathbb F)=E\left[\ln({\cal E}_T(\widetilde\lambda\bigcdot S))\right]=E\left[-\ln({\cal E}_T(-{\widetilde V}^{\mathbb F}))\right]+E\left[-\ln({\cal E}_T({\widetilde L}^{\mathbb F}))\right]\nonumber\\ &&=E\left[{\widetilde V}^{\mathbb F}_T+\sum_{0<s\leq T}(-\Delta{\widetilde V}^{\mathbb F}_s-\ln(1-\Delta{\widetilde V}^{\mathbb F}_s))+H^{(0)}_T({\widetilde L}^{\mathbb F}, \mathbb F)\right]\nonumber\\ &&=E\left[{\widetilde{\cal H}}_T({\mathbb F})\right]=E\left[({\widetilde G}\bigcdot {\widetilde{\cal H}}({\mathbb F}))_T\right]+E\left[((1-\widetilde G)\bigcdot {\widetilde{\cal H}}({\mathbb F}))_T\right]\label{Htilde4F1}\\ &&=E\left[{\widetilde\lambda}^{tr}(b-{1\over{2}}c\widetilde\lambda)\bigcdot A_T+\left(\ln(1+{\widetilde\lambda}^{tr}x)-{\widetilde\lambda}^{tr}h\right)\star\nu_T\right].\label{Htilde4F2} \end{eqnarray} Thanks to the duality (\ref{OptimalDualLoggeneral}) of Theorem \ref{optimalportfoliogen} and Proposition \ref{Hzero4Log(Z)}, we derive \begin{eqnarray} &&u_T(S^{\tau},\mathbb G)=E\left[\ln({\cal E}_T(\widetilde\varphi\bigcdot S^{\tau}))\right]=E\left[-\ln({\widetilde Z}^{\mathbb G}_T)\right]\nonumber\\ &&=E\left[-\ln({\cal E}_{\tau\wedge T}(-\widetilde V))\right]+E\left[-\ln({\cal E}_T({\widetilde K}^{\mathbb G}))\right]\nonumber\\ &&=E\left[{\widetilde V}_{\tau\wedge T}+\sum_{0<s\leq
T\wedge\tau}(-\Delta{\widetilde V}_s-\ln(1-\Delta{\widetilde V}_s))+H^{(0)}_T({\widetilde K}^{\mathbb G}, \mathbb G)\right]\label{Equa1theorem5.1}\hskip 0.5cm \end{eqnarray} Remark that, in virtue of (\ref{K-G}) and (\ref{GammaTilde}), we get \begin{eqnarray*} \Delta{\widetilde K}^{\mathbb G}=\left({{{\widetilde\Gamma}}\over{1+{\widetilde\varphi}^{tr}\Delta S}}-1\right)I_{\Lbrack0,\tau\Lbrack}, \end{eqnarray*} and hence by combining this with Definition \ref{Hellinger}, we derive \begin{eqnarray*} &&H^{(0)}({\widetilde K}^{\mathbb G}, \mathbb G)\\ &&={1\over{2}}{\widetilde\varphi}^{tr}c{\widetilde\varphi}\bigcdot A^{\tau}+\sum\left({{{\widetilde\Gamma}}\over{1+{\widetilde\varphi}^{tr}\Delta S}}-1-\ln\left({{{\widetilde\Gamma}}\over{1+{\widetilde\varphi}^{tr}\Delta S}}\right)\right)I_{\Lbrack0,\tau\Lbrack}\\ &&={1\over{2}}{\widetilde\varphi}^{tr}c{\widetilde\varphi}\bigcdot A^{\tau}+\sum\left({\widetilde\Gamma}-1-\ln({\widetilde\Gamma})\right)I_{\Lbrack0,\tau\Lbrack}+\left({{-{\widetilde\Gamma}{\widetilde\varphi}^{tr}x}\over{1+{\widetilde\varphi}^{tr}x}}+\ln(1+{\widetilde\varphi}^{tr}x)\right)\star\mu^{\tau}. 
\end{eqnarray*} By inserting this in (\ref{Equa1theorem5.1}) and using afterwards (\ref{GammaTilde}), $1-\Delta{\widetilde V}=1/{\widetilde\Gamma}$ and the fact that $(1-{\widetilde\Gamma}){\widetilde\varphi}^{tr}x(1+ {\widetilde\varphi}^{tr}x)^{-1}f_m\star\nu=-\sum(1-\widetilde\Gamma)^2/{\widetilde\Gamma}$, we obtain \begin{eqnarray} &&u_T(S^{\tau}, \mathbb G)\nonumber\\ &&=E\left[G_{-}\bigcdot \left({\widetilde V}+\sum_{0<s\leq\cdot}(-\Delta{\widetilde V}_s-\ln(1-\Delta{\widetilde V}_s))+\sum_{0<s\leq\cdot}({\widetilde\Gamma}_s-1-\ln({\widetilde\Gamma}_s))\right)_T\right]\nonumber\\ &&+E\left[{{G_{-}}\over{2}}{\widetilde\varphi}^{tr}c{\widetilde\varphi}\bigcdot A_T\right]+E\left[G_{-}\left({{-{\widetilde\Gamma}{\widetilde\varphi}^{tr}x}\over{1+{\widetilde\varphi}^{tr}x}}+\ln(1+{\widetilde\varphi}^{tr}x)\right)f_m\star\nu_T\right]\nonumber\\ &&=E\left[(G_{-}\bigcdot {\widetilde V})_T+{{G_{-}}\over{2}}{\widetilde\varphi}^{tr}c{\widetilde\varphi}\bigcdot A_T+G_{-}\left({{-{\widetilde\varphi}^{tr}x}\over{1+{\widetilde\varphi}^{tr}x}}+\ln(1+{\widetilde\varphi}^{tr}x)\right)f_m\star\nu_T\right]\nonumber\\ &&=E\left[G_{-}{\widetilde\varphi}^{tr}(b+c(\beta- {{\widetilde\varphi}\over{2}}))\bigcdot A_T+G_{-}\left(f_m\ln(1+{\widetilde\varphi}^{tr}x)-{\widetilde\varphi}^{tr}h\right)\star\nu_T\right] \label{Equa2theorem5.1}\end{eqnarray} The last equality follows from using (\ref{VtildeG}). Denote by ${\widetilde\Phi}$ the functional defined on ${\cal L}(S,\mathbb F)\times{\cal L}(S,\mathbb F)$ as follows. 
\begin{eqnarray}\label{PhiTilde} {\widetilde\Phi}(\lambda,\varphi)&&:=(\varphi-\lambda)^{tr}b+(\varphi-\lambda)^{tr}c\beta-{1\over{2}}\varphi^{tr}c\varphi+{1\over{2}}\lambda^{tr}c\lambda\\ &&+\int\left(f_m(x)\ln\left({{1+\varphi^{tr}x}\over{1+\lambda^{tr}x}}\right)-(\varphi-\lambda)^{tr}h(x)\right)F(dx).\nonumber\end{eqnarray} Then by using the convexity of both functions of $\varphi$, $\varphi^{tr}c\varphi$ and $-\ln(1+{\varphi}^{tr}x)$ and (\ref{Cond4ptimalityG}), we deduce the nonnegativity of the process ${\widetilde {\cal R}}$, i.e. \begin{eqnarray}\label{Rpositive} &&{\widetilde {\cal R}}={\widetilde\Phi}(\widetilde\varphi,\widetilde\lambda)\geq 0,\end{eqnarray} and in virtue of (\ref{Equa2theorem5.1}), (\ref{Htilde4F1}) and (\ref{Htilde4F2}), we get \begin{eqnarray*} \Delta_T(S, \tau, \mathbb F)&&= u_T(S^{\tau}, \mathbb G)-u_T(S, \mathbb F)\\ &&=-E\left[(1-\widetilde G)\bigcdot {\widetilde{\cal H}}(\mathbb F))_T\right]+E\left[G_{-}{\widetilde\Phi}(\widetilde\varphi,\widetilde\lambda)\bigcdot A_T\right]\\ &&+E\left[G_{-}{{(f_m-1)({\widetilde\lambda}^{tr}x)}\over{1+{\widetilde\lambda}^{tr}x}}\star\nu_T+G_{-}{\widetilde\lambda}^{tr}c\beta\bigcdot A_T\right].\nonumber\end{eqnarray*} Thus, by combining this latter equality with \begin{eqnarray*} E\left[\langle {\widetilde L}^{\mathbb F}, m\rangle^{\mathbb F}_T\right]=-E\left[G_{-}{{(f_m-1)({\widetilde\lambda}^{tr}x)}\over{1+{\widetilde\lambda}^{tr}x}}\star\nu_T+G_{-}{\widetilde\lambda}^{tr}c\beta\bigcdot A_T\right] \end{eqnarray*} which follows from direct calculations, we deduce that (\ref{Delta(S,tau)2}) holds.\\ {\bf Part 2.} Here we prove the equality (\ref{Delta(S,tau)1}). 
To this end, we apply (\ref{Hellinger4X}) to $({\widetilde K}^{\mathbb F}, \mathbb F)$ and $(G_{-}^{-1}\bigcdot m, \mathbb F)$, and we use the notation (\ref{Htilde(G)}) afterwards to get \begin{eqnarray} &&-\ln\left({\cal E}(-{\widetilde V}^{\tau})\right)-\ln\left({\cal E}({\widetilde K}^{\mathbb F})^{\tau}\right)+\ln\left({\cal E}(G_{-}^{-1}\bigcdot m)^{\tau}\right)\nonumber\\ &&={\widetilde V}^{\tau}+\sum\left(-\Delta{\widetilde V}^{\tau}-\ln(1-\Delta{\widetilde V}^{\tau})\right)-({\widetilde K}^{\mathbb F})^{\tau} +H^{(0)}({\widetilde K}^{\mathbb F}, \mathbb F)^{\tau}\nonumber\\ &&+{1\over{G_{-}}}\bigcdot m^{\tau}-H^{(0)}\left({1\over{G_{-}}}\bigcdot m, \mathbb F\right)^{\tau}\nonumber\\ &&={\widetilde{\cal H}}(\mathbb G)^{\tau}-({\widetilde K}^{\mathbb F})^{\tau}+{1\over{G_{-}}}\bigcdot m^{\tau}-H^{(0)}\left({1\over{G_{-}}}\bigcdot m, \mathbb F\right)^{\tau}.\label{Log(KG)} \end{eqnarray} Thus, by taking the expectation on both sides and using (\ref{OptimalDualLoggeneral}), we derive \begin{eqnarray} &&u_T(S^{\tau}, \mathbb G)=E\left[\ln({\cal E}_T(\widetilde\varphi\bigcdot S^{\tau}))\right]=E\left[-\ln({\cal E}_{T\wedge\tau}(-\widetilde V))\right]+E\left[-\ln({\cal E}_T({\widetilde K}^{\mathbb G}))\right]\nonumber\\ &&=E\left[({\widetilde G}\bigcdot {\widetilde{\cal H}}({\mathbb G}))_T-\langle{\widetilde K}^{\mathbb F}, m\rangle^{\mathbb F}_T+{1\over{G_{-}}}\bigcdot m_{\tau\wedge T}-H^{(0)}\left({1\over{G_{-}}}\bigcdot m, \mathbb F\right)_{T\wedge\tau}\right].\label{Equation5.400} \end{eqnarray} Therefore, by using (\ref{HellingerE}) and inserting (\ref{equa401}) in (\ref{Equation5.400}), we obtain \begin{eqnarray*}\label{Equa546} u_T(S^{\tau}, \mathbb G)= E\left[({\widetilde G}\bigcdot {\widetilde{\cal H}}({\mathbb G}))_T-\langle{\widetilde K}^{\mathbb F}, m\rangle^{\mathbb F}_T\right]+ {\cal H}_{\mathbb G}\left(P\big|{\widetilde Q}_T\right). \end{eqnarray*} Thus, by combining this with (\ref{Htilde4F1}), (\ref{Delta(S,tau)1}) follows immediately.
Hence, in virtue of (\ref{Rpositive}), the proof of the theorem will be complete as soon as we prove \begin{eqnarray}\label{Htilde(F,G)} {\cal W}(\mathbb F, \mathbb G):=\left({\widetilde G}\bigcdot \left({\widetilde{\cal H}}(\mathbb F)-{\widetilde{\cal H}}(\mathbb G)\right)\right)^{p,\mathbb F}+\langle{\widetilde K}^{\mathbb F}-{\widetilde L}^{\mathbb F}, m\rangle^{\mathbb F}\in {\cal A}^+(\mathbb F). \end{eqnarray} On the one hand, similar calculations and arguments as in (\ref{Log(KG)}) applied to \begin{eqnarray}\label{ZbarG} {\overline Z}^{\mathbb G}:={\cal E}({\widetilde L}^{\mathbb F})^{\tau}{\cal E}(-{\widetilde V}^{\mathbb F})^{\tau}/{\cal E}(G_{-}^{-1}\bigcdot m)^{\tau},\end{eqnarray} lead to \begin{eqnarray*} -\ln({\overline Z}^{\mathbb G})={\widetilde{\cal H}}(\mathbb F)^{\tau}-({\widetilde L}^{\mathbb F})^{\tau}+{1\over{G_{-}}}\bigcdot m^{\tau}-H^{(0)}\left({1\over{G_{-}}}\bigcdot m, \mathbb F\right)^{\tau}.\end{eqnarray*} Then by combining this equality with (\ref{Log(KG)}), we obtain \begin{eqnarray*} -\ln\left({\widetilde Z}^{\mathbb G}/{\overline Z}^{\mathbb G}\right)={\widetilde{\cal H}}(\mathbb G)^{\tau}-{\widetilde{\cal H}}(\mathbb F)^{\tau}-({\widetilde K}^{\mathbb F})^{\tau}+({\widetilde L}^{\mathbb F})^{\tau}. 
\end{eqnarray*} On the other hand, by using Jensen's inequality and the facts that $1/{\widetilde Z}^{\mathbb G}={\cal E}(\widetilde\varphi\bigcdot S^{\tau})$, ${\overline Z}^{\mathbb G}\in {\cal D}(S^{\tau},\mathbb G)$ and both processes ${\widetilde{\cal H}}(\mathbb G)$ and ${\widetilde{\cal H}}(\mathbb F)$ are $\mathbb F$-optional, we deduce that $-\ln\left({\widetilde Z}^{\mathbb G}/{\overline Z}^{\mathbb G}\right)$ is a $\mathbb G$-submartingale, and hence ${\cal W}(\mathbb F, \mathbb G)$ is nondecreasing and $\mathbb F$-predictable. This ends the proof of the theorem.\qed \subsubsection{Proof of Theorem \ref{Proposition4DifferenceU}} This proof has four parts, where the assertions are proved respectively.\\ 1) Here we prove assertion (a). It is clear that the ``correlation-risk" is null if and only if the process ${\cal W}(\mathbb F, \mathbb G)$ defined in (\ref{Htilde(F,G)}) is null, or equivalently the two deflators $\widetilde Z^{\mathbb G}$ and ${\overline Z}^{\mathbb G}$ defined in (\ref{ZbarG}) are equal. This is obviously equivalent, due to the uniqueness of the Doob-Meyer decomposition in $\mathbb G$, to \begin{eqnarray*} ({\widetilde V}^{\mathbb F})^{\tau}={\widetilde V}^{\tau}\quad\mbox{and}\quad {\cal E}({\widetilde K}^{\mathbb F})^{\tau}= {\cal E}({\widetilde L}^{\mathbb F})^{\tau}. \end{eqnarray*} Hence, thanks to the assumption $G>0$, the above equalities are equivalent to \begin{eqnarray}\label{DoobG} {\widetilde V}^{\mathbb F}={\widetilde V}\quad\mbox{and}\quad{\widetilde K}^{\mathbb F}={\widetilde L}^{\mathbb F}.
\end{eqnarray} Then, in virtue of the uniqueness of Jacod's decomposition of Theorem \ref{tmgviacharacteristics}, we conclude that the ``correlation-risk" is null if and only if \begin{eqnarray}\label{JacodG} c(\beta-\widetilde\varphi)=-c{\widetilde\lambda},\ m^{\perp}\equiv 0,\ g_m=0\ \mbox{and}\ {{f_m}\over{1+{\widetilde\varphi}^{tr}x}}={1\over{1+{\widetilde\lambda}^{tr}x}}\ M^P_{\mu}-a.e.\hskip 0.75cm \end{eqnarray} Therefore, in virtue of this latter equivalence, we deduce that the ``correlation-risk" is null and $\widetilde\varphi=\widetilde\lambda$ if and only if \begin{eqnarray*} c\beta\equiv 0,\ m^{\perp}\equiv 0,\ g_m=0\ \mbox{and}\ f_m=1\ M^P_{\mu}-a.e.\end{eqnarray*} This is equivalent to $m\equiv m_0$, i.e. $\tau$ is a pseudo-stopping time. This proves the first statement in assertion (a). Furthermore, due to $m\equiv m_0$, we get $\langle{\widetilde L}^{\mathbb F}, m\rangle^{\mathbb F}\equiv 0$ and hence the NP$(\mathbb F)$-correlation factor is null. Thus, by inserting all these in (\ref{Delta(S,tau)1}), we get (\ref{Case1}), and the proof of assertion (a) is completed.\\ 2) This part addresses assertion (b). Suppose that $\tau$ is non-correlated to $S$, which is equivalent to $c\beta=0$ $P\otimes A$-a.e. and $f_m=1$ $P\otimes A\otimes F$-a.e. Then thanks to Lemma \ref{Lemma4Uniqueness}, we deduce that $\widetilde\lambda=\widetilde\varphi$, and hence we derive \begin{eqnarray*} \ \langle{\widetilde L}^{\mathbb F}, m\rangle^{\mathbb F}\equiv 0\quad \mbox{and}\quad {\widetilde {\cal R}}\equiv 0.\end{eqnarray*} This implies that both the ``NP($\mathbb F)$-correlation" and the ``num\'eraire-change-premium" factors are null, and hence the ``information-premium" coincides with the ``correlation-risk". By inserting all these in (\ref{Delta(S,tau)1}), we obtain again (\ref{Case1}) and the proof of assertion (b) is completed.\\ 3) Here we prove assertions (c) and (d).
On the one hand, when $\widetilde\lambda$ coincides with $\widetilde\varphi$, then $\widetilde{\cal R}\equiv 0$ follows from (\ref{NumeraireChanegPremuim}). On the other hand, using Taylor's expansion and (\ref{Cond4ptimalityG}) for $\theta=\widetilde\lambda$, we derive \begin{eqnarray*} {\widetilde{\cal R}}\geq (\widetilde\lambda-\widetilde\varphi)^{tr}c(\widetilde\lambda-\widetilde\varphi)+\int{{((\widetilde\lambda-\widetilde\varphi)^{tr}x)^2}\over{\max((1+\widetilde\lambda^{tr}x)^2, (1+\widetilde\varphi^{tr}x)^2)}}F(dx). \end{eqnarray*} Therefore, we deduce that $ {\widetilde{\cal R}}$ is null iff $c\widetilde\lambda=c\widetilde\varphi$ $P\otimes A$-a.e. and $\widetilde\lambda^{tr}x=\widetilde\varphi^{tr}x$ $P\otimes A\otimes F$-a.e. Therefore, assertion (c) follows from combining this latter claim and the fact that, due to $G>0$ and in virtue of (\ref{Delta(S,tau)2}), \begin{eqnarray}\label{Rzero} \mbox{the ``num\'eraire-change-premium" is null iff}\quad {\widetilde{\cal R}}\equiv0\quad P\otimes A\mbox{-a.e.}\end{eqnarray} The rest of this part proves assertion (d). Suppose $S\in {\cal M}_{loc}(\mathbb F)$. Then we get $\widetilde\lambda\equiv 0$ and ${\widetilde L}^{\mathbb F}\equiv 0$. As a result, we deduce that $u_T(S, \mathbb F)=0$ and $\Delta_T(S, \tau, \mathbb F)=u_T(S^{\tau}, \mathbb G)\geq 0$. This ends the proof of assertion (d). \\ 4) Suppose $S$ is continuous.
Then thanks to Proposition \ref{Corollary5.3}-(c), we deduce the following equalities \begin{eqnarray}\label{equalities00} {\widetilde K}^{\mathbb F}=(\beta-{\widetilde\varphi})\bigcdot S^c+m^{\perp}={\widetilde L}^{\mathbb F}+m^{\perp},\ {\widetilde V}^{\mathbb F}={\widetilde V}=0.\end{eqnarray} Therefore, direct calculations on Hellinger processes, see also \cite{ChoulliStricker2005,ChoulliStricker2006,ChoulliStricker2007} for more details about this fact, we derive \begin{eqnarray*} &&H^{(0)}({\widetilde K}^{\mathbb F}, P)=H^{(0)}({\widetilde L}^{\mathbb F}, P)+H^{(0)}(m^{\perp}, P),\ {\widetilde G}=G_{-}(1+\Delta m^{\perp}),\\ &&-{\widetilde G}\bigcdot H^{(0)}(m^{\perp}, P)=G_{-}\bigcdot H^{(E)}(m^{\perp}, P)-G_{-}\bigcdot [m^{\perp}, m^{\perp}]\\ &&h^{(E)}(G_{-}^{-1}\bigcdot m, P)={1\over{2}}\beta^{tr}c\beta\bigcdot A+h^{(E)}(m^{\perp}, P) \end{eqnarray*} Thus, by combining these equalities with (\ref{Delta(S,tau)1}) and (\ref {equalities00}), assertion (e) follows immediately and the proof of the theorem is completed. \qed \subsection{The case when $(S,\mathbb F)$ is a jump-diffusion model}\label{section4JumpDifusionCase} This subsection illustrates the main results of Section \ref{section3} and Subsection \ref{section4} on the case where the initial model $(S,\mathbb F)$ is a one-dimensional jump-diffusion model. Precisely, we suppose that a standard Brownian motion $W$ and a Poisson process $N$ with intensity $\lambda>0$ are defined on the probability space $(\Omega, {\cal F}, P)$, the filtration $\mathbb F$ is the completed and right continuous filtration generated by $W$ and $N$. 
Consider a fixed horizon $T\in (0,+\infty)$, and suppose $S$ satisfies \begin{equation}\label{SPoisson2} S_t:=S_0 {\cal E} (X)_t,\ X_t: =(\sigma\bigcdot W)_t+(\zeta\bigcdot {N}^{\mathbb F})_t + \int_{0}^{t} \mu_s ds,\ {N_t}^{\mathbb F}:=N_t-\lambda t, \end{equation} and there exists a constant $\delta\in(0,+\infty)$ such that $\mu$, $\sigma$ and $\zeta$ are bounded $\mathbb F$-predictable processes satisfying \begin{eqnarray}\label{parameters2} \zeta>-1,\quad\min( \sigma, \vert\zeta\vert)\geq \delta,\ P\otimes dt\mbox{-a.e.} \end{eqnarray} Since $m$ is an $\mathbb F$-martingale, there exist two $\mathbb F$-predictable processes $\varphi^{(m)}$ and $\psi^{(m)}$ such that $\int_0^T \left((\varphi^{(m)}_s)^2 +\vert \psi^{(m)}_s\vert\right) ds<+\infty\ P\mbox{-a.s.}$, and \begin{eqnarray}\label{model4tau2} G_{-}^{-1}\bigcdot m=\varphi^{(m)}\bigcdot W+(\psi^{(m)}-1)\bigcdot N^{\mathbb F}.\end{eqnarray} \begin{theorem}\label{OptimalDeflatorLogJD} Suppose that $G>0$, that $S$ is given by (\ref{SPoisson2})-(\ref{parameters2}), and consider \begin{equation}\label{thetaTilde} \widetilde{\theta}:= \displaystyle\frac{\xi + \mathrm{sign}(\zeta) \sqrt{\xi^2 + 4 \lambda \psi^{(m)}} }{2\sigma} - \frac{1}{\zeta},\ \mbox{where}\ \xi:= \frac{\mu-\lambda\zeta}{\sigma} + \varphi^{(m)}+ \frac{\sigma}{\zeta}. \end{equation} Then $ \widetilde{\theta}\in{\cal L}(S, \mathbb F)\cap L(S, \mathbb F)$ is the num\'eraire portfolio rate for $(S^{\tau}, \mathbb G)$, and the following assertions are equivalent.\\ {\rm{(a)}} The random time $\tau$, parametrized in $\mathbb F$ by $(\varphi^{(m)}, \psi^{(m)},G_{-})$, satisfies \begin{eqnarray}\label{ConditionHellinger} E\left[\int_0^T G_{s-}\left[(\varphi^{(m)}_s)^2+\lambda\psi^{(m)}_s\ln(\psi^{(m)}_s)-\lambda\psi^{(m)}_s+\lambda\right]ds\right]<+\infty.\end{eqnarray} {\rm{(b)}} The solution to (\ref{dualproblem}) exists and is given by \begin{equation}\label{ZtildeG} {\widetilde Z}^{\mathbb G}:={\cal E}({\widetilde
K}^{\mathbb G}),\ {\widetilde K}^{\mathbb G}:= - \sigma\widetilde\theta\bigcdot {\cal T}({W}) -\frac{\psi^{(m)}\zeta{\widetilde\theta}}{1+{\widetilde\theta}\zeta} \bigcdot {\cal T}({N^{\mathbb F}}).\end{equation} {\rm{(c)}} $\widetilde{\theta}$ is the log-optimal portfolio rate for the model $(S^{T\wedge\tau},\mathbb G)$. \\ \end{theorem} \begin{proof} For the model (\ref{SPoisson2})-(\ref{parameters2}), the predictable characteristics of Section 3 can be derived as follows. Let $\delta_{a}(dx) $ be the Dirac mass at the point $a$. Then in this case we have $d=1$ and \begin{eqnarray*} &&\mu (dt,dx) = \delta_{\zeta_tS_{t-}}(dx)dN_t, \ \nu (dt , dx)= \delta_{\zeta_tS_{t-}}(dx) \lambda dt,\ F_t(dx) = \lambda \delta_{\zeta_tS_{t-}}(dx), \\ &&A_t = t,\ c= (S_{-}\sigma)^2,\ b= (\mu - \lambda \zeta I_{\{ |\zeta|S_{-} > 1 \}}) S_{-}, \ (\beta, g_m,m^\perp)=({{\varphi^{(m)}}\over{S_{-}\sigma}},0,0).\end{eqnarray*} As a result, the set \begin{eqnarray*} {\cal L}_{(\omega, t)}(S,\mathbb F)&&:=\{\varphi\in\mathbb R\ \big|\ \varphi x>-1\ F_{(\omega, t)}(dx)-a.e.\}=\{\varphi\in\mathbb R\ \big|\ \varphi S_{-}\zeta>-1\}\\ &&=\left(-{1/(S_{-}\zeta)^+},{1/(S_{-}\zeta)^-}\right)\end{eqnarray*} is an open set in $\mathbb R$ (with the convention $1/0^+=+\infty$). Then the condition (\ref{Cond4ptimalityG}), characterizing the optimal portfolio $\widetilde\varphi$, becomes an equation as follows. 
\begin{eqnarray} 0 && = \mu-\lambda\zeta I_{\{\vert\zeta\vert>{1/S_{-}}\}}+S_{-}\sigma^2({{\varphi^{(m)}}\over{S_{-}\sigma}}-\theta)+\lambda{{\psi^{(m)}\zeta}\over{1+S_{-}\theta\zeta}}-\lambda\zeta I_{\{\vert\zeta\vert\leq {1/S_{-}}\}} \nonumber\\ && = \mu -\lambda \zeta+\sigma\varphi^{(m)} - S_{-}\sigma^2\theta + \frac{\psi^{(m)} \lambda\zeta}{1+\theta S_{-}\zeta} .\label{mainequation4levy}\end{eqnarray} By putting $\varphi:=1+\theta S_{-}\zeta>0$, the above equation is equivalent to $$0=- {{\sigma^2}\over{\zeta}}\varphi^2 +[\mu -\lambda \zeta+\sigma\varphi^{(m)}+{{\sigma^2}\over{\zeta}} ]\varphi+ \psi^{(m)} \lambda\zeta,$$ which, since $\psi^{(m)}>0$, always has a unique positive solution given by $$\widetilde\varphi:={{\Gamma\zeta+\vert\zeta\vert\sqrt{\Gamma^2+4\sigma^2\lambda\psi^{(m)}}}\over{2\sigma^2}},\quad \Gamma:=\mu -\lambda \zeta+\sigma\varphi^{(m)}+{{\sigma^2}\over{\zeta}}.$$ Hence, we deduce that $\widetilde\lambda:={\widetilde{\theta}}/S_{-}$, where $\widetilde\theta$ is given by (\ref{thetaTilde}), coincides with $(\widetilde{\varphi}-1)/(S_{-}\zeta)$, satisfies $ 1 + \zeta\widetilde{\theta}>0$, and hence it is the unique solution to (\ref{mainequation4levy}). It is also clear that $\widetilde{\theta}$ is $S$-integrable (or equivalently $\widetilde\lambda$ is $S$-integrable) due to the assumptions in (\ref{parameters2})-(\ref{model4tau2}). As a result, the optimal wealth process is ${\cal E}(\widetilde\lambda\bigcdot S^{\tau})={\cal E}(\widetilde\theta\bigcdot X^{\tau})$ and assertions (a) and (b) follow immediately using the above analysis and Theorem \ref{optimalportfoliogen}. \qed\end{proof} \vspace*{0.5cm} \centerline{\textbf{APPENDIX}} \appendix \normalsize \section{Some $\mathbb G$-properties versus those in $\mathbb F$} Some results in the following lemma sound new to us. \begin{lemma}\label{PortfolioGtoF} Let $A$ be a nondecreasing and $\mathbb F$-predictable process, and suppose that $G>0$.
Then the following assertions hold.\\ {\rm{(a)}} For any $\mathbb G$-predictable process $\varphi^{\mathbb G}$, there exists an $\mathbb F$-predictable process $\varphi^{\mathbb F}$ such that $\varphi^{\mathbb G}=\varphi^{\mathbb F}$ on ${\Lbrack0,\tau\Lbrack}$. Furthermore, if $\varphi^{\mathbb G}>0$ (respectively $\varphi^{\mathbb G}\leq 1$), then $\varphi^{\mathbb F}>0$ (respectively $\varphi^{\mathbb F}\leq 1$). \\ {\rm{(b)}} For any $\theta\in{\cal L}(S^{\tau},\mathbb G)$, there exists ${\varphi}\in {\cal L}(S,\mathbb F)$ such that $ {\varphi}={\theta} $ on $\Lbrack0,\tau\Lbrack$.\\ {\rm{(c)}} For any $\theta\in L(S^{\tau},\mathbb G)$, there exists ${\varphi}\in L(S,\mathbb F)$ such that $ {\varphi}={\theta} $ on $\Lbrack0,\tau\Lbrack$.\\ {\rm{(d)}} Let $v$ be an $\mathbb F$-predictable process. Then $v I_{\Lbrack0,\tau\Lbrack}\leq 0$ $P\otimes A$-a.e. if and only if $v\leq 0$ $P\otimes A$-a.e..\\ {\rm{(e)}} Let $\varphi$ be a nonnegative and $\mathbb F$-predictable process. Then $\varphi<+\infty$ $P\otimes A$-a.e. on $\Lbrack0,\tau\Lbrack$ if and only if $\varphi<+\infty$ $P\otimes A$-a.e.\\ {\rm{(f)}} Let $V$ be an $\mathbb F$-predictable and nondecreasing process that takes values in $[0,+\infty]$. If $V^{\tau}$ is $\mathbb G$-locally integrable, then $V$ is $\mathbb F$-locally integrable. \end{lemma} \begin{proof} Assertion (a) is a particular case of \cite[Lemma B.1]{ACDJ1} and assertion (b) can be found in \cite[Lemma A.1]{ChoulliYansori1}, while assertions (e) and (f) follow immediately from \cite[Proposition B.2-(c)-(f)]{ACDJ1}. Thus the rest of this proof focuses on proving assertions (c) and (d).\\ {\rm{(c)}} Let $\theta\in{ L}(S^{\tau},\mathbb G)$. 
Then on the one hand, due to \cite[Theorem 1.16, or Remark 2.2-(h)]{Stricker}, this is equivalent to the set \begin{eqnarray*}\label{Lzero1} {\cal X}^{\mathbb G}:=\left\{ \sup_{t\geq 0}\vert (H\theta I_{\{\vert\theta\vert\leq n\}}\bigcdot S^{\tau})_t\vert\ \Big|\ H\ {\mathbb G}-\mbox{predictable},\ \vert H\vert\leq 1, n\geq 1\right\}\end{eqnarray*} being bounded in probability. On the other hand, by a direct application of assertion (a), we deduce that there exists an $\mathbb F$-predictable process $\varphi$ such that $\theta=\varphi$ on $\Lbrack0,\tau\Lbrack$, and the $\mathbb G$-predictable process $H$ in ${\cal X}^{\mathbb G}$ can be replaced with an $\mathbb F$-predictable one as well. Furthermore, for any $T\in (0,+\infty)$, any $c>0$ and any $\mathbb F$-predictable $H$ bounded by one, by putting $Q:=(G_T/E[G_T])\cdot P\sim P$ and $X^*_t:=\sup_{0\leq s\leq t}\vert X_s\vert$ for any right-continuous process $X$ with left limits, we have \begin{eqnarray}\label{domination} P\left( (H\theta I_{\{\vert\theta\vert\leq n\}}\bigcdot S^{\tau})_{T}^*>c\right)\geq Q\left((H\varphi I_{\{\vert\varphi\vert\leq n\}}\bigcdot S)_T^*>c\right)E[G_T]. \end{eqnarray} This allows us to conclude, due to \cite[Theorem 1.16, or Remark 2.2-(h)]{Stricker} again, that $\varphi\in L(S^T,\mathbb F)$, for any $T\in (0,+\infty)$. Thus, assertion (c) follows from combining this latter fact and \cite[Theorem 4]{StrickerSI}.\\ {\rm{(d)}} Let $v$ be an $\mathbb F$-predictable process such that $v I_{\Lbrack0,\tau\Lbrack}\leq 0$ $P\otimes A$-a.e. This is equivalent to $$0=E[v^+\bigcdot A_{\tau\wedge T}]=E[v^+G_{-}\bigcdot A_{T}],$$ or equivalently $v^+=0$ $P\otimes A$-a.e.. This is obviously equivalent to $v\leq 0$ $P\otimes A$-a.e., and assertion (d) is proved. This ends the proof of the lemma.\qed \end{proof} The following recalls the $\mathbb G$-compensator of an $\mathbb F$-optional process stopped at $\tau$.
\begin{lemma}\label{lemmaV} Let $V \in {\cal A}_{loc} ({\mathbb F})$, then we have $$(V^{\tau})^{p, {\mathbb G}}= I_{\Lbrack 0,\tau\Lbrack} G_-^{-1} \bigcdot ({\widetilde G} \bigcdot V)^{p, {\mathbb F}}.$$ \end{lemma} For the proof of this lemma and other related results, we refer to \cite{ACDJ1,ACDJ3}. \section{Some useful martingale integrability properties} The results of this section are new, very useful, and not technical at all. \begin{lemma}\label{H0toH1martingales} Consider $K\in {\cal M}_{0,loc}(\mathbb H)$ with $1+\Delta K>0$, and let $H^{(0)}(K,P)$ be given by Definition \ref{Hellinger}. If $E[H^{(0)}_T(K,P)]<+\infty$, then $E[\sqrt{[K,K]_T}]<+\infty$ or equivalently $E[\displaystyle\sup_{0\leq t\leq T}\vert K_t\vert]<+\infty$. \end{lemma} \begin{proof} Let $K\in {\cal M}_{0,loc}(\mathbb H)$ such that $1+\Delta K>0$ and $E[H^{(0)}_T(K,P)]<+\infty$. Then remark that, for any $\delta\in (0,1)$, we always have $$\Delta K-\ln(1+\Delta K)\geq {{\delta\vert \Delta K\vert }\over{\max\left(2(1-\delta),1+\delta^2\right)}}I_{\{\vert \Delta K\vert >\delta\}}+{{(\Delta K)^2}\over{1+\delta}} I_{\{\vert \Delta K\vert \leq \delta\}}.$$ By combining this with (\ref{HellingerLog}), on the one hand, we deduce that \begin{eqnarray*} &&E\left[\langle K^c\rangle_T+\sum_{0<t\leq T}\vert \Delta K_t\vert I_{\{\vert \Delta K_t\vert >\delta\}}+\sum_{0<t\leq T}(\Delta K_t)^2 I_{\{\vert \Delta K_t\vert \leq \delta\}}\right]\\ &&\leq C_{\delta} E\left[\langle K^c\rangle_T+\sum_{0<s\leq T}(\Delta K_s-\ln(1+\Delta K_s))\right] \leq 2C_{\delta}E[H^{(0)}_T(K, P)] < +\infty,\end{eqnarray*} where $C_{\delta}:=\delta^{-1}+\max\left(\delta^{-1}-2,\delta\right)$. 
On the other hand, it is clear that $$[K,K]^{1/2}_T\leq \sqrt{\langle K^c\rangle_T}+\sum_{0<t\leq T}\vert \Delta K_t\vert I_{\{\vert \Delta K_t\vert >\delta\}}+\sqrt{\sum_{0<t\leq T}(\Delta K_t)^2 I_{\{\vert \Delta K_t\vert \leq \delta\}}}.$$ This ends the proof of the lemma.\qed \end{proof} \begin{proposition}\label{Hzero4Log(Z)} Let $Z$ be a positive supermartingale such that $Z_0=1$. Then the following assertions hold.\\ {\rm{(a)}} There exist $K\in {\cal M}_{loc}(\mathbb H)$ and an nondecreasing and $\mathbb H$-predictable process $V$ such that $K_0=V_0=0$ , $\Delta K>-1$, and $Z={\cal E}(K)\exp(-V)$.\\ {\rm{(b)}} $-\ln(Z)$ is a uniformly integrable submartingale if and only if there exists a local martingale $N$ and a nondecreasing and predictable process $V$ such that $\Delta N>-1,\ Z={\cal E}(N)\exp(-V)$ and \begin{eqnarray}\label{Conditions} E\left[V_T+H^{(0)}_T(N,P)\right]<+\infty. \end{eqnarray} {\rm{(c)}} Suppose that there exist a finite sequence of positive supermartingale $(Z^{(i)})_{i=1,...,n}$ such that the product $Z:=\displaystyle\prod_{i=1}^n Z^{(i)}$ is a supermartingale. Then $-\ln(Z)$ is uniformly integrable submartingale if and only if all $-\ln( Z^{(i)})$, $i=1,...,n,$ are uniformly integrable submartingales. \end{proposition} \begin{proof} It is clear that assertion (a) is obvious. Thus, the rest of this proof will be given in two parts, where we prove assertions (b) and (c) respectively.\\ {\bf Part 1.} It is clear that there exist unique local martingale $N$ and a nondecreasing and predictable process $V$ such that $N_0=V_0=0$, $$\Delta N>-1,\quad\quad Z={\cal E}(N)\exp(-V).$$ Thus, we derive \begin{eqnarray}\label{Ito}-\ln(Z)&&=-\ln({\cal E}(N))+V=-N+H^{(0)}(N,P)+ V,\end{eqnarray} where both processes $V$ and $H^{(0)}(N,P)$ are nondecreasing. \\ Suppose that $-\ln(Z)$ is a uniformly integrable submartingale, and let $(\tau_n)_n$ be a sequence of stopping times that increases to infinity and $N^{\tau_n}$ is a martingale. 
Then on the one hand, by stopping (\ref{Ito}) with $\tau_n$, and taking expectation afterwards, we get $$ E[-\ln(Z_{\tau_n\wedge T})]=E\left[V_{\tau_n\wedge T}+H^{(0)}_{\tau_n\wedge T}(N,P)\right].$$ On the other hand, since $\{ -\ln(Z_{\tau_n\wedge T}),\ n\geq 0\}$ is uniformly integrable and the RHS term of the above equality is increasing, by letting $n$ go to infinity in this equality, (\ref{Conditions}) follows immediately. Now suppose that (\ref{Conditions}) holds. As a consequence $E[H^{(0)}_{ T}(N,P) ]<+\infty$, and by combining this with Lemma \ref{H0toH1martingales} and (\ref{Ito}), we deduce that $-\ln(Z)$ is a uniformly integrable submartingale.\\ {\bf Part 2.} Here we prove assertion (c). By a direct application of assertion (a) to each $Z^{(i)}$ ($i=1,..., n$), we obtain the existence of $N^{(i)}\in {\cal M}_{loc}(\mathbb H)$ and nondecreasing and predictable $V^{(i)}$ such that $$\Delta N^{(i)}>-1,\quad\quad Z^{(i)}={\cal E}(N^{(i)})\exp(-V^{(i)}),\quad i=1,...,n.$$ Furthermore, we derive \begin{eqnarray*}-\ln(Z)&&=-\sum_{i=1}^n N^{(i)} +\sum_{i=1}^n H^{(0)}(N^{(i)},P)+ \sum_{i=1}^n V^{(i)}.\end{eqnarray*} Hence, $-\ln(Z)$ is a uniformly integrable submartingale if and only if \begin{eqnarray*}\label{Variable} E\left[\sum_{i=1}^n H^{(0)}_T(N^{(i)},P)+ \sum_{i=1}^n V^{(i)}_T\right]<+\infty,\end{eqnarray*} or equivalently $E\left[H^{(0)}_T(N^{(i)},P)+V^{(i)}_T\right]<+\infty$ for all $i=1,..., n$. Hence, thanks to assertion (b) ---applied to each $Z^{(i)}$ for $i=1,...,n$---, the proof of assertion (c) follows. This ends the proof of the proposition.\qed \end{proof} \section{Martingales' parametrization via predictable characteristics}\label{sectionC} Consider an arbitrary general model $(X, \mathbb H)$, and recall the corresponding notation given in the first paragraph of Section \ref{section4} up to (\ref{PCharac4X}).\\ For the following, we refer to \cite[Theorem 3.75]{J79} and to \cite[Lemma 4.24]{JS03}.
\begin{theorem}\label{tmgviacharacteristics} Let $N\in {\cal M}_{0,loc}$. Then, there exist $\phi\in L^1_{loc}(X^c)$, $N'\in {\cal M}_{loc}$ with $[N',X]=0$, $N'_0=0$ and functionals $f\in {\widetilde{{\cal P}}}$ and $g\in {\widetilde{{\cal O}}}$ such that the following hold.\\ {\rm{(a)}} $\sqrt{(f-1)^2\star\mu}$ and $\Bigl(\sum (\widehat f- a)^2(1-a)^{-2} I_{\{a<1\}}I_{\{\Delta X=0\}}\Bigr)^{1/2}$ belong to ${\cal A}^+_{loc}$.\\ {\rm{(b)}} $(g^2\star\mu)^{1/2}\in{\cal A}^+_{loc}$, $M^P_{\mu}(g\ |\ {\widetilde {{\cal P}}})=0,$ $P\otimes\mu$-a.e., $ \{a=1\}\subset\{\widehat f=1\}$, and \begin{equation} \label{Ndecomposition} N=\phi\cdot X^c+\left(f-1+{{\widehat f-a}\over{1-a}}I_{\{a<1\}}\right)\star(\mu-\nu)+g\star\mu+{N'}. \end{equation} \end{theorem} The quadruplet $ (\phi, f, g, N') $ is called throughout the paper by Jacod's components of $N$ (under $P$). \section{A result on log-optimal portfolio: Choulli and Yansori (2020)}\label{DeepResultonDual} Herein, we consider the general setting and its notation, as in the first paragraph of Section \ref{section5}, where $(X,\mathbb H)$ is an arbitrary general model. \begin{theorem}\label{LemmaCrucial} Let $X$ be an $\mathbb H$-semimartingale with predictable characteristics $\left(b,c,F, A\right)=\left(b^X,c^X,F^X, A^X\right)$, and ${\cal K}_{log}$ be the function given by (\ref{Kfunction}). Then the following assertions are equivalent.\\ {\rm{(a)}} The set ${\cal D}_{log}(X,\mathbb H)$, given by (\ref{DeflatorsLOG}), is not empty (i.e. 
${\cal D}_{log}(X,\mathbb H)\not=\emptyset$).\\ {\rm{(b)}} There exists an $\mathbb H$-predictable process $\widetilde\psi\in{\cal L}(X,\mathbb H)$ such that, for any $\varphi$ belonging to ${\cal L}(X,\mathbb H)$, the following hold \begin{eqnarray} &&(\varphi-\widetilde\psi)^{tr}(b-c\widetilde\psi)+ \int \left( {{(\varphi-\widetilde\psi)^{tr}x}\over{1+{\widetilde\psi}^{tr}x}}-(\varphi-\widetilde\psi)^{tr}h(x)\right)F(dx)\leq 0, \label{C6forX}\\ &&E\left[{\widetilde V}^X_T+{1\over{2}}(\widetilde\psi^{tr}c\widetilde\psi\bigcdot A)_T+({\cal K}_{log}(\widetilde\psi^{tr}x)\star\nu)_T\right]<+\infty ,\label{Condi11}\\ && {\widetilde V}^X:=\left[ \widetilde\psi^{tr}(b-c\widetilde\psi)+\int \left[{{\widetilde\psi^{tr}x }\over{1+\widetilde\psi^{tr}x}}-{\widetilde\psi}^{tr}h(x)\right] F(dx)\right]\bigcdot A\hskip 0.5cm\label{processV} \end{eqnarray} {\rm{(c)}} There exists a unique $\widetilde Z\in{\cal D}_{log}(X,\mathbb H)$ such that \begin{eqnarray}\label{dualSolution} \inf_{Z\in{\cal D}(X,\mathbb H)}E[-\ln(Z_T)]=E[-\ln(\widetilde Z_T)]. \end{eqnarray} {\rm{(d)}} There exists a unique $\widetilde\theta\in\Theta(X,\mathbb H)$ such that \begin{eqnarray}\label{PrimalSolution} \sup_{\theta\in\Theta(X,\mathbb H)}E[\ln(1+(\theta\bigcdot X)_T)]=E[\ln(1+(\widetilde\theta\bigcdot X)_T)]<+\infty. 
\end{eqnarray} {\rm{(e)}} The num\'eraire portfolio exists , and its portfolio ``rate" $\widetilde\psi$ satisfies (\ref{Condi11}).\\ Furthermore, $\widetilde\theta (1+(\widetilde\theta\bigcdot X)_{-})^{-1}$ and $\widetilde\psi$ coincide $P\otimes A$-a.e., and \begin{eqnarray} && \widetilde\varphi\in L(X^c,\mathbb H)\cap {\cal L}(X,\mathbb H),\quad \sqrt{((1+\widetilde\varphi^{tr}x)^{-1}-1)^2\star\mu}\in{\cal A}^+_{loc}(\mathbb H),\label{integrabilities}\\ &&{1\over{\widetilde Z}}={\cal E}(\widetilde\psi\bigcdot X),\ \widetilde Z:={\cal E}(K^X){\cal E}(-V^X),\ K^X:=-\widetilde\psi\bigcdot X^c+{{-{\widetilde\Gamma}^X\widetilde\psi^{tr}x}\over{1+\widetilde\psi^{tr}x}}\star(\mu-\nu).\hskip 1cm\label{duality}\\ &&{\widetilde\Gamma}^X:=\left(1-a+\widehat{f^{(op)}}\right)^{-1},\quad f^{(op)}(t,x):=\left(1+{\widetilde\psi}^{tr}_tx\right)^{-1}.\label{Gammaf(op)}\end{eqnarray} \end{theorem} \section{Proof of Lemma \ref{Lemma4Uniqueness}}\label{Section4Corollary5.3} Let $\theta_1$ and $\theta_2$ two elements of $ {\cal L}(S,\mathbb F)$ such that for any $\theta\in{\cal L}(S,\mathbb F)$, we have \begin{eqnarray*} &&(\theta-{\theta}_1)^{tr}(b-c{\theta}_1)+\int \left({{(\theta-{\theta}_1)^{tr}x}\over{1+{\theta}_1^{tr}x}}-(\theta-{\theta}_1)^{tr}h(x)\right)F(dx)\leq 0,\\ &&(\theta-{\theta}_2)^{tr}(b-c{\theta}_2)+\int \left({{(\theta-{\theta}_2)^{tr}x}\over{1+{\theta}_2^{tr}x}}-(\theta-{\theta}_2)^{tr}h(x)\right)F(dx)\leq 0.\end{eqnarray*} By considering $\theta=\theta_2$ for the first inequality and $\theta=\theta_1$ for the second inequality and adding the resulting two inequalities afterwards, we get \begin{eqnarray*} (\theta_1-{\theta}_2)^{tr}c({\theta}_1-{\theta}_2)+\int\left({{1+{\theta}_2^{tr}x}\over{1+{\theta}_1^{tr}x}}+{{1+{\theta}_1^{tr}x}\over{1+{\theta}_2^{tr}x}}-2\right)F(dx)\leq 0. \end{eqnarray*} Then remark that, for any $x>0$, $x+x^{-1}-2$ is always nonnegative, and it is null if and only if $x=1$. 
Thus, by combining this fact with the above inequality, we deduce that $c\theta_1=c\theta_2$ $P\otimes A$-a.e. and $\theta_1^{tr}x=\theta_2^{tr}x$ $P\otimes A\otimes F$-a.e. This ends the proof of the lemma. \end{document}
\begin{document} \title{Dynamical properties of random walks} \begin{abstract} In this paper, we study dynamical properties such as hypercyclicity, supercyclicity, frequent hypercyclicity and chaoticity for transition operators associated to countable irreducible Markov chains. As particular cases, we consider simple random walks on $\mathbb{Z}$ and $\mathbb{Z}_+$. \end{abstract} \section{Introduction} Let $X$ be a separable Banach space over $\mathbb{C}$ and $T: X \to X$ be a linear operator on $X$. The study of the linear dynamical system $(X, T)$ became very active after 1982. Since then, related works have built connections between dynamical systems, ergodic theory and functional analysis. We refer the reader to the books \cite{bm,em} and to the more recent papers \cite{BerBonMulPer13,BerBonMulPer14,BesMenPerPui16,SGriEMat14, SGriMRog14}, where many additional references can be found. The objective of this paper is to study some central properties of linear dynamical systems such as hypercyclicity, supercyclicity, frequent hypercyclicity, and chaoticity among others, for Markov chain transition operators associated to countable irreducible Markov chains. In particular, we will consider nearest-neighbor simple random walks. We say that $(X,T)$ is {\it hypercyclic}, or topologically transitive, if it has a dense orbit in $X$. This notion is equivalent to the property that for all nonempty open subsets $U$ and $V$ of $X$, there exists an integer $n \geq 0$ such that $T^{n}(U) \cap V$ is not empty. If moreover for every non-empty open set $V \subset X$, the set $N(x, V) = \{k \in \mathbb{N},\; T^{k}(x) \in V \}$ has positive lower density, i.e.\ $ \liminf_{n \rightarrow \infty} \; \frac{1}{n} \mathrm{card} (N(x, V) \cap [1, n]) > 0 \, , $ then we call $(X, T)$ {\it frequently hypercyclic}.
On the other hand, $(X, T)$ is said to be {\it supercyclic} if there exists $x \in X$ such that the projective orbit of $x$ is dense in the sphere $S^{1}= \{z \in X, \| z \|=1\}$, that is, the set $\{\lambda T^{n}(x),\; n \in \mathbb{N}, \lambda \in \mathbb{C}\}$ is dense in $X$. We call $(X, T)$ {\it Devaney chaotic} if it is hypercyclic, has a dense set of periodic points and has a sensitive dependence on the initial conditions. The study of those four properties is a central problem in the area of linear dynamical systems (see for instance \cite{bm} and \cite{em}). Notice that the above properties can be studied in the context of more general topological spaces $X$ called Fr\'echet spaces (the topology is induced by a sequence of semi-norms). There are many examples of hypercyclic linear operators (see \cite{bm}), such as the derivative operator on the Fr\'echet space $H(\mathbb{C})$ of holomorphic maps on $\mathbb{C}$ endowed with the topology of uniform convergence on compact sets, the translation operator on $H(\mathbb{C})$, and classes of weighted shift operators acting on $X \in \{c_0, l^p, p \geq 1\}$. However, the set of hypercyclic linear operators is small. In fact it is proved that this set is nowhere dense in the set of continuous linear operators with respect to the norm topology (see \cite{bm}). An example of a non-hypercyclic operator is the shift operator $S$ acting on $X \in \{c_0, l^q, q \geq 1\}$. This comes from the fact that the norm of $S$ is less than or equal to $1$. However, the shift operator is supercyclic and moreover for any $\lambda >1,\; \lambda S$ is frequently hypercyclic and chaotic (see \cite{bm}). Here we are interested in operators associated to stochastic infinite matrices acting on a separable Banach space $X \in \{c_0, c, l^q, q \geq 1\}$. In particular, we prove that if $A$ is a transition operator on an irreducible Markov chain with countable state space acting on $c$, then $A$ is not supercyclic.
The result remains valid if we replace $c$ by $c_0$ or $l^q,\; q \geq 1$ in the positive recurrent case. A natural question is: what happens when the Markov chain is null recurrent or transient if $X \in \{c_0, l^q, q \geq 1\}?$ In order to study the last question, we consider transition operators $W_p$ (resp. $\overline{W}_p$) of nearest-neighbor simple asymmetric random walks on $\mathbb{Z}_+$ (resp. on $\mathbb{Z}$) with jump probability $p\in (0,1)$. For the simple asymmetric random walk on $\mathbb{Z}_+$ defined in $X \in \{c_0, c, l^q, q >1\}$, we prove that if the random walk is transient ($p >1/2$), then $W_p$ is supercyclic and moreover for all $\vert \lambda \vert> \frac{1}{2p -1},\; \lambda W_p$ is frequently hypercyclic and chaotic. If the random walk is null recurrent ($p=1/2$) and $X= l^1$, then $W_p$ is not supercyclic. For the simple asymmetric random walk on $\mathbb{Z}$, we prove that if $p \ne 1/2$ (transient case), $\lambda\overline{W}_p$ is not hypercyclic for all $\vert \lambda \vert > \frac{1}{\vert 1 -2p \vert}$. We also consider transition operators of spatially inhomogeneous simple random walks on $\mathbb{Z}_{+}$, that is operators $G_{\bar{p}} := G$ associated to a sequence of probabilities $\bar{p} = (p_{n})_{n \geq 0}$ and defined by $G_{0,0}= 1- p_0,\; G_{0,1}= p_0$ and for all $i \geq 1,\; G_{i, j} =0 $ if $j \notin \{ i-1, i+1 \}$, $ G_{i, i-1}= 1-p_i$ and $ G_{i, i+1}= p_i$. In particular, we prove the following result: Consider the sequence $$ w_n= \frac{(1-p_1)(1-p_3)\cdots(1-p_{n-1})}{p_1 p_3 \cdots p_{n-1}} \textrm{ for } n \textrm{ even}, $$ and $$ w_n= \frac{(1-p_0)(1-p_2)\cdots(1-p_{n-3})(1-p_{n-1})}{p_0 p_2 \cdots p_{n-3}p_{n-1}} \textrm{ for } n \textrm{ odd}. $$ The following results hold: 1. If $X= c_0$ and $\lim w_n=0$ or $X= l^q,\; q \geq 1$ and $\sum_{n=1}^{+\infty} w_n^q <+\infty$, then $G$ is supercyclic on $X$. 2. 
Let $X \in \{c_0, l^q,\; q \geq 1\}$ and assume that there exist $n_0 \in \mathbb{N}$ and $\alpha >0$ such that $p_n \geq \frac{1}{2}+ \alpha$ for all $n \geq n_0$, then there exists $\delta >1$ such that $ \lambda G$ is frequently hypercyclic and Devaney chaotic for all $\vert \lambda \vert >\delta$. The last two results can be extended to the spatially inhomogeneous simple random walks on $\mathbb{Z}$. As a consequence of our dynamical study of random walks, we deduce that if the Markov chain is null recurrent, it cannot be supercyclic on $l^1$ (see Proposition 4.6) or supercyclic on $c_0$ (see Remark 4.4). We also deduce that, when the Markov chain is transient, it can have nice dynamical properties such as supercyclicity, frequent hypercyclicity and chaoticity on $X$ in $\{c_0, l^p, p \geq 1\}$ (see Theorems 4.1 and 4.8). We wonder if it is possible to construct transient Markov chains on $\mathbb{Z}_+$ or $\mathbb{Z}$ that are not supercyclic. The paper is organized as follows: In section 2, we give some definitions and classical results. Section 3 describes the study of dynamical properties of Markov chain operators. In section 4, we consider operators associated to the simple asymmetric and also the spatially inhomogeneous simple random walks on $\mathbb{Z}_{+}$ and $\mathbb{Z}$. \section{Definitions and classical results} To fix the notation we introduce here the proper definitions of the spaces mentioned above: Let $w= (w_n)_{n \geq 0}$ be a sequence of complex numbers. 
We put $$ \| w \|_\infty = \sup_{n \ge 0} |w_n| \, , \quad \| w \|_q = \Big( \sum_{n\ge 0} |w_n|^q \Big)^\frac{1}{q} \, , \ 1 \le q < \infty \, , $$ and $$ l^\infty = l^\infty(\mathbb{Z}_+) = \{ w \in \mathbb{C}^{\mathbb{Z}_+} : \| w \|_\infty < \infty \} \, , $$ $$ l^q = l^q(\mathbb{Z}_+) = \{ w \in \mathbb{C}^{\mathbb{Z}_+} : \| w \|_q < \infty \} \, , $$ $$ c = c(\mathbb{Z}_+) = \{ w \in l^\infty : w \textrm{ is convergent} \} \, , $$ $$ c_0 = c_0 (\mathbb{Z}_+) = \{ w \in c : \lim_{n\rightarrow \infty} w_n = 0 \} \, . $$ Now recall the definitions from the introduction. Other definitions related to linear dynamics will be needed. \begin{definition} Let $f : Y \to Y$ be a continuous map acting on some metric space $(Y, d)$. We say that $f$ is {\it Devaney chaotic} if (1) $f$ is hypercyclic; (2) $f$ has a dense set of periodic points; (3) $f$ has a sensitive dependence on initial conditions: there exists $\delta > 0$ such that, for any $x$ in $Y$ and every neighborhood $U$ of $x$, one can find $y \in U$ and an integer $ n >0$ such that $d(f^{n}(x), f^{n}(y)) \geq \delta$. \end{definition} Let $X$ be a separable Banach space over $\mathbb{C}$ and $T: X \to X$ be a bounded linear operator on $X$. \begin{definition} (see \cite{bm}). We say that $T$ satisfies the hypercyclicity (resp. supercyclicity) criterion if there exist an increasing sequence of nonnegative integers $(n_k)_{k \geq 0}$, two dense subspaces of $X,\; D_1$ and $D_2$, and a sequence of maps $S_{n_{k}}: D_2 \to X$ such that \begin{enumerate} \item $\lim_{k} T^{n_{k}}(x)= \lim_{k} S_{n_{k}}(y)=0$ (resp. $\lim_{k} \| T^{n_{k}}(x)\| \, \| S_{n_{k}}(y)\| =0$),\; $ \forall x \in D_1,\; y \in D_2$. \item $\lim_{k} T^{n_{k}} \circ S_{n_{k}}(x)=x, \; \forall x \in D_2$. \end{enumerate} \end{definition} \begin{theorem} The following properties are true \begin{enumerate} \item If $T$ satisfies the hypercyclicity criterion, then $T$ is hypercyclic. 
\item $T$ satisfies the hypercyclicity criterion if and only if $T$ is topologically weakly mixing, i.e.\ $T \times T$ is topologically transitive. \item If $T$ satisfies the supercyclicity criterion, then $T$ is supercyclic. \end{enumerate} \end{theorem} There is an efficient criterion that guarantees that $T$ is Devaney chaotic and frequently hypercyclic (see \cite{bm}). \begin{theorem} \label{crifre} Assume that there exist a dense set $D \subset X$ and a map $S :D \to D$ such that \begin{enumerate} \item For any $x \in D$, the series $\sum_{n=0}^{+\infty} T^{n}(x)$ and $\sum_{n=0}^{+\infty} S^{n}(x)$ are unconditionally convergent (all subseries of both series are convergent). \item For every $x \in D,\; T \circ S(x)= x$, \end{enumerate} then $T$ is chaotic and frequently hypercyclic. \end{theorem} Concerning the dynamical properties of a linear dynamical system $(X,T)$, the spectrum of $T$ plays an important role. We denote by $\sigma(X, T)$, $\sigma_{pt}(X,T)$, $\sigma_r(X,T)$ and $\sigma_c(X,T)$ respectively the spectrum, point spectrum, residual spectrum and continuous spectrum of $T$. Recall that $\lambda$ belongs to $ \sigma(X,T)$ (resp. $\sigma_{pt}(X,T)$) if $(T- \lambda I)$ is not bijective (resp. not one to one). If $(T- \lambda I)$ is one to one and not onto, then $\lambda \in \sigma_r(X,T)$ if $(T- \lambda I) (X)$ is not dense in $X$, otherwise, we say that $\lambda \in \sigma_c(X,T)$. Below we also use the notation $X'$ and $T'$ to indicate respectively the topological dual space and the dual operator associated to $(X,T)$. \begin{lemma} (\cite{bm}) \label{spectrhyper} Let $X$ be a separable Banach space over $\mathbb{C}$ and $T: X \to X$ be a bounded linear operator on $X$. \begin{enumerate} \item If $T$ is hypercyclic then every connected component of the spectrum intersects the unit circle. 
\item If $T$ is hypercyclic, then $\sigma_{pt}(X',T')= \emptyset$, \item If $T$ is supercyclic then there exists a real number $R \geq 0$ such that every connected component of the spectrum intersects the circle $\{z \in \mathbb{C},\; \vert z \vert = R\}$. \item If $T$ is supercyclic, then $\sigma_{pt}(X',T')$ contains at most one point. \end{enumerate} \end{lemma} In this paper, we will use only items 2) and 4) of Lemma \ref{spectrhyper}. Items 1) and 3) are used in \cite{acmv} for the study of dynamical properties of Markov chains associated to stochastic adding machines. \begin{remark} If $T$ is not supercyclic then $\lambda T$ is not hypercyclic for every fixed $\lambda$. However, it is possible to have $T$ supercyclic and $\lambda T$ not hypercyclic for all sufficiently large (but fixed) $\lambda$. \end{remark} \section{Dynamical Properties of Markov Chain Operators} Let $Y = (Y_n)_{n\ge 1}$ be a discrete time irreducible Markov chain with countable state space $E$ and with transition operator $A = [A_{i,j}]_{i,j\in E}$ (irreducible means that for each pair $i$, $j \in E$ there exists a nonnegative integer $n$ such that $A^n_{i,j} > 0$). The Markov chain $Y$ is said to be recurrent if the probability of visiting any given state is equal to one, otherwise $Y$ is said to be transient. The Markov chain $Y$ is called positive recurrent if it has an invariant probability distribution, i.e., there exists $u \in l^1$ such that $uA = u$. Every positive recurrent Markov chain is recurrent. If $Y$ is recurrent but not positive recurrent, it is called null recurrent. For the transient and null recurrent cases we have the following well-known equivalent definitions (see \cite{ross}): \begin{enumerate} \item[(i)] $A$ is transient if and only if $\sum_{n= 1}^{+\infty} A^n_{i,j} < \infty$ for all $i,j \in E$. \item[(ii)] $A$ is null recurrent if and only if $\lim_{n \rightarrow \infty} A^n_{i,j} = 0$ and $\sum_{n= 1}^{+\infty} A^n_{i,j} = \infty$ for all $i,j \in E$. 
\end{enumerate} \begin{proposition} \label{c-ciclic} Let $A$ be a transition operator of an irreducible Markov chain with countable state space acting on $c$, then $A$ is not supercyclic. The result remains valid if we replace $c$ by $c_0$ in the positive recurrent case. \end{proposition} \noindent {\bf Proof:} Consider an enumeration of the state space so that we can consider $E = \mathbb{N}$ and the stochastic matrix $A = [A_{ij}]_{i,j \in \mathbb{N}}$ associated with the transition operator $A$. Assume that $A$ is transient or null recurrent, then $\lim_{n\rightarrow \infty} A^{n}_{i,j} = 0$ for every $i$ and $j$. Now fix $y \in c-c_0$ (we do not need to consider the case $y \in c_0$ while considering density of orbits of $y$ under $A$ or $\lambda A$ because $c_0$ is a closed invariant subspace). Suppose that $\lim_{n \rightarrow \infty} y_n = \alpha \in \mathbb{C} - \{0\}$. We have that \begin{equation} \label{eq:convTn1} \lim_{n\rightarrow \infty} \big( A^{n}y \big)_i = \alpha \, , \end{equation} for every $i \in \mathbb{N}$. Indeed, since $\sum_{j=1}^{+\infty} A^{n}_{i,j} = 1$, for every $n \in \mathbb{N}$ $$ \Big| \big( A^{n}y \big)_i - \alpha \Big| = \Big| \sum_{j=1}^{+\infty} A^{n}_{i,j} (y_j - \alpha) \Big| \le \big( \|y\|_{\infty} + \vert \alpha \vert \big) \sum_{j=1}^m A^{n}_{i,j} + \sup_{j \ge m+1} |y_j - \alpha| \, . $$ The second term in the rightmost side of the previous expression can be made arbitrarily small by choosing $m$ sufficiently large while the first one goes to zero as $n$ tends to $+\infty$ for every choice of $m$. Hence \eqref{eq:convTn1} holds. 
From \eqref{eq:convTn1}, we have that $$ \liminf_{n \rightarrow \infty} \frac{|(A^n y)_i|}{\| A^n y \|_{\infty}} \ge \frac{\vert \alpha \vert}{\| y \|_{\infty}} > 0 \, , $$ and then $\{ (A^n y)/\| A^n y \|_{\infty} : n \ge 1 \}$ is not dense in the unit sphere of $c$ centered at $0$, which implies that $\{ \lambda A^n y \, : \ \lambda \in \mathbb{C}, \ n \ge 1 \}$ is not a dense subset of $c$. Since $y$ is arbitrary, $A$ is not supercyclic on $c$. Now, assume that $A$ is positive recurrent, then there exists an invariant measure $u \in l^1 \setminus \{0\}$ such that $u A= u$, hence $u A^n= u$ for all integers $n \geq 1$. Suppose that $A$ is supercyclic. Take $y \in c \cap S^1_c$, where $S^1_c = \{ x \in c : \| x \|_\infty = 1 \}$, such that the projective orbit of $y$ under $A$ is dense in $S^1_c$, then for all $x \in S^1_c$, there exists an increasing sequence $(n_k)_{k \geq 0}$ such that $\lim_{k \rightarrow \infty} \frac{A^{n_k}y}{\| A^{n_k} y \|_{\infty}} = x$. Since $\| A^{n_k} y \|_{\infty} \le \| y \|_{\infty} = 1$, then $$ |\langle u, x\rangle| = \lim_{k} \frac{ | \langle u , A^{n_k} y \rangle |}{\| A^{n_k} y \|_{\infty} } = \lim_{k} \frac{ |\langle u , y \rangle| }{\| A^{n_k} y \|_{\infty} } \ge | \langle u , y \rangle |,$$ where $\langle u, z\rangle$ is the duality pairing between $u$ and $z$ for $z$ in $c$. Since $x$ is arbitrary in $S^1_c$, the last inequality implies that $u=0$, which is absurd. Hence the projective orbit of $y$ under $A$ could not be dense in $S^1_c$, which means that $A$ is not supercyclic. \\ To finish the proof we just point out that in the positive recurrent case, the same proof holds if we replace $c$ by $c_0$. $\square$ Another result is: \begin{proposition} \label{lq-hyp-c0} Let $q \geq 1$ and $A: l^q \to l^q$ be a hypercyclic (supercyclic) operator on $l^q$, then \begin{enumerate} \item If $A(c_0) \subset c_0$, then $A$ is also hypercyclic (supercyclic) on $c_0$. \item If $r > q$ and $A(l^r) \subset l^r$, then $A$ is also hypercyclic (supercyclic) on $l^r$. 
\end{enumerate} \end{proposition} \noindent {\bf Proof:} (1) Suppose that $A$ is hypercyclic on $l^q$ and let $x \in l^q$ be a hypercyclic vector, i.e.\ $\overline{ O(x)}= l^q$. Now fix $y \in c_0$ and $\epsilon >0$. Take $m \in \mathbb{N}$ such that $\sup_{i > m} |y_i| \le \epsilon/2$. Define $y^{(m)}$ as $$ y^{(m)}_i = \left\{ \begin{array}{cl} y_i &, \ 1\le i \le m ,\\ 0 &, \ \textrm{otherwise}, \end{array} \right. $$ for every $i \in \mathbb{N}$. Since $y^{(m)} \in l^q$, there exists $n \in \mathbb{N}$ such that $\| A^nx - y^{(m)} \|_\infty \le \| A^nx - y^{(m)} \|_q \le \epsilon/2$. Therefore $$ \| A^nx - y \|_\infty \le \| A^nx - y^{(m)} \|_\infty + \| y^{(m)} - y \|_\infty \le \epsilon \, . $$ Since $\epsilon$ and $y$ are arbitrary, $x$ is a hypercyclic vector in $c_0$. The proof in the supercyclic case is analogous. (2) The proof is analogous to item 1) and comes from the fact that if $1 \leq q <r$, then $l^q \subset l^r$. $\square$ \begin{corollary} \label{posit} Let $A: X \to X$, where $X \in \{l^q,\; q \geq 1\}$, be the transition operator of an irreducible positive recurrent stochastic Markov chain, then $A$ is not supercyclic. \end{corollary} \noindent {\bf Proof:} From Proposition \ref{c-ciclic} we have that $A$ acting on $c_0$ is not supercyclic. Thus from (1) in Proposition \ref{lq-hyp-c0} we obtain that $A$ acting on $X$ is not supercyclic. $\square$ \begin{remark} Since an operator $A$ is supercyclic if and only if $cA$ is supercyclic for $c \neq 0$, all the previous results in this section hold for operators associated to countable non-negative irreducible matrices with each line having the same sum of their entries. \end{remark} \noindent \textbf{Question:} What happens if $A:X \to X$ is a countable infinite non-negative irreducible matrix where the sum of the entries of the lines is not constant? Is $A$ not supercyclic on $c$? 
If $A$ is positive recurrent (see \cite{k} for the definition), can we prove that $A$ is not supercyclic in $X \in \{c_0, c, l^q,\; q \geq 1\}$? \section{Simple Random Walks} Consider the nearest neighbor simple random walk on $\mathbb{Z}_+$ with partial reflection at the boundary and jump probability $p\in (0,1)$ (when at zero, the walk stays at zero with probability $1-p$). Denote by $W_p := W= (W_{i,j})_{i, j \geq 0}$ its transition operator. We have $ W_{0,0}= 1-p,\; W_{0,1}= p$ and for all $i \geq 1,\; W_{i, j} =0 $ if $j \notin \{ i-1, i+1 \},\; W_{i,i-1}= 1-p,\; W_{i,i+1}= p$. We have $$ W_p= \tiny{ \left[ \begin{array}{cccccccccc} 1-p & p & 0 & 0 & 0 & 0 & 0 & 0 & 0 & \cdots \\ 1-p & 0 & p & 0 & 0 & 0 & 0 & 0 & 0 & \cdots \\ 0 & 1-p & 0 & p & 0 & 0 & 0 & 0 & 0 & \cdots \\ 0 & 0 & 1-p & 0 & p & 0 & 0 & 0 & 0 & \cdots \\ \vdots & \vdots & \vdots & \vdots & \vdots & \vdots & \vdots & \vdots & \vdots & \ddots \end{array} \right]} $$ It is known (\cite{ross}) that the simple random walk on $\mathbb{Z}_{+}$ is positive recurrent if $p<1/2$, null recurrent if $p=1/2$ and transient if $ p>1/2$. In particular, from Proposition \ref{c-ciclic} and Corollary \ref{posit}, we have that $W_p$ acting on $X \in \{c_0,c,l^q \ q\ge 1\}$ is not supercyclic if $p<1/2$. \begin{theorem} \label{passeio} Let $X \in \{c_0, l^q,\; q \geq 1\}$. If $p >1/2$, then the infinite matrix $W_p$ of the simple random walk on $\mathbb{Z}_{+}$ is supercyclic on $X$. Moreover $\lambda W_p$ is frequently hypercyclic and chaotic for all $\vert \lambda \vert >\frac{1}{2p-1}$. \end{theorem} Before we prove Theorem \ref{passeio} we need three technical results: \begin{lemma} \label{zer} Let $X \in \{c_0, l^q,\; q \geq 1\}$, then $\sigma_{pt}(X,W_p)$ is not empty if and only if $p >1/2$; moreover, in this case $0 \in \sigma_{pt}(X,W_p)$. 
\end{lemma} \noindent {\bf Proof:} Let $ \lambda $ be an element of $\sigma_{pt}(X,W_p)$ and $u= (u_n)_{n \geq 0}$ be an eigenvector associated to $\lambda$, then $$ (1- p- \lambda) u_0+ p u_1=0,\; (1-p)u_n - \lambda u_{n+1}+ p u_{n+2}=0,\; \forall n \geq 0.$$ We deduce that there exists a sequence of complex numbers $(q_{n})_{n \geq 0}$ where $q_0=1,\; q_1= \frac{\lambda + p-1}{p}$ and $u_n= q_n u_0$. Moreover $$\begin{bmatrix}q_n\\ q_{n-1}\end{bmatrix} = M \begin{bmatrix}q_{n-1}\\ q_{n-2}\end{bmatrix},\;\; \forall n \geq 2,$$ where $M= \begin{bmatrix}\frac{\lambda} { p}& \frac{p-1}{p}\\ 1&0\end{bmatrix}$. Hence $\begin{bmatrix}q_n\\ q_{n-1}\end{bmatrix} = M^{n-1} \begin{bmatrix}q_1\\ q_{0}\end{bmatrix}$ for all $n \geq 2$. Assume that $\lambda^2 \neq 4 p (1-p)$, then the matrix $M$ has distinct eigenvalues and hence it is diagonalizable. Therefore, there exist $c, d \in \mathbb{C}\setminus \{0\}$ such that $q_n= c \alpha^n + d \beta^n$ for all integers $n \geq 0$, where $\alpha, \beta$ are the eigenvalues of $M$. Since $\alpha \beta= \det (M)= \frac{1-p}{p}$, then if $0 <p <1/2$, we have $ \alpha \beta > 1$. Then either $\vert \alpha \vert > 1$ or $\vert \beta \vert > 1$, hence $(q_n)_{n \geq 0}$ is not bounded and therefore the point spectrum of $W_p$ is empty. If $p=1/2$, then either ($\vert \alpha \vert > 1$ or $\vert \beta \vert > 1$) or $ \alpha, \beta$ are complex conjugate numbers of modulus $1$. In both cases the point spectrum of $W_p$ is empty. If $\lambda^2 = 4 p (1-p)$, then the matrix $M$ is not diagonalizable and has a unique eigenvalue $\theta$. In this case, there exist $e, f \in \mathbb{C} \setminus \{0\}$ such that $q_n= (e+ f n) \theta^n$ for all integers $n \geq 0$. If $p \leq 1/2$, then $\vert \theta \vert = \sqrt {\frac{1-p}{p} } \geq 1$, hence $q_n$ is not bounded. We deduce that the point spectrum of $W_p$ is empty. 
Now assume that $1/2 <p <1$ and $\lambda= 0$ ($M$ diagonalizable), then $\alpha, \beta \in \{- \sqrt { \frac{1-p}{p}} i,\; \sqrt { \frac{1-p}{p}} i\}$, therefore $\alpha, \beta$ are complex conjugate numbers of modulus $< 1$. Hence $0 \in \sigma_{pt}(X,W_p)$. If $p=1$ and $\lambda= 0$, then $q_0=e$ and $q_n=0$ for all $n \geq 1$. Thus $0 \in \sigma_{pt}(X,W_p)$. $\square$ \begin{remark} \label{zeropoint} Consider $M$ as in the proof of Lemma \ref{zer}. 1. Since the eigenvalues of $M$ depend continuously on $\lambda$, we deduce that for all $p > 1/2$, there exists $0 < r_p < 1$ such that $D(0, r_p) \subset \sigma_{pt}(X,W_p)$. In particular, we can prove that $[0, 2 \sqrt{1-p}) \subset \sigma_{pt}(X,W_p)$. 2. If $p=1/2$ the eigenvalues of $M$ are $\lambda \pm \sqrt{\lambda^2 -1}$; we deduce that if $X= l^{\infty}$, the interval $ ]-1, 1[ \subset \sigma_{pt}(l^{\infty},W_p)$. \end{remark} \begin{lemma} \label{inversa1} Let $X \in \{c_0, l^q,\; q \geq 1\}$ and $v= (v_{i})_{i \geq 0} \in X$. Let $a= (a_n)_{n \geq 0} \in l^{1}$ and $x= (x_n)_{n \geq 0}$ defined by $$x_n= \sum_{k=0}^{n} a_k v_{n-k},\; \forall n \in \mathbb{Z}_{+}.$$ Then $(x_n)_{n \geq 0} \in X$, moreover $$\| x \| \leq \| a \|_1 \| v \|.$$ \end{lemma} \noindent {\bf Proof:} By putting $v_k= 0$ for all $k <0$, we can assume that $x_n= \sum_{k=0}^{+\infty} a_k v_{n-k}$ for all $n \geq 0$. Now suppose that $X= c_0$. For each $i \in \mathbb{N}$ $$ |x_n| \leq \Bigg(\sum_{k=0}^i \vert a_k \vert \Bigg) \sup_{0 \leq k \leq i} \vert v_{n-k}\vert + \Bigg(\sum_{k=i+1}^\infty \vert a_k \vert \Bigg) \|v\|_{\infty}. $$ Since $ (v_{n})_{n \geq 0} \in c_0$ and $(a_n)_{n \geq 0} \in l^1$, we deduce that $(x_n)_{n \geq 0} \in c_0$. 
If $X= l^1$, we have $$ \sum_{n =0}^{+\infty} |x_n| \leq \sum_{n =0}^{+\infty}\sum_{k=0}^\infty \vert a_k v_{n-k} \vert \leq \Bigg(\sum_{k=0}^\infty \vert a_k \vert\Bigg) \Bigg(\sum_{n =0}^{+\infty} \vert v_{n} \vert \Bigg) =\|a \|_1 \|v \|_1 . $$ Now, assume that $X= l^q,\; 1 < q < \infty$; we consider its conjugate exponent $r$ (i.e., $1/q + 1/r= 1$). We have $|x_n| \leq \sum_{k=0}^\infty\vert a_k \vert^\frac{1}{r}\vert a_k \vert^{ (1- 1/r)}\vert v_{n-k} \vert$. Hence, by H\"older's inequality, we obtain $$ |x_n| \leq \Bigg(\sum_{k=0}^\infty\vert a_k \vert\Bigg)^\frac{1}{r} \Bigg(\sum_{k=0}^\infty \vert a_k \vert^{q (1- 1/r)}\vert v_{n-k} \vert^q \Bigg)^\frac{1}{q}. $$ As a consequence, $ \sum_{n =0}^{+\infty} \vert x_n \vert^q \leq \Bigg(\sum_{k=0}^\infty\vert a_k \vert\Bigg)^{q-1} \sum_{n =0}^{+\infty}\sum_{k=0}^\infty \vert a_k \vert \vert v_{n-k} \vert^q.$ Hence $$ \sum_{n =0}^{+\infty} \vert x_n \vert^q \leq \Bigg(\sum_{k=0}^\infty\vert a_k \vert \Bigg)^{q} \sum_{n =0}^{+\infty} \vert v_{n} \vert^q.$$ $\square$ \begin{proposition} \label{inversa} Let $X \in \{c_0, l^q,\; q \geq 1\}$ and $p >1/2$, then for $v= (v_{i})_{i \geq 0} \in X$ there exists $u= (u_{i})_{i \geq 0} \in X$ such that $W(u) = v$. Moreover, $u$ is explicitly given by \begin{eqnarray} \label{frma} u_{n}= \frac{1}{p} \; \Big( \sum_{j=0}^{\lfloor n/2 \rfloor} \Big( \frac{p-1}{p} \Big)^{j} v_{n-2j-1} + \Big( \frac{p-1}{p} \Big)^{\lfloor n/2 \rfloor + 1} p u_0 \Big) \end{eqnarray} where $\lfloor n/2 \rfloor $ is the largest integer $< n/2$. \end{proposition} \noindent {\bf Proof:} Fix $v= (v_{i})_{i \geq 0} \in X$ and $u= (u_{i})_{i \geq 0} \in l^{\infty}$ such that $W(u)= v$, then \begin{eqnarray} \label{fre} u_1= \frac{v_{0}}{p}+ \frac{p-1}{p} u_{0},\; u_n = \frac{v_{n-1}}{p}+ \frac{p-1}{p} u_{n-2},\; \forall n \geq 2. 
\end{eqnarray} Then we obtain by induction that for all integers $n \geq 2$, $$ u_{n}= 1/p \; ( v_{n-1}+ \gamma v_{n-3} + \gamma^2 v_{n-5}+ \cdots + \gamma^k v_{n-2k-1}+ \cdots + \gamma^{\lfloor n/2 \rfloor} v_t + \gamma^{\lfloor n/2 \rfloor+1} p u_0 ), $$ where $\gamma= \frac{p-1}{p}$, $t= 0$ if $n $ is odd and $t=1$ otherwise. Thus we have (\ref{frma}). By Lemma \ref{inversa1}, we deduce that $u$ belongs to $X$. Observe that if $u \in c_0$, then \begin{eqnarray} \label{fie}\| u \|_{\infty} \leq \delta \; \max ( \| v \|_{\infty}, \vert u_0 \vert) \mbox { where } \delta = \frac{1}{p}\sum_{n=0}^{+\infty} \vert \gamma \vert ^n= \frac{1}{2p-1}. \end{eqnarray} If $u \in l^q,\; q \geq 1$, then by Lemma \ref {inversa1}, \begin{eqnarray} \label{fie2} \| u \|_q \leq \frac{1}{2p-1} \| v \|_q + \vert u_0 \vert \Big( \sum_{n=1}^{+\infty} \vert \gamma \vert^{nq} \Big)^{1/q}. \end{eqnarray} \begin{remark} \label{zer} If $u_0= 0$, then $\| u \| \leq \frac{1}{2p-1} \| v \|$ (in $X$). \end{remark} \begin{lemma} \label{kernel} Let $X \in \{c_0, l^q,\; q \geq 1\},\; p >1/2$, then $D= \bigcup_{n=1}^{+\infty} Ker (W^n)$ is dense in $X$. \end{lemma} \noindent {\bf Proof:} Fix $X$ in $\{c_0, l^q,\; q \geq 1\}$. By Lemma \ref{zer}, we have that $0 \in \sigma_{pt}(X,W)$. Then for all integers $n \geq 1,\; Ker (W^n) $ is nontrivial. {\bf Claim:} {\em for all integers $n \geq 1$, there exist $V_{0,n},\ldots , V_{n-1,n} \in l^{\infty}$, linearly independent, such that if $u=(u_{i})_{i \geq 0} \in Ker (W^{n})$, then $ u = \sum_{i=0}^{n-1} u_i V_{i,n}$.} Indeed, since $W_{j,k}=0$ for all $k \geq j+2$, we deduce that for all integers $n \geq 2,\; W^{n}_{j,k}=0$ for all integers $k \geq j+n+1$. Assume that $u=(u_i)_{i \geq 0} \in Ker (W^n)$. The relation $\sum_{k=0}^{j+n} W^{n}_{j,k} \, u_k=0$ holds for all $j \in \mathbb{N}$. 
Since $W_{0,n}^{n} > 0$, we deduce that $$u_{n}= \sum_{i=0}^{n-1} u_i c_{i,n,n} \, ,$$ where $c_{i,n,n}= -\frac{W^{n}_{0,i}}{W^{n}_{0,n}}$ for all $i=0,\ldots, n-1$. We also obtain by induction that $u_{k}= \sum_{i=0}^{n-1} u_i c_{i,k,n}$ for all $k \geq n,$ where $c_{i,k,n}$ are real numbers. For all $i \in \{0,1,\ldots, n-1\}$, define the infinite vector $V_{i,n}= (V_{i,n}(k))_{k \geq 0}$ by putting $V_{i,n}(k)= c_{i,k,n}$ for all $k \geq n$ and $V_{i,n}(k)= \delta_{i,k}$ for all $0 \leq k <n$. Then, we obtain the claim. Now observe that $V_{i, n} \in X$ for every integer $n$ and $i=0,\ldots, n-1$. Indeed for all $i=0,\ldots, n-1$, we have $W^{n-1} V_{i, n} \in Ker (W)$, which is contained in $X$. Hence by Proposition \ref{inversa}, we deduce that $W^{n-2} V_{i, n} \in X$ and, continuing in the same way, we obtain that $V_{i, n} \in X$. Now, let $z= (z_i)_{i \geq 0} \in X$ such that $z_i=0$ for all $i >n$, where $n$ is a large integer; then $z$ can be approximated by the vector $\sum _{i=0}^{n-1} z_i V_{i,n}$, which belongs to the set $D$. Hence $D$ is dense in $X$. $\square$ {\bf Proof of Theorem \ref{passeio}:} First we prove that $W$ is supercyclic. Recall the definition of $D$ from the statement of Lemma \ref{kernel}. By Proposition \ref{inversa}, for every $v \in D$, we can choose $Sv \in D$ such that $W(Sv)= v$. Using the fact that $Sv \in D$, we prove by induction that $W^n (S^n v)= v$ for all $v \in D$. On the other hand, since for all $u \in D$, there exists a nonnegative integer $N$ such that $W^{n}(u)=0$ for all $n \geq N$, we deduce that $\lim \| W^{n}(u) \| \, \| S^{n}(v)\| =0, \; \forall u, v \in D$. Hence $W$ satisfies the supercyclicity criterion. Thus $W$ is supercyclic. \noindent {\bf Claim: $\lambda W$ is frequently hypercyclic and chaotic for all $\vert \lambda \vert> \frac{1}{2p-1} $.} Indeed, let $ v= (v_{i})_{i \geq 0} \in X$ and $u= (u_{i})_{i \geq 0} \in l^{\infty}$ such that $W(u)= v$, then $u$ satisfies (\ref{frma}). 
Putting $u_0=0$, we obtain $S(v)= (0, u_1,u_2, \ldots)$ and $W(Sv)= v$. We also have by Remark \ref{zer} that $\| Sv \| \leq \frac{1}{2p-1} \| v \|$. On the other hand, since $S(v)_0= 0$, we obtain by (\ref{fre}) that $$S^2(v)= (0, 0, (S^2 v)_2, (S^2 v)_{3},\ldots).$$ We deduce that for all integers $n \geq 0 $ $$ S^n(v)= (\underbrace{0,\ldots, 0}_n, (S^n v)_n, (S^n v)_{n+1},\ldots) \mbox { and } \| S^n (v) \| \leq \Bigg(\frac{1}{2p-1}\Bigg)^{n} \| v \|.$$ Let $\lambda$ be a complex number such that $\vert \lambda \vert > \delta$, then $\| \lambda^{-n} S^n (v) \|$ converges to $0$ exponentially as $n$ goes to $+\infty$. Taking $W'= \lambda W$ and $S' = \lambda^{-1} S$ and $D= \cup_{n=0}^{+\infty} Ker(W^n)$, we obtain that the series $\sum_{n=0}^{+\infty} W'^{n}(x)$ and $\sum_{n=0}^{+\infty} S'^{n}(x)$ are absolutely convergent and hence unconditionally convergent for all $x \in D$; moreover $ W' \circ S'= I$ on $D$, then we are done by Theorem \ref{crifre}. $\square$ \begin{proposition} \label{notSRW} For $p =1/2$, the operator $W_p$ acting on $l^1$ is not supercyclic. \end{proposition} \noindent \textbf{Proof:} Note that $W_p$ is symmetric for $p=1/2$, then by (2) in Remark \ref{zeropoint} we have that $\sigma_{pt} ((l^1)',W_p') = \sigma_{pt} (l^\infty,W_p) \supset ]-1,1[$. By (4) in Lemma \ref{spectrhyper}, we obtain the result. $\square$ \noindent {\bf Question:} For $p=1/2$, is the operator $W_p$ acting on $c_0$ or $l^q,\; q >1$ not supercyclic? \subsection{Simple Random Walks on $\mathbb{Z}$} Consider the simple random walk on $\mathbb{Z}$ with jump probability $p\in (0,1)$, i.e., at each time the random walk jumps one unit to the right with probability $p$, otherwise it jumps one unit to the left. Denote by $\overline{W}_p:=\overline{W}$ its transition operator. 
For all $i, j \in \mathbb{Z}$, we have $\overline{W}_{i,j}=0$ if $j \notin \{i-1, i+1\}$ and $\overline{W}_{i, i-1}=1-p,\; \overline{W}_{i, i+1}=p$. The simple random walk on $\mathbb{Z}$ is null recurrent if $p=1/2$, otherwise it is transient. \begin{proposition} If $p \ne 1/2$, then $\lambda \overline{W}_p$ is not hypercyclic on $X \in \{c_0, l^q,\; q \geq 1\}$, for all $\vert \lambda \vert \geq \frac{1}{\vert 1- 2p \vert}$. If $p =1/2$, then $\overline{W}_{p}$ is not supercyclic on $l^1$. \end{proposition} \noindent \textbf{Proof:} Let $X \in \{c_0, l^q,\; q \geq 1\}$ and $x= (x_{i})_{i \in \mathbb{Z}} \in X$, then $\overline{W}_p(x) = (1-p) y+ p z$ where $y= (y_i)_{i \in \mathbb{Z}}$ and $z= (z_i)_{i \in \mathbb{Z}}$ satisfy $y_i= x_{i-1}$ and $z_i= x_{i+1}$ for all $i$. Hence $$\| \overline{W}_p(x) \| \geq (1-p) \| y \| - p \| z \|.$$ Since $\| y \| = \| z \|= \| x \|$, we deduce that $\| \overline{W}_p(x) \| \geq \vert 1 -2p \vert \| x \|$. Hence $$\| \overline{W}_p^{\,n}(x) \| \geq \vert 1 -2p \vert^n \| x \| \mbox { for all } n\ge 1.$$ Then $\lambda \overline{W}_p$ is not hypercyclic on $X \in \{c_0, l^q,\; q \geq 1\}$, for all $\vert \lambda \vert \geq \frac{1}{\vert 1- 2p \vert}$. Now, assume that $p=1/2$. Note that $\overline{W}_p$ is symmetric, then by (2) in Remark \ref{zeropoint} we have that $\sigma_{pt} ((l^1)',\overline{W}_p') = \sigma_{pt} (l^\infty,\overline{W}_p)$. We can prove that $]-1, 1[ \subset \sigma_{pt} (l^\infty,\overline{W}_p)$. By (4) in Lemma \ref{spectrhyper}, we obtain that $\overline{W}_p$ is not supercyclic on $l^1$. $\square$ \begin{remark} Dynamical properties of simple random walks on $\mathbb{Z}_{+}$ and on $\mathbb{Z}$ are different in the case where they are transient. \end{remark} \textbf{Question:} Is $\lambda \overline{W}_p$ not hypercyclic on $X \in \{c_0, l^q,\; q \geq 1\}$ for all $|\lambda| \ge 1$? Can $\overline{W}_p$ be supercyclic? 
\ \subsection{Spatially inhomogeneous simple random walks on $\mathbb{Z}_{+}$} In this section we consider spatially inhomogeneous simple random walks on $\mathbb{Z}_{+}$, or discrete birth and death processes. Let $\bar{p} = (p_{n})_{n \geq 0}$ be a sequence of probabilities; the simple random walk on $\mathbb{Z}_{+}$ associated to $\bar{p}$ is a Markov chain with transition probability $G_{\bar{p}} := G$ defined by $G_{0,0}= 1- p_0,\; G_{0,1}= p_0$ and for all $i \geq 1,\; G_{i, j} =0 $ if $j \notin \{ i-1, i+1 \}$, $ G_{i, i-1}= 1-p_i$ and $ G_{i, i+1}= p_i$. $$ G_{\bar{p}}= \tiny{ \left[ \begin{array}{cccccccccc} \!\!1-p_0 \!\!&\!\! p_0 \!\!&\!\!0 \!\!&\!\!0 \!\!&\!\!0 \!\!&\!\!0 \!\!&\!\!0 \!\!&\!\!0 \!\!&\!\!0 \!\!&\!\! \cdots \!\! \\ \!\!1-p_1 \!\!&\!\!0\!\!&\!\!p_1 \!\!&\!\!0 \!\!&\!\!0 \!\!&\!\!0 \!\!&\!\!0 \!\!&\!\!0 \!\!&\!\!0 \!\!&\!\! \cdots \!\! \\ \!\!0 \!\!&\!\!1-p_2 \!\!&\!\!0 \!\!&\!\!p_2 \!\!&\!\!0 \!\!&\!\!0 \!\!&\!\!0 \!\!&\!\!0 \!\!&\!\!0 \!\!&\!\! \cdots \!\! \\ \!\!0 \!\!&\!\!0 \!\!&\!\!1-p_3\!\!&\!\!0\!\!&\!\!p_3 \!\!&\!\!0 \!\!&\!\!0 \!\!&\!\!0 \!\!&\!\!0 \!\!&\!\! \cdots \!\! \\ \!\!\vdots \!\!&\!\!\vdots \!\!&\!\!\vdots \!\!&\!\!\vdots \!\!&\!\!\vdots \!\!&\!\!\vdots \!\!&\!\!\vdots \!\!&\!\!\vdots \!\!&\!\!\vdots \!\!&\!\!\ddots \end{array} \right]} $$ It is known (see chapter 5 in \cite{ross}) that $G$ is transient if and only if $$ S_1= \sum _{n=1}^{+\infty}\frac{(1-p_1)(1-p_2)\cdots(1-p_n)}{p_1 p_2 \cdots p_{n}} < \infty \, , $$ and positive recurrent if and only if $$ S_2= \sum_{n=1}^{+\infty} \frac{p_0 p_1 \cdots p_{n-1}}{(1-p_1)(1-p_2)\cdots(1-p_n)} < \infty \, . $$ Thus if both series $S_1$ and $S_2$ do not converge, then $G$ is null recurrent. Now, consider the sequence $$ w_n= \frac{(1-p_1)(1-p_3)\cdots(1-p_{n-1})}{p_1 p_3 \cdots p_{n-1}} \textrm{ for } n \textrm{ even}, $$ and $$ w_n= \frac{(1-p_0)(1-p_2)\cdots(1-p_{n-3})(1-p_{n-1})}{p_0 p_2 \cdots p_{n-3}p_{n-1}} \textrm{ for } n \textrm{ odd}. 
$$ \begin{theorem} \label{passweight} The following properties hold: 1. If $X= c_0$ and $\lim w_n=0$ or $X= l^q,\; q \geq 1$ and $\sum_{n=1}^{+\infty} w_n^q <+\infty$, then $G$ is supercyclic on $X$. 2. Let $X \in \{c_0, l^q,\; q \geq 1\}$ and assume that there exist $n_0 \in \mathbb{N}$ and $\alpha >0$ such that $p_n \geq \frac{1}{2}+ \alpha$ for all $n \geq n_0$, then there exists $\delta >1$ such that $ \lambda G$ is frequently hypercyclic and chaotic for all $\vert \lambda \vert >\delta$. \end{theorem} \begin{remark} Item 1 in Theorem \ref{passweight} implies that there exist null recurrent random walks which are supercyclic on $c_0$. \end{remark} \noindent \textbf{Proof:} 1. Assume that $0$ is an eigenvalue of $G$ associated to an eigenvector $u= (u_n)_{n \geq 0}$. Then $$ u_1= \frac{p_{0}-1}{p_{0}}u_0 \mbox { and } u_n = \frac{p_{n-1}-1}{p_{n-1}}u_{n-2} ,\; \forall n \geq 2. $$ Thus $u_n = (-1)^{n} w_n u_0$. If $X= c_0$ then $ 0 \in \sigma_{pt}(G)$ if and only if $\lim w_n=0$. If $X= l^q,\; q \geq 1$, then $ 0 \in \sigma_{pt}(G)$ if and only if $\sum_{n=1}^{+\infty} w_n^q <+\infty$. In both cases, we deduce, exactly as done in the proof of Theorem \ref{passeio}, that $G$ is supercyclic on $X$. 2. Observe first that the hypothesis implies $\sum_{n=1}^{+\infty} w_n <+\infty$. Indeed, let $ v= (v_{i})_{i \geq 0} \in X$ and $u= (u_{i})_{i \geq 0} \in l^{\infty}$ such that $G(u)= v$ and $u_0=0$, then $$u_1= \frac{v_{0}}{p_0} \quad \textrm{ and } \quad u_n = \frac{v_{n-1}}{p_{n-1}}+ \frac{p_{n-1}-1}{p_{n-1}} u_{n-2} \textrm{ for all integers } n \geq 2. 
$$ Putting $r_n = \frac{p_n -1}{p_{n}}$ for all integer $n \geq 0$, we obtain by induction that for all integer $n \geq 2$, \begin{eqnarray*} u_{n} & = &\frac{1}{p_{n-1}} v_{n-1}+ \frac{1} {p_{n-3}} r_{n-1}v_{n-3} + \frac{ 1}{p_{n-5}} r_{n-1}r_{n-3} v_{n-5}+ {\cal D}ots \\ & & \frac{ 1}{p_{n-2k+1}} ( r_{n-1}r_{n-3} {\cal D}ots r_{n-2k+1} ) v_{n-2k-1} + {\cal D}ots + \frac{ 1}{p_{t}} ( r_{n-1}r_{n-3} {\cal D}ots r_{t+2} ) v_t , \end{eqnarray*} where $t= 0$ if $n $ is odd and $t=1$ otherwise. Put $S(v)= (0, u_1, \ldots)$ for all $v \in X$, then $G(Sv)= v$. Since $p_n \geq \frac{1}{2}+ \alpha$ for all $n \geq n_0$, we have that $r_n \leq \frac{1/2 - \alpha}{1/2+ \alpha} <1$. We deduce exactly as done in the proof of Theorem \ref{passeio} that there exists $\delta >1$ such that $ \lambdambda T$ is frequently hypercyclic and chaotic for all $\vert \lambdambda \vert >\delta$. $\square$ {\bf Questions:} 1. If $X \in \{c_0, l^q,\; q \geq 1\}$ and $\sum_{n=1}^{+\infty} w_n <+\infty$, can we prove that there exists $\delta >1$ such that $ \lambdambda G$ is frequently hypercyclic and chaotic for all $\vert \lambdambda \vert >\delta$? 2. If $\sum_{n=1}^{+\infty} w_n <+\infty$, then by H\"older inequality, we deduce that $$\sum_{n=1}^{+\infty} \frac{(1-p_1)(1-p_2) \ldots (1-p_{n})}{p_0 p_1 \ldots p_{n-1}} < +\infty$$ and hence $G$ is transient. Does there exist $G$ transient and not supercyclic on $l^1$ such that $\sum_{n=1}^{+\infty} w_n <+\infty$? \begin{theorem} \lambdabel{notGRW} If $X = c_0$ and $\sum_{n=1}^{+\infty} w_n ^{-1} <+\infty$ or $X= l^1$ and $1/w_n$ is bounded or $X= l^q,\; q > 1$ and $\sum_{n=1}^{+\infty} ( w_n )^{-\frac{q}{ q-1}} <+\infty$, then $\lambdambda G$ is not hypercyclic for all $\vert \lambdambda \vert >1$. \end{theorem} \begin{remark} Theorem \ref{notGRW} is the closest we get to Theorem \ref{notSRW}. We conjecture that $G$ is not supercyclic under the hypothesis of Theorem \ref{notGRW}. 
\end{remark} \noindentindent \textbf{Proof:} Assume that $ 0 $ is an element of $\sigma_{pt}(X', G')$ and $u= (u_n)_{n \geq 0}$ an eigenvector associated to $0$, then $u T= 0$. Thus $(1-p_0)u_0 + (1- p_1)u_1= 0$ and $p_n u_n + (1- p_{n+2}) u_{n+2}= 0$ for all $n \geq 0$. Hence for all $n \geq 1$, we have $$u_{2n} = \frac{p_{2n-2} p_{2n-4} \ldots p_{0}}{(p_{2n-2}-1)(p_{2n-4}-1) \ldots (p_{0}-1)} u_0$$ and $$u_{2n+1} = \frac{p_{2n-1} p_{2n-3} \ldots p_{1}}{(p_{2n-1}-1)(p_{2n-3}-3) \ldots (p_{1}-1)} \frac{(1-p_{0})}{p_1 -1} u_0.$$ Hence if $X = c_0$ and $\sum_{n=1}^{+\infty} w_n ^{-1} <+\infty$ or $X= l^1$ and $1/w_n$ is bounded or $X= l^q,\; q > 1$ and $\sum_{n=1}^{+\infty} ( w_n )^{-\frac{q}{1- q}} <+\infty$, we have $0 \in \sigma_{pt}((\lambdambda G)^{'}, X'))$. Thus, by Lemma \ref{spectrhyper}, $\lambdambda G$ is not hypercyclic for all $\lambdambda$. $\square$ {\bf Question:} If $G$ is null recurrent and $X= l^1$, Is $\lambdambda T$ is not hypercyclic for all $\vert \lambdambda \vert >1$? \begin{remark} Let $\bar{p} = (p_{n})_{n \in \mathbb{Z}}$ be a sequence of probabilities, and denote by $\overlineerline{G}_{\bar{p}} := \overlineerline{G}$ the transition operator of the spatially inhomogeneous simple random walks on $\mathbb{Z}$, defined by: For all $i \in \mathbb{Z},\; \overlineerline{G}_{i, i-1}= 1-p_i,\; \overlineerline{G}_{i, i+1}= p_i$ and $\overlineerline{G}_{i, j}= 0$ if $j \noindentt \in \{i-1, i+1\}$. Then by using the same method done in Theorem \ref{passweight}, we can prove the following results: 1. If $\lim w_n=0$ and $\lim w_{-n}^{-1}=0$ then $\overlineerline{G}$ is supercyclic on $c_0$. If $q \geq 1$ and $\sum_{n =1}^{+\infty} w_n^{q} <+\infty$ and $ \sum_{n =1}^{+\infty} w_ {-n}^{-q} <+\infty$, then $\overlineerline{G}$ is supercyclic on $l^q$. 2. 
Let $X \in \{c_0, l^q,\; q \geq 1\}$ and assume that there exist positive constants $n_0, n_1, \alpha $ such that $p_n \leq \frac{1}{2}- \alpha$ for all $n \geq n_0$ and $p_n \geq \frac{1}{2}+ \alpha$ for all $n \leq -n_1$, then there exists $\delta >1$ such that $ \lambdambda \overlineerline{G}$ is frequently hypercyclic and chaotic for all $\vert \lambdambda \vert >\delta$. \end{remark} \textbf{Question:} Does there exist a transient Markov operator that is not supercyclic? Does there exist a null recurrent Markov operator that is supercyclic on $l^1$? \noindentindent{\bf Acknowledgment:} The authors would like to thank El Houcein El Abdalaoui and Patricia Cirilo for fruitful discussions. \end{document}
\begin{document} \title{Many visits TSP revisited\anonymyze{\thanks{This research is a part of projects that have received funding from the European Research Council (ERC) under the European Union's Horizon 2020 research and innovation programme Grant Agreement 714704 (S.~Li, W. Nadara) and 677651 (Ł. Kowalik, M. Smulewicz).}} } \date{\today} \anonymyze{ \author{Łukasz Kowalik\thanks{Institute of Informatics, University of Warsaw, Poland (\texttt{[email protected]})} \and Shaohua Li\thanks{Institute of Informatics, University of Warsaw, Poland (\texttt{[email protected]})} \and Wojciech Nadara\thanks{Institute of Informatics, University of Warsaw, Poland (\texttt{[email protected]})} \and Marcin Smulewicz\thanks{Institute of Informatics, University of Warsaw, Poland (\texttt{[email protected]})} \and Magnus Wahlström\thanks{Royal Holloway, University of London, UK (\texttt{[email protected]})}} } \maketitle \anonymyze{ \begin{textblock}{20}(0, 13.0) \includegraphics[width=40px]{logo-erc} \end{textblock} \begin{textblock}{20}(-0.25, 13.4) \includegraphics[width=60px]{logo-eu} \end{textblock} } \begin{abstract} We study the \ProblemName{Many Visits TSP} problem, where given a number $k(v)$ for each of $n$ cities and pairwise (possibly asymmetric) integer distances, one has to find an optimal tour that visits each city $v$ exactly $k(v)$ times. The currently fastest algorithm is due to Berger, Kozma, Mnich and Vincze [SODA 2019, TALG 2020] and runs in time and space ${\ensuremath{\mathcal{O}}}star(5^n)$. They also show a polynomial space algorithm running in time ${\ensuremath{\mathcal{O}}}(16^{n+o(n)})$. In this work, we show three main results: \begin{itemize} \item A randomized polynomial space algorithm in time ${\ensuremath{\mathcal{O}}}star(2^nD)$, where $D$ is the maximum distance between two cities. By using standard methods, this results in $(1+\epsilon)$-approximation in time ${\ensuremath{\mathcal{O}}}star(2^n\epsilon^{-1})$. 
Improving the constant $2$ in these results would be a major breakthrough, as it would result in improving the ${\ensuremath{\mathcal{O}}}^*(2^n)$-time algorithm for {\sc Directed Hamiltonian Cycle}, which is a 50 years old open problem. \item A tight analysis of Berger et al.'s exponential space algorithm, resulting in an ${\ensuremath{\mathcal{O}}}^*(4^n)$ running time bound.
A breakthrough came in the work of Cosmadakis and Papadimitriou~\cite{papadimitriou} who presented an algorithm running in time $2^{O(n\log n)}+{\ensuremath{\mathcal{O}}}(n^3\log\ell)$ and space $2^{O(n\log n)}$, thus essentially removing the dependence on the function $k$ from the bound (the $\log\ell$ factor can be actually skipped if we support the original algorithm with a today's state-of-the-art minimum cost flow algorithm). This may be surprising since the {\em length} of the output sequence is $\ell$. However, beginning from the work of Cosmadakis and Papadimitriou we consider MVTSP with compressed output, namely the output is a multiplicity function which encodes the number of times every edge is visited by the tour. By using a standard Eulerian tour algorithm we can compute an explicit tour from this output. The crux of the approach of Cosmadakis and Papadimitriou~\cite{papadimitriou} was an observation that every solution can be decomposed to a minimal connected spanning Eulerian subgraph (which enforces connectivity of the solution) and subgraph satisfying appropriate degree constraints (which completes the tour so that the numbers of visits agree). Moreover, once we guess the degree sequence $\delta$ of the Eulerian subgraph, our task splits into two separate tasks: finding a cheapest minimal connected Eulerian subgraph consistent with $\delta$ (which is computationally hard) and finding a cheapest subgraph satisfying the degree constraints (which can by solved in polynomial time by a reduction to minimum cost flow). Yet another breakthrough came only recently, namely Berger, Kozma, Mnich and Vincze~\cite{berger-soda,berger-arxiv} improved the running time to ${\ensuremath{\mathcal{O}}}star(5^n)$. Their main contribution is an idea that it is more convenient to use outbranchings (i.e. spanning trees oriented out of the root) to force connectivity of the solution. 
The result of Berger et al.\ is the first algorithm for MVTSP which is optimal assuming Exponential Time Hypothesis (ETH)~\cite{eth}, i.e., there is no algorithm in time $2^{o(n)}$, unless ETH fails. Moreover, by applying the divide and conquer approach of Gurevich and Shelah~\cite{GurevichShelah} they design a polynomial space algorithm, running in time ${\ensuremath{\mathcal{O}}}(16^{n+o(n)})$. \subsection{Our results} In this work, we take the next step in exploration of the \ProblemName{Many Visits TSP} problem: we aim at algorithms which are optimal at a more fine grained level, namely with running times of the form ${\ensuremath{\mathcal{O}}}(c^n)$, such that an improvement to ${\ensuremath{\mathcal{O}}}((c-\epsilon)^n)$ for any $\epsilon>0$ meets a kind of natural barrier, for example contradicts Strong Exponential Time Hypothesis (SETH)~\cite{seth} or Set Cover Conjecture (SCC)~\cite{secoco}. Our main result is the following theorem. \begin{theorem} \label{thm:bnd-tsp} There is a randomized algorithm that solves \ProblemName{Many Visits TSP} in time ${\ensuremath{\mathcal{O}}}star(2^nD)$ and polynomial space, where $D=\max\{d(u,v) : u, v \in V, d(u, v) \neq \infty\}$. The algorithm returns a minimum weight solution with constant probability. \end{theorem} The natural barrier in this case is connected with \ProblemName{Directed Hamiltonicity}, the problem of determining if a directed graph contains a Hamiltonian cycle. Indeed, this is a special case of \ProblemName{Many Visits TSP} with $D=1$, so an improvement to ${\ensuremath{\mathcal{O}}}star(1.99^nD)$ in Theorem~\ref{thm:bnd-tsp} would result in an algorithm in time ${\ensuremath{\mathcal{O}}}star(1.99^n)$ for \ProblemName{Directed Hamiltonicity}. 
While it is not known whether such an algorithm contradicts SETH or SCC, the question about its existence is a major open problem which in the last 58 years has seen some progress only for special graph classes, like bipartite graphs~\cite{bkk:laplacians,cygan:bases}. At the technical level, Theorem~\ref{thm:bnd-tsp} uses so-called algebraic approach and relies on two key insights. The first one is to enforce connectivity not by guessing a spanning connected subgraph as in the previous works, but by applying the Cut and Count approach of Cygan et al~\cite{cutcount}. The second insight is to satisfy the degree constraints using Tutte matrix~\cite{tutte,lovasz-tutte}. By using standard rounding techniques, we are able to make the algorithm from Theorem~\ref{thm:bnd-tsp} somewhat useful even if the maximum distance $D$ is large. Namely, we prove the following. \begin{theorem} \label{thm:aprox} For any $\epsilon > 0$ there is a randomized $(1+\epsilon)$-approximation algorithm that solves \ProblemName{Many Visits TSP} in ${\ensuremath{\mathcal{O}}}star({2^n}{\epsilon^{-1}})$ time and polynomial space. \end{theorem} In Theorems~\ref{thm:bnd-tsp} and~\ref{thm:aprox} the better exponential dependence in the running time was achieved at the cost of sacrificing an ${\ensuremath{\mathcal{O}}}(D)$ factor in the running time, or the optimality of the solution. What if we do not want to sacrifice anything? While we are not able to get a ${\ensuremath{\mathcal{O}}}star(2^n)$ algorithm yet, we are able to report a progress compared to the algorithm of Berger et al. in time ${\ensuremath{\mathcal{O}}}star(5^n)$. In fact we do not show a new algorithm but we provide a refined analysis of the previous one. The new analysis is tight (up to a polynomial factor). \begin{theorem} \label{thm:expspace} There is an algorithm that solves \ProblemName{Many Visits TSP} in time and space ${\ensuremath{\mathcal{O}}}star(4^n)$. 
\end{theorem} In short, Berger et al.'s polyspace ${\ensuremath{\mathcal{O}}}star(16^{n+o(n)})$ time algorithm iterates through all $O(4^n)$ degree sequences of an outbranching, finds the cheapest outbranching for each sequence in time $O(4^{n+o(n)})$, and completes it to satisfy the degree constraints using a polynomial time flow computation. Note that it is hard to speed up the cheapest outbranching routine, because for the sequence of $n-1$ ones and one zero we get essentially the TSP, for which the best known polynomial space algorithm takes time $O(4^{n+o(n)})$~\cite{GurevichShelah}. However, we are still able get a significant speed up of their algorithm, roughly, by using a more powerful minimum cost flow network, which allows for computing the cheapest outbranchings in smaller subgraphs. \begin{theorem} \label{thm:polyspace} There is an algorithm that solves \ProblemName{Many Visits TSP} in time ${\ensuremath{\mathcal{O}}}star(7.88^n)$ and polynomial space. \end{theorem} \heading{Organization of the paper} In Section~\ref{sec:reduction} we show that, essentially, using a polynomial time preprocessing we can reduce an instance of \ProblemName{Many Visits TSP} to an equivalent one but with demands ${\sf in}$, ${\sf out}$ bounded $O(n^2)$. This reduction is a crucial prerequisite for Section~\ref{sec:2^n} where we prove Theorem~\ref{thm:bnd-tsp}. Next, in Section~\ref{sec:general} we prove Theorem~\ref{thm:expspace} and in Section~\ref{sec:polyspace} we prove Theorem~\ref{thm:polyspace}. We note that in these two sections we do not need the reduction from Section~\ref{sec:reduction}, however, in practice, applying it, which should speed-up the flow computations used in both algorithms described there. Finally, in Section~\ref{sec:approx} we show Theorem~\ref{thm:aprox} and we discuss further research in Section~\ref{sec:further}. 
\section{Preliminaries} We use Iverson bracket, i.e., if $\alpha$ is a logical proposition, then the expression $[\alpha]$ evaluates to $1$ when $\alpha$ is true and $0$ otherwise. For two integer-valued functions $f,g$ on the same domain $D$, we write $f\le g$ when $f(x)\le g(x)$ for every $x\in D$. Similarly, $f+g$ (resp. $f-g$) denote the pointwise sum (difference) of $f$ and $g$. This generalizes to functions on different domains $D_f$, $D_g$ by extending the functions to $D_f\cup D_g$ so that the values outside the original domain are $0$. For a cost function $d:V^2\rightarrow\Zq \cup\{\infty\}$, and a multiplicty function $m:V^2\rightarrow\mathbb{Z}q$ we denote the cost of $m$ as $d(m)=\sum_{u,v\in V^2}d(u,v)m(u,v)$. \heading{Multisets} Recall that a {\em multiset} $A$ can be identified by its {\em multiplicity }function $m_A:U\rightarrow\mathbb{Z}_{\ge 0}$, where $U$ is a set. We write $e\in A$ when $e\in U$ and $m_A(e)>0$. Consider two multisets $A$ and $B$. We write $A\subseteq B$ when for every $e\in A$ we have $e\in B$ and $m_A(e) \le m_B(e)$. Also, $A=B$ when $A\subseteq B$ and $B\subseteq A$. Assume w.l.o.g.\ that $m_A$ and $m_B$ have the same domain $U$. Operations on multisets are defined by the corresponding multiplicites as follows: for every $e\in U$, we have $m_{A\cup B}(e)=\max\{m_A(e),m_B(e)\}$, $m_{A\cap B}(e)=\min\{m_A(e),m_B(e)\}$, $m_{A\setminus B}(e)=\max\{m_A(e)-m_B(e),0\}$, $m_{A\bigtriangleup B}(e)=m_{(A\setminus B)\cup(B\setminus A)}=|m_A(e)-m_B(e)|$. This notation extends to the situation when $A$ or $B$ is a set, by using the indicator function $m_A(e)=[e\in A]$. \heading{Directed graphs} Directed graphs (also called digraphs) in this paper can have multiple edges and multiple loops, so sets $E(G)$ will in fact be multisets. We call a directed graph {\em simple} if it has no multiple edges or loops. We call it {\em weakly simple} if it has no multiple edges or multiple loops (but single loops are allowed). 
For a digraph $G$ by $G^\downarrow$ we denote the {\em support} of $G$, i.e., the weakly simple graph on the vertex set $V(G)$ such that $E(G^\downarrow)=\{(u,v) \mid \text{$G$ has an edge from $u$ to $v$}\}$. Given a digraph $G=(V,E)$ we define its {\em multiplicity function} $m_G:V^2\rightarrow\mathbb{Z}q$ as the multiplicity function of its edge multiset, i.e., for any pair $u,v\in V$, we put $m_G(u,v)=m_{E}((u,v))$. Conversely, for a function $m:V^2\rightarrow\mathbb{Z}q$ we define the {\em thick graph} $G_m=(V,E)$ so that $m_G=m$. Abusing a notation slightly, we will identify $m$ and $G_m$, e.g., we can say that $m$ is strongly connected, contains a subgraph, etc. We call a directed graph {\em connected} if the underlying undirected graph is connected. Similarly, a {\em connected component} of a digraph $G$ is a subgraph of $G$ induced by a vertex set of a connected component of the underlying undirected graph. For a graph $G$ (directed or undirected) and a subset $X\subseteq V(G)$, by $G[X]$ we denote the subgraph induced by $X$. \heading{Solutions} The following observation follows easily from known properties of Eulerian digraphs. \begin{observation} \label{obs:solutions} \ProblemName{Many Visits TSP} has a tour of cost $c$ if and only if there is a {\em multiplicity function} $m_G:V^2\rightarrow\mathbb{Z}q$ of cost $c$ such that $m$ contains a spanning connected subgraph. \end{observation} Thanks to Observation~\ref{obs:solutions}, in the remainder of this paper we refer to multiplicity functions as solutions of MVTSP (and some related problems which we are going to define). By standard arguments, the multiplicity function can be transformed to a tour in time ${\ensuremath{\mathcal{O}}}(\ell)$. Moreover, Grigoriev and Van de Klundert~\cite{Grigoriev2006} describe an algorithm which transforms it to a compressed representation of the tour in time $O(n^4 \log{\ell})$. 
\heading{Out-trees} An {\em out-tree} is the digraph obtained from a rooted tree by orienting all edges away of the root. If an out-tree $T$ is a subgraph of a directed graph $G$ and additionally $T$ spans the whole vertex set $V(G)$ we call $T$ an {\em outbranching}. The sequence $\{{\sf outdeg}_T(v)\}_{v\in V(T)}$ is called the {\em outdegree sequence} of $T$. Consider a set of vertices $X\subseteq V$, $|X|\ge 2$. \begin{lemma}[Berger et al.~\cite{berger-soda}, Lemma 2.4] \label{lem:out-sequence} A sequence of nonnegative integers $\{d_v\}_{v\in X}$ is an outdegree sequence of an out-tree spanning $X$ and rooted at $r\in X$ if and only if $(i)$ $d_r\ge 1$ and $(ii)$ $\sum_{v\in X}d_v = |X|-1$. \end{lemma} A sequence $\{d_v\}_{v\in X}$ that satisfies $(i)$ and $(ii)$ will be called an {\em out-tree sequence rooted at $r$}, or {\em outbranching sequence rooted at $r$} when additionally $X=V$. A $\delta$-out-tree means any subtree spanning $X$ with outdegree sequence $\delta$. \section{Reduction to small demands} \label{sec:reduction} The goal of this section is to show that, essentially, using a polynomial time preprocessing we can reduce an instance of \ProblemName{Many Visits TSP} to an equivalent one but with demands ${\sf in}$, ${\sf out}$ bounded $O(n^2)$. Consider the following problem, for a family of simple digraphs ${\ensuremath{\mathcal{F}}}$. \defproblem{\ProblemName{Fixed Degree $\Ff$-Subgraph}}{$d:V^2\rightarrow\mathbb{Z}q \cup \{\infty\}$, ${\sf in}, {\sf out}: V \rightarrow \mathbb{Z}q$}{Find a function $m:V^2\rightarrow \mathbb{Z}q$ such that \begin{enumerate}[$(i)$] \item $G_m$ contains a member of ${\ensuremath{\mathcal{F}}}$ as a spanning subgraph, \item for every $v\in V$ we have ${\sf in}(v)={\sf indeg}_{G_m}(v)$ and ${\sf out}(v)={\sf outdeg}_{G_m}(v)$, and \end{enumerate} so as to minimize the value of $d(m)=\sum_{v,w\in V}d(v,w)m(v,w)$. 
} In this paper, we will consider two versions of the problem: when ${\ensuremath{\mathcal{F}}}$ is the family of all oriented trees, called \ProblemName{Fixed Degree Connected Subgraph}, and when ${\ensuremath{\mathcal{F}}}$ is the family of all out-trees with a fixed root $r$, called \ProblemName{Fixed Degree Subgraph With Outbranching}. The role of ${\ensuremath{\mathcal{F}}}$ is to force connectivity of the instance. Other choices for ${\ensuremath{\mathcal{F}}}$ can also be interesing, for example Cosmadakis and Papadimitriou~\cite{papadimitriou} consider the family of minimal Eulerian digraphs. When considering the instance of \ProblemName{Fixed Degree $\Ff$-Subgraph} we will use the notation $n=|V|$ and $\ell=\sum_{v\in V}{\sf in}(v)$. (Clearly, we can assume that also $\ell=\sum_{v\in V}{\sf out}(v)$, for otherwise there is no solution.) Observe that if the image of $d$ is $\{0,+\infty\}$ we get the natural unweighted version, where we are given a graph with edge set $d^{-1}(0)$ and the goal is to decide if one can choose multiplicities of the edges so that the resulting digraph contains a member of ${\ensuremath{\mathcal{F}}}$ and its in- and outdegrees match the demands of ${\sf in}$ and ${\sf out}$. The following observation follows by standard properties of Eulerian cycles in digraphs and the fact that every strongly connected graph contains an outbranching rooted at arbitrary vertex. \begin{observation} \label{obs:red} \ProblemName{Many Visits TSP} is a special case of both \ProblemName{Fixed Degree Connected Subgraph} and \ProblemName{Fixed Degree Subgraph With Outbranching} with ${\sf in}(v)={\sf out}(v)=k(v)$ for every vertex $v\in V$. \end{observation} In the following lemma, we consider the relaxed problem \ProblemName{Fixed Degree Subgraph}, defined exactly as \ProblemName{Fixed Degree $\Ff$-Subgraph}, but dropping the constraint that solutions must contain a member of ${\ensuremath{\mathcal{F}}}$. 
In what follows, $s_n({\ensuremath{\mathcal{F}}})=\max_{\substack{G\in {\ensuremath{\mathcal{F}}}, |V(G)|= n}}|E(G)|$. \begin{lemma} \label{lem:diff} Fix an input instance $d:V^2\rightarrow\Zq \cup\{\infty\}$, ${\sf in},{\sf out}:V^2\rightarrow \mathbb{Z}q$. For every optimal solution $r$ of \ProblemName{Fixed Degree Subgraph} there is an optimal solution $c'$ of \ProblemName{Fixed Degree $\Ff$-Subgraph} such that for every $u,v\in V$ \[|r(u,v) - c'(u,v)|\le s_{|V|}({\ensuremath{\mathcal{F}}}).\] \end{lemma} \begin{proof} Let $c$ be an arbitrary optimal solution of \ProblemName{Fixed Degree $\Ff$-Subgraph} and let $B$ be an arbitrary graph from ${\ensuremath{\mathcal{F}}}$ which is a spanning subgraph of $G_c$. Our plan is to build an optimal solution $c'$ of \ProblemName{Fixed Degree $\Ff$-Subgraph} which contains $B$ and does not differ too much from $r$. Define multisets $A_c = E(G_{c}) \setminus E(G_{r})$, $A_r = E(G_{r}) \setminus E(G_{c})$ and $A=A_c\cup A_r= E(G_{c}) \bigtriangleup E(G_{r})$. In what follows, by an {\em alternating cycle} we mean an even cardinality set of edges \[\{(v_0,v_1),(v_2,v_1),(v_2,v_3),(v_4,v_3)\ldots,(v_{2\ell-2},v_{2\ell-1}),(v_0,v_{2\ell-1})\},\] where edges come alternately from $A_c$ and $A_r$. Note that an alternating cycle is not really a directed cycle, it is just an orientation of a simple undirected cycle. Note that for every vertex $v\in V$, among the edges in $A$ that enter (resp.\ leave) $v$ the number of edges from $A_c$ is the same as the number of edges from $A_r$ (counted with corresponding multiplicities), since both $c$ and $r$ satisfy the degree constraints for the same instance. It follows that $A$ can be decomposed into a multiset ${\ensuremath{\mathcal{C}}}$ of alternating simple cycles, i.e., \[m_A=\sum_{C\in{\ensuremath{\mathcal{C}}}}m_C,\] where $m_C:V^2\rightarrow\mathbb{Z}_{\ge 0}$ and for each pair $u,v\in V$ we have $m_C(u,v)=[(u,v)\in C]\cdot m_{\ensuremath{\mathcal{C}}}(C)$. 
To clarify, we note that the sum above is over all cycles in ${\ensuremath{\mathcal{C}}}$, and not over all copies of cycles. Denote $B^+=E(B)\setminus E(G_{r})$. Since $B^+\subseteq A_c$, for each $e\in B^+$, there is at least one cycle in ${\ensuremath{\mathcal{C}}}$ that contains $e$. We choose an arbitrary such cycle and we denote it by $C_e$. (Note that it may happen that $C_e=C_{e'}$ for two different edges $e,e'\in B^+$.) Let ${\ensuremath{\mathcal{C}}}^+=\{C_e \mid e\in B^+\}$. Then we define $c'$, by putting for every $u,v\in V$ \begin{equation} \label{eq:c'} c'(u,v)=r(u,v) + (-1)^{[(u,v)\in A_r]}\sum_{C\in{\ensuremath{\mathcal{C}}}^+}[(u,v)\in C]. \end{equation} In other words, $c'$ is obtained from $r$ by iterating over all cycles in $C\in {\ensuremath{\mathcal{C}}}^+$, and adding one copy of each edge of $C\cap A_c$ and removing one copy of each edge of $C\cap A_r$. Let us show that $G_{c'}$ contains $B$. This is trivial for every $e\in B^+$. When $e\in E(B) \cap E(G_{r})$, consider two cases. If $e\not\in A_r$, then $c'(e)\ge r(e)$, so $e\in G_{c'}$. If $e\in A_r$, $m_A(e)=r(e)-c(e)$. Then $c'(e)= r(e)-\sum_{C\in{\ensuremath{\mathcal{C}}}^+}[(u,v)\in C]\ge r(e)-m_A(e)=c(e)\ge 1$, where the last inequality follows since $B\subseteq G_c$. To see that $c'$ satisfies the degree constraints, recall that $r$ does so, and note that if in~\eqref{eq:c'} we consider only the summands corresponding to a single cycle $C\in{\ensuremath{\mathcal{C}}}^+$, then for every vertex we either add one outgoing edge and remove one outgoing edge, or add one ingoing edge and remove one ingoing edge, or we do not change the set of edges incident to it. For a cycle $C\in{\ensuremath{\mathcal{C}}}$ let $\delta(C)=d(A_c \cap C)-d(A_r \cap C)$. Observe that for every cycle $C\in{\ensuremath{\mathcal{C}}}$ we have $\delta(C)\ge 0$, for otherwise $E(G_{r})\setminus (C\cap A_r) \cup (C\cap A_c)$ contradicts the optimality of $r$. 
It follows that \begin{equation} d(c')=d(r)+\sum_{C\in{\ensuremath{\mathcal{C}}}^+ } \delta(C) \le d(r)+\sum_{C\in{\ensuremath{\mathcal{C}}}} \delta(C) = d(c). \end{equation} Hence, since $c$ is optimal solution of \ProblemName{Fixed Degree $\Ff$-Subgraph}, we get that $c'$ is optimal solution of \ProblemName{Fixed Degree $\Ff$-Subgraph} as well. Moreover, by~\eqref{eq:c'}, for every $u,v\in V$, \begin{equation} |c'(u,v)-r(u,v)| \le |{\ensuremath{\mathcal{C}}}^+| \le |B| \le s_{|V|}({\ensuremath{\mathcal{F}}}). \end{equation} This ends the proof. \end{proof} As noted in~\cite{papadimitriou,berger-arxiv}, \ProblemName{Fixed Degree Subgraph} can be solved by a reduction to minimum cost flow. By applying Orlin's algorithm~\cite{Orlin93} we get the following. \begin{observation}[Folklore,~\cite{papadimitriou,berger-arxiv}] \label{obs:flows} \ProblemName{Fixed Degree Subgraph} can be solved in time $O(n^3 \log n)$. \end{observation} \begin{theorem}[Kernelization] \label{thm:kernel} There is a polynomial time algorithm which, given an instance $I=(d,{\sf in},{\sf out})$ of \ProblemName{Fixed Degree $\Ff$-Subgraph} outputs an instance $I'=(d,{\sf in}',{\sf out}')$ of the same problem and a function $f:V^2\rightarrow\mathbb{Z}q$ such that \begin{enumerate}[$(i)$] \item ${\sf in}'(v),{\sf out}'(v)= {\ensuremath{\mathcal{O}}}(n\cdot s_n({\ensuremath{\mathcal{F}}}))$ for every vertex $v$, \item if $m^*$ is an optimal solution for $I'$, then $f+m^*$ is an optimal solution for $I$. \end{enumerate} The algorithm does need to know ${\ensuremath{\mathcal{F}}}$, just the value of $s_n({\ensuremath{\mathcal{F}}})$. \end{theorem} \begin{proof} Our algorithm begins by finding an optimal solution $r$ of \ProblemName{Fixed Degree Subgraph} using Observation~\ref{obs:flows}. 
Define $f_0:V^2\rightarrow \mathbb{Z}q$, where for every $v,w\in V$ we put $f_0(v,w)=\max\{r(v,w)-s_n({\ensuremath{\mathcal{F}}}),0\}.$ By Lemma~\ref{lem:diff}, there exists an optimal solution $c'$ for instance $I$ such that $c'\ge f_0$. Now define $f:V^2\rightarrow \mathbb{Z}q$, where for every $v,w\in V$ we put $f(v,w)=\max\{f_0(v,w)-1,0\}$. Finally, we put ${\sf in}'(v)={\sf in}(v)-\sum_{w\in V}f(w,v)$ and ${\sf out}'(v)={\sf out}(v)-\sum_{w\in V}f(v,w)$. The algorithm outputs $I'=(d,{\sf in}',{\sf out}')$ and $f$. In what follows, we show that the output has the desired properties. For the property $(i)$, consider any vertex $v\in V$ and observe that $\sum_{w\in V}f(v,w)\ge \sum_{w\in V}(f_0(v,w)-1) \ge \sum_{w\in V}(r(v,w)-s_n({\ensuremath{\mathcal{F}}})-1)$. Since $r$ is a feasible solution of $I$, we have ${\sf out}(v)=\sum_{w\in V}r(v,w)$. It follows that ${\sf out}'(v)\le n(1+s_n({\ensuremath{\mathcal{F}}})) = O(n\cdot s_n({\ensuremath{\mathcal{F}}}))$ as required. The argument for ${\sf in}'(v)$ is symmetric. Now we focus on $(ii)$. Let $m^*$ be an optimal solution for $I'$. It is easy to check that $f+m^*$ satisfies the degree constraints for the instance $I$. Also, since $m^*$ contains a subgraph from ${\ensuremath{\mathcal{F}}}$, then $f+m^*$ contains the same subgraph. It follows that $f+m^*$ is a feasible solution of $I$. It suffices to show that $f+m^*$ is an optimal solution for $I$. Denote $r=c'-f$. Consider any pair $v,w\in V$ such that $c'(v,w)\ge 1$. We claim that $f(v,w)\le c'(v,w)-1$. Indeed, if $f_0(v,w)=0$ then $f(v,w)=0\le c'(v,w)-1$, and if $f_0(v,w)\ge 1$ then $f(v,w)=f_0(v,w)-1\le c'(v,w)-1$. It follows that $r(v,w)\ge 1$. In particular, since $c'$ contains a subgraph from ${\ensuremath{\mathcal{F}}}$, then also $r$ contains the same subgraph. It follows that $r$ is a feasible solution for $I'$ (the degree constraints are easy to check). Hence, $d(m^*) \le d(r)$. 
It follows that $d(f+m^*)\le d(r+f)=d(c')$, so $f+m^*$ is indeed an optimal solution for $I$. \end{proof} \section{The small costs case in time ${\ensuremath{\mathcal{O}}}star(2^nD)$} \label{sec:2^n} In this section we establish Theorem~\ref{thm:bnd-tsp}. We do it in a bottom-up fashion, starting with a simplified core problem, and next generalizing the solution in a few steps. \subsection{Unweighted decision version with small degree demands} \label{sec:bnd-decision} Consider the following problem. \defproblem{\ProblemName{Decision Unweighted Fixed Degree Connected Subgraph}}{a digraph $G=(V,E)$, ${\sf in}, {\sf out}: V \rightarrow \mathbb{Z}q$}{Is there a function $m:V^2\rightarrow\mathbb{Z}q$ such that $G^\downarrow_m$ is a connected subgraph of $G$ and for every $v\in V$ we have ${\sf in}(v)={\sf indeg}_{G_m}(v)$ and ${\sf out}(v)={\sf outdeg}_{G_m}(v)$? } Note that \ProblemName{Decision Unweighted Fixed Degree Connected Subgraph} generalizes the directed Hamiltonian cycle problem, which is known to be solvable in ${\ensuremath{\mathcal{O}}}star(2^n)$ time and polynomial space. In this section we show that this running time can be obtained for the more general problem as well, though we need to allow some randomization. \begin{theorem} \label{thm:bnd-decision} There is a randomized algorithm which solves an instance $I=({\sf in},{\sf out})$ of \ProblemName{Decision Unweighted Fixed Degree Connected Subgraph} in time ${\ensuremath{\mathcal{O}}}star(2^n\mathop{\rm{poly}}(M))$ and polynomial space, where $M=\max_{v}\max\{{\sf in}(v),{\sf out}(v)\}$. The algorithm is Monte Carlo with one-sided error, i.e., the positive answer is always correct and the negative answer is correct with probability at least $p$, for any constant $p<1$. \end{theorem} Our strategy will be to reduce our problem to detecting a perfect matching in a bipartite graph with an additional connectivity constraint. We define a bipartite graph $B_G=(O,I,E(B_G))$ as follows. 
Let $I=\{v^I_1,\ldots,v^I_{{\sf in}(v)} \mid v\in V(G)\}$, $O=\{v^O_1,\ldots,v^O_{{\sf out}(v)} \mid v\in V(G)\}$, and $E(B_G)=\{u^O_iv^I_j\mid (u,v)\in E(G)\}$. \begin{observation} $|I|=|O|={\ensuremath{\mathcal{O}}}(nM)$ and $|E(B_G)|\le |E(G)|M^2 = {\ensuremath{\mathcal{O}}}(n^2M^2)$. \end{observation} For an undirected graph $H$ by ${\ensuremath{\mathcal{PM}}}(H)$ we denote the set of perfect matchings in $H$. We say that a matching $M$ in $B_G$ is {\em connected} when for every cut $(X,V\setminus X)$ with $\emptyset\ne X\subsetneq V$ the matching $M$ contains an edge $u^O_iv^I_j$ such that $u\in X$ and $v\in V\setminus X$ or $v\in X$ and $u\in V\setminus X$. For a matching $M$ in $B_G$ we define a {\em contraction} of $M$ as a function $m:V^2\rightarrow \mathbb{Z}q$ such that $m(u,v)=|\{u^O_iv^I_j \in M \mid i\in[{\sf out}(u)], j\in[{\sf in}(v)]\}|$. In other words $G_{m}$ is obtained from $M$ by (1) orienting every edge from $O$ to $I$ and (2) identifying all vertices in $\{v^I_1,\ldots,v^I_{{\sf in}(v)}\}\cup\{v^O_1,\ldots,v^O_{{\sf out}(v)}\}$ for every $v\in V$, and keeping the multiple edges and loops.
Let us denote them $e^O_{v,1},\ldots,e^O_{v,{\sf out}(v)}$. Similarly, let us denote all the edges entering $v$ by $e^I_{v,1},\ldots,e^I_{v,{\sf in}(v)}$. Then we define $M$ as the set of edges of the form $u^O_iv^I_j$ such that $G_m$ contains an edge $e=e^O_{u,i}=e^I_{v,j}$. The fact that $M$ is a perfect matching is clear from the construction. Also, $M$ is connected, for otherwise $G_m$ is not connected. \end{proof} From now on, let $B=(O,I,E(B))$ be an arbitrary subgraph of $B_G$. Define the following multivariate polynomial over $\field{2^t}$, for an integer $t$ to be specified later. \begin{equation} R = \sum_{\substack{M\in{\ensuremath{\mathcal{PM}}}(B) \\ \text{$M$ is connected}}}\prod_{e\in M}x_e \end{equation} \begin{lemma} \label{lem:polynom-equiv} $R$ is not the zero polynomial if and only if $B$ contains a connected perfect matching. \end{lemma} \begin{proof} It is clear that if $R$ is non-zero then $B$ contains a connected perfect matching. For the reverse implication it suffices to notice that every summand in $R$ has a different set of variables, so it does not cancel out with other summands over $\field{2^t}$. \end{proof} Our strategy is to test whether $R$ is non-zero by means of DeMillo--Lipton--Schwartz--Zippel Lemma, which we recall below. \begin{lemma}[DeMillo and Lipton~\cite{DeMilloLipton1978}, Schwartz~\cite{schwartz}, Zippel~\cite{zippel}] \label{lem:zs} Let $P(x_1, x_2, \ldots, x_m)$ be a nonzero polynomial of degree at most $d$ over a field $\mathbb{F}$ and let $S$ be a finite subset of $\mathbb{F}$. Then, the probability that $P$ evaluates to zero on a random element $(a_1, a_2, \ldots , a_m)\in S^m$ is bounded by $d/|S|$. \end{lemma} By Lemmas~\ref{lem:polynom-equiv} and \ref{lem:zs}, the task reduces to {\em evaluating} $R$ fast. To this end, we will define a different polynomial $P$ which is easier to evaluate and turns out to be equal to $R$ over $\field{2^t}$. Consider a subset $X\subseteq V$. 
Let $I_X=\{v^I_i \in I \mid v\in X, i=1,\ldots,{\sf in}(v)\}$ and $O_X=\{v^O_i \in O \mid v\in X, i=1,\ldots,{\sf out}(v)\}$. Abusing the notation slightly, we will denote $B[X]=B[I_X\cup O_X]$. Define the following polynomial. \begin{equation} P_X = \sum_{M\in{\ensuremath{\mathcal{PM}}}(B[X])} \prod_{e\in M}x_e \end{equation} In what follows, $v^*$ is an arbitrary but fixed vertex of $V$. Define yet another polynomial. \begin{equation} P = \sum_{\substack{X\subseteq V\\v^*\in X}} P_X P_{V\setminus X}. \end{equation} \begin{lemma} $P=R$. \end{lemma} \begin{proof} For a matching $M$ in $B$ we say that a set $X\subseteq V$ is {\em consistent} with $M$ when $M$ does not contain an edge $u^O_iv^I_j$ such that $u\in X$ and $v\in V\setminus X$ or $v\in X$ and $u\in V\setminus X$. The family of all subsets of $V$ that are consistent with $M$ will be denoted by ${\ensuremath{\mathcal{C}}}(M)$. Then we can rewrite $P$ as follows. \[ \begin{aligned} P=& \sum_{\substack{X\subseteq V\\v^*\in X}} \sum_{M_1\in{\ensuremath{\mathcal{PM}}}(B[X])} \sum_{M_2\in{\ensuremath{\mathcal{PM}}}(B[V\setminus X])} \prod_{e\in M_1\cup M_2} x_e & \text{[definition]}\\ =& \sum_{M\in{\ensuremath{\mathcal{PM}}}(B)} \sum_{\substack{X\in{\ensuremath{\mathcal{C}}}(M)\\v^*\in X}} \prod_{e\in M} x_e & \text{[group by $M=M_1\uplus M_2$]}\\ =& \sum_{M\in{\ensuremath{\mathcal{PM}}}(B)} |\{X\in{\ensuremath{\mathcal{C}}}(M)\mid v^*\in X\}| \prod_{e\in M} x_e & \text{[trivial]} \end{aligned} \] Let us consider a perfect matching $M\in{\ensuremath{\mathcal{PM}}}(B)$ and the corresponding contraction $m$. Observe that the number of sets that are consistent with $M$ and contain a vertex $v^*$ is equal to $2^{\mathop{\rm{cc}}(M)-1}$, where $\mathop{\rm{cc}}(M)$ is the number of connected components of $G_m$. Indeed, when $X$ is consistent with $M$, then for every connected component $Q$ of $G_m$, either $V(Q)\subseteq X$ or $V(Q)\subseteq V\setminus X$. 
For the component that contains $v^*$ the choice is fixed, while every choice for the remaining components defines a set consistent with $M$. It follows that when $M$ is not connected $\mathop{\rm{cc}}(M)\ge 2$, and the value of $2^{\mathop{\rm{cc}}(M)-1}$ is equal to $0$ in $\field{2^t}$, so the corresponding summand vanishes. On the other hand, if $M$ is connected, the corresponding summand equals just $\prod_{e\in M} x_e$ and it does not cancel out with another summand because the monomial has a unique set of variables. It follows that $P=R$. \end{proof} \begin{lemma}[Tutte, Lovasz~\cite{tutte,lovasz-tutte}] \label{lem:tutte} For an arbitrary set $X\subseteq V$, the polynomial $P_X$ can be evaluated using $\mathop{\rm{poly}}(n+M)$ field operations. \end{lemma} \begin{proof} Compute the determinant of the corresponding Tutte matrix of dimension $|O|\times|I|$. \end{proof} Let us now fix our field, namely $t=\lceil 1+\log n+\log M\rceil$. Since arithmetic operations in $\field{2^t}$ can be performed in time ${\ensuremath{\mathcal{O}}}(t\log^2t)={\ensuremath{\mathcal{O}}}(\log(n+M)\log^2\log(n+m))$, by the definition of $P$ and Lemma~\ref{lem:tutte} we get the following corollary. \begin{corollary} \label{cor:poly-P-eval} $P$ can be evaluated in time $2^n\mathop{\rm{poly}}(n+M)$. \end{corollary} \begin{lemma} \label{lem:matching-decision} There is a randomized algorithm which decides if $B$ contains a connected perfect matching in time ${\ensuremath{\mathcal{O}}}star(2^n\mathop{\rm{poly}}(M))$ and polynomial space, where $M=\max_{v}\max\{{\sf in}(v),{\sf out}(v)\}$. The algorithm is Monte Carlo with one-sided error, i.e., the positive answer is always correct and the negative answer is correct with probability at least $p$, for any constant $p<1$. 
\end{lemma} \begin{proof} The algorithm evaluates polynomial $P$ using Corollary~\ref{cor:poly-P-eval} substituting a random element of $\field{2^t}$ for each variable, and reports `yes' when the evaluation is nonzero and `no' otherwise. If it reported 'yes', then $P$ was a non-zero polynomial and by Lemma~\ref{lem:polynom-equiv} the answer is correct. Assume it reported 'no' for a yes-instance. By Lemma~\ref{lem:polynom-equiv} $P$ is non-zero. Since $\deg P = |I| \le nM$, by Lemmma~\ref{lem:zs} the probability that $P$ evaluated to $0$ is bounded by $\deg P / 2^t \le 1/2$ and we can make this probability arbitrarily small by repeating the whole algorithm a number of times, and reporting `yes' if at least one evaluation was nonzero. The claim follows. \end{proof} Theorem~\ref{thm:bnd-decision} follows immediately from Lemma~\ref{lem:red-to-matching} and Lemma~\ref{lem:matching-decision} applied to $B_G$. \subsection{Finding the solution} \begin{lemma} \label{lem:finding} There is a randomized algorithm which, given a yes-instance of \ProblemName{Decision Unweighted Fixed Degree Connected Subgraph}, always returns the corresponding solution $m$ in expected time ${\ensuremath{\mathcal{O}}}star(2^n\mathop{\rm{poly}}(M))$. The time can be made deterministic at the cost of introducing arbitrarily small probability of failure. \end{lemma} In order to prove Lemma~\ref{lem:finding} we cast the problem in the setting of {\em inclusion oracles} from the work of Bj\"orklund et al.~\cite{syphilis}. Consider a universe $U$ and an (unknown) family of {\em witnesses} $\mathcal{F}\subseteq 2^U$. An {\em inclusion oracle} is a procedure which, given a query set $Y\subseteq U$, answers (either YES or NO) whether there exists at least one witness $W\in\mathcal{F}$ such that $W\subseteq Y$. Bj\"orklund et al. prove the following. 
\begin{theorem}[\cite{syphilis}] \label{thm:syphilis} There exists an algorithm that extracts a witness of size $k$ in $\mathcal{F}$ using in expectation at most $O(k\log |U|)$ queries to a randomized inclusion oracle that has no false positives but may output a false negative with probability at most $p\leq \frac{1}4$. \end{theorem} \begin{proof}[Proof of Lemma~\ref{lem:finding}] Let $U=E(B_G)$ and let ${\ensuremath{\mathcal{F}}}$ be the family of all connected perfect matchings in $B_G$. Note that $|U|=O(n^2M^2)$ and witnesses in ${\ensuremath{\mathcal{F}}}$ have all size $|I|=O(nM)$. Then, Lemma~\ref{lem:matching-decision} provides a randomized inclusion oracle and we can apply Theorem~\ref{thm:syphilis}. (If one insists on deterministic, and not expected, running time, it suffices to chose a sufficiently large constant $r$ and stop the algorithm if it exceeds the expected running time at least $r$ times --- by Markov's inequality, this happens with probability at most $1/r$.) \end{proof} \subsection{Proof of Theorem~\ref{thm:bnd-tsp}} In the lemma below we will adapt the construction from Section~\ref{sec:bnd-decision} to the weighted case in a standard way, by introducing a new variable tracking the weight. \begin{lemma} \label{lem:bnd-tsp-small-demands} There is a randomized algorithm which solves an instance $I=(d,{\sf in},{\sf out},w)$ of \ProblemName{Fixed Degree Connected Subgraph} in time ${\ensuremath{\mathcal{O}}}star(2^nD\mathop{\rm{poly}}(M))$ and polynomial space, where $M=\max_{v}\max\{{\sf in}(v),{\sf out}(v)\}$ and $D$ is the maximum integer value of $d$. The algorithm returns a minimum weight solution with probability at least $p$, for any constant $p<1$. \end{lemma} \begin{proof} Define $G=(V,E)$ where $E=\{(u,v)\in V^2\mid d(u,v)\in\mathbb{Z}q\}$. Let $R'$ be the polynomial obtained from $R$ by replacing every variable $x_e$ for $e=u^O_iv^I_j\in E(B_G)$ by the product $x_e\cdot y^{d(u,v)}$, where $y$ is a new variable. 
Proceed similarly with $P$, obtaining $P'$. By Lemma~\ref{lem:polynom-equiv}, $P'=R'$. Decompose $R'$ as $R'=\sum_{i=0}^{|I|\cdot D}R'_iy^i$, where $R'_i$, for every $i=0,\ldots,|I|\cdot D$, is a polynomial in variables $\{x_e\}_{e\in E(B_G)}$. The monomials in $R'_i$ enumerate all matchings $M$ such that the contraction $m$ of $M$ has weight $d(m)=i$. By the construction in the proof of Lemma~\ref{lem:red-to-matching} $R'_i$ is non-zero if and only if instance $I$ has a solution of weight $i$. Using Lagrange interpolation, we can recover the value of each $R'_i$ for random values of the variables $\{x_e\}_{e\in E(B_G)}$ (the values are the same for all the polynomials). The interpolation algorithm requires $|I|\cdot D={\ensuremath{\mathcal{O}}}(nMD)$ evaluations of $R'$. Since $R'=P'$, by Lemma~\ref{cor:poly-P-eval} each of them takes $2^n\mathop{\rm{poly}}(n+M))$ time. Our algorithm reports the minimum $w$ such that $R'_w$ evaluated to a non-zero element of $\field{2^t}$, or $+\infty$ if no such $w$ exists. The solution of weight $w$ is then found using Lemma~\ref{lem:finding}. The event that the optimum value $w^*$ is not reported means that $R'_{w^*}$ is a non-zero polynomial that evaluated to 0 at the randomly chosen values. By Lemma~\ref{lem:zs} this happens with probability at most $\deg P / 2^t \le 1/2$, and one can make this probability arbitrarily small by standard methods. \end{proof} Theorem~\ref{thm:bnd-tsp} follows now immediately by applying Theorem~\ref{thm:kernel} which reduces the general problem to the $M={\ensuremath{\mathcal{O}}}(n^2)$ case and solving the resulting instance by Lemma~\ref{lem:bnd-tsp-small-demands}. 
Theorem~\ref{thm:bnd-tsp} says in particular that if finite weights are bounded by a polynomial in $n$ then we can solve \ProblemName{Many Visits TSP} in time ${\ensuremath{\mathcal{O}}}star(2^n)$ and polynomial space by a randomized algorithm with no false positives and with false negatives with arbitrarily small constant probability. \section{The general case} \label{sec:general} In this section we prove Theorem~\ref{thm:expspace}, i.e., we show an algorithm solving \ProblemName{Many Visits TSP} in time ${\ensuremath{\mathcal{O}}}star(4^n)$. In fact, we do not introduce a new algorithm, but we consider an algorithm by Berger et al.~(Algorithm 5 in~\cite{berger-soda}) and we provide a refined analysis, resulting in an improved running time bound ${\ensuremath{\mathcal{O}}}star(4^n)$, which is tight up to a polynomial factor. Let us recall the algorithm of Berger et al., in a slightly changed notation. In fact, they solve a slightly more general problem, namely \ProblemName{Fixed Degree Subgraph With Outbranching}. Let $I=(d,{\sf in},{\sf out},r)$ be an instance of this problem, i.e., we want to find a solution $m:V^2\rightarrow\mathbb{Z}q$ that satisfies the degree constraints specified by ${\sf in}$ and ${\sf out}$ and contains an outbranching rooted at $r$. In what follows we assume $V=\{1,\ldots,n\}$ and $r=1$. Consider an outbranching sequence $\{\delta_v\}_{v \in V}$ rooted at $r=1$. In what follows, all outbranching sequences will be rooted at $1$, so we skip specifying the root. Let $T_\delta$ be a minimum cost outbranching among all outbranchings with outdegree sequence $\delta$ and let $r_\delta$ be an optimum solution of \ProblemName{Fixed Degree Subgraph} for instance $(d,{\sf in}',{\sf out}')$ where ${\sf out}'={\sf out}-{\sf outdeg}_T$ and ${\sf in}'={\sf in}-{\sf indeg}_T$. 
Berger et al.\ note that then $m_\delta=m_{T_\delta}+r_\delta$ is a feasible solution for instance $I$ of \ProblemName{Fixed Degree Subgraph With Outbranching}, and moreover it has minimum cost among all solutions that contain an outbranching with outdegree sequence $\delta$. Since $r_\delta$ can be found in polynomial time by Observation~\ref{obs:flows}, in order to solve instance $I$ it suffices to find outbranchings $T_\delta$ for all outbranching sequences $\delta$ and return the solution $m_\delta$ of minimum cost. Hence, Theorem~\ref{thm:expspace} boils down to proving the following lemma. \begin{lemma} \label{lem:4^n} There is an algorithm which, for every outbranching sequence $\delta$, finds a minimum cost outbranching among all outbranchings with outdegree sequence $\delta$ and runs in time ${\ensuremath{\mathcal{O}}}star(4^n)$. \end{lemma} \newcommand{\ProblemName{BestOutbranching}}{\ProblemName{BestOutbranching}} \newcommand{\textsc{BestOutbranching}}{\textsc{BestOutbranching}} \newcommand{\textsf{first}}{\textsf{first}} \newcommand{\textsf{minCost}}{\textsf{minCost}} \newcommand{\textsf{best}}{\textsf{best}} \newcommand{\textsf{lastRmvd}}{\textsf{lastRmvd}} \newcommand{\textsf{bad}}{\textsf{bad}} We prove Lemma~\ref{lem:4^n} by using dynamic programming (DP). However, it will be convenient to present the DP as a recursive function \ProblemName{BestOutbranching} with two parameters, $S\subseteq V$ and $\{\delta_v\}_{v\in S}$ (see Algorithm~\ref{alg:general}). It is assumed that $1\in S$. We will show that $\textsc{BestOutbranching}(S,\delta)$ returns a minimum cost out-tree among all out-trees with outdegree sequence $\delta$ that are rooted at $1$ and span $S$. Our algorithm runs \ProblemName{BestOutbranching} for $S=V$ and all outbranching sequences $\delta:V\rightarrow\mathbb{Z}q$. 
Whenever \ProblemName{BestOutbranching} returns a solution for an input $(S,\delta)$, it is memoized (say, in an efficient dictionary), so that when \ProblemName{BestOutbranching} is called with parameters $(S,\delta)$ again, the output can be retrieved in polynomial time. \begin{algorithm} \begin{algorithmic} \caption{} \label{alg:general} \Function {BestOutbranching} {$S, \delta$} \State $v_\textsf{first} \gets \min\{v\in S\mid\delta_v = 0\}$ \If {$|S| = 2$} \Return $\{(1,v_\textsf{first})\}$. \Else \State $\textsf{minCost} \gets \infty$ \For {$w \in S$} \If {$(\delta_w \ge 1 \wedge w \neq 1) \vee (\delta_w \ge 2 \wedge w = 1)$} \State $S' \gets S\setminus\{v_\textsf{first}\}$ \State $\delta' \gets \delta|_{S'}$ \State $\delta'_w \gets \delta'_w - 1$ \State $R_w \gets \textsc{BestOutbranching}(S', \delta') \cup \{(w,v_\textsf{first})\}$ \If {$d(R_w) < \textsf{minCost}$} \State $\textsf{minCost} \gets d(R_w)$ \State $\textsf{best} \gets R_w$ \EndIf \EndIf \EndFor \Return $\textsf{best}$ \EndIf \EndFunction \end{algorithmic} \end{algorithm} Let us define $\textsf{lastRmvd}(S) := \max (\{0, 1, 2, \ldots, n\} \setminus S)$ and $\textsf{bad}(S, \delta) := \{v \in S \mid v < \textsf{lastRmvd}(S) \wedge \delta_v = 0\}$. Let us call $(S, \delta)$ a {\em reachable state} if it meets the following conditions: \begin{enumerate}[$(i)$] \item $\delta_1 \ge 1$ \item $\sum_{v\in S} \delta_v = |S| - 1$ \item $|\textsf{bad}(S,\delta)| \le 1$ \end{enumerate} \begin{lemma} \label{lem:only-reachable} If function \ProblemName{BestOutbranching} is given a reachable state as input then all recursively called \ProblemName{BestOutbranching} will also be given only reachable states. \end{lemma} \begin{proof} Let us fix a reachable state $(S,\delta)$ for $|S|>2$ and consider the associated value $v_\textsf{first}$ from the algorithm. Denote $S' = S\setminus\{v_\textsf{first}\}$. Clearly, it suffices to show that all pairs $(S',\delta')$ created in the {\bf for} loop are reachable states. 
First, let us argue that $\textsf{bad}(S', \delta)=\emptyset$. There are two cases: \begin{itemize} \item Assume $|\textsf{bad}(S,\delta)| = 0$. In this case $v_\textsf{first} > \textsf{lastRmvd}(S)$ so $\textsf{lastRmvd}(S') = v_\textsf{first}$. Then, $\textsf{bad}(S', \delta)= \{v \in S' \mid v < \textsf{lastRmvd}(S') \wedge \delta_v = 0\}=\{v \in S \mid v < v_\textsf{first} \wedge \delta_v = 0\}=\emptyset$. \item Assume $|\textsf{bad}(S,\delta)| = 1$. Then, (1) $\textsf{lastRmvd}(S') = \textsf{lastRmvd}(S)$ because $\textsf{lastRmvd}(S) > v_\textsf{first}$ and (2) $\textsf{bad}(S,\delta) = \{v_\textsf{first}\}$. It follows that $\textsf{bad}(S', \delta)\stackrel{(1)}{=}\{v \in S' \mid v < \textsf{lastRmvd}(S) \wedge \delta_v = 0\} = \textsf{bad}(S, \delta)\setminus\{v_\textsf{first}\}\stackrel{(2)}{=}\emptyset$. \end{itemize} Let us consider the recursive call of \ProblemName{BestOutbranching} for a particular $w$. Sequence $\delta'|_{S'}$ differs from $\delta$ only at $w$, so $\textsf{bad}(S',\delta') \subseteq \{w\} \cup \textsf{bad}(S',\delta) = \{w\}$. This means that the condition $(iii)$ from the definition of reachable state holds for $(S',\delta')$. Since $(S,\delta)$ is reachable, $\delta_1\ge 1$. Then either $w\ne 1$ and $\delta'_1=\delta_1\ge 1$ or $w=1$ and $\delta'_1=\delta_1-1 \ge 1$, where the last inequality holds thanks to the condition in the {\bf if} statement in Algorithm~\ref{alg:general}. In both cases, $(i)$ holds for $(S',\delta')$. Finally, $(ii)$ is immediate by the definition of $\delta'$. It follows that $(S', \delta')$ is a reachable state, as required. \end{proof} \begin{lemma} \label{lem:correctness} If function \ProblemName{BestOutbranching} is given a reachable state $(S,\delta)$, it returns a cheapest out-tree $T$ rooted at vertex $1$, spanning $S$ and with outdegree sequence $\delta$. \end{lemma} \begin{proof} We will use induction on $|S|$. 
In the base case $|S| = 2$, there is only one outbranching spanning $S$ rooted at $1$, namely $\{(1, v_\textsf{first})\}$ and it is indeed returned by the algorithm. In the inductive step assume $|S| > 2$. By conditions $(i)$ and $(ii)$ in the definition of a reachable state and Lemma~\ref{lem:out-sequence}, there is at least one out-tree rooted at $1$, spanning $S$, and with outdegree sequence $\delta$. Let $T$ be a cheapest among all such out-trees. Vertex $v_\textsf{first}$ is a leaf of $T$, since $\delta_{v_\textsf{first}}=0$. At some point $w$ in the {\bf for} loop in Algorithm~\ref{alg:general} is equal to the parent $w^*$ of $v_\textsf{first}$ in $T$. Then, $T \setminus \{(w^*, v_\textsf{first})\}$ is an out-tree rooted at $1$, spanning $S'$, and with outdegree sequence $\delta'$. Since $(S',\delta')$ is a reachable state by Lemma~\ref{lem:only-reachable}, by the inductive hypothesis we know that a cheapest such out-tree $T'$ will be returned by $\textsc{BestOutbranching}(S', \delta')$. In particular, it means that $d(T')\le d(T\setminus\{(w^*,v_\textsf{first})\})$. Denote $R_{w^*}=T' \cup \{(w^*,v_\textsf{first})\}$. Then, $d(R_{w^*})=d(T')+d(w^*,v_\textsf{first})\le d(T\setminus\{(w^*,v_\textsf{first})\})+d(w^*,v_\textsf{first})=d(T)$. It follows that \ProblemName{BestOutbranching} returns a set of edges $\textsf{best}$ of cost at most $d(T)$. However $\textsf{best}=R_w$ for a vertex $w$ and by applying the induction hypothesis it is easy to see that $R_w$ is an out-tree rooted at $1$, spanning $S$ with outdegree sequence $\delta$. The claim follows. \end{proof} \newcommand{\bar{\delta}}{\bar{\delta}} \newcommand{\bar{s}}{\bar{s}} \begin{lemma} \label{lem:few-reachable} There are ${\ensuremath{\mathcal{O}}}star(4^n)$ reachable states. \end{lemma} \begin{proof} Any sequence of $n$ nonnegative integers that sums up to at most $n-1$ will be called an {\em extended sequence}. 
It is well known that there are exactly $\binom{2n-1}{n} < 2^{2n - 1} = {\ensuremath{\mathcal{O}}}(4^n)$ such sequences.
Every reachable state $(S,\delta)$ has the corresponding extended sequence $\{\bar{\delta}_v\}_{v\in V}$ defined by $\bar{\delta}|_S=\delta$ and $\bar{\delta}|_{V\setminus S}=0$. Since there are ${\ensuremath{\mathcal{O}}}(4^n)$ extended sequences, and each of them has ${\ensuremath{\mathcal{O}}}(n)$ corresponding reachable states, there are ${\ensuremath{\mathcal{O}}}(4^n) \cdot {\ensuremath{\mathcal{O}}}(n) = {\ensuremath{\mathcal{O}}}star(4^n)$ reachable states in total.
\begin{lemma}[Berger et al.~\cite{berger-arxiv}] \label{lem:berger:outbranching} There is a polynomial space algorithm running in time ${\ensuremath{\mathcal{O}}}(4^{n+o(n)})$ which, given an outdegree sequence $\{\delta_v\}_{v\in V}$, a cost function $d:V^2\rightarrow\mathbb{Z}q$, and a root $r\in V$ computes the cheapest outbranching rooted at $r$ with the required outdegrees. \end{lemma} More precisely, the ${\ensuremath{\mathcal{O}}}(16^{n+o(n)})$-time algorithm consists of the following steps: \begin{enumerate}[$(i)$] \item Enumerate all ${\ensuremath{\mathcal{O}}}(4^n)$ outbranching sequences \item For each outbranching sequence compute the cheapest outbranching with required degrees using Lemma~\ref{lem:berger:outbranching} in time ${\ensuremath{\mathcal{O}}}(4^{n+o(n)})$ \item For each of these outbranchings complete it to a solution of the original \ProblemName{Many Visits TSP} instance with an optimal solution of \ProblemName{Fixed Degree Subgraph} on the residual degree sequences (in polynomial time, by Observation~\ref{obs:flows}). \end{enumerate} The intuition behind our approach is as follows. We iterate over all subsets of vertices $R$. Here, $R$ represents our guess of the set of inner vertices of an outbranching in an optimal solution. Then we perform $(i)$ and $(ii)$ in the smaller subgraph induced by $R$. Finally, we replace $(iii)$ by a more powerful flow-based algorithm which connects the vertices in $V\setminus R$ to $R$, and at the same time computes a feasible solution of \ProblemName{Fixed Degree Subgraph} on the residual degree sequences, so that the total cost is minimized. Let $r=|R|$. Clearly, when $r$ is a small fraction of $n$, we get significant savings in the running time. The closer $r/n$ is to $1$ the smaller are the savings, but also the smaller is the number ${n \choose r}$ of sets $R$ to examine. In fact, the real algorithm is slightly more complicated. 
Namely, we fix an integer parameter $K$ and $R$ corresponds to the set of vertices left from an outbranching in an optimal solution after $K$ iterations of removing all leaves. The running time of our algorithm depends on $K$, because the algorithm actually guesses the layers of leaves in each iteration. The space complexity is polynomial and does not depend on $K$. In the end of this section, we show that our running time bound is minimized when $K=4$. \subsection{Our algorithm} Similarly as in Section~\ref{sec:general}, we solve the more general \ProblemName{Fixed Degree Subgraph With Outbranching}: for a given instance $I=(d,{\sf in},{\sf out},{\sf root})$ we want to find a solution $m:V^2\rightarrow\mathbb{Z}q$ that satisfies the degree constraints specified by ${\sf in}$ and ${\sf out}$ and contains an outbranching rooted at ${\sf root}$. Let $T$ be an arbitrary outbranching. We define a sequence $L_1(T), L_2(T), \ldots$ of subsets of $V(T)$ as follows. For $i \ge 1$ let $L_i(T)$ be the set of leaves of $T \setminus (L_1(T) \cup L_2(T) \cup \ldots \cup L_{i-1}(T))$ if $|V(T) \setminus (L_1(T) \cup \ldots \cup L_{i-1}(T))|>1$, and otherwise $L_i=\emptyset$. The sets $L_i(T)$ will be called \emph{leaf layers}. Denote $R_i(T)=V\setminus(L_1(T)\cup\cdots\cup L_i(T))$ for any $i\ge 1$. \begin{lemma} \label{obs:leaf-layers} For every $i\ge 1$ we have ${\sf root} \in R_i(T)\setminus L_{i+1}(T)$, $|L_i(T)| \ge |L_{i+1}(T)|$ and $|L_{i+1}|\le\frac{n-|R_i(T)|}{i}$. \end{lemma} \begin{proof} In this proof we skip the `$(T)$' in $L_i$ and $R_i$ because there is no ambiguity. Assume ${\sf root}\in L_i$ for some $i\ge 1$. It means that ${\sf root}$ is a leaf in $T \setminus (L_1 \cup L_2 \cup \ldots \cup L_{i-1})$. Then $V \setminus (L_1 \cup L_2 \cup \ldots \cup L_{i-1})=\{{\sf root}\}$ and $L_i=\emptyset$, a contradiction. Hence ${\sf root} \not\in L_i$ for all $i\ge 1$, and in consequence ${\sf root} \in R_i$ for all $i\ge 1$. 
Moreover, ${\sf root} \in R_{i+1}$ implies that ${\sf root} \not\in L_{i+1}$, hence ${\sf root} \in R_i\setminus L_{i+1}$.
\begin{enumerate}[(C1)] \item $L_{K+1} \subseteq R \subseteq V, {\sf root} \in R \setminus L_{K+1}, |L_{K+1}| \le \frac{n-|R|}{K}$ \item $\{\delta_v\}_{v\in R}$ is a rooted out-tree sequence, i.e., for all $v\in R$ we have $\delta_v \in \mathbb{Z}q, \sum_{v \in R} \delta(v) = |R| - 1$; also $\delta_{\sf root}\ge 1$ if $|R|\ge 2$ and $\delta_{\sf root}=0$ if $|R|=1$. \item for every $v \in L_{K+1}$ we have $\delta_v = 0$ and for every $v \in R\setminus (L_{K+1}\cup\{{\sf root}\})$ we have $\delta_v \ge 1$ \item $L_1\uplus L_2\uplus\cdots\uplus L_K=V\setminus R$ \item $|L_i| \ge |L_{i+1}|$ for $i=1,\ldots,K$. \end{enumerate} It is clear that all these possibilities can be enumerated in time proportional to their total number times $O(n)$. Let us provide some further intuition about Algorithm~\ref{alg:polyspace}. Consider an optimum solution $m$ of $I$ and any outbranching $B$ in $m$ rooted at ${\sf root}$. In Algorithm~\ref{alg:polyspace}, for any $i=1,\ldots K+1$, the set $L_i$ is a guess of the leaf layer $L_i(B)$, while $R$ is a guess of $V\setminus(L_1(B)\cup\cdots\cup L_K(B))$. Finally, $\delta$ is a guess of the outdegree sequence of the out-tree $B[R]$. In Line 8 we create a flow network, and in line 9 a minimum cost maximum flow is found in polynomial time. In the next section we discuss the flow network and properties of the flow. \subsection{The flow} In this section we consider a run of Algorithm~\ref{alg:polyspace}, and in particular we assume that the variables $R,\delta,L_1,\ldots,L_{K+1}$ have been assigned accordingly. Function {\sc CreateNetwork} in our algorithm builds a flow network $F = (V(F), E(F), {\sf cap}, {\sf cost})$, where $E(F)$ is a set of directed edges and ${\sf cap}$ and ${\sf cost}$ are functions from edges to integers denoting capacities and costs of corresponding edges. As usual, the function ${\sf cost}$ extends to flow functions in a natural way, i.e., ${\sf cost}(f)=\sum_{e\in E(F)}f(e){\sf cost}(e)$. 
We let $V(F) = \{s, t\} \cup \{v^I, v^O \mid v \in V(G)\} \cup \{v^C \mid v \in V\setminus R\}$, where $s$ and $t$ denote the source and the sink of $F$. We put following edges into $E(F)$: \begin{itemize} \item $(s, v^O)$, where ${\sf cap}(s, v^O) = {\sf out}'(v), {\sf cost}(s, v^O) = 0$ for every $v \in V(G)$ \item $(v^I, t)$, where ${\sf cap}(v^I, t) = {\sf in}'(v), {\sf cost}(v^I, t) = 0$ for every $v \in R$ \item $(v^I, t)$, where ${\sf cap}(v^I, t) = {\sf in}'(v)-1, {\sf cost}(v^I, t) = 0$ for every $v \notin R$ \item $(v^C, t)$, where ${\sf cap}(v^C, t) = 1, {\sf cost}(v^C, t) = 0$ for every $v \notin R$ \item $(u^O, v^I)$, where ${\sf cap}(u^O, v^I) = \infty, {\sf cost}(u^O, v^I) = d(u, v)$ for every $(u, v) \in E(G)$ \item $(u^O, v^C)$, where ${\sf cap}(u^O, v^C) = \infty, {\sf cost}(u^O, v^C) = d(u, v)$ for every $v \in L_i, u \in R \cup L_{i + 1} \cup \ldots \cup L_K, (u, v) \in E(G)$. \end{itemize} We will say that $F$ has a \emph{full flow} if it has a flow $f$ with value $|f|=\sum_{v \in V} {\sf out}'(v)$. By the construction of $F$, then all edges leaving source are saturated, i.e., carry flow equal to their capacity. Since $\sum_{v \in V} {\sf out}'(v)=\sum_{v \in V} {\sf in}'(v)$, also all edges that enter the sink are saturated. Essentially, the network above results from extending the standard network used to get Observation~\ref{obs:flows} by vertices $v^C$. The flow between $\{v^O\mid v\in V\}$ and $\{v^I\mid v\in V\}\cup \{v^C \mid v \in V\setminus R\}$ represents the resulting solution. In a full flow the edges leaving $v^C$ are saturated, so a unit of flow enters every vertex $v^C$, which results in connecting $v$ in the solution to a higher layer or to $R$. Thanks to that the solution resulting from adding the out-tree $T_R$ to the solution extracted from $f$ contains an outbranching. \begin{lemma} \label{clm:flow-to-sol} If $f$ is a full flow of minimum cost in $F$ then there exists a solution of $I$ with cost ${\sf cost}(f) + d(T_R)$. 
Moreover, the solution can be extracted from $f$ in polynomial time. \end{lemma} \begin{proof} By standard arguments, since all capacities in $F$ are integer, we infer that there is an integral flow of minimum cost (and it can be found in polynomial time), so we assume w.l.o.g.\ that $f$ is integral. Let $b : V^2 \to \{0, 1\}$ denote a function such that $b(u, v) = [(u, v)\in T_R]$. Now we construct a solution $m:V^2\rightarrow\mathbb{Z}q$ of $I$. \[ m(u,v) =\begin{cases} f(u^O, v^I) + b(u, v) & \text{if } v \in R\\ f(u^O, v^I) + f(u^O, v^C) & \text{if } v \not\in R. \end{cases} \] In other words, $m$ describes how many times edge $(u, v)$ was used by the out-tree $T_R$ and flow $f$ in total. Let us verify that $m$ is a feasible solution for $I$. The degree constraints are easy to verify, so we are left with showing that $m$ contains an outbranching rooted at ${\sf root}$. To this end it suffices to show that every vertex $v$ is reachable from ${\sf root}$ in $G_m$. Clearly, this holds for vertices in $R$, thanks to the out-tree $T_R$. Pick an arbitrary vertex $v\not\in R$. Then $v \in L_i$ for some $i=1,\ldots,K$. We know that $f(v^C, t) = 1$, so there exists $u$ such that $f(u^O, v^C) = 1$. Therefore, $v$ is connected in $G_m$ to a vertex from $R \cup L_{i+1} \cup \ldots L_{K}$. Since $v$ in $G_m$ has an in-neighbor either in $R$ or in a layer with a higher index, we can conclude that there is a path in $G_m$ from $R$ to $v$. Hence $m$ indeed contains the required outbranching. Finally, it can be easily checked that $d(m)={\sf cost}(f) + d(T_R)$, what concludes this proof. \end{proof} Let $m$ be a feasible solution for $I$. Let $R$, $L_i$ for $i=1,\ldots,K+1$ be sets of vertices and $\delta$ an out-tree sequence on $R$, as in Algorithm~\ref{alg:polyspace}. 
We say that $m$ is {\em compliant} with $R$, $L_1,\ldots,L_{K+1}$ and $\delta$ when $m$ contains an outbranching $T$ rooted at ${\sf root}$ such that $R_K(T)=R$, $L_i(T)=L_i$ for $i=1,\ldots,K+1$ and $\delta$ is equal to the outdegree sequence of $T[R]$. \begin{lemma} \label{clm:sol-to-flow} Assume that there exists a solution $m$ of $I$ that is compliant with $R$, $L_1,\ldots,L_{K+1}$ and $\delta$. Then $F$ has a full flow $f$ such that ${\sf cost}(f) + d(T_R) \le d(m)$. \end{lemma} \begin{proof} Let $T$ be an outbranching in $m$ which certifies that $m$ is compliant with $R$, $L_1,\ldots,L_{K+1}$ and $\delta$. Let $p : V^2 \to \{0, 1\}$ be a function such that for every $u,v\in V$ we have $p(u,v)=[(u,v) \in T]$. We set $f(s, u) = {\sf cap}(s, u)$ for all edges $(s,u) \in E(F)$ and $f(u, t) = {\sf cap}(u, t)$ for all edges $(u,t) \in E(F)$. If $v \in V\setminus R$ then we set $f(u^O, v^C) = p(u, v)$. For all $u, v \in V(G)$ we set $f(u^O, v^I) = m(u, v) - p(u, v)$. It can be easily checked that such function $f$ is a full flow and ${\sf cost}(f)=d(m) - d(T[R])$. However, since $T[R]$ is a $\delta$-out-tree rooted at ${\sf root}$ and $T_R$ is a cheapest such out-tree, $d(T_R)\le d(T[R])$. It follows that ${\sf cost}(f)\le d(m) - d(T_R)$, so ${\sf cost}(f) + d(T_R) \le d(m)$ as required. \end{proof} Consider a {\em minimum cost} full flow $f'$ in $F$ that is found by Algorithm~\ref{alg:polyspace} for a choice of $R, L_1,\ldots,L_{K+1}, \delta$. The claim above implies that ${\sf cost}(f') + d(T_R) \le d(m)$. However, notice that we do not claim that ${\sf cost}(f')$ is the cost of optimal completion of $T_R$ consistent with all guesses, as the intuitions we described earlier might suggest. It could be the case that in the solution resulting from $f'$, a vertex which was guessed to belong to $L_i$ does not have any out-neighbor that was guessed to belong to $L_{i-1}$, what would mean that this vertex should be in an earlier layer. 
However, that is not an issue for the extraction of the global optimum solution of $I$, because we may get only better solutions than the optimum completion for that particular guess. \subsection{Correctness} \begin{lemma} Function {\sc Solve} returns the cost of an optimal solution of $I$. \end{lemma} \begin{proof} From Lemma~\ref{clm:flow-to-sol} we infer that {\sc Solve} returns the cost of a feasible solution of $I$. It remains to show that it returns a value that is smaller than or equal to the cost of an optimal solution of $I$. To this end, let $m$ be an arbitrary optimal solution of $I$ and let $T$ be an arbitrary outbranching rooted at ${\sf root}$ in $G_m$. Let $R=R_K(T)$, $L_i=L_i(T)$ for $i=1,\ldots,K+1$ and let $\delta$ be the outdegree sequence of $T[R]$. Let us verify that $R, L_1,\ldots,L_{K+1}$ and $\delta$ satisfy constraints (C1)--(C5). We get (C1) and (C5) by Lemma~\ref{obs:leaf-layers}. (C2) follows from the definition of $\delta$. For (C3), consider two cases. If $|R|>1$, then $L_{K+1}$ is the set of leaves in $R$ and hence indeed for every $v \in L_{K+1}$ we have $\delta_v = 0$ and for every $v \in R\setminus (L_{K+1}\cup\{{\sf root}\})$ we have $\delta_v \ge 1$. When $|R|\le 1$, we have $L_{K+1}=\emptyset$ and since ${\sf root}\in R$ by Lemma~\ref{obs:leaf-layers}, $R=\{{\sf root}\}$. Then both sets $L_{K+1}$ and $R\setminus (L_{K+1}\cup\{{\sf root}\})$ are empty, so (C3) trivially holds. Finally, (C4) follows by the definition of leaf layers. Since $R, L_1,\ldots,L_{K+1}$ and $\delta$ satisfy constraints (C1)--(C5), then {\sc Solve} reaches this particular evaluation of the variables $R, L_1,\ldots,L_{K+1}$ and $\delta$. Then, based on Lemma \ref{clm:sol-to-flow}, the network $F$ has a full flow $f$ such that ${\sf cost}(f) + d(T_R) \le d(m)$, and it follows that {\sc Solve} returns a value $\textsf{best}\le {\sf cost}(f) + d(T_R) \le d(m)$, as required. 
\end{proof} Obviously, {\sc Solve} can be easily adapted to return a solution of $I$ with the cost it returns, but we have not taken this into account in Algorithm~\ref{alg:polyspace} for the sake of its readability. \subsection{Running time} Having a correct algorithm solving \ProblemName{Fixed Degree Subgraph With Outbranching} in polynomial space, let us analyze its complexity depending on $K$. Let us denote $r=|R|$ and $c=|L_{K+1}|$. Recall that $1\le r \le n$ and $0\le c\le \lfloor \frac{n - r}{K} \rfloor$. If we fix $r$ and $c$, then there are ${n-1 \choose r-1}$ guesses for $R$ (it has to contain ${\sf root}$) and at most ${r-1 \choose c}$ guesses for $L_{K+1}$. Let us bound the number of guesses for $\delta$. By (C2) and (C3), $\sum_{v\in R}\delta_v=r-1$, and $\delta_v=0$ iff $v\in L_{K+1}$ so essentially we put $r-1$ balls into $r-c$ bins that must be nonempty, which is ${r - 2 \choose c - 1}$ by standard combinatorics. In the special case $c=0$ there is one choice for $\delta$, where $\delta_{{\sf root}} = 0$. In total, there are at most ${n \choose r} {r \choose c}^2$ guesses for all $R, L_{K+1}, \delta$ simultaneously. For each of these guesses, using Lemma~\ref{lem:berger:outbranching} function {\sc Solve} calculates an optimal $\delta$-out-tree spanning $R$, which takes time ${\ensuremath{\mathcal{O}}}(4^{n+o(n)})$. It follows that that part takes time ${\ensuremath{\mathcal{O}}}star(\sum_r {n \choose r} 4^{r+o(r)} \sum_c {r \choose c}^2).$ Then, {\sc Solve} guesses a partition of $V \setminus R$ into $L_1, \ldots, L_K$ in at most $K^{n-r}$ ways. For each such guess, {\sc Solve} spends polynomial time, so that part takes ${\ensuremath{\mathcal{O}}}star (\sum_r {n \choose r} K^{n-r} \sum_c {r \choose c}^2)$ time. 
Hence the total running time can be bounded by $${\ensuremath{\mathcal{O}}}star \left( 2^{o(n)}\sum_{r=1}^{n} \sum_{c=0}^{\lfloor \frac{n - r}{K} \rfloor} \underbrace{{n \choose r} (K^{n-r} + 4^r) {r \choose c}^2}_{\xi(r,c)} \right) .$$ Since there are polynomially many guesses for $r$ and $c$, we can actually replace sums with maximums in the expression above and focus on the expression $\xi(r,c)={n \choose r} (K^{n-r} + 4^r) {r \choose c}^2$. We will heavily use the well-known bound ${n \choose \alpha n} < 2^{h(\alpha)n}$, where $h(\alpha)=-\alpha\log_2\alpha-(1-\alpha)\log_2(1-\alpha)$ is the binary entropy function (see e.g.~\cite{expalg-book}). For readability, let us denote $f(\alpha) = 2^{h(\alpha)}$ and let us point out that $f$ is increasing on interval $[0, \frac{1}{2}]$ and decreasing on interval $[\frac12, 1]$. Let us denote $\beta \coloneqq \frac{r}{n}$. We are going to distinguish two cases here. \begin{enumerate} \item $\frac{n-r}{K} \ge \frac{r}{2}$ This inequality can be rephrased as $r \le \frac{2}{K+2} n$, which is equivalent to $\beta \le \frac{2}{K+2}$. We will use here a trivial bound ${r \choose c} \le 2^r$. Then, $\xi(r,c)\le f(\beta)^n ((K^{1-\beta})^n + 4^{\beta n}) 4^{\beta n} = (f(\beta)K^{1-\beta}4^{\beta})^n + (f(\beta)4^{2\beta})^n$ \item $\frac{n-r}{K} < \frac{r}{2}$ In that case we know that $\max_{c=0}^{\lfloor \frac{n-r}{K} \rfloor} {r \choose c}^2$ is attained when $c = \lfloor \frac{n-r}{K} \rfloor$ and for that particular value of $c$ we can use the following bound. 
$${r \choose c}^2 = {r \choose \frac{\lfloor \frac{n-r}{K} \rfloor}{r} \cdot r}^2 = {\ensuremath{\mathcal{O}^*}} \left( f\left( \frac{\lfloor \frac{n-r}{K} \rfloor}{r} \right) ^{2r}\right) = {\ensuremath{\mathcal{O}^*}} \left( f\left( \frac{\frac{n-r}{K}}{r} \right) ^{2r}\right) =$$ $$ ={\ensuremath{\mathcal{O}^*}} \left( f \left( \frac{1 - \beta}{K \beta} \right) ^{2\beta n} \right) $$ In the third equality above we used the fact that $f$ is increasing in interval $[0, \frac12]$. To sum up, in this case, $$\xi(r,c)={\ensuremath{\mathcal{O}^*}} \left( \left( f(\beta)K^{1-\beta} f \left( \frac{1 - \beta}{K \beta} \right) ^{2\beta } \right)^n + \left( f(\beta)4^{\beta} f \left( \frac{1 - \beta}{K \beta} \right) ^{2\beta } \right)^n \right).$$ \end{enumerate} Our numerical analysis shows that it is optimal to choose $K=4$. For that particular value of $K$, the first case applies if and only if $\beta \le \frac13$. Then, \[\xi(r,c)=(4f(\beta))^n+(4^{2\beta}f(\beta))^n={\ensuremath{\mathcal{O}}}((4f(\beta))^n)={\ensuremath{\mathcal{O}}}((4f(\tfrac13))^n)={\ensuremath{\mathcal{O}}}(7.56^n).\] Let us now investigate the second case, when $\beta > \frac13$. For $\frac13 < \beta < \frac12$ the summand $ \left( f(\beta)4^{1-\beta} f \left( \frac{1 - \beta}{4 \beta} \right) ^{2\beta } \right)^n$ dominates, and for $\beta \ge \frac12$ the summand $\left( f(\beta)4^{\beta} f \left( \frac{1 - \beta}{4 \beta} \right) ^{2\beta } \right)^n$ dominates. We have numerically verified that \[f(\beta)4^{1-\beta} f \left( \frac{1 - \beta}{4 \beta} \right) ^{2\beta } \le 7.68 \text{ for } \beta \in (\tfrac13,\tfrac12)\] and \[f(\beta)4^{\beta} f \left( \frac{1 - \beta}{4 \beta} \right) ^{2\beta } \le 7.871 \text{ for } \beta\in[\tfrac12,1].\] Hence, we can conclude that for $K=4$ our algorithm runs in time ${\ensuremath{\mathcal{O}^*}}(7.871^{n+o(n)})={\ensuremath{\mathcal{O}}}(7.88^n)$ and in polynomial space. This concludes the proof of Theorem~\ref{thm:polyspace}. 
\section{$(1+\epsilon)$-approximation} \label{sec:approx} In this section we show Theorem~\ref{thm:aprox}, i.e., we present an algorithm for \ProblemName{Many Visits TSP} which finds a $(1+\epsilon)$-approximation in ${\ensuremath{\mathcal{O}^*}}\left(\frac{2 ^ n}{\epsilon}\right)$ time and polynomial space. To achieve this we consider a more general problem, namely \ProblemName{Fixed Degree Connected Subgraph}. The main idea is to round weights of edges of the given instance, so that we can use the algorithm for polynomially bounded weights from Lemma \ref{lem:bnd-tsp-small-demands} which is an analog of Theorem~\ref{thm:bnd-tsp} for \ProblemName{Fixed Degree Connected Subgraph}. Let us first consider the case with degrees bounded by a polynomial. \begin{lemma} \label{lem:aprx_bounded_degree} For a given $\epsilon > 0$ and an instance $I=(d,{\sf in},{\sf out})$ of \ProblemName{Fixed Degree Connected Subgraph} such that ${\sf in}(v), {\sf out}(v) \le {\ensuremath{\mathcal{O}}}(n^2)$ for every vertex $v$ there exists an algorithm finding a $(1+\epsilon)$-approximate solution in ${\ensuremath{\mathcal{O}^*}}\left(\frac{2 ^ n}{\epsilon}\right)$ time and polynomial space. \end{lemma} \begin{proof} Let us denote the optimal solution for $I$ by ${\rm OPT}$. First, our algorithm guesses the most expensive edge used by ${\rm OPT}$. Let us denote its cost by $E$, in particular \setcounter{equation}{0} \begin{equation} \label{eq:expensEdge} E \le d({\rm OPT}). \end{equation} Let us denote by $C$ the universal constant such that ${\sf in}(v), {\sf out}(v) \le C n^2$ for every vertex $v$ and let us round $d$ in the following way \begin{equation} \label{eq:def} d'(u,v) := \left\{ \begin{array}{ll} \ceil{\frac{C n^3}{\epsilon E}d(u,v)} & \textrm{if $d(u,v)\le E$}\\ \infty & \textrm{if $d(u,v) > E$} \end{array} \right. \end{equation} If $d'(u,v)$ is finite then it is bounded by $\ceil{\frac{C n^3}{\epsilon E} E} = \ceil{\frac{C n^3}{\epsilon}}$. 
Our algorithm simply returns the optimal solution for instance $I' = (d',{\sf in},{\sf out})$ which can be found in ${\ensuremath{\mathcal{O}}}star\left(\frac{2 ^ n}{\epsilon}\right)$ time using the algorithm from Lemma \ref{lem:bnd-tsp-small-demands} with $D = \ceil{\frac{C n^3}{\epsilon}}$. Let us denote this solution by ${\rm ALG}$. Now we only need to prove that ${\rm ALG}$ is $(1 + \epsilon)$-approximation for the original instance $I$. We know that ${\rm ALG}$ is an optimal solution for $I'$, in particular \begin{equation} \label{eq:optInD'} d'({\rm ALG})\le d'({\rm OPT}). \end{equation} For every $v$ we have ${\sf out}(v) \le C n^2$, so \begin{equation} \label{eq:cntEdges} \sum_{(u,v) \in V^2} {\rm OPT}(u,v) = \sum_{u \in V} {\sf out}(u) \le n \cdot C n^2. \end{equation} The following chain of inequalities finishes the proof. $$d({\rm ALG}) \stackrel{(\ref{eq:def})}{\le} \frac{\epsilon E}{C n^3} d'({\rm ALG}) \stackrel{(\ref{eq:optInD'})}{\le} \frac{\epsilon E}{C n^3} d'({\rm OPT}) \stackrel{(\ref{eq:def})}{\le} \frac{\epsilon E}{C n^3} \sum_{(u,v) \in V^2} {\rm OPT}(u,v) \left(\frac{d(u,v) C n^3}{\epsilon E} + 1\right) = $$ $$ = d({\rm OPT}) + \frac{\epsilon E}{C n^3} \sum_{(u,v) \in V^2} {\rm OPT}(u,v) \stackrel{(\ref{eq:cntEdges})}{\le} d({\rm OPT}) + \frac{\epsilon E}{C n^3} Cn^3 \stackrel{(\ref{eq:expensEdge})}{\le} (1 + \epsilon) d({\rm OPT}) $$ \end{proof} Now we can generalize the algorithm from Lemma \ref{lem:aprx_bounded_degree} using it as a black box for the general case. \begin{lemma} \label{lem:aproxConSub} For a given $\epsilon > 0$ and an instance $I=(d,{\sf in},{\sf out})$ of \ProblemName{Fixed Degree Connected Subgraph} there exists an algorithm finding a $(1+\epsilon)$-approximate solution in ${\ensuremath{\mathcal{O}}}star\left(\frac{2 ^ n}{\epsilon}\right)$ time and polynomial space. 
\end{lemma} \begin{proof} First let us use the algorithm from Theorem \ref{thm:kernel} which outputs an instance $I'=(d,{\sf in}',{\sf out}')$ of \ProblemName{Fixed Degree Connected Subgraph} and a function $f:V^2\rightarrow\mathbb{Z}q$. Let us denote the optimal solution for $I'$ by ${\rm OPT}'$. By Theorem \ref{thm:kernel} the optimal solution for $I$ equals ${\rm OPT}' + f$. Moreover, we know that ${\sf in}'(v), {\sf out}'(v) \le {\ensuremath{\mathcal{O}}}(n^2)$ for every vertex $v$. In particular we can use the algorithm from Lemma \ref{lem:aprx_bounded_degree} to get a solution ${\rm ALG}'$ for instance $I'$ such that $d({\rm ALG}') \le (1 + \epsilon) d({\rm OPT}')$. Our algorithm simply returns solution ${\rm ALG}' + f$, which is a solution for $I$ because ${\rm ALG}'$ is connected and $f$ increases degrees exactly by the difference between $I$ and $I'$. To prove ${\rm ALG}' + f$ is a $(1+\epsilon)$-approximation we just need to observe that $$d({\rm ALG}' + f) \le (1 + \epsilon)d({\rm OPT}') + d(f) \le (1 + \epsilon)d({\rm OPT}' + f).$$ \end{proof} \ProblemName{Fixed Degree Connected Subgraph} is a generalization of \ProblemName{Many Visits TSP} so the algorithm from Lemma \ref{lem:aproxConSub} proves Theorem \ref{thm:aprox}. \section{Further Research} \label{sec:further} Since TSP is solvable in time ${\ensuremath{\mathcal{O}^*}}(2^n)$ and exponential space~\cite{BellmanTSP,HeldKarpTSP} and time ${\ensuremath{\mathcal{O}}}(4^{n+o(n)})$ and polynomial space~\cite{GurevichShelah}, the main remaining question is whether these bounds can be achieved for \ProblemName{Many Visits TSP} avoiding in the running time bound the linear dependence on maximum distance $D$. Another interesting goal is a deterministic version of Theorem~\ref{thm:bnd-decision}. \anonymyze{ \end{document}
\begin{document} \title{Dynamical properties of families of holomorphic mappings} \keywords{} \thanks{The first named author was supported by CSIR-UGC(India) fellowship} \thanks{The second named author was supported by the DST SwarnaJayanti Fellowship 2009--2010 and a UGC--CAS Grant} \author{Ratna Pal and Kaushal Verma} \address{Ratna Pal: Department of Mathematics, Indian Institute of Science, Bangalore 560 012, India} \email{[email protected]} \address{Kaushal Verma: Department of Mathematics, Indian Institute of Science, Bangalore 560 012, India} \email{[email protected]} \pagestyle{headings} \begin{abstract} We study some dynamical properties of skew products of H\'{e}non maps of $\mathbb C^2$ that are fibered over a compact metric space $M$. The problem reduces to understanding the dynamical behavior of the composition of a pseudo-random sequence of H\'{e}non mappings. In analogy with the dynamics of the iterates of a single H\'{e}non map, it is possible to construct fibered Green's functions that satisfy suitable invariance properties and the corresponding stable and unstable currents. This analogy is carried forth in two ways: it is shown that the successive pullbacks of a suitable current by the skew H\'{e}non maps converges to a multiple of the fibered stable current and secondly, this convergence result is used to obtain a lower bound on the topological entropy of the skew product in some special cases. The other class of maps that are studied are skew products of holomorphic endomorphisms of $\mathbb P^k$ that are again fibered over a compact base. We define the fibered basins of attraction and show that they are pseudoconvex and Kobayashi hyperbolic. \end{abstract} \title{Dynamical properties of families of holomorphic mappings} \section{Introduction} \noindent The purpose of this note is to study various dynamical properties of some classes of fibered mappings. 
We will first consider families of the form $H : M \times \mathbb C^2 \rightarrow M \times \mathbb C^2$ defined by \begin{equation} H(\lambda, x, y) = (\sigma(\lambda), H_{\lambda}(x, y)) \end{equation} where $M$ is an appropriate parameter space, $\sigma$ is a self map of $M$ and for each $\lambda \in M$, the map \[ H_{\lambda}(x, y) = H_{\lambda}^{(m)} \circ H_{\lambda}^{(m-1)} \circ \ldots \circ H_{\lambda}^{(1)}(x, y) \] where for every $1 \le j \le m$, \[ H_{\lambda}^{(j)}(x, y) = (y, p_{j, \lambda}(y) - a_{j}(\lambda) x) \] is a generalized H\'{e}non map with $p_{j, \lambda}(y)$ a monic polynomial of degree $d_j \ge 2$ whose coefficients and $a_{j}(\lambda)$ are functions on $M$. The degree of $H_{\lambda}$ is $d = d_1d_2 \ldots d_m$ which does not vary with $\lambda$. The two cases that will be considered here are as follows. First, $M$ is a compact metric space and $\sigma$, $a_j$ and the coefficients of $p_{j, \lambda}$ are continuous functions on $M$ and second, $M \subset \mathbb C^k$, $k \ge 1$ is open in which case $\sigma$, $a_j$ and the coefficients of $p_{j, \lambda}$ are assumed to be holomorphic in $\lambda$. In both cases, $a_j$ is assumed to be a non-vanishing function on $M$. We are interested in studying the ergodic properties of such a family of mappings. 
Part of the reason for this choice stems from the Fornaess-Wu classification (\cite{FW}) of polynomial automorphisms of $\mathbb C^3$ of degree at most $2$ according to which any such map is affinely conjugate to \begin{enumerate} \item[(a)] an affine automorphism, \item[(b)] an elementary polynomial automorphism of the form \[ E(x, y, z) = (P(y, z) + ax, Q(z) + by, cz + d) \] where $P, Q$ are polynomials with $\max \{\deg (P), \deg (Q) \} = 2$ and $abc \neq 0$, or \item[(c)] to one of the following: \begin{itemize} \item $H_1(x, y, z) = (P(x, z) + ay, Q(z) + x, cz + d)$ \item $H_2(x, y, z) = (P(y, z) + ax, Q(y) + bz, y)$ \item $H_3(x, y, z) = (P(x, z) + ay, Q(x) + z, x)$ \item $H_4(x, y, z) = (P(x, y) + az, Q(y) + x, y)$ \item $H_5(x, y, z) = (P(x, y) + az, Q(x) + by, x)$ \end{itemize} where $P, Q$ are polynomials with $\max \{ \deg(P), \deg(Q) \} = 2$ and $abc \neq 0$. \end{enumerate} \noindent The six classes in (b) and (c) put together were studied in \cite{CF} and \cite{CG} where suitable Green functions and associated invariant measures were constructed for them. As observed in \cite{FW}, several maps in (c) are in fact families of H\'{e}non maps for special values of the parameters $a, b, c$ and for judicious choices of the polynomials $P, Q$. For instance, if $Q(z) = 0$ and $P(x, z) = x^2 + \ldots$, then $H_1(x, y, z) = (P(x, z) + ay, x, z)$ which is conjugate to \[ (x, y, z) \mapsto (y, P(y, z) + ax, cz + d) = (y, y^2 + \ldots + ax, cz + d) \] by the inversion $\tau_1(x, y, z) = (y, x, z)$. Here $\sigma(z) = cz + d$. Similarly, if $a = 1, P(y, z) = 0$ and $Q$ is a quadratic polynomial, then $H_2(x, y, z) = (x, Q(y) + bz, y)$ which is conjugate to \[ (x, y, z) \mapsto (x, z, Q(z) + by) = (x, z, z^2 + \ldots + by) \] by the inversion $\tau_3(x, y, z) = (x, z, y)$. 
Here $\sigma(x) = x$ and finally, if $b = 1, Q(x) = 0$ and $P(x, y) = x^2 + \ldots$, then $H_5(x, y, z) = (P(x, y) + az, y, x)$ which is conjugate to \[ (x, y, z) \mapsto (z, y, P(z, y) + ax) = (z, y, z^2 + \ldots + ax) \] by the inversion $\tau_5(x, y, z) = (z, y, x)$ where again $\sigma(y) = y$. All of these are examples of the kind described in $(1.1)$ with $M = \mathbb C$. In the first example, if $c \neq 1$ then an affine change of coordinates involving only the $z$-variable can make $d = 0$ and if further $\vert c \vert \le 1$, then we may take a closed disc around the origin in $\mathbb C$ which will be preserved by $\sigma(z) = cz$. This provides an example of a H\'{e}non family that is fibered over a compact base $M$. Further, since the parameter mapping $\sigma$ in the last two examples is just the identity, we may restrict it to a closed ball to obtain more examples of the case when $M$ is compact. The maps considered in $(1.1)$ are in general $q$-regular, for some $q \ge 1$, in the sense of Guedj--Sibony (\cite{GS}) as the following example shows. Let $\mathcal H : \mathbb C^3 \rightarrow \mathbb C^3$ be given by \[ \mathcal H(\lambda, x, y) = (\lambda, y, y^2 - ax), a \neq 0 \] which in homogeneous coordinates becomes \[ \mathcal H([\lambda : x : y : t]) = [\lambda t : yt : y^2 - axt : t^2]. \] The indeterminacy set of this map is $I^+ = [\lambda : x : 0 : 0]$ while that for $\mathcal H^{-1}$ is $I^- = [\lambda : 0 : y : 0]$. Thus $I^+ \cap I^- = [1 : 0 : 0 : 0]$ and it can be checked that $X^+ = \overline{\mathcal H \big( (t = 0) \setminus I^+ \big)} = [0: 0: 1: 0]$ which is disjoint from $I^+$. Also, $X^- = \overline {\mathcal H^- \big( (t = 0) \setminus I^- \big)} = [0:1:0:0]$ which is disjoint from $I^-$. All these observations imply that $\mathcal H$ is $1$-regular in the sense of \cite{GS}. Further, $\deg(\mathcal H) = \deg(\mathcal H^{-1}) = 2$. 
This global view point does have several advantages as the results in \cite{GS}, \cite{G} show. However, thinking of $(1.1)$ as a family of maps was seconded by the hope that the methods of Bedford--Smillie (\cite{BS1}, \cite{BS2} and \cite{BS3}) and Fornaess--Sibony \cite{FS} that were developed to handle the case of a single generalized H\'{e}non map would be amenable to this situation -- in fact, they are to a large extent. Finally, in view of the systematic treatment of families of rational maps of the sphere by Jonsson (see \cite{JM}, \cite{J}), considering families of H\'{e}non maps appeared to be a natural next choice. Several pertinent remarks about the family $H$ in (1.1) with $\sigma(\lambda)=\lambda$ can be found in \cite{DS}. Let us first work with the case when $M$ is a compact metric space. For $n \ge 0$, let \[ H_{\lambda}^{\pm n} = H_{\sigma^{n-1}(\lambda)}^{\pm 1} \circ \cdots \circ H_{\sigma(\lambda)}^{\pm 1} \circ H_{\lambda}^{\pm 1}. \] Note that $H_{\lambda}^{+n}$ is the second coordinate of the $n$-fold iterate of $H(\lambda, x, y)$. Furthermore \[ (H_{\lambda}^{+n})^{-1} = H_{\lambda}^{-1} \circ H_{\sigma(\lambda)}^{-1} \circ \cdots \circ H_{\sigma^{n-1}(\lambda)}^{-1} \neq H_{\lambda}^{-n} \] and \[ (H_{\lambda}^{-n})^{-1} = H_{\lambda} \circ H_{\sigma(\lambda)} \circ \cdots \circ H_{\sigma^{n-1}(\lambda)} \neq H_{\lambda}^{+n} \] for $n \ge 2$. The presence of $\sigma$ creates an asymmetry which is absent in the case of a single H\'{e}non map and which requires the consideration of these maps as will be seen later. In what follows, no conditions on $\sigma$ except continuity are assumed unless stated otherwise. The first thing to do is to construct invariant measures for the family $H(\lambda, x, y)$ that respect the action of $\sigma$. The essential step toward this is to construct a uniform filtration $V_R$, $V_R^{\pm}$ for the maps $H_\lambda$ where $R>0$ is sufficiently large. 
For each $\lambda \in M$, the sets $I_\lambda^{\pm}$ of escaping points and the sets $K_\lambda^{\pm}$ of non-escaping points under random iteration determined by $\sigma$ on $M$ are defined as follows: \[ I_\lambda^{\pm}=\{z\in \mathbb{C}^2: \Vert H_{\lambda}^{\pm n}(x, y) \Vert \rightarrow \infty \; \text{as} \; n\rightarrow \infty \}, \] \[ K_\lambda^{\pm}=\{z\in \mathbb{C}^2: \; \text{the sequence}\; \{ H_{\lambda}^{\pm n} (x, y)\}_n \; \text{is bounded}\} \] Clearly, $H_\lambda^{\pm 1}(K_\lambda^{\pm})= K_{\sigma(\lambda)}^{\pm}$ and $H_\lambda^{\pm 1}(I_\lambda^{\pm})= I_{\sigma(\lambda)}^{\pm}$. Define $K_{\lambda} = K_{\lambda}^+ \cap K_{\lambda}^-, J_{\lambda}^{\pm} = \partial K_{\lambda}^{\pm}$ and $J_{\lambda} = J_{\lambda}^+ \cap J_{\lambda}^-$. For each $\lambda \in M$ and $n \ge 1$, let \[ G_{n, \lambda}^{\pm}(x, y) = \frac{1}{d^n} \log^+ \Vert H_{\lambda}^{\pm n}(x, y) \Vert \] where $\log^+ t=\max \{\log t,0\}$. \begin{prop}\label{pr1} The sequence $G_{n, \lambda}^{\pm}$ converges uniformly on compact subsets of $M \times \mathbb C^2$ to a continuous function $G_{\lambda}^{\pm}$ as $n \rightarrow \infty$ that satisfies \[ d^{\pm 1} G_{\lambda}^{\pm} = G_{\sigma(\lambda)}^{\pm} \circ H_{\lambda}^{\pm 1} \] on $\mathbb C^2$. The functions $G_{\lambda}^{\pm}$ are positive pluriharmonic on $\mathbb C^2 \setminus K_{\lambda}^{\pm}$, plurisubharmonic on $\mathbb C^2$ and vanish precisely on $K_{\lambda}^{\pm}$. The correspondence $\lambda \mapsto G_\lambda^{\pm}$ is continuous. In case $\sigma$ is surjective, $G_{\lambda}^+$ is locally uniformly H\"{o}lder continuous, i.e., for each compact $S \subset \mathbb C^2$, there exist constants $\tau, C > 0$ such that \[ \big\vert G_{\lambda}^+(x, y) - G_{\lambda}^+(x', y') \big\vert \le C \Vert (x, y) - (x', y') \Vert^{\tau} \] for all $(x, y), (x', y') \in S$. The constants $\tau, C$ depend on $S$ and the map $H$ only. 
\end{prop} \noindent As a result, $\mu_{\lambda}^{\pm} = (1/2\pi) dd^c G_{\lambda}^{\pm}$ are well defined positive closed $(1, 1)$ currents on $\mathbb C^2$ and hence $\mu_{\lambda} = \mu_{\lambda}^+ \wedge \mu_{\lambda}^-$ defines a probability measure on $\mathbb C^2$ whose support is contained in $V_R$ for every $\lambda \in M$. Moreover the correspondence $\lambda\mapsto \mu_\lambda$ is continuous. That these objects are well behaved under the pullback and push forward operations by $H_{\lambda}$ and at the same time respect the action of $\sigma$ is recorded in the following: \begin{prop}\label{pr2} With $\mu_{\lambda}^{\pm}, \mu_{\lambda}$ as above, we have \[ {(H_{\lambda}^{\pm 1})}^{\ast} \mu_{\sigma(\lambda)}^{\pm} = d^{\pm 1} \mu_{\lambda}^{\pm}, \; (H_{\lambda})_{\ast} \mu_{\lambda}^{\pm} = d^{\mp 1} \mu_{\sigma(\lambda)}^{\pm} \] The support of $\mu^{\pm}_{\lambda}$ equals $J_{\lambda}^{\pm}$ and the correspondence $\lambda \mapsto J_{\lambda}^{\pm}$ is lower semi-continuous. Furthermore, for each $\lambda\in M$, the pluricomplex Green function of $K_\lambda$ is $\max\{G_\lambda^+, G_\lambda^-\}$, $\mu_\lambda$ is the complex equilibrium measure of $K_\lambda$ and ${\rm supp}(\mu_\lambda)\subseteq J_\lambda$. In particular, if $\sigma$ is the identity on $M$, then $(H_{\lambda}^{\pm 1})^{\ast} \mu_{\lambda} = \mu_{\lambda}$. \end{prop} \noindent Let $T$ be a positive closed $(1, 1)$ current in a domain $\Omega \subset \mathbb C^2$ and let $\psi \in C^{\infty}_0(\Omega)$ with $\psi \ge 0$ be such that $\text{supp}(\psi) \cap \text{supp}(dT) = \phi$. Theorem 1.6 in \cite{BS3} shows that for a single H\'{e}non map $H$ of degree $d$, the sequence $d^{-n} H^{n \ast}(\psi T)$ always converges to $c \mu^+$ where $c = \int \psi T \wedge \mu^- > 0$. 
In the same vein, for each $\lambda \in M$ let $S_{\lambda}(\psi, T)$ be the set of all possible limit points of the sequence $d^{-n}\big( H_{\lambda}^{+n}\big)^{\ast}(\psi T)$. \begin{thm}\label{thm1} $S_{\lambda}(\psi, T)$ is nonempty for each $\lambda \in M$ and $T, \psi$ as above. Each $\gamma_{\lambda} \in S_{\lambda}(\psi, T)$ is a positive multiple of $\mu_{\lambda}^+$. \end{thm} In general, $S_\lambda(\psi, T)$ may be a large set. However, there are two cases for which it is possible to determine the cardinality of $S_{\lambda}(\psi, T)$ and both are illustrated by the examples mentioned earlier. \begin{prop} If $\sigma$ is the identity on $M$ or if $\sigma : M \rightarrow M$ is a contraction, i.e., there exists $\lambda_0 \in M$ such that $\sigma^n(\lambda) \rightarrow \lambda_0$ for all $\lambda \in M$, the set $S_{\lambda}(\psi, T)$ consists of precisely one element. Consequently, in each of these cases there exists a constant $c_{\lambda}(\psi, T) > 0$ such that \[ \lim_{n \rightarrow \infty} d^{-n} \big( H_{\lambda}^{+n}\big)^{\ast}(\psi T) = c_{\lambda}(\psi, T) \mu_{\lambda}^+. \] \end{prop} Let us now consider the case when $M$ is a relatively compact open subset of $\mathbb C^k$, $k \ge 1$ and the map $\sigma$ is the identity on $M$. Since this means that the slices over each point in $M$ are preserved, we may (by shrinking $M$ slightly) assume that the maps $H_{\lambda}$ are well defined in a neighborhood of $\overline M$. Thus the earlier discussion about the construction of $\mu_{\lambda}^{\pm}, \mu_{\lambda}$ applies to the family (which will henceforth be considered) \begin{gather} H : M \times \mathbb C^2 \rightarrow M \times \mathbb C^2, \notag\\ H(\lambda, x, y) = (\lambda, H_{\lambda}(x, y)) \notag.
\end{gather} \noindent For every probability measure $\mu'$ on $M$, \begin{equation} \langle \mu, \phi \rangle = \int_{M} \bigg( \int_{\{\lambda\} \times \mathbb C^2} \phi \; d\mu_{\lambda} \bigg) d\mu'(\lambda) \end{equation} defines a measure on $M \times \mathbb C^2$ by describing its action on continuous functions $\phi$ on $M \times \mathbb C^2$. This is not a dynamically natural measure since $\mu'$ is arbitrary. It will turn out that the support of $\mu$ is contained in \[ \mathcal J = \bigcup_{\lambda \in M} \left( \{ \lambda \} \times J_{\lambda} \right) \subset M \times V_R. \] The slice measures of $\mu$ are evidently $\mu_{\lambda}$ and since $\sigma$ is the identity it can be seen from Proposition \ref{pr2} that $\mu$ is an invariant probability measure for $H$ as above. \begin{thm} Regard $H$ as a self map of ${\rm supp}(\mu)$ with invariant measure $\mu$. The measure theoretic entropy of $H$ with respect to $\mu$ is at least $\log d$. In particular, the topological entropy of $H : \mathcal J \rightarrow \mathcal J$ is at least $\log d$. \end{thm} It would be both interesting and desirable to obtain lower bounds for the topological entropy for an arbitrary continuous function $\sigma$ in (1.1). We will now consider continuous families of holomorphic endomorphisms of $\mathbb P^k$. For a compact metric space $M$, $\sigma$ a continuous self map of $M$, define $F : M \times \mathbb P^k \rightarrow M \times \mathbb P^k$ as \begin{equation} F(\lambda, z) = (\sigma(\lambda), f_{\lambda}(z)) \end{equation} where $f_{\lambda}$ is a holomorphic endomorphism of $\mathbb P^k$ that depends continuously on $\lambda$. Each $f_{\lambda}$ is assumed to have a fixed degree $d \ge 2$.
Corresponding to each $f_{\lambda}$ there exists a non-degenerate homogeneous holomorphic mapping $F_{\lambda} : \mathbb C^{k+1} \rightarrow \mathbb C^{k+1}$ such that $\pi \circ F_{\lambda} = f_{\lambda} \circ \pi$ where $\pi : \mathbb C^{k+1} \setminus \{0\} \rightarrow \mathbb P^k$ is the canonical projection. Here, non-degeneracy means that $F_{\lambda}^{-1}(0) = 0$ which in turn implies that there are uniform constants $l, L >0$ with \begin{equation} l \Vert x \Vert^d \le \Vert F_{\lambda}(x) \Vert \le L \Vert x \Vert^d \end{equation} for all $\lambda \in M$ and $x \in \mathbb C^{k+1}$. Therefore for $0 < r \leq (2L)^{-1/(d-1)}$ \[ \Vert F_\lambda(x) \Vert \leq (1/2) \Vert x \Vert \] for all $\lambda\in M$ and $\Vert x \Vert \leq r$. Likewise for $R\geq (2/l)^{1/(d-1)}$ \[ \Vert F_\lambda(x) \Vert \geq 2 \Vert x \Vert \] for all $\lambda\in M$ and $\Vert x \Vert \geq R$. While the ergodic properties of such a family have been considered in \cite{T1}, \cite{T2} for instance, we are interested in looking at the basins of attraction which may be defined for each $\lambda \in M$ as \[ \mathcal A_{\lambda} = \big\{ x \in \mathbb C^{k+1} : F_{\sigma^{n-1}(\lambda)} \circ \ldots \circ F_{\sigma(\lambda)} \circ F_{\lambda}(x) \rightarrow 0 \; \text{as} \; n \rightarrow \infty \big\} \] and for each $\lambda\in M$ the region of normality $\Omega'_{\lambda} \subset \mathbb P^k$ which consists of all points $z \in \mathbb P^k$ for which there is a neighborhood $V_z$ on which the sequence $\big \{f_{\sigma^{n-1}(\lambda)} \circ \ldots \circ f_{\sigma(\lambda)} \circ f_{\lambda} \big\}_{n \ge 1}$ is normal. Analogs of $\mathcal A_{\lambda}$ arising from composing a given sequence of automorphisms of $\mathbb C^n$ have been considered in \cite{PW} where an example can be found for which these are not open in $\mathbb C^n$.
However, since each $F_{\lambda}$ is homogeneous, it is straightforward to verify that each $\mathcal A_{\lambda}$ is a nonempty, pseudoconvex complete circular domain. As in the case of a single holomorphic endomorphism of $\mathbb P^k$ (see \cite{HP}, \cite{U}), the link between these two domains is provided by the Green function. For each $\lambda \in M$ and $n \ge 1$, let \[ G_{n, \lambda}(x) = \frac{1}{d^n} \log \Vert F_{\sigma^{n-1}(\lambda)} \circ \ldots \circ F_{\sigma(\lambda)} \circ F_{\lambda}(x) \Vert. \] \begin{prop} For each $\lambda \in M$, the sequence $G_{n, \lambda}$ converges uniformly on $\mathbb C^{k+1}$ to a continuous plurisubharmonic function $G_{\lambda}$ which satisfies \[ G_{\lambda}(c x) = \log \vert c \vert + G_{\lambda}(x) \] for $c \in \mathbb C^{\ast}$. Further, $d G_{\lambda} = G_{\sigma(\lambda)} \circ F_{\lambda}$, and $G_{\lambda_n} \rightarrow G_{\lambda}$ locally uniformly on $\mathbb C^{k+1} \setminus \{0\}$ as $\lambda_n \rightarrow \lambda$ in $M$. Finally, \[ \mathcal A_{\lambda} = \{x \in \mathbb C^{k+1} : G_{\lambda}(x) < 0\} \] for each $\lambda \in M$. \end{prop} For each $\lambda \in M$, let $\mathcal H_{\lambda} \subset \mathbb C^{k+1}$ be the collection of those points in a neighborhood of which $G_{\lambda}$ is pluriharmonic and define $\Omega_{\lambda} = \pi(\mathcal H_{\lambda}) \subset \mathbb P^k$. \begin{prop} For each $\lambda \in M$, $\Omega_{\lambda} = \Omega'_{\lambda}$. Further, each $\Omega_{\lambda}$ is pseudoconvex and Kobayashi hyperbolic. \end{prop} {\bf{Acknowledgment:}} The first named author would like to thank G. Buzzard and M. Jonsson for their helpful comments on an earlier version of this paper. \section{Fibered families of H\'{e}non maps} \noindent The existence of a filtration $V^{\pm}_R, V_R$ for a H\'{e}non map is useful in localizing its dynamical behavior.
To study a family of such maps, it is therefore essential to first establish the existence of a uniform filtration that works for all of them. Let \begin{align*} V_R^+ &= \big\{ (x, y) \in \mathbb C^2 : \vert y \vert > \vert x \vert, \vert y \vert > R \big\},\\ V_R^- &= \big\{ (x, y) \in \mathbb C^2 : \vert y \vert < \vert x \vert, \vert x \vert > R \big\},\\ V_R &= \big\{ (x, y) \in \mathbb C^2 : \vert x \vert, \vert y \vert \le R\} \end{align*} be a filtration of $\mathbb{C}^2$ where $R$ is large enough so that \[ H_{\lambda}(V_R^+) \subset V_R^+ \] for each $\lambda \in M$. The existence of such an $R$ is shown in the following lemma. \begin{lem} \label{le1} There exists $R>0$ such that $$ H_\lambda(V_R^+)\subset V_R^+, \ \ H_\lambda(V_R^+\cup V_R)\subset V_R^+\cup V_R $$ and $$ H_\lambda^{-1}(V_R^-)\subset V_R^-, \ \ H_\lambda^{-1}(V_R^-\cup V_R)\subset V_R^-\cup V_R $$ for all $\lambda \in M$. Furthermore, $$ I_\lambda^{\pm}=\mathbb{C}^2\setminus K_\lambda^{\pm}=\bigcup_{n=0}^\infty (H_{\lambda}^{\pm n})^{-1}(V_R^{\pm}). $$ \end{lem} \begin{proof} Let \[ p_{j,\lambda}(y)=y^{d_j} + c_{\lambda(d_j-1)}y^{d_j-1} + \ldots + c_{\lambda 1}y + c_{\lambda 0} \] be the polynomial that occurs in the definition of $H_\lambda^{(j)}$. Then \begin{equation} \vert y^{-d_j} p_{j, \lambda}(y) - 1 \vert \le \vert c_{\lambda(d_j - 1)} y^{-1} \vert + \ldots + \vert c_{\lambda 1} y^{-d_j + 1} \vert + \vert c_{\lambda 0} y^{-d_j} \vert. \label{0} \end{equation} Let $a=\sup_{\lambda,j}|a_j(\lambda)|$. Since the coefficients of $p_{j,\lambda}$ are continuous on $M$, which is assumed to be compact, and $d_j \ge 2$ it follows that there exists $R>0$ such that \[ \vert p_{j,\lambda}(y) \vert \geq (2 + a) \vert y \vert \] for $\vert y \vert>R$, $\lambda\in M$ and $1\leq j \leq m$. To see that $H_\lambda(V_R^+)\subset V_R^+$ for this $R$, pick $(x,y)\in V_R^+$.
Then \begin{equation} \lvert p_{j,\lambda}(y)-a_j(\lambda)x\rvert \geq \lvert p_{j,\lambda}(y)\rvert -\lvert a_j(\lambda)x\rvert \geq \lvert y \rvert \label{1} \end{equation} for all $1\leq j \leq m$. It follows that the second coordinate of each $H_\lambda^{(j)}$ dominates the first one. This implies that \[ H_\lambda(V_R^+)\subset V_R^+ \] for all $\lambda\in M$. The other invariance properties follow by using similar arguments. Let $\rho>1$ be such that \[ \lvert p_{j,\lambda}(y)-a_j(\lambda)x \rvert > \rho \lvert y \rvert \] for $(x,y)\in \overline{V_R^+}$, $\lambda\in M$ and $1\leq j \leq m$. That such a $\rho$ exists follows from (\ref{1}). By letting $\pi_1$ and $\pi_2$ be the projections on the first and second coordinate respectively, one can conclude inductively that \begin{equation} H_\lambda(x,y)\in V_R^+ \text{ and } \vert \pi_2(H_\lambda(x,y)) \vert >\rho^m \vert y \vert. \label{2} \end{equation} Analogously, for all $(x,y)\in \overline{V_R^{-}}$ and for all $\lambda\in M$, there exists a $\rho>1$ satisfying \begin{equation} H_\lambda^{-1}(x,y)\in V_R^- \text{ and }|\pi_1(H_\lambda^{-1}(x,y))|>\rho^m|x|.\label{2.1} \end{equation} These two facts imply that \begin{equation} \overline{V_R^+} \subset H_{\lambda}^{-1}(\overline{V_R^+})\subset H_{\lambda}^{-1} \circ H_{\sigma(\lambda)}^{-1} (\overline{V_R^+}) \subset \ldots \subset (H_{\lambda}^{+n})^{-1}(\overline{V_R^+})\subset \ldots \end{equation} and \begin{equation} \overline{V_R^{-}} \supset H_{\lambda}^{-1}(\overline{V_R^{-}})\supset H_{\lambda}^{-1} \circ H_{\sigma(\lambda)}^{-1}(\overline{V_R^-}) \supset \ldots \supset (H_{\lambda}^{+n})^{-1}(\overline{V_R^-})\supset \ldots .
\label{3} \end{equation} At this point one can observe that if we start with a point in $\overline{V_R^+}$, it eventually escapes toward the point at infinity under forward iteration determined by the continuous function $\sigma$, i.e., $\Vert H_{\lambda}^{+n}(x, y) \Vert \rightarrow \infty$ as $n\rightarrow \infty$. This can be justified by using (\ref{2}) and observing that \begin{equation*} \lvert y_\lambda^n \rvert > \rho^m \lvert y_\lambda^{n-1} \rvert> \rho^{2m}\lvert y_\lambda^{n-2} \rvert> \ldots >\rho^{nm}\lvert y \rvert>\rho^{nm}R \end{equation*} where $H_{\lambda}^{+n}(x, y) =(x_\lambda^n,y_\lambda^n)$. A similar argument shows that if we start with any point $(x,y)\in \bigcup_{n=0}^{\infty} (H_{\lambda}^{+n})^{-1}(V_R^+)$ the orbit of the point never remains bounded. Therefore \begin{equation} \bigcup_{n=0}^{\infty} (H_{\lambda}^{+n})^{-1}(V_R^+)\subseteq I_\lambda^+. \end{equation} Moreover using (\ref{2}) and (\ref{2.1}), we get \[ (H_{\lambda}^{-n})^{-1}(V_R^+)\subseteq \big\{(x,y):\lvert y\rvert > \rho^{nm}R \big\} \] and \[ (H_{\lambda}^{+n})^{-1}(V_R^-)\subseteq \big\{(x,y):\lvert x \rvert > \rho^{nm}R \big\} \] which give \begin{equation} \bigcap_{n=0}^{\infty} (H_{\lambda}^{-n})^{-1}(V_R^+) = \bigcap_{n=0}^{\infty} (H_{\lambda}^{-n})^{-1}(\overline{V_R^+})=\phi \label{4} \end{equation} and \begin{equation} \bigcap_{n=0}^{\infty} (H_{\lambda}^{+n})^{-1}(V_R^-)= \bigcap_{n=0}^{\infty} (H_{\lambda}^{+n})^{-1}(\overline{V_R^{-}})=\phi \label{4.1} \end{equation} respectively. Set \[ W_R^+=\mathbb{C}^2\setminus \overline{V_R^{-}} \text{ and }W_R^-=\mathbb{C}^2\setminus \overline{V_R^+}.
\] Note that (\ref{3}) and (\ref{4.1}) are equivalent to \begin{equation} W_R^+\subset H_{\lambda}^{-1}(W_R^+) \subset \ldots \subset (H_{\lambda}^{+n})^{-1}(W_R^+)\subset \ldots \end{equation} and \begin{equation} \bigcup_{n=0}^{\infty} (H_{\lambda}^{+n})^{-1}(W_R^+)= \mathbb{C}^2 \label{5} \end{equation} respectively. Now (\ref{5}) implies that for any point $(x,y)\in \mathbb{C}^2$ there exists $n_0>0$ such that $H_{\lambda}^{+n}(x,y)\in W_R^+\subset V_R\cup \overline{V_R^+}$ for all $n\geq n_0$. So either \[ H_{\lambda}^{+n}(x,y)\in V_R \] for all $n \ge n_0$ or there exists $n_1 \geq n_0$ such that $H_{\lambda}^{+n_1}(x,y)\in \overline{V_R^+}$. In the latter case, $H_{\lambda}^{+(n_1+1)}(x,y)\in V_R^+$ by (\ref{2}). This implies that \begin{equation*} I_\lambda^{+}=\mathbb{C}^2\setminus K_\lambda^{+}=\bigcup_{n=0}^\infty (H_{\lambda}^{+n})^{-1}(V_R^{+}).\label{5.1} \end{equation*} A set of similar arguments yield \begin{equation*} I_\lambda^{-}=\mathbb{C}^2\setminus K_\lambda^{-}=\bigcup_{n=0}^\infty (H_{\lambda}^{-n})^{-1}(V_R^{-}). \end{equation*} \end{proof} \begin{rem}\label{re1} It follows from Lemma \ref{le1} that for any compact $A_\lambda \subset \mathbb{C}^2$ satisfying $A_\lambda \cap K_\lambda^+=\phi$, there exists $N_\lambda>0$ such that $H_{\lambda}^{+N_{\lambda}}(A_\lambda)\subseteq V_R^+$. More generally, for any compact $A \subset \mathbb{C}^2$ that satisfies $A\cap K_\lambda^+=\phi$ for each $\lambda\in M$, there exists $N>0$ so that $H_{\lambda}^{+N}(A)\subseteq V_R^+$ for all $\lambda\in M$. The proof again relies on the fact that the coefficients of $p_{j,\lambda}$ and $a_j(\lambda)$ vary continuously in $\lambda$ on the compact set $M$ for all $1 \le j \le m$.
\end{rem} \begin{rem}\label{re2} By applying the same kind of techniques as in the case of a single H\'{e}non map, it is possible to show that $I_\lambda^{\pm}$ are nonempty, pseudoconvex domains and $K_\lambda^{\pm}$ are closed sets satisfying $K_\lambda^{\pm}\cap V_R^{\pm}=\phi$ and having nonempty intersection with the $y$-axis and $x$-axis respectively. In particular, $K_\lambda^{\pm}$ are nonempty and unbounded. \end{rem} \subsection*{Proof of Proposition \ref{pr1}} Since the polynomials $p_{j, \lambda}$ are all monic, it follows that for every small $\epsilon_1 > 0$ there is a large enough $R > 1$ so that for all $(x,y)\in \overline{V_R^+}$, $1\leq j \leq m$ and for all $\lambda\in M$, we have $H_\lambda^{(j)}(x,y)\in V_R^+$ and \begin{equation} (1-\epsilon_1)\lvert y \rvert^{d_j}<\lvert \pi_2\circ H_\lambda^{(j)}(x,y)\rvert < (1+\epsilon_1)\lvert y \rvert^{d_j}. \label{7} \end{equation} For a given $\epsilon > 0$, choose an $\epsilon_1>0$ small enough so that the constants \[ A_1=\prod_{j=1}^m (1-\epsilon_1)^{d_{j+1} \ldots d_m} \text{ and } A_2=\prod_{j=1}^m (1+\epsilon_1)^{d_{j+1} \ldots d_m} \] (where $d_{j+1} \ldots d_m=1$ by definition when $j=m$) satisfy $1-\epsilon \leq A_1$ and $A_2 \leq 1+\epsilon$. Therefore by applying (\ref{7}) inductively, we get \begin{equation} (1-\epsilon)\lvert y \rvert^{d} \leq A_1\lvert y \rvert^{d}<\lvert \pi_2\circ H_\lambda(x,y)\rvert<A_2\lvert y \rvert^{d}\leq (1+\epsilon)\lvert y \rvert^{d}\label{8} \end{equation} for all $\lambda\in M$ and for all $(x,y)\in \overline{V_R^+}$. Let $(x,y)\in \overline{V_R^+}$. In view of (\ref{2}) there exists a large $R>1$ so that $H_\lambda^{+n}(x,y)=(x_\lambda^n,y_\lambda^n)\in V_R^+$ for all $n\geq 1$ and for all $\lambda\in M$.
Therefore $$ G_{n,\lambda}^+(x,y)=\frac{1}{d^n}\log\lvert \pi_2\circ H_\lambda^{+n}(x,y)\rvert $$ and by applying (\ref{8}) inductively we obtain \begin{equation*} (1-\epsilon)^{1+d+ \ldots +d^{n-1}} \lvert y \rvert^{d^n}<\lvert y_\lambda^n \rvert< (1+\epsilon)^{1+d+ \ldots +d^{n-1}}\lvert y \rvert^{d^n}. \end{equation*} Hence \begin{equation} 0<\log\lvert y \rvert+K_1<G_{n,\lambda}^+(x,y)=\frac{1}{d^n}\log\lvert \pi_2\circ H_\lambda^{+n}(x,y)\rvert<\log\lvert y \rvert+K_2,\label{9} \end{equation} with $K_1= (d^n-1)/(d^n(d-1)) \log(1-\epsilon)$ and $K_2= (d^n-1)/(d^n(d-1)) \log(1+\epsilon)$. By (\ref{9}) it follows that $$ \lvert G_{n+1,\lambda}^+(x,y)-G_{n,\lambda}^+(x,y)\rvert=\left | d^{-n -1} \log\lvert {y_\lambda^{n+1}}/{(y_\lambda^n)^d}\rvert\right | \lesssim d^{-n-1} $$ which proves that $\{G_{n,\lambda}^+\}$ converges uniformly on $\overline{V_R^+}$. As a limit of a sequence of uniformly convergent pluriharmonic functions $\{G_{n,\lambda}^+\}$, $G_\lambda^+$ is also pluriharmonic for each $\lambda\in M$ on $V_R^+$. Again by (\ref{9}), for each $\lambda\in M$, \[ G_\lambda^+-\log\lvert y \rvert \] is a bounded pluriharmonic function in $\overline{V_R^+}$. Therefore its restriction to vertical lines of the form $x = c$ can be continued across the point $(c, \infty)$ as a pluriharmonic function. Since \[ \lim_{\lvert y \rvert\rightarrow \infty}(G_\lambda^+(x,y)-\log\lvert y \rvert) \] is bounded in $x\in \mathbb{C}$ by (\ref{9}) it follows that $\lim_{\lvert y \rvert\rightarrow \infty}(G_\lambda^+(x,y)-\log\lvert y \rvert)$ must be a constant, say $\gamma_\lambda$ which also satisfies $$ \log (1-\epsilon)/(d-1) \leq \gamma_\lambda \leq \log (1+\epsilon)/(d-1).
$$ As $\epsilon > 0$ is arbitrary, it follows that \begin{equation} G_\lambda^+(x,y)=\log\lvert y \rvert + u_\lambda(x,y) \label{9.1} \end{equation} on $V_R^+$ where $u_\lambda$ is a bounded pluriharmonic function satisfying $u_\lambda(x,y) \rightarrow 0$ as $\vert y \vert \rightarrow \infty$. Now fix $\lambda\in M$ and $n\geq 1$. For any $r > n$ \begin{align*} G_{r,\lambda}^+(x,y)&= d^{-r}\log^+\Vert H_\lambda^{+r}(x,y)\Vert \\ &= d^{-n}G_{(r-n),\sigma^n(\lambda)}^+\circ H_\lambda^{+n}(x,y). \end{align*} As $r\rightarrow \infty$, $G_{r,\lambda}^+$ converges uniformly on $(H_{\lambda}^{+n})^{-1}(V_R^+)$ to the pluriharmonic function $d^{-n} G_{\sigma^n(\lambda)}^+ \circ H_\lambda^{+n}$. Hence $$ d^n G_\lambda^+(x,y)=G_{\sigma^n(\lambda)}^+\circ H_\lambda^{+n}(x,y) $$ for $(x,y)\in (H_\lambda^{+n})^{-1}(V_R^+)$. By (\ref{9}), for $(x,y)\in (H_\lambda^{+n})^{-1}(V_R^+)$ $$ G_{r,\lambda}^+(x,y)=d^{-n}G_{(r-n),\sigma^n(\lambda)}^+\circ H_\lambda^{+n}(x,y)> d^{-n}(\log R + K_1)> 0, $$ for each $r>n$ which shows that \[ G_\lambda^+(x,y)\geq d^{-n}(\log R + K_1)>0 \] for $(x,y)\in (H_\lambda^{+n})^{-1}(V_R^+)$. This is true for each $n\geq 1$. Hence $G_{r,\lambda}^+$ converges uniformly to the pluriharmonic function $G_\lambda^+$ on every compact set of \[ \bigcup_{n=0}^\infty (H_\lambda^{+n})^{-1}(V_R^+)=\mathbb{C}^2\setminus K_\lambda^+. \] Moreover $G_\lambda^+ >0$ on $\mathbb{C}^2\setminus K_\lambda^+$. Note that for each $\lambda\in M$, $G_\lambda^+=0$ on $K_\lambda^+$. By Remark \ref{re2}, there exists a large enough $R>1$ so that $K_\lambda^+\subseteq V_R \cup V_R^-$ for all $\lambda\in M$. Now choose any $A>R>1$. We will show that $\{G_{n,\lambda}^+\}$ converges uniformly to $G_\lambda^+$ on the bidisc \[ \Delta_A=\{(x,y):\lvert x \rvert\leq A,\lvert y \rvert\leq A\} \] as $n\rightarrow\infty$.
Consider the sets \[ N=\{(x,y)\in \mathbb{C}^2: \lvert x \rvert \leq A\}, \;N_\lambda=N\cap K_\lambda^+ \] for each $\lambda\in M$. Start with any point $z=(x_0,x_1)\in \mathbb{C}^2$ and define $(x_i^\lambda,x_{i+1}^\lambda)$ for $\lambda\in M$ and $i\geq 1$ in the following way: \[ (x_0^\lambda,x_1^\lambda) \xrightarrow{H_\lambda^{(1)}} (x_1^\lambda,x_2^\lambda) \xrightarrow{H_\lambda^{(2)}} \ldots \xrightarrow{H_\lambda^{(m)}} (x_m^\lambda,x_{m+1}^\lambda)\xrightarrow{H_\lambda^{(1)}} (x_{m+1}^\lambda,x_{m+2}^\lambda)\rightarrow \ldots , \] where $(x_0^\lambda, x_1^\lambda)=(x_0,x_1)$ and we apply $H_\lambda^{(1)}, \ldots ,H_\lambda^{(m)}$ periodically for all $\lambda\in M$. Inductively one can show that if $(x_i^\lambda,x_{i+1}^\lambda)\in N_\lambda$ for $0\leq i \leq j-1$, then $\lvert x_i^\lambda\rvert \leq A $ for $0\leq i \leq j$. This implies that there exists $n_0>0$ independent of $\lambda$ so that \begin{equation} G_{n,\lambda}^+(x,y)< \epsilon \label{10} \end{equation} for all $n\geq n_0$ and for all $(x,y)\in N_\lambda$. Consider a line segment \[ L_a=\{(a,w):\lvert w \rvert\leq A\} \subset \mathbb C^2 \] with $\lvert a \rvert \leq A$. Then $G_{n,\lambda}^+-G_\lambda^+$ is harmonic on $L_a^\lambda=\{(a,w):\lvert w \rvert < A\}\setminus K_\lambda^+$ viewed as a subset of $\mathbb{C}$ and the boundary of $L_a^\lambda$ lies in $\{(a,w):\lvert w \rvert=A\}\cup (K_\lambda^+\cap L_a)$. By Remark \ref{re1}, there exists $n_1>0$ so that $$ -\epsilon< G_{n,\lambda}^+(a,w)-G_{\lambda}^+(a,w)<\epsilon $$ for all $n\geq n_1$ and for all $(a,w)\in\{\lvert a \rvert\leq A,\lvert w \rvert=A\}$. The maximum principle shows that $$ -\epsilon <G_{n,\lambda}^+(x,y)-G_\lambda^+(x,y) < \epsilon $$ for all $n\geq \max\{n_0,n_1\}$ and for all $(x,y)\in L_a^\lambda$.
This shows that for any given $\epsilon>0$ there exists $n_2>0$ such that \begin{equation} -\epsilon< G_{n,\lambda}^+(z)-G_\lambda^+(z)<\epsilon \label{10.5} \end{equation} for all $n\geq n_2$ and for all $(\lambda,z)\in M\times \Delta_A$. Hence $G_{n,\lambda}^+$ converges uniformly to $G_\lambda^+$ on any compact subset of $\mathbb{C}^2$ and this convergence is also uniform with respect to $\lambda\in M$. In particular this implies that for each $\lambda\in M$, $G_\lambda^+$ is continuous on $\mathbb{C}^2$ and pluriharmonic on $\mathbb{C}^2\setminus K_\lambda^+$. Moreover $G_\lambda^+$ vanishes on $K_\lambda^+$. In particular, for each $\lambda\in M$, $G_\lambda^+$ satisfies the submean value property on $\mathbb{C}^2$. Hence $G_\lambda^+$ is plurisubharmonic on $\mathbb{C}^2$. Next, to show that the correspondence $\lambda \mapsto G_\lambda^{\pm}$ is continuous, take a compact set $S\subset \mathbb{C}^2$ and $\lambda_0\in M$. Then \begin{multline*} \vert G_{\lambda}^+(x,y)-G_{\lambda_0}^+(x,y) \vert \le \vert G_{n,\lambda}^+(x,y)-G_{\lambda}^+(x,y)\vert + \vert G_{n,\lambda}^+(x,y)-G_{n,\lambda_0}^+(x,y)\vert \\ + \vert G_{n,\lambda_0}^+(x,y)-G_{\lambda_0}^+(x,y)\vert \end{multline*} for $(x,y)\in S$. It follows from (\ref{10.5}) that for given $\epsilon>0$, one can choose a large $n_0>0$ such that the first and third terms above are less than $\epsilon/3$. By choosing $\lambda$ close enough to $\lambda_0$ it follows that $G_{n_0,\lambda}^+(x,y)$ and $G_{n_0,\lambda_0}^+(x,y)$ do not differ by more than $\epsilon/3$. Hence the correspondence $\lambda\mapsto G_\lambda^{+}$ is continuous. Similarly, the correspondence $\lambda\mapsto G_\lambda^-$ is also continuous.
To prove that $G_\lambda^+$ is H\"{o}lder continuous for each $\lambda\in M$, fix a compact $S \subset \mathbb C^2$ and let $R > 1$ be such that $S$ is compactly contained in $V_R$. Using the continuity of $G_{\lambda}^+$ in $\lambda$, there exists a $\delta > 0$ such that $G_{\lambda}^+(x, y) > (d + 1)\delta$ for each $\lambda \in M$ and $(x, y) \in V_R^+$. Now note that the correspondence $\lambda \mapsto K_{\lambda}^+ \cap V_R$ is upper semi-continuous. Indeed, if this is not the case, then there exists a $\lambda_0 \in M$, an $\epsilon > 0$ and a sequence $\lambda_n \in M$ converging to $\lambda_0$ such that for each $n \ge 1$ there exists a point $a_n \in K_{\lambda_n}^+ \cap V_R$ satisfying $\vert a_n - z \vert \ge \epsilon$ for all $z \in K_{\lambda_0}^+$. Let $a$ be a limit point of the $a_n$'s. Then by the continuity of $\lambda \mapsto G_{\lambda}^+$ it follows that \[ 0 = G_{\lambda_n}^+(a_n) \rightarrow G_{\lambda_0}^+(a) \] which implies that $a \in K_{\lambda_0}^+$. This is a contradiction. For each $\lambda\in M$, define \[ \Omega_\delta^\lambda= \big\{ (x,y)\in V_R : \delta < G_\lambda^+(x,y) \leq d \delta \big\} \] and \[ C_\lambda=\sup\big\{ \lvert {\partial G_\lambda^+}/{\partial x}\rvert,\lvert {\partial G_\lambda^+}/{\partial y}\rvert :(x,y)\in \Omega_\delta^\lambda \big\}. \] The first observation is that the $C_{\lambda}$'s are uniformly bounded above as $\lambda$ varies in $M$. To see this, fix $\lambda_0 \in M$ and $\tau > 0$ and let $W \subset M$ be a neighbourhood of $\lambda_0$ such that the sets \[ \Omega_W = \overline{\bigcup_{\lambda \in W} \Omega_{\delta}^{\lambda}} \;\; \text{and} \;\; K_W = \overline{ \bigcup_{\lambda \in W} (K_{\lambda}^+ \cap V_R)} \] are separated by a distance of at least $\tau$. This is possible since $K_{\lambda}^+ \cap V_R$ is upper semicontinuous in $\lambda$.
For each $\lambda \in W$, $G_{\lambda}^+$ is pluriharmonic on a fixed slightly larger open set containing $\Omega_W$. Cover the closure of this slightly larger open set by finitely many open balls and on each ball, the mean value property shows that the derivatives of $G_{\lambda}^+$ are dominated by a universal constant times the sup norm of $G_{\lambda}^+$ on it -- and this in turn is dominated by the number of open balls (which is the same for all $\lambda \in W$) times the sup norm of $G_{\lambda}^+$ on $V_R$ up to a universal constant. Since $G_{\lambda}^+$ varies continuously in $\lambda$, it follows that the $C_{\lambda}$'s are uniformly bounded for $\lambda \in W$ and the compactness of $M$ gives a global bound, say $C > 0$ independent of $\lambda$. Fix $\lambda_0 \in M$ and pick $(x, y) \in S \setminus K_{\lambda_0}^+$. Let $N > 0$ be such that \[ d^{-N} \delta < G_{\lambda_0}^+(x, y) \le d^{-N + 1} \delta \] so that \[ \delta < d^N G_{\lambda_0}^+(x, y) \le d \delta. \] The assumption that $N > 0$ means that $(x, y)$ is very close to $K_{\lambda_0}^+$. But \[ d^N G_{\lambda_0}^+(x, y) = G_{\sigma^N(\lambda_0)}^+ \circ H_{\lambda_0}^{+N}(x, y) \] which implies that $H_{\lambda_0}^{+N}(x, y) \in \Omega_{\delta}^{\sigma^N(\lambda_0)}$ where $G_{\sigma^N(\lambda_0)}^+$ is pluriharmonic. Note that \[ H_{\lambda_0}(V_R \cup V_R^+) \subset V_R \cup V_R^+, \; H_{\lambda_0}(V_R^+) \subset V_R^+ \] which shows that $H_{\lambda_0}^{+k}(x, y) \in V_R$ for all $k \le N$ since all the $G_{\lambda}^+$'s are at least $(d+1)\delta$ on $V_R^+$. Differentiation of the above identity leads to \[ d^N \frac{\partial G_{\lambda_0}^+}{\partial x}(x, y) = \frac{\partial G_{\sigma^N(\lambda_0)}^+}{\partial x} (H_{\lambda_0}^{+N}) \frac{ \partial (\pi_1 \circ H_{\lambda_0}^{+N}) }{\partial x}(x, y) + \frac{\partial G_{\sigma^N(\lambda_0)}^+}{\partial y}(H_{\lambda_0}^{+N}) \frac{ \partial (\pi_2 \circ H_{\lambda_0}^{+N}) }{\partial x}(x, y).
\] Let the derivatives of $H_{\lambda}$ be bounded above on $V_R$ by $A_{\lambda}$ and let $A = \sup A_{\lambda} < \infty$. It follows that the derivatives of $H_{\lambda_0}^{+N}$ are bounded above by $2^{N-1}A^N$ on $V_R$. Hence \[ \vert d^N \partial G_{\lambda_0}^+ / \partial x (x, y) \vert \le C (2A)^N. \] Let $\gamma = \log 2A/ \log d$ so that $C (2A)^N = C d^{N \gamma}$. Therefore \[ \vert \partial G_{\lambda_0}^+ / \partial x \vert \le C d^{N(\gamma - 1)} \le C (d \delta/G_{\lambda_0}^+)^{\gamma - 1} \] which implies that \[ \vert \partial (G_{\lambda_0}^+)^{\gamma}/ \partial x \vert \le C \gamma(d \delta)^{\gamma - 1}. \] A similar argument can be used to bound the partial derivative of $(G_{\lambda_0}^+)^{\gamma}$ with respect to $y$. Thus the gradient of $(G_{\lambda_0}^+)^{\gamma}$ is bounded uniformly at all points that are close to $K_{\lambda_0}^+$. Now suppose that $(x, y) \in S \setminus K_{\lambda_0}^+$ is such that \[ d^{N} \delta < G_{\lambda_0}^+(x, y) \le d^{N + 1} \delta \] for some $N > 0$. This means that $(x, y)$ is far away from $K_{\lambda_0}^+$ and the above equation can be written as \[ \delta < d^{-N} G_{\lambda_0}^+(x, y) \le d \delta. \] By the surjectivity of $\sigma$, there exists a $\mu_0 \in M$ such that $\sigma^N(\mu_0) = \lambda_0$. With this the invariance property of the Green's functions now reads \[ G_{\mu_0}^+ \circ (H_{\mu_0}^{+N})^{-1}(x, y) = d^{-N} G_{\lambda_0}^+(x, y). \] The compactness of $S$ shows that there is a fixed integer $m < 0$ such that if $(x, y)$ is far away from $K_{\lambda_0}^+$ then it can be brought into the strip \[ \big\{ (x,y) : \delta < G_{\lambda_0}^+(x,y) \leq d \delta \big\} \] by $(H_{\lambda}^{+ \vert k \vert})^{-1}$ for some $m \le k < 0$ and for all $\lambda \in M$. By enlarging $R$ we may assume that the image of $S$ under all the maps $(H_{\lambda}^{+ \vert k \vert})^{-1}$, $m \le k < 0$ is contained in $V_R$.
By increasing $A$, we may also assume that all the derivatives of $H_{\lambda}$ and $H_{\lambda}^{-1}$ are bounded by $A$ on $V_R$. Now repeating the same argument as above, it follows that the gradient of $(G_{\lambda_0}^+)^{\gamma}$ is bounded uniformly at all points that are far away from $K_{\lambda_0}^+$ -- the nuance about choosing $\gamma$ as before is also valid. The choice of $\mu_0$ such that $\sigma^{N}(\mu_0) = \lambda_0$ is irrelevant since the derivatives involved are with respect to $x, y$ only. The only remaining case is when $(x, y) \in \Omega_{\delta}^{\lambda_0}$ which precisely means that $N = 0$. But in this case, $(G_{\lambda_0}^+)^{\gamma - 1}$ is uniformly bounded on $V_R$ and so are the derivatives of $G_{\lambda_0}^+$ on $\Omega_{\delta}^{\lambda_0}$ by the reasoning given earlier. Therefore there is a uniform bound on the gradient of $(G_{\lambda_0}^+)^{\gamma}$ everywhere on $S$. This shows that $(G_{\lambda_0}^+)^{\gamma}$ is Lipschitz on $S$ which implies that $G_{\lambda_0}^+$ is H\"{o}lder continuous on $S$ with exponent $1/\gamma = \log d/ \log 2A$. A set of similar arguments can be applied to deduce analogous results for $G_{\lambda}^{-}$. \subsection*{Proof of Proposition \ref{pr2}} We have \[ (H_{\lambda}^{\pm 1})^{\ast}(\mu_{\sigma(\lambda)}^\pm) = (H_{\lambda}^{\pm 1})^{\ast}(dd^cG_{\sigma(\lambda)}^\pm) = dd^c(G_{\sigma(\lambda)}^\pm \circ H_\lambda^{\pm 1}) = dd^c(d^{\pm 1}G_\lambda^\pm) = d^{\pm 1}\mu_\lambda^\pm \] where the third equality follows from Proposition \ref{pr1}. A similar exercise shows that \[ (H_{\lambda}^{\pm 1})_{\ast} \mu_{\lambda}^{\pm} = d^{\mp 1} \mu_{\sigma(\lambda)}^{\pm}.
\] If $\sigma$ is the identity on $M$, then \[ G_{\lambda}^+ \circ H_{\lambda}^{\pm 1} = d^{\pm 1} G_{\lambda}^{+} \; \text{and} \; G_{\lambda}^{-} \circ H_{\lambda}^{\pm 1} = d^{\mp 1} G_{\lambda}^{-} \] which in turn imply that \[ (H_{\lambda}^{\pm 1})^{\ast} \mu_{\lambda} = (H_{\lambda}^{\pm 1})^{\ast} (\mu_{\lambda}^+ \wedge \mu_{\lambda}^-) = (H_{\lambda}^{\pm 1})^{\ast} \mu_{\lambda}^+ \wedge (H_{\lambda}^{\pm 1})^{\ast} \mu_{\lambda}^- = d^{\pm 1} \mu_{\lambda}^+ \wedge d^{\mp 1} \mu_{\lambda}^- = \mu_{\lambda}. \] By Proposition \ref{pr1}, the support of $\mu_\lambda^+$ is contained in $J_\lambda^+$. To prove the converse, let $z_0\in J_\lambda^+$ and suppose that $\mu_\lambda^+ =0$ on a neighbourhood $U_{z_0}$ of $z_0$. This means that $G_\lambda^+$ is pluriharmonic on $U_{z_0}$ and $G_\lambda^+$ attains its minimum value of zero at $z_0$. This implies that $G_\lambda^+ \equiv 0$ on $U_{z_0}$ which contradicts the fact that $G_\lambda^+>0$ on $\mathbb{C}^2\setminus K_\lambda^+$. Similar arguments can be applied to prove that ${\rm supp}(\mu_\lambda^-)=J_\lambda^-$. Finally, to show that $\lambda \mapsto J_{\lambda}^+$ is lower semicontinuous, fix $\lambda_0 \in M$ and $\epsilon > 0$. Let $x_0\in J_{\lambda_0}^+= {\rm supp}(\mu_{\lambda_0}^+)$. Then $\mu_{\lambda_0}^+(B(x_0, {\epsilon}/{2}))\neq 0$. Since the correspondence $\lambda \mapsto \mu_\lambda^+$ is continuous, there exists a $\delta>0$ such that \[ d(\lambda,\lambda_0)<\delta \text{ implies } \mu_\lambda^+(B(x_0, {\epsilon}/{2}))\neq 0. \] Therefore $x_0\in {(J_\lambda^+)}^\epsilon=\bigcup_{a\in J_\lambda^+}B(a,\epsilon)$ for all $\lambda \in M$ satisfying $d(\lambda,\lambda_0)< \delta$. Hence the correspondence $\lambda\mapsto J_\lambda^{\pm}$ is lower semicontinuous.
Let $\mathcal L$ be the class of plurisubharmonic functions on $\mathbb C^2$ of logarithmic growth, i.e., $$ \mathcal{L}=\{ u\in \mathcal{PSH}(\mathbb{C}^2): u(x,y)\leq \log^+\lVert (x,y) \rVert +L \} $$ for some $L>0$ and let $$ \tilde{\mathcal{L}}=\{ u\in \mathcal{PSH}(\mathbb{C}^2):\log^+\lVert (x,y) \rVert -L \leq u(x,y)\leq \log^+\lVert (x,y) \rVert +L\} $$ for some $L>0$. Note that there exists $L>0$ such that $$ G_\lambda^+(z)\leq \log^+ \lVert z \rVert +L $$ for all $z\in \mathbb{C}^2$ and for all $\lambda\in M$. Thus $G_\lambda^+ \in \mathcal{L}$ for all $\lambda\in M$. For $E\subseteq \mathbb{C}^2$, the pluricomplex Green function of $E$ is $$ L_E(z)=\sup\{u(z):u\in\mathcal{L},u\leq 0 \text{ on } E\} $$ and let $L_E^{\ast}(z)$ be its upper semicontinuous regularization. It turns out that the pluricomplex Green function of $K_\lambda^{\pm}$ is $G_\lambda^{\pm}$ for all $\lambda\in M$. The arguments are similar to those employed for a single H\'{e}non map and we merely point out the salient features. Fix $\lambda\in M$. Then $G_{\lambda}^+=0$ on $K_\lambda^+$ and $G_\lambda^+ \in \mathcal{L}$. So $G_\lambda^+ \leq L_{K_{\lambda}^+}$. To show equality, let $u\in \mathcal{L}$ be such that $u\leq 0=G_\lambda^+$ on $K_\lambda^+$. By Proposition \ref{pr1}, there exists $M_0>0$ such that \[ \log\lvert y \rvert-M_0<G_\lambda^+(x,y)<\log\lvert y \rvert+M_0 \] for $(x,y)\in V_R^+$. Since $u\in \mathcal{L}$, \[ u(x,y)-G_\lambda^+(x,y)\leq M_1 \] for some $M_1 > 0$ and $(x,y)\in V_R^+.$ Fix $x_0 \in\mathbb C$ and note that $u(x_0,y)-G_\lambda^+(x_0,y)$ is a bounded subharmonic function on the vertical line $T_{x_0}=\mathbb{C}\setminus (K_\lambda^+ \cap \{x=x_0\})$ and hence it can be extended across the point $y=\infty$ as a subharmonic function. Note also that $$ u(x_0,y)-G_\lambda^+(x_0,y)\leq 0 $$ on $\partial T_{x_0} \subseteq K_\lambda^+ \cap \{x=x_0\}$.
By the maximum principle it follows that $u(x_0,y)-G_\lambda^+(x_0,y)\leq 0$ on $T_{x_0}$. This implies that $u\leq G_\lambda^+ \text{ in } \mathbb{C}^2\setminus K_\lambda^+$ which in turn shows that $L_{K_{\lambda}^{+}}=G_{\lambda}^{+}$. Since $G_\lambda^+$ is continuous on $\mathbb{C}^2$, we have \[ L_{K_{\lambda}^{+}}=L^{\ast}_{K_{\lambda}^{+}}=G_\lambda^+. \] Similar arguments show that \[ L_{K_{\lambda}^{-}}=L^{\ast}_{K_{\lambda}^{-}}=G_{\lambda}^{-}. \] Let $u_\lambda=\max \{G_\lambda^+,G_\lambda^-\}$. Again by Proposition \ref{pr1} it follows that $u_\lambda\in \tilde{\mathcal{L}}$. For $\epsilon>0$, set $G_{\lambda,\epsilon}^{\pm}=\max \{G_\lambda^{\pm},\epsilon\}$ and $u_{\lambda,\epsilon}=\max \{G_{\lambda,\epsilon}^+, G_{\lambda,\epsilon}^{-}\}$. By Bedford--Taylor, \[ {(dd^c u_{\lambda,\epsilon})}^2=dd^c G_{\lambda,\epsilon}^+ \wedge dd^c G_{\lambda,\epsilon}^{-}. \] Now for a $z\in \mathbb{C}^2 \setminus K_\lambda^{\pm}$, there exists a small neighborhood $\Omega_{z}\subset \mathbb{C}^2\setminus K_\lambda^{\pm}$ of $z$ such that ${(dd^c u_{\lambda,\epsilon})}^2=0$ on $\Omega_z$ for sufficiently small $\epsilon$. It follows that supp${(dd^c u_\lambda)}^2 \subset K_\lambda$. Since $G_\lambda^{\pm}=L^{\ast}_{{K_\lambda}^{\pm}} \leq L^{\ast}_{K_\lambda}$, we have $u_\lambda\leq L^{\ast}_{K_\lambda}$. Further note that $L^{\ast}_{K_\lambda} \leq L_{K_\lambda}\leq 0=u_\lambda$ almost everywhere on $K_\lambda$ with respect to the measure ${(dd^c u_\lambda )}^2$. This is because the set $\{L_{K_\lambda}^* > L_{K_\lambda}\}$ is pluripolar and consequently has measure zero with respect to ${(dd^c u_\lambda)}^2$. Therefore $L^{\ast}_{K_\lambda}\leq u_\lambda$ in $\mathbb{C}^2$.
Finally, $L_{K_\lambda}$ is continuous and thus $L^{\ast}_{K_\lambda}=L_{K_\lambda}=\max \{G_\lambda^+, G_\lambda^-\}$. For a non-pluripolar bounded set $E$ in $\mathbb{C}^2$ the complex equilibrium measure is $\mu_E={(dd^c L^{\ast}_E)^2}$. Again by Bedford--Taylor, $\mu_{K_\lambda}= \lim_{\epsilon \rightarrow 0}{(dd^c \max\{G_\lambda^+, G_\lambda^-,\epsilon\})}^2$ which when combined with $$ \mu_\lambda=\mu_\lambda^+ \wedge \mu_\lambda^-= \lim_{\epsilon\rightarrow 0}dd^c G_{\lambda,\epsilon}^+ \wedge dd^c G_{\lambda,\epsilon}^- $$ and $$ {(dd^c \max \{G_\lambda^+, G_\lambda^-,\epsilon\})}^2=dd^c G_{\lambda,\epsilon}^+ \wedge dd^c G_{\lambda,\epsilon}^- $$ shows that $\mu_\lambda$ is the equilibrium measure of $K_\lambda$. Since supp$(\mu_\lambda^{\pm})=J_\lambda^{\pm}$, we have supp$(\mu_\lambda) \subset J_\lambda$. \subsection{Proof of Theorem \ref{thm1}} Let $\mathcal L_y$ be the subclass of $\mathcal L$ consisting of all those functions $v$ for which there exists $R > 0$ such that \[ v(x, y) - \log \vert y \vert \] is a bounded pluriharmonic function on $V_R^+$. Fix $\lambda\in M$ and let $\omega= 1/4 \;dd^c \log (1 + \Vert z \Vert^2)$. For a $(1, 1)$ test form $\varphi$ on $\mathbb C^2$, it follows that there exists a $C >0$ such that \[ -C \Vert \varphi \Vert \omega \leq \varphi \le C \Vert \varphi \Vert \omega \] by the positivity of $\omega$.
{\it Step 1:} $S_{\lambda}$ is nonempty.\\ Note that \begin{eqnarray} \frac{1}{d^n}\left | \int_{\mathbb{C}^2}(H_\lambda^{+n})^{\ast}(\psi T)\wedge \varphi \right| &\lesssim &\frac{\Vert \varphi \Vert}{d^n}\int_{\mathbb{C}^2}(H_\lambda^{+n})^{\ast}(\psi T)\wedge dd^c \log (1 + \Vert z \Vert^2) \nonumber \\ & \lesssim & \frac{\Vert \varphi \Vert}{d^n}\int_{\mathbb{C}^2}dd^c(\psi T)\wedge \log (1 + \Vert (H_{\lambda}^{+n})^{-1}(z) \Vert ).\label{13} \end{eqnarray} Direct calculations show that \begin{equation} \frac{1}{d^n}\log^+ \| (H_\lambda^{+n})^{-1}(z) \| \leq \log^+|z|+C \label{14} \end{equation} for some $C>0$, for all $n\geq 1$, $\lambda\in M$ and \begin{equation} \log (1 + \Vert z \Vert^2) \leq 2 \log^+|z|+2\log 2.\label{15} \end{equation} It follows that \begin{equation*} 0 \le \frac{1}{d^n} \log \left( 1 + \Vert (H_{\lambda}^{+n})^{-1}(z) \Vert \right) \le 2 \log^+ \vert z \vert + C \end{equation*} for some $C>0$, for all $n>0$ and $\lambda\in M$. Hence \begin{equation} \frac{1}{d^n}\left | \int_{\mathbb{C}^2} (H_\lambda^{+n})^{\ast}(\psi T)\wedge \varphi \right| \lesssim \Vert \varphi \Vert. \label{16} \end{equation} The Banach-Alaoglu theorem shows that there is a subsequence $\frac{1}{d^{n_j^\lambda}}(H_{\lambda}^{+n_j^{\lambda}})^{\ast}(\psi T)$ that converges in the sense of currents to a positive $(1,1)$ current, say $\gamma_\lambda$. This shows that $S_\lambda$ is nonempty. It also follows from the above discussion that $\int_{\mathbb C^2} \gamma_{\lambda} \wedge \omega < + \infty$. \noindent {\it Step 2:} Each $\gamma_{\lambda} \in S_{\lambda}$ is closed. Further, the support of $\gamma_{\lambda}$ is contained in $K_{\lambda}^+$.\\ Let $\chi$ be a smooth real $1$-form with compact support in $\mathbb C^2$ and let $\psi_1 \ge 0$ be such that $\psi_1 = 1$ in a neighbourhood of ${\rm supp}(\psi)$.
Then \[ \frac{1}{d^{n_j^\lambda}}\int_{\mathbb{C}^2}d \chi \wedge (H_\lambda^{+n_j^{\lambda}})^{\ast}(\psi T) = \frac{1}{d^{n_j^\lambda}}\int_{\mathbb{C}^2}\chi \circ (H_\lambda^{+n_j^{\lambda}})^{-1} \wedge d\psi \wedge \psi_1 T. \] to obtain which the assumption that ${\rm supp}(\psi) \cap {\rm supp}(dT) = \phi$ is used. By the Cauchy-Schwarz inequality it follows that the term on the right above is dominated by the square root of \[ \left(\int_{\mathbb{C}^2} \big( (J \chi \wedge \chi)\circ (H_\lambda^{+n_j^{\lambda}})^{-1} \big) \wedge \psi_1 T\right) \left( \int_{\mathbb{C}^2} d\psi \wedge d^c \psi \wedge \psi_1 T \right) \] whose absolute value in turn is bounded above by a harmless constant times $d^{n_j^\lambda}$. Here $J$ is the standard $\mathbb R$-linear map on $1$-forms satisfying $J(d z_j) = i d \overline z_j$ for $j = 1, 2$. Therefore \[ \left| \frac{1}{d^{n_j^{\lambda}}} \int_{\mathbb{C}^2}\chi \circ (H_{\lambda}^{+n_j^{\lambda}})^{-1} \wedge d\psi \wedge \psi_1 T \right| \lesssim d^{- n_j^{\lambda} / 2}. \] Evidently, the right hand side tends to zero as $j \rightarrow \infty$. This shows that $\gamma_\lambda$ is closed. Let $R>0$ be large enough so that supp$(\psi T)\cap V_R^+=\phi$. Let $z\notin K_\lambda^+$ and $B_z$ a small open ball around it such that $\overline{B_z} \cap K_\lambda^+=\phi$. By Lemma \ref{le1}, there exists an $N>0$ such that $H_\lambda^{+n}(B_z)\subset V_R^+$ for all $n>N$. Therefore $B_z \cap \text{supp}(H_\lambda^{+n})^{\ast}(\psi T)= B_z \cap (H_{\lambda}^{+n})^{-1}(\text{supp}(\psi T))=\phi$ for all $n>N$. Since supp$(\gamma_\lambda)\subset \overline{\bigcup_{n=N}^\infty \text{supp}(H_\lambda^{+n})^{\ast}(\psi T)}$, we have $z\notin \text{supp}(\gamma_\lambda)$. This implies $\text{supp}(\gamma_\lambda)\subset K_\lambda^+$.
Since $K_\lambda^+\cap V_R^+=\phi$ for all $\lambda\in M$, it also follows that $\text{supp}(\gamma_\lambda)$ does not intersect $\overline{V_R^+}$. \noindent {\it Step 3:} Each $\gamma_{\lambda}$ is a multiple of $\mu_{\lambda}^+$. It follows from Proposition 8.3.6 in \cite{MNTU} that $\gamma_{\lambda} = c_{\gamma, \lambda} dd^c U_{\gamma, \lambda}$ for some $c_{\gamma, \lambda} > 0$ and $U_{\gamma, \lambda} \in \mathcal L_y$. In this representation, $c_{\gamma, \lambda}$ is unique while $U_{\gamma, \lambda}$ is unique up to additive constants. We impose the following condition on $U_{\gamma,\lambda}$: \begin{equation} \lim_{|y|\rightarrow \infty} (U_{\gamma,\lambda}-\log|y|)=0 \label{17} \end{equation} and this uniquely determines $U_{\gamma, \lambda}$. It will suffice to show that $U_{\gamma, \lambda} = G_{\lambda}^+$. Let $\gamma_{\lambda,x}$ denote the restriction of $\gamma_\lambda$ to the plane $\{(x,y):y\in \mathbb{C}\}$. Since $U_{\gamma, \lambda} \in \mathcal L_y$, it follows that \begin{equation} \int_{\mathbb{C}}\gamma_{\lambda,x}=2\pi c_{\gamma,\lambda}, \;\; U_{\gamma,\lambda}(x,y)=\frac{1}{2\pi c_{\gamma,\lambda}} \int_{\mathbb{C}}\log |y-\zeta|\gamma_{\lambda,x}(\zeta). \label{18} \end{equation} Consider a uniform filtration $V^{\pm}_R, V_R$ for all the maps $H_{\lambda}$ where $R^d > 2R$ and $\vert p_{j, \lambda}(y) \vert \ge \vert y \vert^d / 2$ for $\vert y \vert \ge R$. Let $0 \neq a = \sup \vert a_j(\lambda) \vert < \infty$ (where the supremum is taken over all $1 \le j \le m$ and $\lambda \in M$) and choose $R_1 > R^d /2$. Define \[ A = \big \{ (x, y) \in \mathbb C^2 : \vert y \vert^d \ge 2(1 + a) \vert x \vert + 2 R_1 \big \}. \] Evidently $A \subset \{ \vert y \vert > R \}$. Lemma \ref{le1} shows that for all $\lambda \in M$, $H_{\lambda}(x, y) \in V_R^+$ when $(x, y) \in A \cap V_R^+$.
Furthermore for $(x, y) \in A \cap (\mathbb C^2 \setminus V_R^+)$, it follows that \[ \vert p_{j, \lambda}(y) - a_j(\lambda)x \vert \ge \vert y \vert^d / 2 - a \vert x \vert \ge \vert y \vert + R. \] This shows that $H_{\lambda}(A) \subset V_R^+$. By Lemma \ref{le1} again it can be seen that $H_{\lambda}^{+n}(A) \subset V_R^+$ for all $n \ge 1$ which shows that $A \cap K_{\lambda}^+ = \phi$ for all $\lambda \in M$. Let $C>0$ be such that \[ C^d \geq \max\{2(1+\lvert a \rvert), 2R_1\}. \] If $|y|\geq C(\lvert x \rvert^{1/d}+1)$ then \begin{equation*} {|y|}^{d} \geq C^{d}(\lvert x\rvert+1) \geq 2(1+\lvert a \rvert)\lvert x \rvert + 2R_1 \end{equation*} which implies that \[ B= \big\{ (x,y)\in \mathbb{C}^2: |y|\geq C(\lvert x \rvert^{1/d}+1) \big\}\subset A \] and hence $K_\lambda^+ \cap B=\phi $. Since $V_R^+ \subset B $ for sufficiently large $R$, by applying Lemma \ref{le1} once again it follows that \begin{equation} K_\lambda^+\cap B=\phi \text{ and } \bigcup_{n=0}^\infty (H_{\lambda}^{+n})^{-1}(B)=\mathbb{C}^2\setminus K_\lambda^+\label{19} \end{equation} for all $\lambda\in M$. Set $r=C(|x|^{1/d}+1)$. Since supp$(\gamma_\lambda)\subset K_\lambda^+$ it follows that \[ \text{supp}(\gamma_{\lambda,x})\subset \{\lvert y \rvert \leq r\} \] for all $\lambda\in M$. Since \[ \lvert y \rvert-r\leq \lvert y-\zeta\rvert\leq \lvert y \rvert+r \] for $\lvert y \rvert>r$ and $\lvert \zeta\rvert\leq r$, (\ref{18}) yields \[ \log(\lvert y \rvert-r) \leq U_{\gamma,\lambda}(x,y) \leq \log(\lvert y \rvert+r) \] which implies that \[ -(r/{\lvert y \rvert})/(1- r/{\lvert y \rvert})\leq U_{\gamma,\lambda}(x,y)- \log \lvert y \rvert \leq r/{\lvert y \rvert}. \] Hence for $\lvert y \rvert > 2r$, we get \begin{equation} -2r/{\lvert y \rvert} \leq U_{\gamma,\lambda}(x,y)- \log \lvert y \rvert \leq r/{\lvert y \rvert} \label{20} \end{equation} for all $\lambda\in M$.
For each $N \ge 1$, let $\gamma_\lambda(N) = d^{N}(H_{\lambda}^{+N})_{\ast}(\gamma_\lambda)$. Then \[ \gamma_\lambda(N)=\lim_{j \rightarrow \infty} d^{-n_j + N}\big( H_{\sigma^N(\lambda)}^{+(n_j - N)} \big)^{\ast}(\psi T) \in S_{\sigma^N(\lambda)}(\psi T). \] Therefore \[ \gamma_{\sigma^N(\lambda)} = c_{\gamma,\sigma^N(\lambda)}dd^c U_{\gamma,\sigma^N(\lambda)} \] for some $c_{\gamma,\sigma^N(\lambda)}>0$ and $U_{\gamma,\sigma^N(\lambda)}\in \mathcal{L}_y$ and moreover \[ c_{\gamma,\lambda}dd^c U_{\gamma,\lambda} = \gamma_{\lambda} = d^{-N} \big( H_{\lambda}^{+N} \big)^{\ast} \gamma_{\sigma^N(\lambda)} = c_{\gamma,\sigma^N(\lambda)}dd^c \big(d^{-N} \big(H_{\lambda}^{+N} \big)^{\ast} U_{\gamma, \sigma^N(\lambda)} \big). \] Note that both $d^{-N} \big(H_{\lambda}^{+N} \big)^{\ast} U_{\gamma, \sigma^N(\lambda)}$ and $U_{\gamma,\sigma^N(\lambda)}$ belong to $\mathcal{L}_y$. It follows that $c_{\gamma, \lambda} = c_{\gamma, \sigma^N(\lambda)}$ and $d^{-N} \big(H_{\lambda}^{+N} \big)^{\ast} U_{\gamma, \sigma^N(\lambda)}$ and $U_{\gamma,\lambda}$ coincide up to an additive constant which can be shown to be zero as follows. By the definition of the class $\mathcal L_y$, there exists a pluriharmonic function $u_{\lambda, N}$ on some $V_R^+$ such that \[ U_{\gamma,\sigma^N(\lambda)}(x,y)- \log \lvert y \rvert = u_{\lambda,N} \text{ and } \lim_{\lvert y \rvert \rightarrow \infty}u_{\lambda,N}(x,y)= u_0 \in \mathbb{R}. \] Therefore if $(x,y)\in (H_{\lambda}^{+N})^{-1}(V_R^+)$ and $(x_N^{\lambda}, y_N^{\lambda}) = H_{\lambda}^{+N}(x, y)$ then \[ d^{-N} \big(H_{\lambda}^{+N} \big)^{\ast} U_{\gamma, \sigma^N(\lambda)} (x, y) - d^{-N}\log \lvert y_N^\lambda \rvert = d^{-N}u_{\lambda, N}(x_N^\lambda,y_N^\lambda).
\] By (2.15), we have that \[ d^{-N}\log\lvert y_N^\lambda\rvert - \log\lvert y \rvert \rightarrow 0 \] as $\lvert y \rvert \rightarrow \infty$ which shows that \[ d^{-N} \big(H_{\lambda}^{+N} \big)^{\ast} U_{\gamma, \sigma^N(\lambda)}(x, y) - \log\lvert y \rvert \rightarrow 0 \] as $\vert y \vert \rightarrow \infty$. But by definition \[ U_{\gamma,\lambda}(x,y) - \log\lvert y \rvert \rightarrow 0 \] as $\lvert y \rvert\rightarrow \infty$ and this shows that $ d^{-N} \big(H_{\lambda}^{+N} \big)^{\ast} U_{\gamma, \sigma^N(\lambda)} = U_{\gamma,\lambda}$. Let $(x,y)\in \mathbb{C}^2\setminus K_\lambda^+$ and $\epsilon>0$. For a sufficiently large $n$, $(x_n^\lambda, y_n^\lambda)=H_\lambda^{+n}(x,y)$ satisfies $\lvert x_n^\lambda\rvert \leq \lvert y_n^\lambda\rvert$ and $(x_n^\lambda,y_n^\lambda)\in B$ as defined above. Hence by (\ref{20}) we get \[ \left| d^{-n} \big(H_{\lambda}^{+n} \big)^{\ast} U_{\gamma, \sigma^n(\lambda)} - d^{-n}\log \lvert y_n^\lambda \rvert \right| \leq \frac{2C}{d^n\lvert y_n^\lambda\rvert}({\lvert x_n^\lambda \rvert}^{1/d}+1)<\epsilon. \] On the other hand by using (\ref{9.1}), it follows that \[ \left| G_\lambda^+(x,y)- d^{-n}\log\lvert y_n^\lambda\rvert\right|<\epsilon \] for large $n$. Combining these two inequalities and the fact that $ d^{-n} \big(H_{\lambda}^{+n} \big)^{\ast} U_{\gamma, \sigma^n(\lambda)}=U_{\gamma,\lambda}$ for all $n\geq 1$ we get \[ \left| G_\lambda^+(z)-U_{\gamma,\lambda}(z)\right|<2\epsilon. \] Hence $U_{\gamma,\lambda}=G_\lambda^+$ in $\mathbb{C}^2\setminus K_\lambda^+$. The next step is to show that $U_{\gamma,\lambda}=0$ in the interior of $K_\lambda^+$.
Since $U_{\gamma,\lambda}=G_\lambda^+$ in $\mathbb{C}^2\setminus K_\lambda^+$, the maximum principle applied to $U_{\gamma,\lambda}(x,.)$ with $x$ being fixed, gives $U_{\gamma,\lambda}\leq 0$ on $K_\lambda^+$. Suppose that there exists a nonempty $\Omega\subset\subset K_\lambda^+$ satisfying $U_{\gamma,\lambda}\leq -t$ in $\Omega$ with $t>0$. Let $R>0$ be so large that $\bigcup_{n=0}^{\infty}H_\lambda^{+n}(\Omega)\subset V_R$ -- this follows from Lemma \ref{le1}. Since $d^{-n} \big(H_{\lambda}^{+n} \big)^{\ast} U_{\gamma, \sigma^n(\lambda)}=U_{\gamma,\lambda}$ for each $n \ge 1$, it follows that \[ H_\lambda^{+n}(\Omega)\subset \big\{U_{\gamma,\sigma^n(\lambda)}\leq -d^n t\big\}\cap V_R \] for each $n \ge 1$. The measure of the last set with $x$ fixed and $\lvert x \rvert\leq R$ can be estimated in this way -- let \[ Y_x=\big\{ y\in \mathbb{C}:U_{\gamma,\sigma^n(\lambda)}\leq -d^n t\big\}\cap \big\{\lvert y \rvert <R\big\}. \] By the definition of capacity \[ \text{cap}(Y_x)\leq \exp (-d^n t) \] and since the Lebesgue measure of $Y_x$, say $m(Y_x)$ is at most $\pi e {\text{cap}(Y_x)}^2$ (by the compactness of $Y_x \subset \mathbb C$) we get \[ m(Y_x)\leq \pi \exp(1-2d^n t). \] Now for each $\lambda\in M$, the Jacobian determinant of $H_\lambda$ is a constant given by $a_\lambda= a_1(\lambda) a_2(\lambda) \ldots a_m(\lambda)\neq 0$ and since the correspondence $\lambda \mapsto a_\lambda$ is continuous, an application of Fubini's theorem yields \[ a^n m(\Omega)\leq \lvert a_{\sigma^{n-1}(\lambda)}\cdots a_\lambda\rvert m(\Omega)=m(H_\lambda^{+n}(\Omega))\leq \int_{\lvert x \rvert\leq R}m(Y_x)dv_x \leq \pi^2 R^2 \exp (1-2d^n t) \] where $a=\inf_{\lambda\in M} \lvert a_\lambda \rvert $. This is evidently a contradiction for large $n$ if $m(\Omega)>0$.
So far it has been shown that $U_{\gamma,\lambda}=G_\lambda^+$ in $\mathbb{C}^2\setminus J_\lambda^+$. By using the continuity of $G_\lambda^+$ and the upper semi-continuity of $U_{\gamma,\lambda}$, we have that $U_{\gamma,\lambda}\geq G_\lambda^+$ in $\mathbb{C}^2$. Let $\epsilon>0$ and consider the slice $D_\lambda=\{y:G_\lambda^+(x,y)<\epsilon\}$ in the $y$-plane for some fixed $x$. Note that $U_{\gamma,\lambda}(x,.)=G_\lambda^+(x,.)=\epsilon$ on the boundary $\partial D_\lambda$. Hence by the maximum principle $U_{\gamma,\lambda}(x,.)\leq \epsilon$ in $D_\lambda$. Since $x$ and $\epsilon$ are arbitrary, it follows that $U_{\gamma,\lambda}=G_\lambda^+$ in $\mathbb{C}^2$. This implies that \[ \gamma_\lambda=c_{\gamma,\lambda}\mu_\lambda^+ \] for any $\gamma_\lambda\in S_\lambda(\psi T)$. This completes the proof of Theorem \ref{thm1}. \subsection{Proof of Proposition 1.4} Let $\sigma : M \rightarrow M$ be an arbitrary continuous map and pick a $\gamma_{\lambda} \in S(\psi, T)$. Let $\theta = 1/2 \;dd^c \log (1 + \vert x \vert^2)$ in $\mathbb C^2$ (with coordinates $x, y$) which is a positive closed $(1, 1)$-current depending only on $x$. Then for any test function $\varphi$ on $\mathbb C^2$, \[ \int_{\mathbb{C}^2}\varphi \gamma_\lambda \wedge \theta = c_{\gamma,\lambda}\int_{\mathbb{C}^2}U_{\gamma,\lambda}dd^c \varphi \wedge \theta = c_{\gamma,\lambda}\int_{\mathbb{C}}\theta \int_{\mathbb{C}}U_{\gamma,\lambda} \Delta_y \varphi = c_{\gamma,\lambda} \int_{\mathbb{C}}\theta \int_{\mathbb{C}}\varphi \Delta_y U_{\gamma,\lambda}.
\] Since $y \mapsto U_{\gamma,\lambda}(x,y)$ has logarithmic growth near infinity and $\varphi$ is arbitrary it follows that \begin{equation} \int_{\mathbb{C}^2}\gamma_\lambda \wedge \theta = 2\pi c_{\gamma,\lambda}\int_{\mathbb{C}^2}\theta ={(2\pi)}^2c_{\gamma,\lambda}.\label{19a} \end{equation} Let $R > 0$ be large enough so that $\text{supp}(\psi T) \cap V_R^+ = \phi$ which implies that $\text{supp}(\psi T)$ is contained in the closure of $V_R \cup V_R^-$. Then \begin{eqnarray*} \int_{\mathbb{C}^2}\frac{1}{d^{n_j^\lambda}} (H_\lambda^{{+ n_j^\lambda}})^{\ast}(\psi T) \wedge \theta &=& \frac{1}{d^{n_j^\lambda}}\int_{\mathbb{C}^2}\psi T \wedge \frac{1}{2} (H_\lambda^{{+ n_j^\lambda}})_{\ast}dd^c\log (1+|x|^2)\\ &=& \frac{1}{d^{n_j^\lambda}}\int_{\mathbb{C}^2} (\psi T)\wedge dd^c\left(\frac{1}{2}\log (1+|\pi_1\circ (H_\lambda^{{+ n_j^\lambda}})^{-1}|^2)\right)\\ &=& \frac{1}{d^{n_j^\lambda}} \int_{\overline{V_R\cup V_R^-}}\psi T \wedge dd^c\left(\frac{1}{2}\log (1+|\pi_1\circ (H_\lambda^{+ n_j^\lambda})^{-1}|^2)\right). \end{eqnarray*} It is therefore sufficient to study the behavior of $\log (1+|\pi_1\circ (H_\lambda^{+ n_j^\lambda})^{-1}|^2)$. But \[ \log^+ \vert x \vert \le \log^+ \vert (x, y) \vert \le \log^+ \vert x \vert + R \] for $(x, y) \in V_R \cup V_R^-$ and by combining this with \[ 2 \log^+ \vert x \vert \le \log (1 + \vert x \vert^2) \le 2 \log^+ \vert x \vert + \log 2 \] it follows that the behavior of $(1/2) d^{-n_j^{\lambda}} \log (1+|\pi_1\circ (H_\lambda^{+ n_j^\lambda})^{-1}|^2)$ as $j \rightarrow \infty$ is similar to that of $d^{-n_j^{\lambda}} \log^+ \vert (H_\lambda^{+ n_j^\lambda})^{-1} \vert$. Now suppose that $\sigma$ is the identity on $M$.
In this case, $(H_\lambda^{+ n_j^\lambda})^{-1}$ is just the usual $n_j^{\lambda}$--fold iterate of the map $H_{\lambda}^{-1}$ and by Proposition 1.1 it follows that \[ \lim_{j \rightarrow \infty} d^{-n_j^{\lambda}} \log \Vert (H_\lambda^{+n_j^\lambda})^{-1} \Vert = G_{\lambda}^- \] and hence that \[ 4 \pi^2 c_{\gamma, \lambda} = \int_{\mathbb C^2} \gamma_{\lambda} \wedge \theta = \int_{\mathbb{C}^2} \lim_{j \rightarrow \infty} \frac{1}{d^{n_j^\lambda}} (H_\lambda^{{+ n_j^\lambda}})^{\ast}(\psi T) \wedge \theta = \int_{\mathbb C^2} \psi T \wedge \mu_{\lambda}^-. \] The right side is independent of the subsequence used in the construction of $\gamma_{\lambda}$ and hence $S(\psi, T)$ contains a unique element. The other case to consider is when there exists a $\lambda_0 \in M$ such that $\sigma^n(\lambda) \rightarrow \lambda_0$ for all $\lambda$. For each $n \ge 1$ let \[ \tilde G_{n, \lambda}^- = \frac{1}{d^n} \log^+ \Vert (H_{\lambda}^n)^{-1} \Vert. \] Note that $\tilde G_{n, \lambda}^- \neq G_{n, \lambda}^-$! It will suffice to show that $\tilde G_{n, \lambda}^-$ converges uniformly on compact subsets of $\mathbb C^2$ to a plurisubharmonic function, say $\tilde G_{\lambda}^-$. Let \[ \tilde K_{\lambda}^- = \big\{ z \in \mathbb C^2 : \;\text{the sequence} \;\{ (H_{\lambda}^{+n})^{-1}(z) \} \;\text{is bounded} \;\big\} \] and let $A \subset \mathbb C^2$ be a relatively compact set such that $A \cap \tilde K_{\lambda}^- = \phi$ for all $\lambda \in M$. The arguments used in Lemma \ref{le1} show that \[ \mathbb C^2 \setminus \tilde K_{\lambda}^- = \bigcup_{n=0}^{\infty} H_{\lambda}^{+n}(V_R^-) \] for a sufficiently large $R > 0$. As in Proposition 1.1 it can be shown that $\tilde G_{n, \lambda}^-$ converges to a pluriharmonic function $\tilde G_{\lambda}^-$ on $V_R^-$.
Hence for large $m, n$ \begin{equation} \vert \tilde G_{m, \lambda}^-(p) - \tilde G_{n, \lambda}^-(q) \vert < \epsilon \end{equation} for $p, q \in V_R^-$ that are close enough. Let $n_0$ be such that $(H_{\lambda_0}^{+n_0})^{-1}(A) \subset V_R^-$ and pick a relatively compact set $S \subset V_R^-$ such that $(H_{\lambda_0}^{+n_0})^{-1}(A) \subset S$. Pick any $\lambda$. Since $\sigma^n(\lambda) \rightarrow \lambda_0$ and the maps $H_{\lambda}^{\pm 1}$ depend continuously on $\lambda$, it follows that $H_{\sigma^n(\lambda)}^{+n_0}(A) \subset S$. By choosing $m, n$ large enough it is possible to ensure that for all $(x, y) \in A$, $(H_{\sigma^{m - n_0}(\lambda)}^{+n_0})^{-1}(x, y)$ and $(H_{\sigma^{n - n_0}(\lambda)}^{+n_0})^{-1}(x, y)$ are as close to each other as desired. By writing \[ \tilde G_{n, \lambda}^-(x, y) = \frac{1}{d^{n_0}} \frac{1}{d^{n - n_0}} \log^+ \Vert H_{\lambda}^{-1} \circ \cdots \circ H_{\sigma^{n - n_0 + 1}(\lambda)}^{-1} \circ (H_{\sigma^{n - n_0}(\lambda)}^{+n_0})^{-1}(x, y) \Vert \] and using (2.25) it follows that $\tilde G_{n, \lambda}^-$ converges uniformly to a pluriharmonic function on $A$. To conclude that this convergence is actually uniform on compact sets of $\mathbb C^2$, it suffices to appeal to the arguments used in Proposition 1.1. \subsection{Proof of Theorem 1.5} Recall that now $\sigma$ is the identity and \begin{equation} H(\lambda, x, y) = (\lambda, H_{\lambda}(x, y)). \end{equation} Thus the second coordinate of the $n$-fold iterate of $H$ is simply the $n$-fold iterate $H_{\lambda} \circ H_{\lambda} \circ \cdots \circ H_{\lambda}(x, y)$. For simplicity, this will be denoted by $H_{\lambda}^n$ as opposed to $H_{\lambda}^{+n}$ since they both represent the same map. Consider the disc $\mathcal{D}= \{x=0,\vert y \vert < R\} \subset \mathbb C^2$ and let $0 \le \psi \le 1$ be a test function with compact support in $\mathcal D$ such that $\psi \equiv 1$ in $\mathcal D_r = \{x = 0, \vert y \vert < r\}$ where $r < R$.
Let $\imath :\mathcal{D}\rightarrow V_R$ be the inclusion map. Let $L$ be a smooth subharmonic function of $\vert y \vert$ on the $y$-plane such that $L(y)=\log \vert y \vert$ for $\vert y \vert > R$ and define $\Theta= (1/2\pi) dd^c L$. If $\pi_y$ is the projection from $\mathbb C^2$ onto the $y$-axis, let \[ \alpha_{n,\lambda}= (\pi_y \circ H_{\lambda}^n \circ \imath)^{\ast} \Theta \big|_{\mathcal D_r}. \] By using Theorem 1.3 and Proposition 1.4 along with Lemma 4.1 in \cite{BS3} it follows that if $j_n$ is a sequence such that $1 \le j_n < n$ and both $j_n, n - j_n \rightarrow \infty$ then \[ \lim_{n \rightarrow \infty} d^{-n} (H_{\lambda}^{j_n})_{\ast} \alpha_{n, \lambda} = c_{\lambda} \mu_{\lambda} \] where $c_{\lambda} = \int \psi [\mathcal D] \wedge \mu_{\lambda}^+$. Note that $c_{\lambda} = 1$ for all $\lambda \in M$ since $\mu_{\lambda}^+ = (1/2\pi) dd^c G_{\lambda}^+$ and $G_{\lambda}^+ = \log \vert y \vert$ plus a harmonic term in $V_R^+$. As a consequence, if $\sigma_{n, \lambda} = d^{-n} \alpha_{n, \lambda}$ and \begin{equation*} \mu_{n,\lambda}=\frac{1}{n}\sum_{j=0}^{n-1} (H_\lambda^j)_{\ast}(\sigma_{n,\lambda}), \label{21} \end{equation*} then Lemma 4.2 in \cite{BS3} shows that \begin{equation*} \lim_{n\rightarrow \infty}\mu_{n,\lambda}=\mu_\lambda \label{22} \end{equation*} for each $\lambda\in M$. For an arbitrary compactly supported probability measure $\mu'$ on $M$ and for each $n \ge 0$ let $\mu_n$ and $\sigma_n$ be defined by the recipe in (1.2), i.e., for a test function $\phi$, \[ \langle \mu_n, \phi \rangle = \int_M \left ( \int_{\{ \lambda \} \times \mathbb C^2} \phi \; \mu_{n, \lambda} \right) \mu'(\lambda) \;\; \text{and} \;\; \langle \sigma_n, \phi \rangle = \int_M \left ( \int_{\{ \lambda \} \times \mathbb C^2} \phi \; \sigma_{n, \lambda} \right) \mu'(\lambda). \] We claim that \[ \lim_{n\rightarrow \infty} \mu_n=\mu \; \text{and} \; \mu_n=\frac{1}{n}\sum_{j=0}^{n-1}H_*^j\sigma_n.
\] where $H$ is as in (2.26). For the first claim, note that for all test functions $\phi$ \begin{eqnarray} \lim_{n\rightarrow \infty}\langle \mu_n,\phi\rangle &=& \lim_{n\rightarrow \infty}\int_M \langle \mu_{n,\lambda},\phi\rangle \mu'(\lambda) = \int_M \lim_{n\rightarrow \infty}\langle \mu_{n,\lambda},\phi\rangle \mu'(\lambda)\\ \notag &=& \int_M \langle \mu_\lambda,\phi\rangle \mu'(\lambda) = \langle\mu,\phi\rangle \label{23} \end{eqnarray} where the second equality follows by the dominated convergence theorem. For the second claim, note that \[ \left \langle \frac{1}{n}\sum_{j=0}^{n-1}H^j_*\sigma_n,\phi \right \rangle = \int_M \left \langle \frac{1}{n}\sum_{j=0}^{n-1}{H_\lambda^j}_*(\sigma_{n,\lambda}),\phi \right \rangle \mu'(\lambda) = \int_M \langle\mu_{n,\lambda},\phi \rangle \mu'(\lambda) = \langle \mu_n,\phi\rangle. \] Hence by (2.27), we get \[ \lim_{n\rightarrow\infty}\frac{1}{n}\sum_{j=0}^{n-1}H^j_*\sigma_n=\mu. \] Note that the support of $\mu$ is contained in $\text{supp}(\mu') \times V_R$. Let $\mathcal{P}$ be a partition of $M\times V_R$ so that the $\mu$-measure of the boundary of each element of $\mathcal{P}$ is zero and each of its elements has diameter less than $\epsilon$. This choice is possible by Lemma 8.5 in \cite{W}. For each $n\geq 0$, define the $d_n$ metric on $M\times V_R$ by $$ d_n(p,q)=\max_{0\leq i \leq {n-1}}d(H^i(p),H^i(q)) $$ where $d$ is the product metric on $M\times V_R$.
Note that each element $\mathcal{B}$ of $\bigvee_{j=0}^{n-1}H^{-j}\mathcal{P}$ is inside an $\epsilon$-ball in the $d_n$ metric and if $\mathcal B_{\lambda} = (\mathcal B \times \{ \lambda \}) \cap V_R$, then the $\sigma_n$ measure of $\mathcal B$ is given by \begin{equation*} \sigma_n(\mathcal{B}) = \int_M {\sigma_{n,\lambda}(\mathcal{B}_\lambda)}\mu'(\lambda) = \int_M \left ( d^{-n}\int_{{\mathcal{B}_\lambda}\cap \mathcal{D}} {H_\lambda^{n}}^{\ast} \Theta \right) \mu'(\lambda) = \int_M \left ( d^{-n}\int_{H_\lambda^n({\mathcal{B}_\lambda}\cap \mathcal{D})} \Theta \right ) \mu'(\lambda). \end{equation*} Therefore, since $\Theta$ is bounded above on $\mathbb{C}^2$, there exists $C>0$ such that \begin{equation} \sigma_n(\mathcal{B})\leq C \; d^{-n}\int_M \text{Area}( H_\lambda^n(\mathcal{B}_\lambda\cap \mathcal{D})) \mu'(\lambda) = C \; d^{-n} \text{Area} \left( H^n ( \mathcal{B}\cap (\mathcal{D}\times M)) \right). \end{equation} For a continuous map $f : X \rightarrow X$ on a compact set $X$ endowed with an invariant probability measure $m$, let \begin{eqnarray*} {\mathcal H}_m(\mathcal{A}) &=& -{\sum_{i=1}^k m({A}_i) \log m({A}_i)},\\ h(\mathcal A, f) &=& \lim_{n \rightarrow \infty} \frac{1}{n} \mathcal H_m \left( \bigvee_{j=0}^{n-1} f^{-j} \mathcal A \right) \end{eqnarray*} for a partition $\mathcal{A}=\{ {A}_1, A_2, \ldots, {A}_k\}$ of $X$. By definition, the measure theoretic entropy of $f$ with respect to $m$ is $h_m(f) = \sup_{\mathcal A} h(\mathcal A, f)$. We will work with $X = \text{supp}(\mu) \subset M \times V_R$ and view $H$ as a self map of $X$. If $v^0(H,n,\epsilon)$ denotes the supremum of the areas of $H^n(\mathcal{B}\cap (\mathcal{D}\times M))$ over all $\epsilon$-balls $\mathcal{B}$, then \begin{equation*} \mathcal H_{\sigma_n}\left( \bigvee_{j=0}^{n-1} H^{-j}\mathcal{P} \right) \geq -{\log C}+n \log d -\log v^0(H,n,\epsilon) \end{equation*} by (2.28).
By appealing to Misiurewicz's variational principle as explained in \cite{BS3} we get a lower bound for the measure theoretic entropy $h_\mu$ of $H$ with respect to the measure $\mu$ as follows: \begin{equation*} h_\mu \geq \limsup_{n\rightarrow \infty} \frac{1}{n}(-{\log C}+n \log d -\log v^0(H,n,\epsilon)) \geq \log d -\limsup_{n\rightarrow \infty} \frac{1}{n}\log v^0(H,n,\epsilon). \end{equation*} By Yomdin's result (\cite{Y}), it follows that $\lim_{\epsilon\rightarrow 0} \limsup_{n\rightarrow \infty} \frac{1}{n}\log v^0(H,n,\epsilon)=0$. Thus $h_\mu\geq \log d$. To conclude, note that $\text{supp}(\mu) \subset \mathcal J \subset M\times V_R$ and therefore by the variational principle the topological entropy of $H$ on $\mathcal J$ is also at least $\log d$. \section{Fibered families of holomorphic endomorphisms of $\mathbb P^k$} \subsection{Proof of Proposition 1.6} By (1.4) there exists a $C>1$ such that \[ C^{-1} \Vert F_{\sigma^{n-1}(\lambda)} \circ \ldots \circ F_\lambda(x) \Vert^d \leq \Vert F_{\sigma^{n}(\lambda)} \circ \ldots \circ F_\lambda(x)\Vert \leq C \Vert F_{\sigma^{n-1}(\lambda)}\circ \ldots \circ F_\lambda(x)\Vert^d \] for all $\lambda\in M$, $x\in \mathbb{C}^{k+1}$ and for all $n\geq 1$. As a result, \begin{equation} \vert G_{n+1,\lambda}(x)-G_{n,\lambda}(x) \vert \leq \log C/d^{n+1}. \label{24} \end{equation} Hence for each $\lambda\in M$, as $n\rightarrow\infty$, $G_{n,\lambda}$ converges uniformly to a continuous plurisubharmonic function $G_\lambda$ on $\mathbb{C}^{k+1}$. If $G_n(\lambda, x) = G_{n, \lambda}(x)$, then (3.1) shows that $G_n \rightarrow G$ uniformly on $M \times (\mathbb C^{k+1} \setminus \{0\})$.
Furthermore, for $\lambda\in M$ and $c\in \mathbb{C}^*$ \begin{eqnarray} G_\lambda(cx)&=&\lim_{n\rightarrow \infty}\frac{1}{d^n}\log \Vert F_{\sigma^{n-1}(\lambda)}\circ \ldots \circ F_\lambda(cx)\Vert \nonumber\\ &=&\lim_{n\rightarrow\infty}\left( \frac{1}{d^n}\log {|c|}^{d^n}+\frac{1}{d^n}\log \Vert F_{\sigma^{n-1}(\lambda)}\circ \ldots \circ F_\lambda(x) \Vert \right) = \log \vert c \vert + G_\lambda(x). \end{eqnarray} We also note that \[ G_{\sigma(\lambda)}\circ F_\lambda(x) = d \lim_{n\rightarrow \infty }\frac{1}{d^{n+1}}\log \Vert F_{\sigma^{n}(\lambda)}\circ \ldots \circ F_\lambda(x) \Vert = d G_\lambda(x) \] for each $\lambda\in M$. Finally, pick $x_0 \in \mathcal A_{\lambda_0}$ which by definition means that $\Vert F_{\sigma^{n-1}(\lambda_0)} \circ \ldots \circ F_{\sigma(\lambda_0)} \circ F_{\lambda_0}(x_0) \Vert \le \epsilon$ for all large $n$. Therefore $G_{n, \lambda_0}(x_0) \le d^{-n} \log \epsilon$ and hence $G_{\lambda_0}(x_0) \le 0$. Suppose that $G_{\lambda_0}(x_0) = 0$. To obtain a contradiction, note that there exists a uniform $r > 0$ such that \[ \Vert F_{\lambda}(x) \Vert \le (1/2) \Vert x \Vert \] for all $\lambda \in M$ and $\Vert x \Vert \le r$. This shows that the ball $B_r$ around the origin is contained in all the basins $\mathcal A_{\lambda}$. Now $G_{\lambda}(0) = -\infty$ for all $\lambda \in M$ and since $G_{\lambda_n} \rightarrow G_{\lambda}$ locally uniformly on $\mathbb C^{k+1} \setminus \{0\}$ as $\lambda_n\rightarrow \lambda$ in $M$, it follows that there exists a large $C > 0$ such that \[ \sup_{(\lambda, x) \in M \times \partial B_r} G_{\lambda}(x) \le - C. \] By the maximum principle it follows that for all $\lambda \in M$ \begin{equation} G_{\lambda}(x) \le -C \end{equation} on $B_r$. 
On the other hand, the invariance property $G_{\sigma(\lambda)} \circ F_{\lambda} = d G_{\lambda}$ implies that \[ d^n G_{\lambda} = G_{\sigma^n(\lambda)} \circ F_{\sigma^{n-1}(\lambda)} \circ \ldots \circ F_{\lambda} \] for all $n \ge 1$. Since we are assuming that $G_{\lambda_0}(x_0) = 0$ it follows that \[ G_{\sigma^n(\lambda_0)} \circ F_{\sigma^{n-1}(\lambda_0)} \circ \ldots \circ F_{\lambda_0}(x_0) = 0 \] for all $n \ge 1$ as well. But $F_{\sigma^{n-1}(\lambda_0)} \circ \ldots \circ F_{\sigma(\lambda_0)} \circ F_{\lambda_0}(x_0)$ is eventually contained in $B_r$ for large $n$ and this means that \[ 0 = G_{\sigma^n(\lambda_0)} \circ F_{\sigma^{n-1}(\lambda_0)} \circ \ldots \circ F_{\lambda_0}(x_0) \le -C \] by (3.3). This is a contradiction. Thus $\mathcal A_{\lambda} \subset \{G_{\lambda} < 0\}$ for all $\lambda \in M$. For the other inclusion, let $x \in \mathbb{C}^{k+1}$ be such that $G_\lambda(x)=-a$ for some $a>0$. This implies that for a given $\epsilon>0$ there exists $j_0$ such that \[ -(a+\epsilon)< \frac{1}{d^j}\log \Vert F_{\sigma^{j-1}(\lambda)}\circ \ldots \circ F_\lambda(x)\Vert < -a+\epsilon \] for all $j\geq j_0$. This shows that $ F_{\sigma^{j-1}(\lambda)}\circ \ldots \circ F_\lambda(x) \rightarrow 0$ as $j\rightarrow \infty$. Hence $x\in \mathcal{A}_\lambda$. \subsection{Proof of Proposition 1.7}: Recall that $\Omega_{\lambda} = \pi(\mathcal H_{\lambda})$ where $\mathcal H_{\lambda} \subset \mathbb C^{k+1}$ is the collection of those points in a neighborhood of which $G_{\lambda}$ is pluriharmonic and $\Omega'_{\lambda} \subset \mathbb P^k$ consists of those points $z \in \mathbb P^k$ in a neighborhood of which the sequence \[ \{ f_{\sigma^{n-1}(\lambda)} \circ \ldots \circ f_{\sigma(\lambda)} \circ f_{\lambda} \}_{n \ge 1} \] is normal, i.e., $\Omega'_{\lambda}$ is the Fatou set. 
Once it is known that the basin $\mathcal A_{\lambda} = \{ G_{\lambda} < 0 \}$, showing that $\Omega_{\lambda} = \Omega'_{\lambda}$ and that each $\Omega_{\lambda}$ is in fact pseudoconvex and Kobayashi hyperbolic follows in much the same way as in \cite{U}. Here are the main points in the proof: \noindent {\it Step 1:} For each $\lambda \in M$, a point $p \in \Omega_{\lambda}$ if and only if there exists a neighborhood $U_{\lambda, p}$ of $p$ and a holomorphic section $s_{\lambda} : U_{\lambda, p} \rightarrow \mathbb C^{k+1}$ such that $s_{\lambda}(U_{\lambda, p}) \subset \partial \mathcal A_{\lambda}$. The choice of such a section $s_{\lambda}$ is unique up to a constant with modulus $1$. Suppose that $p\in \Omega_\lambda$. Let $U_{\lambda,p}$ be an open ball with center at $p$ that lies in a single coordinate chart with respect to the standard coordinate system of $\mathbb{P}^k$. Then $\pi^{-1}(U_{\lambda,p})$ can be identified with $\mathbb{C}^{\ast} \times U_{\lambda,p}$ in a canonical way and each point of $\pi^{-1}(U_{\lambda,p})$ can be written as $(c,z)$. On $\pi^{-1}(U_{\lambda,p})$, the function $G_{\lambda}$ has the form \begin{equation} G_\lambda(c,z)=\log|c|+\gamma_\lambda(z) \end{equation} by (3.2). Assume that there is a section $s_\lambda$ such that $s_\lambda(U_{\lambda,p})\subset \partial \mathcal{A}_\lambda$. Note that $s_\lambda(z)=(\sigma_\lambda(z),z)$ in $U_{\lambda,p}$ where $\sigma_\lambda$ is a non-vanishing holomorphic function on $U_{\lambda,p}$. By Proposition 1.6, $G_\lambda\circ s_\lambda=0$ on $U_{\lambda,p}$. Thus \[ 0=G_\lambda\circ s_\lambda(z)=\log|\sigma_\lambda(z)|+\gamma_\lambda(z). \] Thus $\gamma_\lambda(z)=-\log \vert \sigma_\lambda(z)\vert$ is pluriharmonic on $U_{\lambda,p}$ and consequently $G_\lambda$ is pluriharmonic on $\pi^{-1}(U_{\lambda,p})$ by (3.4). 
On the other hand suppose that $\gamma_\lambda$ is pluriharmonic. Then there exists a conjugate function $\gamma_\lambda^{\ast}$ on $U_{\lambda,p}$ such that $\gamma_\lambda+i\gamma_\lambda^{\ast}$ is holomorphic. Define $\sigma_\lambda(z)=\exp (-\gamma_\lambda(z)-i\gamma_\lambda^{\ast}(z))$ and $s_\lambda(z)=(\sigma_\lambda(z),z)$. Then $G_\lambda(s_\lambda(z))=\log |\sigma_\lambda(z)|+\gamma_\lambda(z)=0$ which shows that $s_\lambda(U_{\lambda,p})\subset \partial \mathcal{A}_\lambda$. \noindent {\it Step 2:} $\Omega_{\lambda} = \Omega'_{\lambda}$ for each $\lambda \in M$. Let $p\in \Omega_\lambda'$ and suppose that $U_{\lambda,p}$ is a neighborhood of $p$ on which there is a subsequence of \[ \{f_{\sigma^{j-1}(\lambda)}\circ \ldots \circ f_\lambda\}_{j\geq 1} \] which is uniformly convergent. Without loss of generality we may assume that \[ g_\lambda = \lim_{j\rightarrow\infty} f_{\sigma^{j-1}(\lambda)}\circ \ldots \circ f_\lambda \] on $U_{\lambda, p}$. By rotating the homogeneous coordinates $[x_0:x_1: \ldots : x_k]$ on $\mathbb{P}^k$, we may assume that $g_\lambda(p)$ avoids the hyperplane at infinity $H = \big\{x_0=0\big\}$ and that $g_\lambda(p)$ is of the form $[1:g_1: \ldots : g_k]$. Now choose an $\epsilon$ neighborhood \[ N_\epsilon=\big\{\vert x_0 \vert < \epsilon {\big({\vert x_0 \vert}^2+ \ldots +{\vert x_k \vert}^2\big)}^{1/2} \big\} \] of $\pi^{-1}(H)$ in $\mathbb{C}^{k+1}\setminus \big\{0\big\}$ so that \[ 1>\epsilon {\big(1+{\vert g_1 \vert}^2+ \ldots +{ \vert g_k \vert}^2\big)}^{1/2}. \] Clearly $g_\lambda(p)\notin \pi(N_\epsilon)$. Shrink $U_{\lambda,p}$ if needed so that \[ f_{\sigma^{j - 1}(\lambda)}\circ \ldots \circ f_\lambda (U_{\lambda,p}) \] is uniformly separated from $\pi(N_\epsilon)$ for sufficiently large $j$. 
Define \[ s_\lambda(z)= \begin{cases} \log \Vert z \Vert & ;\text{ if } z\in N_\epsilon, \\ \log(\vert z_0 \vert / \epsilon ) & ;\text{ if } z\in \mathbb{C}^{k+1}\setminus (N_\epsilon \cup \{0\}). \end{cases} \] \noindent Note that $0\leq s_\lambda(z)-\log \Vert z \Vert \leq \log(1/\epsilon)$ which implies that \[ d^{-{j}}s_\lambda (F_{\sigma^{j-1}(\lambda)}\circ \ldots \circ F_\lambda(z)) \] converges uniformly to the Green function $G_\lambda$ as $j\rightarrow \infty$ on $\mathbb{C}^{k+1}$. Further if $z\in \pi^{-1}(U_{\lambda,p})$, then \[ F_{\sigma^{j-1}(\lambda)}\circ \ldots \circ F_\lambda (z)\in \mathbb{C}^{k+1}\setminus (N_\epsilon \cup \{0\}). \] This shows that $d^{-{j}}s_\lambda(F_{\sigma^{j-1}(\lambda)}\circ \ldots \circ F_\lambda(z))$ is pluriharmonic in $\pi^{-1}(U_{\lambda,p})$ and as a consequence the limit function $G_\lambda$ is also pluriharmonic in $ \pi^{-1}(U_{\lambda,p})$. Thus $p\in \Omega_\lambda$. Now pick a point $p\in \Omega_\lambda$. Choose a neighborhood $U_{\lambda,p}$ of $p$ and a section $s_\lambda: U_{\lambda,p}\rightarrow \mathbb{C}^{k+1}$ as in Step 1. Since $F_{\lambda} : \mathcal A_{\lambda} \rightarrow \mathcal A_{\sigma(\lambda)}$ is a proper map for each $\lambda$, it follows that \[ (F_{\sigma^{j-1}(\lambda)}\circ \ldots \circ F_{\sigma(\lambda)}\circ F_\lambda)(s_\lambda(U_{\lambda, p}))\subset \partial \mathcal{A}_{\sigma^j(\lambda)}. \] It was noted earlier that there exists an $R > 0$ such that $\Vert F_{\lambda}(x) \Vert \ge 2 \Vert x \Vert$ for all $\lambda$ and $\Vert x \Vert \ge R$. This shows that $\mathcal{A}_\lambda\subset {B}_R$ for all $\lambda\in M$, which in turn implies that the sequence \[ \big\{(F_{\sigma^{j-1}(\lambda)}\circ \ldots \circ F_{\sigma(\lambda)}\circ F_\lambda)\circ s_\lambda\big\}_{j\geq 0} \] is uniformly bounded on $U_{\lambda, p}$. 
We may assume that it converges and let $g_\lambda:U_{\lambda,p} \rightarrow \mathbb{C}^{k+1}$ be its limit function. Then $g_\lambda(U_{\lambda,p})\subset \mathbb{C}^{k+1}\setminus \{0\}$ since all the boundaries $\partial \mathcal A_{\lambda}$ are at a uniform distance away from the origin; indeed, recall that there exists a uniform $r > 0$ such that the ball ${B}_r \subset \mathcal{A}_\lambda$ for all $\lambda\in M$. Thus $\pi \circ g_\lambda$ is well defined and the sequence $\big\{f_{\sigma^{j-1}(\lambda)}\circ \ldots \circ f_{\sigma(\lambda)}\circ f_\lambda\big\}_{j\geq 0}$ converges to $\pi\circ g_\lambda$ uniformly on compact sets. Thus $\big\{f_{\sigma^{j-1}(\lambda)}\circ \ldots \circ f_{\sigma(\lambda)}\circ f_\lambda \big\}_{j\geq 0}$ is a normal family in $U_{\lambda,p}$. Hence $p\in \Omega_{\lambda}'$. \noindent {\it Step 3:} Each $\Omega_{\lambda}$ is pseudoconvex and Kobayashi hyperbolic. That $\Omega_{\lambda}$ is pseudoconvex follows exactly as in Lemma 2.4 of \cite{U}. To show that $\Omega_\lambda$ is Kobayashi hyperbolic, it suffices to prove that each component $U$ of $\Omega_\lambda$ is Kobayashi hyperbolic. For a point $p$ in $U$ choose $U_{\lambda,p}$ and $s_\lambda$ as in Step $1$. Then $s_\lambda$ can be analytically continued to $U$. This analytic continuation of $s_\lambda$ gives a holomorphic map $\tilde{s}_{\lambda}: \widetilde{U}\rightarrow \mathbb{C}^{k+1}$ satisfying $\pi\circ \tilde{s}_{\lambda}=p$ where $\widetilde{U}$ is a covering of $U$ and $p: \widetilde{U}\rightarrow U$ is the corresponding covering map. Note that there exists a uniform $R>0$ such that $\lVert F_\lambda(z)\rVert \geq 2 \lVert z \rVert$ for all $\lambda\in M$ and for all $z\in \mathbb{C}^{k+1}$ with $\lVert z \rVert \geq R$. 
Thus $\mathcal{A}_\lambda \subset B(0,R)$ and $\tilde{s}_{\lambda}(\widetilde{U})\subset B(0,2R)$. Since $\tilde{s}_{\lambda}$ is injective and $B(0,2R)$ is Kobayashi hyperbolic in $\mathbb{C}^{k+1}$, it follows that $\widetilde{U}$ is Kobayashi hyperbolic. Hence $U$ is Kobayashi hyperbolic. \end{document}
\begin{document} \arraycolsep=1pt \title{WEIGHTED MULTILINEAR HARDY OPERATORS AND COMMUTATORS} \author{Zun Wei Fu} \address{Department of Mathematics, Linyi University, Linyi Shandong, 276005, P.R. China} \email{[email protected]} \thanks{This work was partially supported by NSF of China (Grant No. 11271175, 10901076, 10931001 and 11101038).} \author{Shu Li Gong} \address{College of Mathematics and Econometrics, Hunan University, Changsha 410082, P.R. China} \email{[email protected]} \author{Shan Zhen Lu} \address{School of Mathematical Sciences, Beijing Normal University, Laboratory of Mathematics and Complex Systems, Ministry of Education, Beijing 100875, P.R. China} \email{[email protected]} \author{Wen Yuan} \address{School of Mathematical Sciences, Beijing Normal University, Laboratory of Mathematics and Complex Systems, Ministry of Education, Beijing 100875, P.R. China} \email{[email protected]} \subjclass[2010]{Primary 47G10; Secondary 47H60, 47A63, 47A30} \date{} \keywords{weighted Hardy operator, multilinear operator, BMO, Morrey space, commutator.} \begin{abstract} In this paper, we introduce a type of weighted multilinear Hardy operators and obtain their sharp bounds on the product of Lebesgue spaces and central Morrey spaces. In addition, we obtain sufficient and necessary conditions of the weight functions so that the commutators of the weighted multilinear Hardy operators (with symbols in central BMO space) are bounded on the product of central Morrey spaces. These results are further used to prove sharp estimates of some inequalities due to Riemann-Liouville and Weyl. \end{abstract} \maketitle \section{Introduction} Let $\omega:\, [0,1]\rightarrow [0,\infty)$ be a measurable function. 
The {\it weighted Hardy operator} $H_{\omega}$ is defined on all complex-valued measurable functions $f$ on $\mathbb R^n$ as follows: $$H_{\omega}f(x):=\int^1_{0}f(tx)\omega(t)\,dt,\hspace{3mm}x\in \mathbb{ R}^n.$$ Under certain conditions on $\omega$, Carton-Lebrun and Fosset \cite{CF} proved that $H_{\omega}$ maps $L^{p}(\mathbb{ R}^n)$ into itself for $1<p<\infty$. They also pointed out that the operator $H_{\omega}$ commutes with the Hilbert transform when $n=1$, and with certain Calder\'{o}n-Zygmund singular integral operators including the Riesz transform when $n\geq2$. A further extension of the results obtained in \cite{CF} was due to Xiao in \cite{X}. \noindent{\bf Theorem A \cite{X}.\,} Let $1<p<\infty$ and $\omega : [0,1]\rightarrow [0,\infty)$ be a measurable function. Then, $H_{\omega}$ is bounded on $L^{p}(\mathbb{ R}^n)$ if and only if $$\mathbb{A}:=\int^{1}_{0}t^{-n/p}\omega(t)\,dt<\infty. \eqno(1.1)$$ Moreover, $$\|H_{\omega}f\|_{L^{p}(\mathbb{R}^n) \rightarrow L^{p}(\mathbb{R}^n)}=\mathbb{A}.\eqno(1.2)$$ Notice that the condition (1.1) implies that $\omega$ is integrable on [0,1]. The constant $\mathbb{A}$ seems to be of interest as it equals to $\frac{p}{p-1}$ if $\omega\equiv 1$ and $n=1$. In this case, $H_{\omega}$ is reduced to the {\it classical Hardy operator} $H$ defined by $$Hf(x):=\frac{1}{x}\int^x_{0}f(t)\,dt,\, x\neq0,$$ which is the most fundamental averaging operator in analysis. Also, a celebrated integral inequality, due to Hardy \cite{HLP}, can be deduced from Theorem A immediately $$\|Hf\|_{L^{p}(\mathbb{R})}\leq \frac{p}{p-1}\|f\|_{L^{p}(\mathbb{R})}, \eqno(1.3)$$ where $1<p<\infty$ and the constant $\frac{p}{p-1}$ is the best possible. Another interesting application of Theorem A is the sharp estimate of the Riemann-Liouville integral operator on the Lebesgue spaces. To be precise, let $n=1$ and we take $$\omega(t):=\frac{1}{\Gamma(\alpha)(1-t)^{1-\alpha}}, \quad t\in[0,1],$$ where $0<\alpha<1$. 
Then $$H_{\omega}f(x)=x^{-\alpha}I_{\alpha}f(x),\quad x>0,$$ where $I_{\alpha}$ is the {\it Riemann-Liouville integral operator} defined by $$I_{\alpha}f(x):=\frac{1}{\Gamma(\alpha)} \int_{0}^{x}\frac{f(t)}{(x-t)^{1-\alpha}}\,dt, \quad x>0.$$ Note that the operator $I_{\alpha}$ is exactly the one-sided version of the well-known Riesz potential $$\mathcal{I}_{\alpha}f(x):=C_{n, \alpha}\int_{\mathbb{R}^{n}} \frac{f(t)}{|x-t|^{n-\alpha}}\,dt,~~~x\in{\mathbb R}^n.$$ Clearly, Theorem A implies the celebrated result of Hardy, Littlewood and P\'{o}lya in [8, Theorem 329], namely, for all $0<\alpha<1$ and $1<p<\infty$, $$\|I_{\alpha}\|_{L^{p}({\mathbb R})\to L^p(x^{-p\alpha} dx)} =\frac{\Gamma(1-1/p)}{\Gamma(1+\alpha-1/p)}.\eqno(1.4)$$ Now we recall the commutators of weighted Hardy operators introduced in \cite{FLL}. For any locally integrable function $b$ on $\mathbb{R}^n$ and integrable function $\omega :\, [0,1]\rightarrow [0,\infty)$, the {\it commutator of the weighted Hardy operator} $H_{\omega}^{b}$ is defined by $$H_{\omega}^{b}f:=bH_{\omega}f-H_{\omega}(bf).$$ It is easy to see that the commutator $H_{\omega}^{b}$ is bounded on $L^{p}(\mathbb{R}^{n})$ for $1<p<\infty$ when $b\in L^{\infty}({\mathbb{R}}^{n})$ and $\omega$ satisfies the condition (1.1). An interesting choice of $b$ is that it belongs to the class of $\mathrm{BMO}({\mathbb{R}}^{n})$. Recall that $\mathrm{BMO}({\mathbb{R}}^{n})$ is defined to be the space of all $b\in L_{loc}{({\mathbb{R}}^{n})}$ such that $$\|b\|_{BMO}:=\sup_{Q\subset\mathbb{R}^{n}}\frac{1}{|Q|}\int_{Q}|b(x)-b_{Q}| \,dx< \infty,$$ where $b_{Q}:=\frac{1}{|Q|}\int_{Q}b$ and the supremum is taken over all cubes $Q$ in ${\mathbb{R}^n}$ with sides parallel to the axes. It is well known that $L^{\infty}({\mathbb{R}}^{n})\varsubsetneq \mathrm{BMO}({\mathbb{R}}^{n})$ since $\mathrm{BMO}({\mathbb{R}}^{n})$ contains unbounded functions such as $\log|x|$. 
When symbols $b\in\mathrm{BMO}({\mathbb{R}}^{n})$, the condition (1.1) on weight functions $\omega$ does not ensure the boundedness of $H_{\omega}^{b}$ on $L^{p}(\mathbb{R}^{n})$. Via controlling $H_{\omega}^{b}$ by the Hardy-Littlewood maximal operators instead of sharp maximal functions, Fu, Liu and Lu \cite{FLL} established sufficient and necessary conditions on weight functions $\omega$ which ensure that $H_{\omega}^{b}$ is bounded on $L^{p}(\mathbb{R}^{n})$ when $1<p<\infty$. Precisely, they obtain the following conclusion. \noindent{\bf Theorem B.\,} Let \[ \mathbb{C}:=\int^{1}_{0}t^{-n/p}\omega(t)\log\frac{2}{t}\,dt \] and $1<p<\infty$. Then the following statements are equivalent:\\ $\rm(i)$\quad $\omega$ is integrable and $H^{b}_\omega$ is bounded on $L^{p}(\mathbb{R}^{n})$ for all $b\in \mathrm{BMO}(\mathbb{R}^{n})$;\\ $\rm(ii)$\quad $\mathbb{C}<\infty. $ We remark that the condition (1.1), i.\,e., $\mathbb{A}<\infty$, is weaker than $\mathbb{C}<\infty$ in Theorem B. In fact, if we let \[ \mathbb{B}:=\int^{1}_{0}t^{-n/p}\omega(t)\log\frac{1}{t}\,dt, \] then $\mathbb{C}=\mathbb{A}\log2+\mathbb{B}$. Hence $\mathbb{C}<\infty$ implies $\mathbb{A}<\infty$. However, $\mathbb{A}<\infty$ cannot imply $\mathbb{C}<\infty$. To see this, for $0<\alpha<1$, let \[ e^{s(n/p-1)}\tilde{\omega}(s)=\left\{ \begin{array}{ll} s^{-1+\alpha},&\quad 0<s\leq 1,\\ s^{-1-\alpha},&\quad 1<s<\infty,\\ 0,&\quad s=0, \infty \end{array}\right.\eqno(1.5) \] and $\omega(t):=\tilde{\omega}(\log\frac{1}{t})$, $0\leq t\leq1$. Then it is not difficult to verify $\mathbb{A}<\infty$ and $\mathbb{C}=\infty$. Later on in \cite{FZW}, the conclusions in Theorems A and B were further generalized to the central Morrey spaces $\dot{B}^{p,\lambda}({{\rr}^n})$ and the central BMO space $C\dot{M}O^q({{\rr}^n})$. 
Here the space $C\dot{M}O^q({{\rr}^n})$ was first introduced by Lu and Yang in \cite{LY2}, and the space $\dot{B}^{p,\lambda}({{\rr}^n})$ is a generalization of $C\dot{M}O^q({{\rr}^n})$ introduced by Alvarez, Guzman-Partida and Lakey in \cite{AGL}; see also \cite{bg}. \begin{definition} Let $\lambda\in \mathbb{R}$ and $1<p<\infty$. The \emph{central Morrey space} $\dot{B}^{p,\,\lambda}(\mathbb{R}^{n})$ is defined to be the space of all locally $p$-integrable functions $f$ satisfying that $$\|f\|_{\dot{B}^{p,\,\lambda}}=\sup_{R>0}\biggl(\frac{1}{|B(0, R)|^{1+\lambda p}} \int_{B(0, R)}|f(x)|^{p}dx\biggr)^{1/p}<\infty.$$ \end{definition} Obviously, $\dot{B}^{p,\,\lambda}({{\rr}^n})$ is a Banach space. One can easily check that $\dot{B}^{p,\lambda}(\mathbb{R}^n)=\{0\}$ if $\lambda<-1/p$, $\dot{B}^{p,0}(\mathbb{R}^n)=\dot{B}^{p}(\mathbb{R}^n)$, $\dot{B}^{q,-1/q}(\mathbb{R}^n)=L^{q}(\mathbb{R}^n)$, and $\dot{B}^{p,\lambda}(\mathbb{R}^n)\supsetneq L^{p}(\mathbb{R}^n)$ if $\lambda>-1/p$, where the space $\dot{B}^{p}(\mathbb{R}^n)$ was introduced by Beurling in \cite{B}. Similar to the classical Morrey space, we only consider the case $-1/p<\lambda\leq0$ in this paper. In the past few years, there is an increasing interest on the study of Morrey-type spaces and their various generalizations and the related theory of operators; see, for example, \cite{AGL,GAK,FZW,MN,KMNY}. \begin{definition} Let $1<q<\infty$. A function $f\in L_{\mathrm{loc}}^{q}(\mathbb{R}^{n})$ is said to belong to the \emph{central bounded mean oscillation space} $C\dot{M}O^{q}(\mathbb{R}^{n})$ if $$\|f\|_{C\dot{M}O^{q}}=\sup_{R>0}\biggl(\frac{1}{|B(0, R)|} \int_{B(0, R)}|f(x)-f_{B(0,\, R)}|^{q}dx\biggr)^{1/q}<\infty.\eqno(1.6)$$ \end{definition} The space $C\dot{M}O^{q}({{\rr}^n})$ is a Banach space in the sense that two functions that differ by a constant are regarded as a function in this space. 
Moreover, {\rm(1.6)} is equivalent to the following condition $$\sup_{R>0}\inf_{c\in\mathbb{C}}\biggl(\dfrac{1}{|B(0, R)|} \int_{B(0, R)}|f(x)-c|^{q}dx\biggr)^{1/q}<\infty.$$ For more detailed properties of these two spaces, we refer to \cite{FZW}. For $1<p<\infty$ and $-1/p< \lambda\le0$, it was proved in \cite[Theorem 2.1]{FZW} that the norm $$\|H_w\|_{\dot{B}^{p,\,\lambda}({{\rr}^n})\to \dot{B}^{p,\,\lambda}({{\rr}^n})} =\int_0^1 t^{n\lambda} w(t)\,dt.$$ Moreover, if $1<p_1<p<\infty$, $1/p_1=1/p+1/q$ and $-1/p<\lambda<0$, then it was proved in \cite[Theorem 3.1]{FZW} that $H^b_w$ is bounded from $\dot{B}^{p,\,\lambda}({{\rr}^n})$ to $\dot{B}^{p_1,\,\lambda}({{\rr}^n})$ if and only if $$\int_0^1 t^{n\lambda} w(t) \log\frac2{t}\,dt<\infty,$$ where the symbol $b\in C\dot{M}O^{q}({{\rr}^n})$. In this paper, we consider the multilinear version of the above results. Recall that the weighted multilinear Hardy operator is defined as follows. \begin{definition} Let $m\in\mathbb{N}$, and $$\omega:\, \overbrace{{[0,1]\times[0,1]\times\cdots\times[0,1]}}^{m\,\text{times}} \rightarrow [0,\infty)$$ be an integrable function. The {\it weighted multilinear Hardy operator $\mathcal{H}_{\omega}^m$} is defined as \[ \mathcal{H}_{\omega}^m(\vec{f})(x):= \int\limits_{0<t_{1},t_{2},\ldots,t_{m}<1} \left(\prod_{i=1}^{m}f_{i}(t_{i}x)\right)\omega(\vec{t})\,d\vec t,\quad x\in \mathbb{R}^n, \] where $\vec f:=(f_1,\ldots, f_m)$, $\omega(\vec{t}):=\omega(t_{1},t_{2},\ldots,t_{m})$, $d\vec t:= dt_1\,\cdots\,dt_m$, and $f_{i}~(i=1,\ldots,m)$ are complex-valued measurable functions on $\mathbb{R}^n$. When $m=2$, $\mathcal{H}_{\omega}^m$ is referred to as bilinear. \end{definition} The study of multilinear averaging operators is traced to the multilinear singular integral operator theory (see, for example, \cite{CM}), and motivated not only the generalization of the theory of linear ones but also their natural appearance in analysis. 
For a more complete account on multilinear operators, we refer to \cite{FL}, \cite{GL}, \cite{L} and the references therein. The main aim of the paper is to establish the sharp bounds of weighted multilinear Hardy operators on the product of Lebesgue spaces and central Morrey spaces. In addition, we find sufficient and necessary conditions of the weight functions so that commutators of such weighted multilinear Hardy operators (with symbols in $\lambda$-central BMO space) are bounded on the product of central Morrey spaces. The paper is organized as follows: Section 2 is devoted to the sharp estimates of $\mathcal{H}_{\omega}^m$ on the products of Lebesgue spaces and also central Morrey spaces. In Section 3, we present the sharp estimates of the commutator generated by $\mathcal{H}_{\omega}^m$ with symbols in $C\dot{M}O^q({{\rr}^n})$. Section 4 focuses on weighted Ces\`{a}ro operators of multilinear type related to weighted multilinear Hardy operators. \section{Sharp boundedness of $\mathcal{H}_{\omega}^m$ on the product of central Morrey spaces} We begin with the following sharp boundedness of $\mathcal{H}_{\omega}^m$ on the product of Lebesgue spaces, which when $m=1$ goes back to Theorem A. \begin{theorem}\label{t1} Let $1<p, p_i<\infty$, $i=1,\ldots, m$ and $1/p=1/p_1+\cdots+1/p_m$. Then, $\mathcal{H}_{\omega}^m$ is bounded from $L^{p_1}({{\rr}^n})\times \dots \times L^{p_m}({{\rr}^n})$ to $ L^p({{\rr}^n})$ if and only if \begin{eqnarray}\label{A} \mathbb{A}_m:=\int\limits_{0<t_{1},t_{2},...,t_{m}<1} \left(\prod_{i=1}^{m}t_{i}^{-n/p_{i}}\right)\omega(\vec{t})\,d\vec{t}<\infty. \end{eqnarray} Moreover, $$\|\mathcal{H}_{\omega}^m\|_{L^{p_1}({{\rr}^n})\times \dots \times L^{p_m}({{\rr}^n}) \rightarrow L^{p}({{\rr}^n})}=\mathbb{A}_m.$$ \end{theorem} \begin{proof}[Proof] In order to simplify the proof, we only consider the case that $m=2$. Actually, a similar procedure works for all $m\in \mathbb{N}$. Suppose that (\ref{A}) holds. 
Using Minkowski's inequality yields $$\begin{array}{rl} \displaystyle \|\mathcal{H}_{\omega}^2(f_{1}, f_{2}) \|_{L^p(\mathbb{R}^{n})} &=\displaystyle\left( \int_{\mathbb{R}^n}\left|\int\limits_{0<t_{1},t_{2}<1}f_{1} (t_{1}x)f_{2}(t_{2}x)\omega(t_{1},t_{2})\,dt_{1}dt_{2} \right|^{p}dx\right)^{1/p}\\ &\leq\displaystyle \int\limits_{0<t_{1},t_{2}<1}\left(\int_{\mathbb{R}^n}\left|f_{1}(t_{1}x)f_{2}(t_{2}x)\right|^{p}dx\right)^{1/p}\omega(t_{1},t_{2})\,dt_{1}dt_{2}. \end{array}$$ By H\"{o}lder's inequality with $1/p=1/p_{1}+1/p_{2}$, we see that $$\begin{array}{rl} \displaystyle \|\mathcal{H}_{\omega}^2(f_{1}, f_{2})\|_{L^p(\mathbb{R}^{n})}&\leq\displaystyle \int\limits_{0<t_{1},t_{2}<1}\prod_{i=1}^{2}\left(\int_{\mathbb{R}^n}\left|f_{i}(t_{i}x)\right|^{p_{i}}dx\right)^{1/p_{i}}\omega(t_{1},t_{2})\,dt_{1}dt_{2}\\ &\leq\displaystyle \left(\prod_{i=1}^{2}\|f_{i}\|_{L^{p_i}(\mathbb{R}^{n})}\right)\int\limits_{0<t_{1},t_{2}<1}\left(\prod_{i=1}^{2}t_{i}^{-n/p_{i}}\right)\omega(t_{1},t_{2})\,dt_{1}dt_{2}.\end{array}$$ Thus, $\mathcal{H}_{\omega}^2$ maps the product of Lebesgue spaces $L^{p_1}(\mathbb{R}^{n})\times L^{p_2}(\mathbb{R}^{n})$ to $ L^p(\mathbb{R}^{n})$ and \begin{eqnarray}\label{2.1} \|\mathcal{H}_{\omega}^2\|_{L^{p_1}(\mathbb{R}^{n})\times L^{p_2}(\mathbb{R}^{n})\rightarrow L^p(\mathbb{R}^{n})}\leq\mathbb{A}_2. \end{eqnarray} To see the necessity, for sufficiently small $\varepsilon\in (0, 1)$, we set \begin{eqnarray}\label{2.2} f^{\varepsilon}_{1}(x):= \begin{cases} 0,&\quad |x|\leq\frac{\sqrt{2}}{2},\\ \displaystyle|x|^{-\frac{n}{p_1}-\frac{p_2\varepsilon}{p_1}},&\quad |x|>\frac{\sqrt{2}}{2},\end{cases} \end{eqnarray} and \begin{eqnarray}\label{2.3} f^{\varepsilon}_{2}(x):= \begin{cases} 0,&\quad |x|\leq\frac{\sqrt{2}}{2},\\ \displaystyle|x|^{-\frac{n}{p_2}-\varepsilon},&\quad |x|>\frac{\sqrt{2}}{2}. 
\end{cases} \end{eqnarray} An elementary calculation gives that $$ \|f_1^\varepsilon\|_{L^{p_1}(\mathbb{R}^{n})}^{p_1} =\|f_2^\varepsilon\|_{L^{p_2}(\mathbb{R}^{n})} ^{p_2}=\frac{\omega_n}{p_2\varepsilon} \Big(\frac{\sqrt{2}}{2}\, \Big)^{-p_2\varepsilon}, $$ where $\omega_n=\frac{n\pi^{n/2}}{\Gamma(1+n/2)}$ is the volume of the unit sphere. Consequently, we have \begin{eqnarray*} &&\|\mathcal{H}_{\omega}^2(f_{1}^\varepsilon, f_{2}^\varepsilon)\|_{L^p(\mathbb{R}^{n})}\\ &&\hspace{0.2cm} =\left\{\int_{\mathbb{R}^n} |x|^ {-n-p_2\varepsilon} \left[\int\limits_{E_{x}(t_{1}, t_{2})}t_{1}^{-\frac{n}{p_1} -\frac{p_2\varepsilon}{p_1}} t_{2}^{-\frac{n}{p_2}-\varepsilon}\omega(t_{1},t_{2}) \,dt_{1}dt_{2}\right]^p\,dx\right\}^{1/p}, \end{eqnarray*} where \[ E_{x}(t_{1}, t_{2}):=\left\{(t_{1}, t_{2})|\, 0<t_{1},t_{2}<1;\, t_1>\frac{\sqrt {2}}{2|x|};\, t_2>\frac{\sqrt {2}}{2|x|}\right\}. \] Hence, $$\begin{array}{rl}\displaystyle&\|\mathcal{H}_{\omega}^2 (f^{\varepsilon}_{1}, f^{\varepsilon}_{2})(x)\|^{p}_{L^p(\mathbb{R}^{n})}\\ &\hspace{0.2cm}\displaystyle\geq\int_{|x|>1/\varepsilon}|x|^{-n-p_2\varepsilon} \left(\int\limits_{E_{\frac{1}{\varepsilon}}(t_{1}, t_{2})} t_{1}^{-\frac{n}{p_1}-\frac{p_2\varepsilon}{p_1}} t_{2}^{-\frac{n}{p_2}-\varepsilon}\omega(t_{1},t_{2})dt_{1}dt_{2}\right)^{p}dx \\ \displaystyle &\hspace{0.2cm}=\displaystyle\frac{\varepsilon^{p_2\varepsilon}\omega_{n}}{p_2 \varepsilon} \left(\int\limits_{E_{\frac{1}{\varepsilon}}(t_{1}, t_{2})} t_{1}^{-\frac{n}{p_1}-\frac{p_2\varepsilon}{p_1}} t_{2}^{-\frac{n}{p_2}-\varepsilon}\omega(t_{1},t_{2})dt_{1}dt_{2}\right)^{p} \\ \displaystyle &\hspace{0.2cm}=\displaystyle\left(\frac{\sqrt{2}}{2}\varepsilon \right)^{p_2\varepsilon}\prod_{i=1}^{2}\|f_{i}^\varepsilon \|_{L^{p_i}(\mathbb{R}^{n})}^{p}\displaystyle\left(\int\limits_{E_{\frac{1}{\varepsilon}}(t_{1}, t_{2})}t_{1}^{-\frac{n}{p_1}-\frac{p_2\varepsilon}{p_1}} t_{2}^{-\frac{n}{p_2}-\varepsilon}\omega(t_{1},t_{2})dt_{1}dt_{2}\right)^{p}. 
\end{array}$$ Therefore, \begin{eqnarray*} &&\|\mathcal{H}_{\omega}^2\|_{L^{p_1}(\mathbb{R}^{n})\times L^{p_2}(\mathbb{R}^{n}) \rightarrow L^p(\mathbb{R}^{n})}\\ &&\hspace{0.2cm}\geq\displaystyle \left(\frac{\sqrt{2}}{2} \varepsilon\right)^{p_2\varepsilon/p} \int\limits_{E_{\frac{1}{\varepsilon}}(t_{1}, t_{2})} t_{1}^{-\frac{n}{p_1}-\frac{p_2\varepsilon}{p_1}} t_{2}^{-\frac{n}{p_2}-\varepsilon}\omega(t_{1},t_{2})\,dt_{1}\,dt_{2}. \end{eqnarray*} Since $(\sqrt{2}\varepsilon/2)^{p_2\varepsilon/p}\rightarrow1$ as $\varepsilon\rightarrow 0^{+}$, by letting $\varepsilon\rightarrow 0^{+}$, we know that \begin{eqnarray}\label{2.4} \|\mathcal{H}_{\omega}^2\|_{L^{p_1}(\mathbb{R}^{n})\times L^{p_2}(\mathbb{R}^{n})\rightarrow L^p(\mathbb{R}^{n})}\geq \mathbb{A}_2. \end{eqnarray} Combining (\ref{2.1}) and (\ref{2.4}) then finishes the proof. \end{proof} Observe that when $n=1$ and $\alpha\in(0,m)$, if we take $$\omega(\vec{t}):=\frac{1}{\Gamma(\alpha)|(1-t_{1}, \dots, 1-t_{m})|^{m-\alpha}},$$ then $$\mathcal{H}_{\omega}^m(\vec{f})(x)=x^{-\alpha}I^{m}_{\alpha}\vec{f}(x),\quad x>0,$$ where $$I^{m}_{\alpha}\vec{f}(x):=\frac{1}{\Gamma(\alpha)} \int\limits_{0<t_{1},t_{2},...,t_{m}<x} \frac{\prod_{i=1}^{m}f_{i}(t_{i})}{|(x-t_{1}, \dots, x-t_{m})|^{m-\alpha}}\,d\vec{t}.$$ The operator $I^{m}_{\alpha}$ turns out to be the one-sided analogue of the one-dimensional multilinear Riesz operator $\mathcal{I}^{m}_{\alpha}$ studied by Kenig and Stein in [9], where $$\mathcal{I}^{m}_{\alpha}\vec{f}(x):= \int\limits_{t_{1},t_{2},...,t_{m}\in\mathbb{R}} \frac{\prod_{i=1}^{m}f_{i}(t_{i})}{|(x-t_{1}, \dots, x-t_{m})|^{m-\alpha}}\,d\vec{t},\qquad x\in{\mathbb R}.$$ As an application of Theorem \ref{t1} we obtain the following sharp estimate of the boundedness of $I^{m}_{\alpha}$. \begin{corollary} Let $0<\alpha<m$. 
With the same assumptions as in Theorem \ref{t1}, the operator $I^{m}_{\alpha}$ maps $L^{p_1}({\mathbb R})\times \dots \times L^{p_m}({\mathbb R})$ to $ L^p(x^{-p\alpha} dx)$ and the operator norm equals $$\frac{1}{\Gamma(\alpha)}\int\limits_{0<t_{1},t_{2},\ldots,t_{m}<1}\left(\prod_{i=1}^{m} t_{i}^{-1/p_{i}}\right)\frac{1}{|(1-t_{1}, \dots, 1-t_{m})|^{m-\alpha}}\,d\vec{t}.$$ \end{corollary} Next we extend the result in Theorem \ref{t1} to the product of central Morrey spaces. \begin{theorem}\label{t2} Let $1<p<p_i<\infty,$ $1/p=1/p_1+\cdots+1/p_m$, $\lambda=\lambda_1+\cdots+\lambda_m$ and $-1/p_i\leq \lambda_i<0~(i=1,2,\ldots,m)$. {\rm (i)} If \begin{eqnarray}\label{Am} \widetilde{\mathbb{A}}_{m}: =\int\limits_{0<t_1,t_2,\ldots,t_m<1}\left(\prod_{i=1}^m t_i^{n\lambda_i}\right)\omega(\vec{t})d\vec{t}<\infty, \end{eqnarray} then $\mathcal{H}_\omega^m$ is bounded from $\dot{B}^{p_1,\lambda_1}({{\rr}^n})\times\cdots \times\dot{B}^{p_m,\lambda_m}({{\rr}^n})$ to $\dot{B}^{p,\lambda}({{\rr}^n})$ with its operator norm not more than $\widetilde{\mathbb{A}}_{m}$. {\rm (ii)} Assume that $\lambda_1p_1=\cdots=\lambda_mp_m$. In this case the condition \eqref{Am} is also necessary for the boundedness of $\mathcal{H}_\omega^m:\ \dot{B}^{p_1,\lambda_1}({{\rr}^n})\times\cdots \times\dot{B}^{p_m,\lambda_m}({{\rr}^n})\to\dot{B}^{p,\lambda}({{\rr}^n})$. Moreover, $$\|\mathcal{H}_\omega^m\|_{\dot{B}^{p_1,\lambda_1}({{\rr}^n}) \times\cdots\times\dot{B}^{p_m,\lambda_m}({{\rr}^n}) \rightarrow\dot{B}^{p,\lambda}({{\rr}^n})}=\widetilde{\mathbb{A}}_{m}.$$ \end{theorem} \begin{proof} By similarity, we only give the proof in the case $m=2$. When $-1/p_i=\lambda_i$, $i=1,2$, then Theorem \ref{t2} is just Theorem \ref{t1}. Next we consider the case that $-1/p_i<\lambda_i<0$, $i=1,2$. First, we assume $\widetilde{\mathbb{A}}_2<\infty$. 
Since $1/p=1/p_1+1/p_2$, by Minkowski's inequality and H\"{o}lder's inequality, we see that, for all balls $B=B(0,R)$, \begin{eqnarray}\label{H2f} &&\left(\frac{1}{|B|^{1+\lambda p}}\int_B|\mathcal{H}_{\omega}^2(\vec{f})(x)|^pdx\right)^{1/p}\nonumber\\ & &\hspace{0.2cm}\leq \int\limits_{0<t_1,t_2<1}\left(\frac{1}{|B|^{1+\lambda p}}\int_B\Big|\prod_{i=1}^2f_i(t_i x)\Big|^pdx\right)^{1/p}\omega(\vec{t})d\vec{t}\nonumber\\ & &\hspace{0.2cm}\leq \int\limits_{0<t_1,t_2<1}\prod_{i=1}^2\left(\frac{1}{|B|^{1+\lambda_i p_i}}\int_B\Big|f_i(t_i x)\Big|^{p_i}dx\right)^{1/p_i}\omega(\vec{t})d\vec{t}\nonumber\\ &&\hspace{0.2cm}= \int\limits_{0<t_1,t_2<1}t_1^{n\lambda_1}t_2^{n\lambda_2}\prod_{i=1}^2\left(\frac{1}{|t_i B|^{1+\lambda_i p_i}}\int_{t_i B}\Big|f_i( x)\Big|^{p_i}dx\right)^{1/p_i}\omega(\vec{t})d\vec{t}\nonumber\\ &&\hspace{0.2cm}\le \|f_1\|_{\dot{B}^{p_1,\lambda_1}}\|f_2\|_{\dot{B}^{p_2,\lambda_2}} \int\limits_{0<t_1,t_2<1}t_1^{n\lambda_1}t_2^{n\lambda_2}\omega(\vec{t})d\vec{t}. \end{eqnarray} This means that $\|\mathcal{H}_\omega^2\|_{\dot{B}^{p_1,\lambda_1}({{\rr}^n})\times\dot{B}^{p_2,\lambda_2}({{\rr}^n}) \rightarrow\dot{B}^{p,\lambda}({{\rr}^n})}\le\widetilde{\mathbb{A}}_{2}.$ For the necessity when $\lambda_1p_1=\lambda_2p_2$, let $f_1(x):=|x|^{n\lambda_1}$ and $f_2(x):=|x|^{n\lambda_2}$ for all $x\in{{\rr}^n}\setminus\{0\}$, and $f_1(0)=f_2(0):=0$. Then for any $B:=B(0,R)$, \begin{eqnarray*} \left(\frac{1}{|B|^{1+\lambda_i p_i}}\int_B|f_i(x)|^{p_i}dx\right)^{1/p_i}&=&\left(\frac{1}{|B|^{1+\lambda_i p_i}}\int_B|x|^{n\lambda_i p_i}dx\right)^{1/p_i}= \left(\frac{\omega_n}{n}\right)^{-\lambda_i}\left(\frac{1}{1+\lambda_ip_i}\right)^{1/p_i}. \end{eqnarray*} Hence $\|f_i\|_{\dot{B}^{p_i,\lambda_i}} =(\omega_n/n)^{-\lambda_i}(\frac{1}{1+\lambda_ip_i})^{1/p_i}$, $i=1,2$.
Since $\lambda=\lambda_1+\lambda_2$ and $-1/p_i< \lambda_i<0, 1<p<p_i<\infty,~i=1,2$, we have \begin{eqnarray} &&\left(\frac{1}{|B|^{1+\lambda p}}\int_B|\mathcal{H}_{\omega}^2(\vec{f})(x)|^{p}dx\right)^{1/p} \nonumber\\ &&\hspace{0.2cm}=\left(\frac{1}{|B|^{1+\lambda p}}\int_B|x|^{n\lambda p}\,dx\right)^{1/p}\int\limits_{0<t_1,t_2<1}t_1^{n\lambda_1} t_2^{n\lambda_2}\omega(\vec{t})d\vec{t} \nonumber\\ &&\hspace{0.2cm}=\left(\frac{\omega_n}{n}\right)^{-\lambda}\left(\frac{1}{1+\lambda p}\right)^{1/p}\int\limits_{0<t_1,t_2<1}t_1^{n\lambda_1} t_2^{n\lambda_2}\omega(\vec{t})d\vec{t} \nonumber\\ &&\hspace{0.2cm}= \|f_1\|_{\dot{B}^{p_1,\lambda_1}} \|f_2\|_{\dot{B}^{p_2,\lambda_2}} \frac{(1+\lambda_1p_1)^{1/p_1} (1+\lambda_2p_2)^{1/p_2}}{(1+\lambda p)^{1/p}} \int\limits_{0<t_1,t_2<1}t_1^{n\lambda_1}t_2^{n\lambda_2}\omega(\vec{t})d\vec{t} \nonumber\\ &&\hspace{0.2cm}= \|f_1\|_{\dot{B}^{p_1,\lambda_1}} \|f_2\|_{\dot{B}^{p_2,\lambda_2}} \int\limits_{0<t_1,t_2<1}t_1^{n\lambda_1}t_2^{n\lambda_2}\omega(\vec{t})d\vec{t}, \label{eq2-6} \end{eqnarray} since $\lambda_1p_1=\lambda_2p_2.$ Then, $\widetilde{\mathbb{A}}_2\leq \|\mathcal{H}_{\omega}^2\|_{\dot{B}^{p_1,\lambda_1}\times \dot{B}^{p_2,\lambda_2}\rightarrow\dot{B}^{p,\lambda}}<\infty.$ Combining (\ref{H2f}) and (\ref{eq2-6}) then concludes the proof of Theorem \ref{t2}. \end{proof} We remark that Theorem \ref{t2} when $m=1$ goes back to \cite[Theorem 2.1]{FZW}. A corresponding conclusion for $I^{m}_{\alpha}$ also holds true. \begin{corollary} Let $0<\alpha<m$.
With the same assumptions as in Theorem \ref{t2}, the operator $I^{m}_{\alpha}$ maps $\dot{B}^{p_1,\lambda_1}({\mathbb R})\times \cdots\times\dot{B}^{p_m,\lambda_m}({\mathbb R})$ to $\dot{B}^{p,\lambda}(x^{-p\alpha}dx)$ with the operator norm not more than $$\frac{1}{\Gamma(\alpha)}\int\limits_{0<t_{1},t_{2},\ldots,t_{m}<1}\left(\prod_{i=1}^{m} t_{i}^{\lambda_{i}}\right)\frac{1}{|(1-t_{1}, \dots, 1-t_{m})|^{m-\alpha}}\,d\vec{t}.$$ In particular, when $\lambda_1p_1=\cdots=\lambda_mp_m$, the operator norm of $I^{m}_{\alpha}$ is equal to the above quantity. \end{corollary} \begin{remark} Notice that in the necessary part of Theorem \ref{t2}, we need an additional condition $\lambda_1p_1=\cdots=\lambda_mp_m$. In the case of Lebesgue spaces, this condition holds true automatically. For the case of Morrey spaces, such a condition is known to be the necessary and sufficient condition for the interpolation properties of Morrey spaces; see, for example, \cite{LR}. \end{remark} \section{Commutators of weighted multilinear Hardy operators} In this section, we consider the sharp estimates of the multilinear commutator generated by $\mathcal{H}_{\omega}^m$ with symbols in $\dot{CMO}^q({{\rr}^n})$. Before presenting the main results of this section, we first introduce the following well-known Riemann-Lebesgue-type Lemma, which plays a key role in the proof below. For completeness, we give a detailed proof. \begin{lemma}\label{LA} Let $m\in\mathbb{N}$ and $\omega:\,[a,b]^m\to[0,\infty)$ be an integrable function. Then $$\lim_{r\to\infty}\int_{[a,b]^m}\omega(t_1,\cdots,t_m)\,\prod_{i\in E} \sin(\pi r t_i)\,dt_1\,\cdots\,dt_m=0,$$ where $E$ is an arbitrary nonempty subset of $\{1,\cdots,m\}$.
\end{lemma} \begin{proof} For simplicity, we only give the proof for the case that $m=2$ and $E=\{1\}$, namely, to show $$\lim_{r\to\infty}\int_{[a,b]^2}\omega(t_1,t_2)\, \sin(\pi r t_1)\,dt_1\,dt_2=0.$$ Since $\omega$ is integrable, for any $\varepsilon>0$, there exists a partition $\{I_i\times J_j:\ i=1,\cdots,k\hspace{0.3cm}\mathrm{and}\hspace{0.3cm}j=1,\cdots,l\} $ such that $I_i=[a_{I_i},b_{I_i}]$, $J_j=[a_{J_j},b_{J_j}]$, $[a,b]=\cup_{i=1}^k I_i=\cup_{j=1}^l J_j$, $I_i\cap I_j=\emptyset=J_i\cap J_j$ if $i\neq j$, and $$0\le \int_a^b\int_a^b \omega(t_1,t_2)\,dt_1\,dt_2-\sum_{i=1}^k \sum_{j=1}^l m_{ij}|I_i||J_j|<\varepsilon/2,$$ where $m_{ij}$ is the minimum value of $\omega$ on $I_i\times J_j$. Let $$g(t_1,t_2):= \sum_{i=1}^k \sum_{j=1}^l m_{ij}\chi_{I_i}(t_1)\chi_{J_j}(t_2),\quad t_1,t_2\in[a,b].$$ Then $$\int_a^b\int_a^b g(t_1,t_2)\,dt_1\,dt_2=\sum_{i=1}^k \sum_{j=1}^l m_{ij}|I_i||J_j|$$ and $$0\le \int_a^b\int_a^b [\omega(t_1,t_2)-g(t_1,t_2)]\,dt_1\,dt_2<\varepsilon/2.$$ It follows from $\omega-g\ge 0$ that \begin{eqnarray*} &&\left|\int_{[a,b]^2}\omega(t_1,t_2)\, \sin(\pi r t_1)\,dt_1\,dt_2\right|\\ &&\hspace{0.2cm}\le \left|\int_{[a,b]^2}[\omega(t_1,t_2)-g(t_1,t_2)]\, \sin(\pi r t_1)\,dt_1\,dt_2\right|+\left|\int_{[a,b]^2}g(t_1,t_2)\, \sin(\pi r t_1)\,dt_1\,dt_2\right|\\ &&\hspace{0.2cm}\le \int_{[a,b]^2}[\omega(t_1,t_2)-g(t_1,t_2)]\,dt_1\,dt_2+\left|\int_{[a,b]^2}g(t_1,t_2)\, \sin(\pi r t_1)\,dt_1\,dt_2\right|\\ &&\hspace{0.2cm}\le \varepsilon/2+\left|\frac{1}{\pi r}\sum_{i=1}^k \sum_{j=1}^l m_{ij}|J_j|[\cos(\pi ra_{I_i})-\cos(\pi rb_{I_i})]\right|. \end{eqnarray*} Choosing $r$ large enough such that $$\left|\frac{1}{\pi r}\sum_{i=1}^k \sum_{j=1}^l m_{ij}|J_j|[\cos(\pi ra_{I_i})-\cos(\pi rb_{I_i})]\right|<\varepsilon/2,$$ we then know that $$\left|\int_{[a,b]^2}\omega(t_1,t_2)\, \sin(\pi r t_1)\,dt_1\,dt_2\right|<\varepsilon.$$ This finishes the proof.
\end{proof} Now we recall the definition for the multilinear version of the commutator of the weighted Hardy operators. Let $m\geq 2$, $\omega :\, [0,1]^m\rightarrow [0,\infty)$ be an integrable function, and $b_{i}\ (1\leq i\leq m)$ be locally integrable functions on ${{\rr}^n}$. We define $$\mathcal{H}_{\omega}^{\vec{b}}(\vec{f})(x):=\int\limits_{0<t_{1},t_{2},...,t_{m}<1} \left(\prod_{i=1}^{m}f_{i}(t_{i}x)\right) \left(\prod_{i=1}^{m}(b_{i}(x)-b_{i}(t_{i}x))\right)\omega(\vec{t})\,d\vec{t},\quad x\in \mathbb{ R}^n.$$ In what follows, we set $$\mathbb{B}_{m}:=\int\limits_{0<t_{1},t_{2},\ldots,t_{m}<1} \left(\prod_{i=1}^{m}t_{i}^{n\lambda_i}\right) \omega(\vec{t})\prod_{i=1}^{m}\log\frac{1}{t_{i}}\,d\vec{t}$$ and $$\mathbb{C}_{m}:=\int\limits_{0<t_{1},t_{2},\ldots,t_{m}<1} \left(\prod_{i=1}^{m}t_{i}^{n\lambda_i}\right) \omega(\vec{t})\prod_{i=1}^{m}\log\frac{2}{t_{i}}\,d\vec{t}.$$ Then we have the following multilinear generalization of Theorem B. \begin{theorem}\label{t3} Let $1<p<p_i<\infty, 1<q_i<\infty$, $-1/p_i<\lambda_i<0$, $i=1,\ldots, m$, such that $1/p=1/p_1+\cdots+1/p_m+1/q_1+\cdots+1/q_m$, $\lambda=\lambda_1+\cdots+\lambda_m $. Assume further that $\omega$ is a non-negative integrable function on $[0,1]\times\cdots \times [0,1]$. {\rm (i)} If $\mathbb{C}_{m}<\infty,$ then $\mathcal{H}_{\omega}^{\vec{b}} $ is bounded from $\dot{B}^{p_1,\lambda_1}(\mathbb{R}^{n})\times \cdots \times \dot{B}^{p_m,\lambda_m}(\mathbb{R}^{n})$ to $ \dot{B}^{p,\lambda}(\mathbb{R}^{n})$ for all $\vec{b}=(b_1,b_2,\ldots,b_m)\in \dot{\mathrm{CMO}}^{q_1}(\mathbb{R}^{n}) \times\cdots\times\dot{\mathrm{CMO}}^{q_m}(\mathbb{R}^{n})$. {\rm (ii)} Assume that $\lambda_1p_1=\cdots=\lambda_mp_m$. In this case the condition $\mathbb{C}_{m}<\infty$ in (i) is also necessary. \end{theorem} \begin{remark} It is easy to verify that the condition $\mathbb{C}_{m}<\infty$ in Theorem \ref{t3} implies the condition (\ref{Am}) in Theorem \ref{t2}.
\end{remark} \begin{proof}[Proof] By similarity, we only consider the case that $m=2$. We first show (i). That is, we assume $\mathbb{C}_{2}<\infty$ and show that $$\|\mathcal{H}_{\omega}^{\vec{b}}\|_{ \dot{B}^{p_1,\lambda_1}(\mathbb{R}^{n})\times\dot{B}^{p_2,\lambda_2}(\mathbb{R}^{n}) \rightarrow \dot{B}^{p,\lambda}(\mathbb{R}^{n})}<\infty$$ whenever $\vec b=(b_1, b_2)\in\dot{\mathrm {CMO}}^{q_1}({{\rr}^n})\times\dot{\mathrm {CMO}}^{q_2}({{\rr}^n})$. By Minkowski's inequality we have \begin{eqnarray*} && \Big(\frac{1}{|B|}\int_B |\mathcal{H}_{\omega}^{\vec{b}}(\vec{f})(x)|^p\Big)^{1/p}\\ &&\displaystyle\leq \Big(\frac{1}{|B|}\int_B\Big(\int_0^1\int_0^1\prod_{i=1}^{2}|f_i(t_i x)|\prod_{i=1}^{2}|b_i(x)-b_i(t_ix)|\omega(t_1,t_2)dt_1dt_2\Big)^pdx\Big)^{1/p}\\ &&\leq \int_0^1\int_0^1\Big(\frac{1}{|B|}\int_B\Big(\prod_{i=1}^{2}|f_i(t_i x)|\prod_{i=1}^{2}|b_i(x)-b_i(t_ix)|\Big)^pdx\Big)^{1/p}\omega(t_1,t_2)dt_1dt_2\\ &&=I_1+I_2+I_3+I_4+I_5+I_6, \end{eqnarray*} where \begin{eqnarray*} &&I_1:=\displaystyle \int\limits_{0<t_{1},t_{2}<1}\left(\frac{1}{|B|}\int_{B} \left(\Big(\prod_{i=1}^{2}|f_{i}(t_{i}x)|\Big)\Big(\prod_{i=1}^{2}|b_{i}(x) -b_{i, B}|\Big)\right)^p\,dx\right)^{\frac 1p}\,\omega(\vec{t})\,d\vec{t},\\ &&I_2:=\displaystyle \int\limits_{0<t_{1},t_{2}<1}\left(\frac{1}{|B|}\int_{B} \left(\Big(\prod_{i=1}^{2}|f_{i}(t_{i}x)|\Big)\Big(\prod_{i=1}^{2}|b_{i}(t_ix) -b_{i, t_iB}|\Big)\right)^p\,dx\right)^{\frac 1p}\,\omega(\vec{t})\,d\vec{t},\\ &&I_3:=\displaystyle \int\limits_{0<t_{1},t_{2}<1}\left(\frac{1}{|B|}\int_{B} \left(\Big(\prod_{i=1}^{2}|f_{i}(t_{i}x)|\Big)\Big(\prod_{i=1}^{2}|b_{i,B} -b_{i, t_iB}|\Big)\right)^p\,dx\right)^{\frac 1p}\,\omega(\vec{t})\,d\vec{t},\\ &&I_4:=\displaystyle \int\limits_{0<t_{1},t_{2}<1}\left(\frac{1}{|B|}\int_{B} \left(\Big(\prod_{i=1}^{2}|f_{i}(t_{i}x)|\Big)\Big(\sum_{D(i, j)} |b_{i}(x)-b_{i, B}||b_{j, B}-b_{j, t_{j}B}|\Big)\right)^p\,dx\right)^{\frac 1p}\,\omega(\vec{t})\,d\vec{t},\\ &&I_5:=\displaystyle 
\int\limits_{0<t_{1},t_{2}<1}\left(\frac{1}{|B|}\int_{B} \left(\Big(\prod_{i=1}^{2}|f_{i}(t_{i}x)|\Big)\Big(\sum_{D(i, j)} |b_{i}(x)-b_{i, B}||b_{j}(t_j x)-b_{j, t_{j}B}|\Big)\right)^p\,dx\right)^{\frac 1p}\,\omega(\vec{t})\,d\vec{t},\\ &&I_6:=\displaystyle \int\limits_{0<t_{1},t_{2}<1}\left(\frac{1}{|B|}\int_{B} \left(\Big(\prod_{i=1}^{2}|f_{i}(t_{i}x)|\Big)\Big(\sum_{D(i, j)} |b_{i,B}-b_{i,t_i B}||b_{j}(t_j x)-b_{j, t_{j}B}|\Big)\right)^p\,dx\right)^{\frac 1p}\,\omega(\vec{t})\,d\vec{t}, \end{eqnarray*} and \[ D(i, j):=\{(i, j)| (1, 2); (2, 1)\},\quad\quad ~b_{i, B}:=\frac{1}{|B|}\int_{B}b_{i},~\quad i=1, 2. \] Choose $p<s_{1}<\infty, p<s_{2}<\infty $ such that $1/s_1=1/p_1+1/q_1$, $1/s_2=1/p_2+1/q_2$. Then by H\"{o}lder's inequality, we know that \begin{eqnarray*} \displaystyle I_{1}&\leq &\displaystyle \int\limits_{0<t_{1},t_{2}<1}\prod_{i=1}^{2}\left(\frac{1}{|B|}\int_{B} \left|f_{i}(t_{i}x)\right|^{p_{i}}dx\right)^{1/p_{i}}\prod_{i=1}^{2}\left(\frac{1}{|B|}\int_{B} \left|b_{i}(x)-b_{i, B}\right|^{q_{i}}dx\right)^{1/q_{i}}\omega(\vec{t})\,d\vec{t}\\ &\leq&\displaystyle |B|^{\lambda}\int\limits_{0<t_{1},t_{2}<1}\prod_{i=1}^{2}t_i^{n\lambda_i}\prod_{i=1}^{2}\left(\frac{1}{|t_{i}B|^{1+\lambda_i p_i}}\int_{t_{i}B} \left|f_{i}(x)\right|^{p_{i}}dx\right)^{1/p_{i}}\\ &&\quad\displaystyle\times\prod_{i=1}^{2}\left(\frac{1}{|B|}\int_{B} \left|b_{i}(x)-b_{i, B}\right|^{q_{i}}dx\right)^{1/q_{i}}\omega(\vec{t})\,d\vec{t}\\ &\leq&\displaystyle C|B|^{\lambda}\|b_1\|_{\dot{CMO}^{q_1}}\|b_2\|_{\dot{CMO}^{q_2}} \|f_1\|_{\dot{B}^{p_1,\lambda_1}}\|f_2\|_{\dot{B}^{p_2,\lambda_2}} \int\limits_{0<t_{1},t_{2}<1}\prod_{i=1}^{2}t_i^{n\lambda_i}\omega(\vec{t})\,d\vec{t}. 
\end{eqnarray*} Similarly, we obtain \begin{eqnarray*} \displaystyle I_{2}&\leq &\displaystyle \int\limits_{0<t_{1},t_{2}<1}\prod_{i=1}^{2}\left(\frac{1}{|B|}\int_{B} \left|f_{i}(t_{i}x)\right|^{p_{i}}dx\right)^{1/p_{i}}\prod_{i=1}^{2}\left(\frac{1}{|B|}\int_{B} \left|b_{i}(t_{i}x)-b_{i, t_{i}B}\right|^{q_{i}}dx\right)^{1/q_{i}}\omega(\vec{t})\,d\vec{t}\\ &\leq&\displaystyle |B|^{\lambda}\int\limits_{0<t_{1},t_{2}<1}\prod_{i=1}^{2}t_i^{n\lambda_i}\prod_{i=1}^{2}\left(\frac{1}{|t_{i}B|^{1+\lambda_i p_i}}\int_{t_{i}B} \left|f_{i}(x)\right|^{p_{i}}dx\right)^{1/p_{i}}\\ &&\quad\displaystyle\times\prod_{i=1}^{2}\left(\frac{1}{|t_iB|}\int_{t_iB} \left|b_{i}(x)-b_{i, t_iB}\right|^{q_{i}}dx\right)^{1/q_{i}}\omega(\vec{t})\,d\vec{t}\\ &\leq&\displaystyle C|B|^{\lambda}\|b_1\|_{\dot{CMO}^{q_1}}\|b_2\|_{\dot{CMO}^{q_2}}\|f_1\|_{\dot{B}^{p_1,\lambda_1}} \|f_2\|_{\dot{B}^{p_2,\lambda_2}} \int\limits_{0<t_{1},t_{2}<1}\prod_{i=1}^{2}t_i^{n\lambda_i}\omega(\vec{t})\,d\vec{t}. \end{eqnarray*} It follows from $1/p=1/s_{1}+1/{s_2}$ that $1=p/s_{1}+p/{s_2}$. 
From $1/s_1=1/p_1+1/q_1,1/s_2=1/p_2+1/q_2$ and H\"{o}lder's inequality, we deduce that \begin{eqnarray*} \displaystyle I_3&=&\displaystyle \int\limits_{0<t_{1},t_{2}<1}\left(\frac{1}{|B|}\int_{B} \left(\Big(\prod_{i=1}^{2}|f_{i}(t_{i}x)|\Big)\Big(\prod_{i=1}^{2}|b_{i,B} -b_{i, t_iB}|\Big)\right)^p\,dx\right)^{1/p}\,\omega(\vec{t})\,d\vec{t}\\ &\leq&\displaystyle\int\limits_{0<t_{1},t_{2}<1}\prod_{i=1}^{2}\left(\frac{1}{|B|}\int_B|f_{i}(t_{i}x)|^{s_i}\right)^{1/s_{i}}\left(\prod_{i=1}^{2}|b_{i, B}-b_{i, t_{i}B}|\right)\omega(\vec{t})\,d\vec{t}\\ &\leq&\displaystyle C|B|^{\lambda}\int\limits_{0<t_{1},t_{2}<1}t_1^{n\lambda_1}t_2^{n\lambda_2}\prod_{i=1}^{2}\left(\frac{1}{|t_iB|^{1+\lambda_i p_i}}\int_{t_iB}|f_{i}(t_{i}x)|^{p_i}\right)^{1/p_{i}}\\ &&\quad\times\displaystyle\left(\prod_{i=1}^{2}|b_{i, B}-b_{i, t_{i}B}|\right)\omega(\vec{t})\,d\vec{t}\\ &\leq&\displaystyle C|B|^{\lambda}\|f_1\|_{\dot{B}^{p_1,\lambda_1}}\|f_2\|_{\dot{B}^{p_2,\lambda_2}}\int\limits_{0<t_{1},t_{2}<1}t_1^{n\lambda_1}t_2^{n\lambda_2}\prod_{i=1}^{2}|b_{i, B}-b_{i, t_{i}B}|\omega(\vec{t})\,d\vec{t}\\ &\leq&\displaystyle C|B|^{\lambda}\|f_1\|_{\dot{B}^{p_1,\lambda_1}} \|f_2\|_{\dot{B}^{p_2,\lambda_2}}\sum_{\ell=0}^\infty\sum^{\infty}_{k=0} \int\limits_{{2^{-\ell-1}}\leq t_1<{2^{-\ell}}}\int\limits_{{2^{-k-1}}\leq t_2<{2^{-k}}}t_1^{n\lambda_1}t_2^{n\lambda_2}\\ &&\quad\displaystyle\times\left(\sum^\ell_{j=0}\left|b_{1,2^{-j}B}-b_{1,2^{-j-1}B}\right| +\left|b_{1,2^{-k-1}B}-b_{1,t_1B}\right|\right) \\ &&\quad\times \left( \sum^k_{j=0}\left|b_{2,2^{-j}B}-b_{2,2^{-j-1}B}\right| +\left|b_{2,2^{-k-1}B}-b_{2,t_2B}\right|\right)\omega(\vec{t})\,d\vec{t}\\ &\leq&\displaystyle C|B|^{\lambda}\|b_1\|_{\dot{CMO}^{q_1}}\|b_2\|_{\dot{CMO}^{q_2}} \|f_1\|_{\dot{B}^{p_1,\lambda_1}}\|f_2\|_{\dot{B}^{p_2,\lambda_2}}\\ &&\quad\displaystyle\times\int\limits_{0<t_{1},t_{2}<1}\prod_{i=1}^{2} t_i^{n\lambda_i}\omega(\vec{t})\log\frac{2}{t_1}\log\frac{2}{t_1}\,d\vec{t}, \end{eqnarray*} where we use the fact that 
\begin{eqnarray*} |b_{1,B}-b_{1,t_1B}|&\le&\sum^k_{j=0}\left|b_{1,2^{-j}B}-b_{1,2^{-j-1}B}\right| +\left|b_{1,2^{-k-1}B}-b_{1,t_1B}\right|\\ &\leq& C(k+1)\|b_1\|_{\dot{CMO}^{q_1}}\leq C\log\frac{2}{t_1}\|b_1\|_{\dot{CMO}^{q_1}} \end{eqnarray*} and \begin{eqnarray*} |b_{2,B}-b_{2,t_2B}|\leq C\log\frac{2}{t_2}\|b_2\|_{\dot{CMO}^{q_2}}. \end{eqnarray*} We now estimate $I_{4}$. Similarly, we choose $1<s<\infty$ such that $1/p=1/p_1+1/p_2+1/s$ and $1/s=1/q_1+1/q_2$. Using Minkowski's inequality and H\"{o}lder's inequality yields \begin{eqnarray*} I_4&=& \int\limits_{0<t_{1},t_{2}<1}\left(\frac{1}{|B|}\int_{B} \left(\Big(\prod_{i=1}^{2}|f_{i}(t_{i}x)|\Big)\Big(\sum_{D(i, j)} |b_{i}(x)-b_{i, B}||b_{j, B}-b_{j, t_{j}B}|\Big)\right)^p\,dx\right)^{1/p}\,\omega(\vec{t})\,d\vec{t}\\ &\leq& \int\limits_{0<t_{1},t_{2}<1}\left[\left(\frac{1}{|B|}\int_{B} \left(\bigg(\prod_{i=1}^{2}|f_{i}(t_{i}x)|\bigg) \bigg(|b_{1}(x)-b_{1, B}||b_{2, B}-b_{2, t_{2}B}|\bigg)\right)^pdx\right)^{1/p}\right. \\ &&\quad\left. +\left(\frac{1}{|B|}\int_{B} \left(\bigg(\prod_{i=1}^{2}|f_{i}(t_{i}x)|\bigg)\bigg(|b_{2}(x)-b_{2, B}||b_{1, B}-b_{1, t_{1}B}|\bigg)\right)^p\,dx\right)^{1/p}\right]\,\omega(\vec{t})\,d\vec{t}\\ &\leq& \int\limits_{0<t_{1},t_{2}<1}\prod_{i=1}^{2}\left(\frac{1}{|B|}\int_{B} \left|f_{i}(t_{i}x)\right|^{p_{i}}dx\right)^{1/p_{i}}\Bigg\{\left(\frac{1}{|B|}\int_{B} \left|b_{1}(x)-b_{1, B}\right|^{s}dx\right)^{1/s}\\ &&\quad\times|b_{2, B}-b_{2, t_{2}B}|+ \left(\frac{1}{|B|}\int_{B}\left|b_{2}(x)-b_{2, B}\right|^{s}dx\right)^{1/s}|b_{1, B}-b_{1, t_{1}B}|\Bigg\}\omega(\vec{t})\,d\vec{t}\\ &\leq&\displaystyle C|B|^{\lambda}\int\limits_{0<t_{1},t_{2}<1}t_1^{n\lambda_1} t_2^{n\lambda_2}\prod_{i=1}^{2}\left(\frac{1}{|t_i B|^{1+\lambda_i p_i}}\int_{t_iB} \left|f_{i}(x)\right|^{p_{i}}dx\right)^{1/p_{i}}\\ &&\quad\times\Bigg\{\left(\frac{1}{|B|}\int_{B} \left|b_{1}(x)-b_{1, B}\right|^{s}dx\right)^{1/s}|b_{2, B}-b_{2, t_{2}B}|\\ &&\quad+\left(\frac{1}{|B|}\int_{B}\left|b_{2}(x)-b_{2,
B}\right|^{s}dx\right)^{1/s}|b_{1, B}-b_{1, t_{1}B}|\Bigg\}\omega(\vec{t})\,d\vec{t}\\ &\leq& C|B|^{\lambda}\|f_1\|_{\dot{B}^{q_1,\lambda_1}}\|f_2\|_{\dot{B}^{q_2},\lambda_2}\int\limits_{0<t_{1},t_{2}<1}t_1^{n\lambda_1}t_2^{n\lambda_2}\Bigg\{\left(\frac{1}{|B|}\int_{B} \left|b_{1}(x)-b_{1, B}\right|^{s}dx\right)^{1/s}\\ &&\quad\times|b_{2, B}-b_{2, t_{2}B}|+\left(\frac{1}{|B|}\int_{B}\left|b_{2}(x)-b_{2, B}\right|^{s}dx\right)^{1/s}|b_{1, B}-b_{1, t_{1}B}|\Bigg\}\omega(\vec{t})\,d\vec{t}. \end{eqnarray*} From the estimates of $I_{1}$ and $I_{3}$, we deduce that \begin{eqnarray*}I_{4}&\leq & C|B|^{\lambda}\|f_1\|_{\dot{B}^{q_1,\lambda_1}}\|f_2\|_{\dot{B}^{q_2},\lambda_2}\|b_1\|_{\dot{CMO}^{q_1}}\|b_2\|_{\dot{CMO}^{q_2}}\\ &&\quad\times\int_{0}^{1}\int_{0}^{1} t_1^{n\lambda_1}t_2^{n\lambda_2}\omega(t_{1}, t_{2})\left(1+\sum_{i=1}^{2}\log\frac{1}{t_{i}}\right)\,d t_{1}dt_{2}. \end{eqnarray*} It can be deduced from the estimates of $I_{1}$, $I_{2}$, $I_{3}$ and $I_{4}$ that $$I_{5}\leq C|B|^{\lambda}\|f_1\|_{\dot{B}^{q_1,\lambda_1}}\|f_2\|_{\dot{B}^{q_2},\lambda_2}\|b_1\|_{\dot{CMO}^{q_1}}\|b_2\|_{\dot{CMO}^{q_2}}\int\limits_{0<t_{1},t_{2}<1}t_1^{n\lambda_1}t_2^{n\lambda_2}\omega(\vec{t})\,d\vec{t}$$ and \begin{eqnarray*}I_{6}&\leq & C|B|^{\lambda}\|f_1\|_{\dot{B}^{q_1,\lambda_1}}\|f_2\|_{\dot{B}^{q_2},\lambda_2}\|b_1\|_{\dot{CMO}^{q_1}}\|b_2\|_{\dot{CMO}^{q_2}}\\ &&\quad\times\int_{0}^{1}\int_{0}^{1} t_1^{n\lambda_1}t_2^{n\lambda_2}\omega(t_{1}, t_{2})\left(1+\sum_{i=1}^{2}\log\frac{1}{t_{i}}\right)\,d t_{1}dt_{2}. 
\end{eqnarray*} Combining the estimates of $I_{1}$, $I_{2}$, $I_{3}$, $I_{4}$, $I_{5}$ and $I_{6}$ gives \begin{eqnarray*} \displaystyle \left(\frac{1}{|B|}\int_{B}|\mathcal{H}_{\omega}^{\vec{b}}\vec{f}(x)|^pdx\right)^{1/p}&\leq& C|B|^{\lambda}\|f_1\|_{\dot{B}^{p_1,\lambda_1}}\|f_2\|_{\dot{B}^{p_2,\lambda_2}}\|b_1\|_{\dot{CMO}^{q_1}}\|b_2\|_{\dot{CMO}^{q_2}}\\ &&\quad\displaystyle \times\int_{0}^{1}\int_{0}^{1} t_1^{n\lambda_1}t_2^{n\lambda_2}\omega(t_{1}, t_{2})\prod_{i=1}^{2}\log\frac{2}{t_{i}}\,d t_{1}dt_{2}. \end{eqnarray*} This proves (i). Now we prove the necessity in (ii). Assume that $$\|\mathcal{H}_{\omega}^{\vec{b}}\|_{ \dot{B}^{p_1,\lambda_1}(\mathbb{R}^{n})\times\dot{B}^{p_2,\lambda_2}(\mathbb{R}^{n}) \rightarrow \dot{B}^{p,\lambda}(\mathbb{R}^{n})}<\infty$$ whenever $\vec b=(b_1, b_2)\in\dot{\mathrm {CMO}}^{q_1}({{\rr}^n})\times\dot{\mathrm {CMO}}^{q_2}({{\rr}^n})$. To show $\mathbb{C}_{2}<\infty$, it suffices to prove that $\widetilde{\mathbb{A}}_2<\infty$, $\mathbb{B}_{2}<\infty$, $$\begin{array}{rl} \mathbb{D}:=\displaystyle \int\limits_{0<t_{1},t_{2}<1} \left(\prod_{i=1}^{2}t_{i}^{n\lambda_i}\right)\omega(t_1,t_2)\log \frac{1}{t_1}\,dt_1\,dt_2<\infty, \end{array}$$ and $$\begin{array}{rl} \mathbb{E}:=\displaystyle \int\limits_{0<t_{1},t_{2}<1} \left(\prod_{i=1}^{2}t_{i}^{n\lambda_i}\right)\omega(t_1,t_2)\log \frac{1}{t_2}\,dt_1\,dt_2<\infty. \end{array}$$ To prove $\mathbb{B}_2<\infty$, we set $b_{1}(x):=\log|x|\in \mathrm{BMO}(\mathbb{R}^{n})\subset\dot{\mathrm {CMO}}^{q_1}({{\rr}^n})$, and $b_{2}(x):=\log|x|\in \mathrm{BMO}(\mathbb{R}^{n})\subset\dot{\mathrm {CMO}}^{q_2}({{\rr}^n})$. Define $f_{1}(x):=|x|^{n\lambda_1}$ and $f_{2}(x):=|x|^{n\lambda_2}$ if $x\in{{\rr}^n}\setminus\{0\}$, and $f_1(0)=f_2(0):=0$.
Then $$\|f_1\|_{\dot{B}^{p_1, \lambda_1}}=\left(\frac{\omega_n}{n}\right)^{-\lambda_1}\Big(\frac{1}{1+\lambda_1 p_1}\Big)^{1/p_1},\quad \|f_2\|_{\dot{B}^{p_2, \lambda_2}}=\left(\frac{\omega_n}{n}\right)^{-\lambda_2}\Big(\frac{1}{1+\lambda_2 p_2}\Big)^{1/p_2}$$ and $$\mathcal{H}_\omega^{\vec{b}}(\vec{f})(x)=|x|^{n\lambda_1}|x|^{n\lambda_2}\int_0^1\int_0^1t_1^{n\lambda_1}t_2^{n\lambda_2}\omega(t_1,t_2) \log\frac{1}{t_1}\log\frac{1}{t_2}dt_1dt_2.$$ Since $1<p<p_i<\infty$, $-1/p_i<\lambda_i<0$ and $\lambda=\lambda_1+\lambda_2~(i=1, 2)$, we see that, for all $B=B(0,R)$, \begin{eqnarray*} &&\Big(\frac{1}{|B|^{1+\lambda p}}\int_B |\mathcal{H}_\omega^{\vec{b}}(\vec{f})(x)|^{p}dx\Big)^{1/p}\\ &&\quad=\Big(\frac{1}{|B|^{1+\lambda p}}\int_B |x|^{n\lambda p}dx\Big)^{1/p}\int_0^1\int_0^1t_1^{n\lambda_1}t_2^{n\lambda_2}\omega(t_1,t_2) \log\frac{1}{t_1}\log\frac{1}{t_2}dt_1dt_2\\ &&\quad =\left(\frac{\omega_n}{n}\right)^{-\lambda}\Big(\frac{1}{1+\lambda p}\Big)^{1/p}\int_0^1\int_0^1t_1^{n\lambda_1}t_2^{n\lambda_2}\omega(t_1,t_2) \log\frac{1}{t_1}\log\frac{1}{t_2}dt_1dt_2\\ &&\quad= \|f_1\|_{\dot{B}^{p_1,\lambda_1}}\|f_2\|_{\dot{B}^{p_2,\lambda_2}}\int_0^1\int_0^1t_1^{n\lambda_1}t_2^{n\lambda_2}\omega(t_1,t_2) \log\frac{1}{t_1}\log\frac{1}{t_2}dt_1dt_2. \end{eqnarray*} Thus $\mathbb{B}_{2}\leq\|\mathcal{H}_{\omega}^{\vec{b}}\|_{ \dot{B}^{p_1,\lambda_1}(\mathbb{R}^{n})\times\dot{B}^{p_2,\lambda_2}(\mathbb{R}^{n}) \rightarrow \dot{B}^{p,\lambda}(\mathbb{R}^{n})}<\infty.$ Since the proof for $\mathbb{D}<\infty$ is similar to that for $\mathbb{E}<\infty$, we only show $\mathbb{E}<\infty$. To this end, for any $r\in\mathbb N$ and $R\in(0,+\infty)$, we choose $b_{1}(x):=\chi_{[B(0,R/2)]^c}(x)\,\sin(\pi r|x|),$ and $b_{2}(x):=\log|x|, $ where $[B(0,R/2)]^c:={{\rr}^n}\setminus B(0, R/2)$. 
Obviously, we have $\vec{b}=(b_{1},b_2)\in \mathrm{\dot{CMO}^{q_1}}(\mathbb{R}^{n})\times\mathrm{\dot{CMO}^{q_2}}(\mathbb{R}^{n}),$ and hence, $$\|\mathcal{H}_{\omega}^{\vec{b}}\|_{ \dot{B}^{p_1,\lambda_1}(\mathbb{R}^{n})\times\dot{B}^{p_2,\lambda_2}(\mathbb{R}^{n}) \rightarrow \dot{B}^{p,\lambda}(\mathbb{R}^{n})}<\infty.$$ Let \begin{equation}\label{f1} f_{1}(x):= \begin{cases} 0,&\quad |x|\leq\frac{R}{2},\\ \displaystyle|x|^{n\lambda_1},&\quad |x|>\frac{R}{2},\end{cases} \end{equation} and \begin{equation}\label{f2} f_{2}(x):= \begin{cases} 0,&\quad |x|\leq\frac{R}{2},\\ \displaystyle|x|^{n\lambda_2},&\quad |x|>\frac{R}{2}. \end{cases} \end{equation} Then, we have $$\begin{array}{rl} \displaystyle &\mathcal{H}_{\omega}^{\vec{b}} \vec{f}(x)\\ &\quad=\displaystyle\int\limits_{0<t_{1},t_{2}<1} \left(\prod_{i=1}^{2}f_{i}(t_{i}x)\right) \left(\prod_{i=1}^{2}(b_{i}(x)-b_{i}(t_{i}x)) \right)\omega(\vec{t})\,d\vec{t}\\ &\quad=\displaystyle |x|^{n\lambda} \int_{{\frac{R}{2|x|}}}^{1}\int_{{\frac{R}{2|x|}}}^{1} t_{1}^{n\lambda_1}t_{2}^{n\lambda_2}\left(b_{1}(x)-b_{1}(t_{1}x) \right)\omega(t_{1}, t_{2})\log\frac1{t_2}\,d t_{1} dt_{2}\\ &\quad = \displaystyle |x|^{n\lambda}b_{1}(x) \int_{{\frac{R}{2|x|}}}^{1}\int_{{\frac{R}{2|x|}}}^{1} t_{1}^{n\lambda_1}t_{2}^{n\lambda_2} \omega(t_{1}, t_{2})\log\frac1{t_2}\,d t_{1} dt_{2}-\eta_{d}, \end{array}$$ whenever $R/2<|x|<R$, $$\begin{array}{rl} \eta_{d}&= |x|^{n\lambda} \displaystyle\int_{{\frac{R}{2|x|}}}^{1}\int_{{\frac{R}{2|x|}}}^{1} t_{1}^{n\lambda_1}t_{2}^{n\lambda_2}\omega(t_{1}, t_{2})b_{1}(t_1x) \log\frac1{t_2}\,d t_{1} dt_{2}\\ &=\displaystyle |x|^{n\lambda} \int_{{\frac{R}{2|x|}}}^{1}\int_{{\frac{R}{2|x|}}}^{1} t_{1}^{n\lambda_1}t_{2}^{n\lambda_2}\omega(t_{1}, t_{2})\sin(\pi rt_1|x|) \log\frac1{t_2}\,d t_{1}\,dt_{2}. 
\end{array}$$ Since $\omega$ is integrable on $[0,1]\times[0,1]$ and $\mathbb{B}_2<\infty$, we know that $$\displaystyle t_{1}^{n\lambda_1}t_{2}^{n\lambda_2}\omega(t_{1},t_{2})\log\frac1{t_2}$$ is integrable on $(\frac{1}{2},1)\times(\frac{1}{2},1)$. Then, it follows from Lemma \ref{LA} that for any $\delta>0$, there exists a positive constant $C_{R,\delta}$ that depends on $R$ and $\delta$ such that $$\begin{array}{rl} \displaystyle\int_{{\frac{1}{2}}}^{1}\int_{{\frac{1}{2}}}^{1} t_{1}^{n\lambda_1}t_{2}^{n\lambda_2}\omega(t_{1}, t_{2})\,\sin(\pi rt_1) \log\frac1{t_2}\,d t_{1} dt_{2}<\delta/2, \end{array}$$ for all $r>C_{R,\delta}$. Now we choose $r>2\max(1/R,1)C_{R,\delta}$. Then for any $R/2<|x|<R$, $r|x|>C_{R,\delta}$, and hence $$\begin{array}{rl} \displaystyle\int_{{\frac{1}{2}}}^{1}\int_{{\frac{1}{2}}}^{1} t_{1}^{n\lambda_1}t_{2}^{n\lambda_2}\omega(t_{1}, t_{2})\,\sin(\pi rt_1|x|) \log\frac1{t_2}\,d t_{1} dt_{2}<\delta/2, \end{array}$$ which further implies that $\eta_{d}<\frac{\delta}2 |x|^{n\lambda}.$ Therefore, for any $R/2<|x|<R$, $$\begin{array}{rl} |\mathcal{H}_{\omega}^{\vec{b}}\vec{f}(x)|&\geq \displaystyle |x|^{n\lambda} \left(\int_{{\frac{R}{2|x|}}}^{1}\int_{{\frac{R}{2|x|}}}^{1} t_{1}^{n\lambda_1}t_{2}^{n\lambda_2} \omega(t_{1}, t_{2})\log\frac1{t_2}\,d t_{1} dt_{2}-\frac{\delta}2\right). \end{array}$$ Let $\varepsilon>0$ be small enough and choose $\delta>0$ such that $$\begin{array}{rl} \delta<\displaystyle \int_{{\frac{R\varepsilon}{2}}}^{1} \int_{{\frac{R\varepsilon}{2}}}^{1} t_{1}^{n\lambda_1}t_{2}^{n\lambda_2} \omega(t_{1}, t_{2})\log\frac1{t_2}\,d t_{1} dt_{2}.
\end{array}$$ Then, for all balls $B=B(0,R)$, $$\begin{array}{rl} &\displaystyle\left(\frac{1}{|B|^{1+\lambda p}}\int_B|\mathcal{H}_{\omega}^{\vec{b}}\vec{f}(x)|^pdx\right)^{1/p}\\ &\quad\geq \displaystyle \left(\frac{1}{|B|^{1+\lambda p}}\int\limits_{R/2<|x|<R} |x|^{n\lambda p}\left(\int_{{\frac{R}{2|x|}}}^{1} \int_{{\frac{R}{2|x|}}}^{1} t_{1}^{n\lambda_1}t_{2}^{n\lambda_2} \omega(t_{1}, t_{2})\log\frac1{t_2}\,d t_{1} dt_{2} -\frac{\delta}2\right)^{p}\,dx\right)^{1/p}\\ &\quad\geq \displaystyle \left(\frac{1}{|B|^{1+\lambda p}}\int\limits_{R/2<|x|<R} |x|^{n\lambda p}\left(\int_{{\frac{R\varepsilon}{2}}}^{1} \int_{{\frac{R\varepsilon}{2}}}^{1} t_{1}^{n\lambda_1}t_{2}^{n\lambda_2}\omega(t_{1}, t_{2})\log\frac1{t_2}\,d t_{1} dt_{2}-\frac{\delta}2\right)^{p}\,dx\right)^{1/p}\\ &\quad\geq \displaystyle C \left(\frac{1}{|B|^{1+\lambda p}}\int\limits_{R/2<|x|<R} |x|^{n\lambda p}\left(\int_{{\frac{R\varepsilon}{2}}}^{1} \int_{{\frac{R\varepsilon}{2}}}^{1} t_{1}^{n\lambda_1}t_{2}^{n\lambda_2} \omega(t_{1}, t_{2})\log\frac1{t_2}\,d t_{1} dt_{2}\right)^{p}\,dx\right)^{1/p}\\ &\quad\geq\displaystyle C\left(\frac{\omega_n}{n}\right)^{-\lambda}\Big(\frac{1}{1+\lambda p}\Big)^{1/p}\int_{{\frac{R\varepsilon}{2}}}^{1} \int_{{\frac{R\varepsilon}{2}}}^{1} t_{1}^{n\lambda_1}t_{2}^{n\lambda_2}\omega(t_{1}, t_{2})\log\frac1{t_2}\,d t_{1} dt_{2}\\ &\quad=\displaystyle C \prod_{i=1}^{2}\|f_{i}\|_{\dot{B}^{p_i,\lambda_i}(\mathbb{R}^{n})} \int_{{\frac{R\varepsilon}{2}}}^{1} \int_{{\frac{R\varepsilon}{2}}}^{1} t_{1}^{n\lambda_1}t_{2}^{n\lambda_2}\omega(t_{1}, t_{2}) \log\frac1{t_2}\,d t_{1} dt_{2}, \end{array}$$ which further implies that \begin{eqnarray*} &&\|\mathcal{H}_{\omega}^{\vec{b}}\|_{\dot{B}^{p_1,\lambda_1}(\mathbb{R}^{n})\times \dot{B}^{p_2,\lambda_2}(\mathbb{R}^{n}) \rightarrow \dot{B}^{p,\lambda}(\mathbb{R}^{n})}\\ &&\quad\geq \displaystyle C \prod_{i=1}^{2}\|f_{i}\|_{\dot{B}^{p_i,\lambda_i}(\mathbb{R}^{n})} \int_{{\frac{R\varepsilon}{2}}}^{1} 
\int_{{\frac{R\varepsilon}{2}}}^{1} t_{1}^{n\lambda_1}t_{2}^{n\lambda_2}\omega(t_{1}, t_{2}) \log\frac1{t_2}\,d t_{1} dt_{2}. \end{eqnarray*} Letting $\varepsilon\to 0^+$ concludes $\mathbb{E}<\infty.$ To show that $\widetilde{\mathbb{A}}_2<\infty$, we let $$b_{1}(x)=b_{2}(x):=\chi_{[B(0,R/2)]^c}(x)\, \sin(\pi r|x|),$$ where $ R\in(0,+\infty)$ and $r\in \mathbb{N}$, and let $f_1,\ f_2$ be as in (\ref{f1}), (\ref{f2}), respectively. Repeating the proof for $\mathbb{E}<\infty$, we also obtain that $\widetilde{\mathbb{A}}_2<\infty$. Combining all the above estimates then yields $\mathbb{C}_2<\infty.$ This finishes the proof of Theorem \ref{t3}. \end{proof} We remark that Theorem \ref{t3} when $m=1$ is just \cite[Theorem 3.1]{FZW}. In particular, when $n=1$ and $$\omega(\vec{t}):=\frac{1}{\Gamma(\alpha)|(1-t_{1}, \dots, 1-t_{m})|^{m-\alpha}},$$ we know that $$\mathcal{H}_{\omega}^{\vec{b}}(\vec{f})(x)=x^{-\alpha}I^{m}_{\alpha, \vec{b}}\vec{f}(x),\,\quad x>0,$$ where $$I^{m}_{\alpha, \vec{b}}\vec{f}(x):=\frac{1}{\Gamma(\alpha)} \int\limits_{0<t_{1},t_{2},...,t_{m}<x} \frac{\left(\prod_{i=1}^{m}f_{i}(t_{i})\right)\prod_{i=1}^{m}(b_{i}(x)-b_{i}(t_{i}))}{|(x-t_{1}, \dots, x-t_{m})|^{m-\alpha}}d\vec{t}.$$ Then, as an immediate consequence of Theorem \ref{t3}, we have the following corollary. \begin{corollary} Let $0<\alpha<m$. Under the assumptions of Theorem \ref{t3}, the operator $I^{m}_{\alpha, \vec{b}}$ maps the product of central Morrey spaces $\dot{B}^{p_1,\lambda_1}(\mathbb{R})\times \dots \times \dot{B}^{p_m,\lambda_m}(\mathbb{R})$ to $ \dot{B}^{p,\lambda}(x^{-p\alpha}dx)$. \end{corollary} \section{Weighted Ces\`{a}ro operator of multilinear type and its commutator} In this section, we focus on the corresponding results for the adjoint operators of weighted multilinear Hardy operators.
Recall that, as the adjoint operator of the weighted Hardy operator, the \emph{weighted Ces\`{a}ro operator $G_{\omega}$} is defined by $$G_{\omega}f(x):=\int^1_{0}f(x/t)t^{-n}\omega(t)\,dt,\hspace{3mm}x\in \mathbb{ R}^n.$$ In particular, when $\omega\equiv 1$ and $n=1$, $G_{\omega}$ is the classical Ces\`{a}ro operator defined as $$\displaylines{ Gf(x):=\left\{\begin{array}{ll} \displaystyle\int^\infty_{x}\frac{f(y)}{y}\,dy,&\quad x>0,\\ \displaystyle-\int^x_{-\infty}\frac{f(y)}{y}\,dy,&\quad x<0.\end{array}\right.}$$ When $n=1$ and $\omega(t):=\frac{1}{\Gamma(\alpha)(\frac{1}{t}-1)^{1-\alpha}}$ with $0<\alpha<1$, the operator $G_{\omega}f(\cdot)$ is reduced to $(\cdot)^{1-\alpha}J_{\alpha}f(\cdot)$, where $J_{\alpha}$ is a variant of the Weyl integral operator defined by $$J_{\alpha}f(x):=\frac{1}{\Gamma(\alpha)}\int_{x}^{\infty} \frac{f(t)}{(t-x)^{1-\alpha}}\frac{dt}{t}, \quad x>0.$$ Moreover, it is well known that the weighted Hardy operator $H_{\omega}$ and the weighted Ces\`{a}ro operator $G_{\omega}$ are mutually adjoint, namely, $$\int_{\mathbb{R}^{n}}g(x)H_{\omega}f(x)\,dx=\int_{\mathbb{R}^{n}} f(x)G_{\omega}g(x)\,dx,\eqno(4.1)$$ for all $f\in L^p(\mathbb{R}^n)$ and $g\in L^q(\mathbb{R}^n)$ with $1<p<\infty, 1/p+1/q=1$. We refer to \cite{X,FZW} for more details. Let $m\geq 2$ be an integer, and $\omega : [0,1]^m\rightarrow [0,\infty)$ be an integrable function. Let $f_{i}$ be measurable complex-valued functions on $\mathbb{R}^n$, $1\leq i\leq m$. Corresponding to the weighted multilinear Hardy operators, we define the following \emph{weighted multilinear Ces\`{a}ro operator}: $$\mathcal{G}_{\omega}(\vec{f})(x):= \int\limits_{0<t_{1},t_{2},\ldots,t_{m}<1} \left(\prod_{i=1}^{m}f_{i}(x/t_{i})(t_{i})^{-n}\right)\omega(\vec{t})\,d\vec{t},\quad x\in \mathbb{ R}^n.$$ Notice that in general $\mathcal{H}_{\omega}$ and $\mathcal{G}_{\omega}$ do not obey the adjoint relation (4.1).
We also point out that, when $n=1$ and $$\omega(\vec{t}):=\frac{1}{\Gamma(\alpha)|(\frac{1}{t_{1}}-1, \dots, \frac{1}{t_{m}}-1)|^{m-\alpha}},$$ we have $$\mathcal{G}_{\omega}(\vec{f})(x)=x^{m-\alpha}J^{m}_{\alpha}\vec{f}(x),\,\quad x>0,$$ where $$J^{m}_{\alpha}\vec{f}(x):=\frac{1}{\Gamma(\alpha)} \int\limits_{x<t_{1},t_{2},...,t_{m}<\infty}\frac{\prod_{i=1}^{m}f_{i}(t_{i})}{|(t_{1}-x, \dots, t_{m}-x)|^{m-\alpha}}\frac{d\vec{t}}{\vec{t}}.$$ Similar to the argument used in Section 2, we have the following conclusions. \begin{theorem}\label{t4} Let $1<p, p_i<\infty$, $i=1,\ldots, m$, and $1/p=1/p_1+\cdots+1/p_m$. Then $\mathcal{G}_{\omega}$ is bounded from $L^{p_1}(\mathbb{R}^{n})\times \dots \times L^{p_m}(\mathbb{R}^{n})$ to $ L^p(\mathbb{R}^{n})$ if and only if $$\mathbb{F}:=\int\limits_{0<t_{1},t_{2},...,t_{m}<1} \left(\prod_{i=1}^{m}t_{i}^{-n(1-1/p_{i})}\right)\omega(\vec{t})\,d\vec{t}<\infty.\eqno(4.2)$$ Moreover, $$\|\mathcal{G}_{\omega}\|_{L^{p_1}(\mathbb{R}^n)\times \dots \times L^{p_m}(\mathbb{R}^n)\rightarrow L^{p}(\mathbb{R}^n)}=\mathbb{F}.\eqno(4.3)$$ \end{theorem} We can also deduce from Theorem \ref{t4} that \begin{corollary} Let $0<\alpha<m$.
Under the assumptions of Theorem 4.1, we have $J^{m}_{\alpha}$ maps the product of weighted Lebesgue spaces $L^{p_1}(\mathbb{R})\times \dots \times L^{p_m}(\mathbb{R})$ to $ L^p(x^{pm-p\alpha} dx)$ with norm $$\frac{1}{\Gamma(\alpha)}\int\limits_{0<t_{1},t_{2},...,t_{m}<1}\left(\prod_{i=1}^{m} t_{i}^{-(1-1/p_{i})}\right)\frac{1}{|(\frac{1}{t_{1}}-1, \dots, \frac{1}{t_{m}}-1)|^{m-\alpha}}\,d\vec{t}.$$ \end{corollary} Next, we define the commutator of weighted Ces\`{a}ro operators of multilinear type as $$\mathcal{G}_{\omega}^{\vec{b}}(\vec{f})(x):= \int\limits_{0<t_{1},t_{2},...,t_{m}<1}\left(\prod_{i=1}^{m}f_{i}(x/t_{i})(t_{i})^{-n}\right) \left(\prod_{i=1}^{m}\left(b_{i}(x)-b_{i}(\frac{x}{t_{i}})\right) \right)\omega(\vec{t})\,d\vec{t},\,\quad x\in \mathbb{ R}^n.$$ In particular, we know that $$\mathcal{G}_{\omega}^{\vec{b}}(\vec{f})(x)=x^{m-\alpha}J^{m}_{\alpha, \vec{b}}\vec{f}(x),\,\, x>0,$$ where $$J^{m}_{\alpha, \vec{b}}\vec{f}(x):=\frac{1}{\Gamma(\alpha)}\int\limits_{x<t_{1},t_{2},...,t_{m}<\infty} \frac{\left(\prod_{i=1}^{m}f_{i}(x_{i})\right)\prod_{i=1}^{m} (b_{i}(x)-b_{i}(x/t_{i}))}{|(t_{1}-x, \dots, t_{m}-x)|^{m-\alpha}}\frac{d\vec{t}}{\vec{t}}.$$ Let $m \in \mathbb{N}$ and $m\geq 2$. Define $$\mathbb{F}_{m}:=\int\limits_{0<t_{1},t_{2}<,...,<t_{m}<1}\left(\prod_{i=1}^{m}t_{i}^{-n\lambda_i-n}\right)\omega(\vec{t})\prod_{i=1}^{m}\log\frac{2}{t_{i}}\,d\vec{t}.$$ Similar to the arguments in Section 3, we have the following conclusion. \begin{theorem} \label{t5} If $f_i \in L^{p_i}({{\rr}^n})$, $1<p< p_i<\infty, 1<q_i<\infty$, $-1/p_i<\lambda_i<0$, $i=1,\ldots, m$, and $\frac{1}{p}=\frac{1}{p_1}+ \cdots +\frac{1}{p_m}+\frac{1}{q_1}+ \cdots +\frac{1}{q_m}$, $\lambda=\lambda_1+\cdots+\lambda_m$. 
$\rm(i)$ If $\mathbb{F}_{m}<\infty$, then $\mathcal{G}_{\omega}^{\vec{b}} $ is bounded from $\dot{B}^{p_1, \lambda_1}(\mathbb{R}^{n})\times \cdots \times \dot{B}^{p_m, \lambda_m}(\mathbb{R}^{n})$ to $ \dot{B}^{p, \lambda}(\mathbb{R}^{n})$, for all $\vec{b}=(b_1,b_2,\ldots,b_m)\in \dot{\mathrm{CMO}}^{q_1}(\mathbb{R}^{n})\times\cdots \times\dot{\mathrm{CMO}}^{q_m}(\mathbb{R}^{n})$. $\rm(ii)$ Assume that $\lambda_1p_1=\cdots=\lambda_mp_m$. In this case the condition $\mathbb{F}_{m}<\infty$ in (i) is also necessary. \end{theorem} As an immediate corollary, we have the following consequence. \begin{corollary} Let $0<\alpha<m$. Under the assumptions of Theorem \ref{t5}, we have $J^{m}_{\alpha, \vec{b}}$ maps the product of weighted Lebesgue spaces $\dot{B}^{p_1, \lambda_1}(\mathbb{R})\times \dots \times \dot{B}^{p_m, \lambda_m}(\mathbb{R})$ to $ \dot{B}^{p, \lambda}(x^{pm-p\alpha}dx)$ \end{corollary} Finally, we give some further comments on weighted product Hardy operators. Let $\omega : [0,1]\times[0,1]\rightarrow [0,\infty)$ be an integrable function. Let $f(x_{1}, x_{2})$ be measurable complex-valued functions on $\mathbb{R}^n\times\mathbb{R}^m$. The \emph{weighted product Hardy operator} is defined as $$\mathbb{H}_{\omega}f(x_{1}, x_{2}):= \int\limits_{0<t_{1},t_{2}<1}f(t_{1}x_{1}, t_{2}x_{2})\omega(t_{1}, t_{2}) \,dt_{1}dt_{2}, \quad (x_1,x_2)\in {{\rr}^n}\times \mathbb{R}^m.$$ If $\omega\equiv 1$ and $n, m=1$, then $\mathbb{H}_{\omega}f$ is reduced to the two dimensional Hardy operator $\mathbb{H}$ defined by $$\mathbb{H}f(x_{1}, x_{2}):= \frac{1}{x_{1}}\frac{1}{x_{2}}\int^{x_{1}}_{0} \int^{x_{2}}_{0}f(t_{1}, t_{1})\,dt_{1}dt_{2},\,\quad x_{1}, x_{2}\neq0,$$ which is first introduced by Sawyer \cite{S}. The sharp estimates for weighted product Hardy operators and their commutators on Lebesgue spaces will be interesting questions. \noindent{\bf Acknowledgements.}\quad The authors cordially thank the referees for their careful reading and helpful comments. 
\end{document}
\begin{document} \title{Quantum control of tunable-coupling transmons using dynamical invariants of motion} \author{H. Espin\'os} \affiliation{Departamento de F\'isica, Universidad Carlos III de Madrid, Avda. de la Universidad 30, 28911 Legan\'es, Spain} \author{I. Panadero} \affiliation{Departamento de F\'isica, Universidad Carlos III de Madrid, Avda. de la Universidad 30, 28911 Legan\'es, Spain} \affiliation{Arquimea Research Center, Camino las Mantecas s/n, 38320 Santa Cruz de Tenerife, Spain} \author{J. J. Garc\'ia-Ripoll} \affiliation{Instituto de Física Fundamental (IFF), CSIC, Calle Serrano 113b, 28006 Madrid, Spain} \author{E. Torrontegui} \affiliation{Departamento de F\'isica, Universidad Carlos III de Madrid, Avda. de la Universidad 30, 28911 Legan\'es, Spain} \email{[email protected]} \begin{abstract} We analyse the implementation of a fast nonadiabatic CZ gate between two transmon qubits with tuneable coupling. The gate control method is based on a theory of dynamical invariants which leads to reduced leakage and robustness against decoherence. The gate is based on a description of the resonance between the $\ket{11}$ and $\ket{20}$ using an effective Hamiltonian with the 6 lowest energy states. A modification of the invariants method allows us to take into account the higher-order perturbative corrections of this effective model. This enables a gate fidelity several orders of magnitude higher than other quasiadiabatic protocols, with gate times that approach the theoretical limit. \end{abstract} \maketitle \section{Introduction}\label{Introduction} Superconducting qubits are currently one of the most promising platforms to perform highly scalable quantum computations. In the last two decades, there has been great progress both in the quantity of qubits and the quality of their operations~\cite{Wilhelm2008,Barends2014,Gambetta2016a,DiCarlo2017}. 
This progress has been largely made possible thanks to the transmon qubit~\cite{Koch2007}, a relatively simple, easy to reproduce design, with competitive decoherence times~\cite{Nersisyan2019,Place2021}. Recent progress in quantum gates with transmons have led to substantial speed-ups, surpassing coherence times by up to three orders of magnitude, with competitive fidelities that approach the requirements for scalable quantum error correction~\cite{Nissim2016,Gambetta2016,Hu2019} and fault-tolerant quantum computation~\cite{Chow2015,Martinis2015,Vuillot2017}. The implementation of two-qubit gates with superconducting qubits is possible with many different strategies: gates assisted by microwave controls~\cite{Steffen2011}, parametrically modulated qubits~\cite{Reagor2018} and couplers~\cite{McKay2016,Ganzhorn2019}, gates implemented with tunable-frequency qubit-qubit resonances~\cite{Rol2019} or tunable couplings~\cite{Chen2014,Mundada2019,Han2020,Xu2020,Collodo2020,Ye2021}. These demonstrations have proved successful at performing CZ~\cite{Barends2014,Chen2014,Rol2019,Xu2020,Ye2021}, CPHASE~\cite{Collodo2020} and iSWAP~\cite{Han2020} gates with fidelities over 99\%. Using a tuneable coupling to implement quantum gates presents the advantage of being more energy efficient, requiring less control lines, and having better coherence in general~\cite{Chen2014}. The use of tuneable couplers allows isolating the qubits and cancelling parasitic interactions during single-qubit gates and rest periods. Tuneable couplings also eliminate the problem of frequency crowding that arises in other two-qubit gate strategies with always-on interactions~\cite{Mundada2019}. In order to manipulate the qubits, most of the current protocols benefit from adiabatic controls~\cite{Barends2014,Chen2014,Mundada2019,Collodo2020,Xu2020}. 
Although adiabatic processes are robust against control imperfections, they are intrinsically slow, limiting the number of operations that can be performed within the lifespan of the qubits, and thus leaving the system more vulnerable to incoherent errors. This has motivated the development of nonadiabatic protocols~\cite{Torrontegui2013,Guery-Odelin2019} using the frequency-tunable, fixed-coupling architecture~\cite{Barends2019,Li2019,Garcia-Ripoll2020}. The principle of these nonadiabatic designs is to allow transitions along the process that implements the gate, but suppress them once said process is finished. A common technique to achieve this is to drive the computational states using dynamical invariants of motion~\cite{Lewis1969}, such that the system's Hamiltonian and the invariant share eigenstates at the beginning and at the end of the control~\cite{Chen2010}. In this work, we present a new method to implement universal CZ gates between transmon connected by a tunable coupler. The formalisim is based on an exact technique that uses invariants of motion to inverse-engineer the gate protocol, and is made possible by a simpler formulation of the transmon dynamics in a reduced space. The derivation of this effective Hamiltonian, starting from the standard two-transmon Hamiltonian, is carried out in Sect. \ref{model}. Using this simplified version, in Sect. \ref{CZgate} we design faster-than-adiabatic protocols to perform the CZ gate. We compare two different approaches to engineer the turn-on and off processes of the coupling: fast quasiadiabatic dynamics (FAQUAD) and invariant-based inverse engineering. In Sect. \ref{test}, we numerically simulate the two-transmon system to evaluate the drivings performance and identify possible sources of infidelity. 
With the aid of the effective description we introduce higher order energy corrections to rectify incoherent errors due to an energy Stark-shift, showing that the invariant-based method has a great resilience, bringing the gate infidelity below $10^{-5}$ in competitive operation times. Finally, Sect. \ref{conclusions} contains the manuscript main conclusions. \section{Transmon model}\label{model} \subsection{Single transmon}\label{single} A transmon is a charge-based superconducting qubit that is created by shunting a Josephson Junction with a large capacitor. This leads to a drastic reduction in sensitivity to charge in the superconducting island, while keeping an anharmonicity that separates the qubit subspace from the rest of energy levels~\cite{Koch2007}. The Hamiltonian describing one ``bare" transmon, in the number-phase representation, reads \begin{equation} \label{H0} \hat H_T = 4E_C \hat n^2 - E_J \cos(\hat \varphi), \end{equation} with $\hat n$ and $\hat \varphi$ satisfying the canonical commutation relation $[\hat n,\exp(i\hat \varphi)]=-\exp(i\hat\varphi)$. $E_C$ is the charging energy, related to the total capacitance $C$ of the transmon qubit as $E_C=2e^2/C$, and $E_J$ is the Josephson energy. A transmon typically operates at a large $E_J/E_C$ ratio ($\gtrsim 50$), where the qubit dynamics are analogous to a massive particle in a weakly anharmonic potential, \begin{equation} \label{anharmonic_oscillator} \hat H_T = 4E_C \hat n^2 + \frac{E_J}{2} \hat \varphi^2-\frac{E_J}{4!}\hat \varphi^4 +\cdots . 
\end{equation} Identifying $\hat p^2/2m \sim 4E_C\hat n^2$ and $m\omega^2 \hat x^2 \sim E_J\hat \varphi^2$, the quadratic part of the Hamiltonian can be diagonalized using the ladder operators of the harmonic oscillator, $\hat a$ and $\hat a^\dagger$, defined as, \begin{equation} \label{a_adagger} \hat n = i\left(\frac{E_J}{8E_C}\right)^{1/4}\frac{(\hat a-\hat a^\dagger)}{\sqrt{2}}, \hspace{0.4cm} \hat \varphi = \left(\frac{8E_C}{E_J}\right)^{1/4}\frac{(\hat a+\hat a^\dagger)}{\sqrt{2}}. \end{equation} Notice how, at large $E_J/E_C$, $\hat \varphi$ decreases in magnitude, and hence larger powers of $\hat \varphi$ can be neglected in the expansion from Eq.\ \eqref{anharmonic_oscillator}. Introducing Eq.\ \eqref{a_adagger} into this expansion, the transmon Hamiltonian reads \begin{eqnarray} \label{H0_HO} \hat H_T \simeq \omega_{01}\hat a^\dagger \hat a&+&\frac{\alpha}{2}\hat a^{\dagger 2}\hat a^2+\frac{\alpha}{12}\left(\hat a^4+\hat a^{\dagger 4}\right. \nonumber\\ &+& \left. 4\hat a^{\dagger 3 }\hat a + 4\hat a^\dagger \hat a^3 +6\hat a^{\dagger 2}+6\hat a^2\right), \end{eqnarray} where $\omega_{01}\simeq\sqrt{8E_{C}E_{J}}-E_J$ represents the splitting between the two lowest energy states, $\ket{0}$ and $\ket{1}$. The anharmonicity $\alpha\simeq-E_{C}$ is small but allows us to detune all higher energy states, $\ket{2},\ket{3},\ket{4},\ldots$ from the qubit subspace. The eigenstates of the transmon Hamiltonian, denoted by $\ket{n}$, differ from the eigenstates of the harmonic oscillator due to all the anharmonic terms in Eq.\ \eqref{H0_HO} that do not preserve the number of excitations, i.e., those with different number of $\hat a$ and $\hat a^\dagger$ operators. In fact, these counter-rotating terms can be treated as a perturbation to the reference Hamiltonian $\hat H_0=\omega_{01}\hat a^\dagger \hat a+(\alpha/2)\hat a^{\dagger 2}\hat a^2$, whose eigenstates are those of the harmonic oscillator. 
Using standard time-independent perturbation theory at first order in $\alpha/\omega_{01}$, we find for the lowest-energy states \begin{align} \label{perturbative_states} \ket{0} \simeq &\ket{\Psi_0}- \frac{\sqrt{2}\alpha}{3(2\omega_{01}+\alpha)}\ket{\Psi_2}-\frac{\sqrt{6}\alpha}{12(2\omega_{01}+3\alpha)}\ket{\Psi_4},\nonumber\\ \ket{1} \simeq &\ket{\Psi_1}- \frac{5\sqrt{6}\alpha}{6(2\omega_{01}+3\alpha)}\ket{\Psi_3}-\frac{\sqrt{30}\alpha}{12(2\omega_{01}+5\alpha)}\ket{\Psi_5},\nonumber\\ \ket{2} \simeq &\ket{\Psi_2} +\frac{\sqrt{2}\alpha}{3(2\omega_{01}+\alpha)}\ket{\Psi_0}- \frac{8\sqrt{3}\alpha}{3(2\omega_{01}+5\alpha)}\ket{\Psi_4}\nonumber\\ &-\frac{\sqrt{10}\alpha}{2(4\omega_{01}+15\alpha)}\ket{\Psi_6}, \end{align} where $\ket{\Psi_i}$ represents the $i$-th eigenstate of the harmonic oscillator. The perturbative expansion reveals that, when an interaction term proportional to $\hat n$ is present, both $\hat a$ and $\hat a^\dagger$ produce a coupling between two adjacent states, in opposition to what happens in the harmonic oscillator. For instance, the operator $\hat a$ projects the state $\ket{0}$ into state $\ket{1}$ with an amplitude proportional to $\alpha/\omega_{01}$. \subsection{Coupled transmons}\label{coupled} In this work we consider two transmons with a tunable interaction between them using the design demonstrated in~\cite{Chen2014}. In this design, two superconducting Xmon qubits are coupled through a circuit that uses a single flux-biased Josephson junction and acts as a tunable current divider. The physics behind this tunable coupler is well explained using a simple linear model (see Ref.~\cite{Geller2015} for a full discussion), \begin{equation} \label{H} \hat H(t)= \hat H_{T,a}+\hat H_{T,b} +g_C(t)\hat n_a\hat n_b, \end{equation} where $g_C$ is the tunable coupling that embodies an interaction that can be varied continuously with nanosecond resolution from negative to positive, going smoothly through zero, where the transmons are isolated. 
$\hat H_{T,i}$ is the Hamiltonian given by Eq.\ \eqref{H0} of each ``bare" transmon $i=a,b$ used to encode a qubit. For typical experimental circuit parameters, the coupling takes values in the MHz range, while $\omega_{01}$ is usually in the GHz range. Due to this energy mismatch, only close-to-degeneracy states will be able to interact and produce transitions from one state to another. Assuming that both transmons have similar frequencies, these close-to-degeneracy states are the ones that share the total number of excitations, for instance, states $\ket{01}$ and $\ket{10}$, or states $\ket{11}$, $\ket{02}$ and $\ket{20}$. In particular, denoting the ladder operators as $\hat a$, $\hat a^\dagger$ for the first qubit, and $\hat b$, $b^\dagger$ for the second qubit, we can compute the non-zero matrix elements of the coupling between these states using the perturbative expansions from Eq.\ \eqref{perturbative_states}, \begin{align} \widetilde{J}_1(t)&\equiv\bra{01}g_C(t)\hat n_a \hat n_b\ket{10} \nonumber\\ &= J(t)\bra{01}(-\hat a^\dagger \hat b^\dagger + \hat a^\dagger \hat b + \hat a \hat b^\dagger -\hat a\hat b )\ket{10}\nonumber\\ &= J(t)\left[1+\frac{2\alpha_a}{3(2\omega_a+\alpha_a)}+\frac{2\alpha_b}{3(2\omega_b+\alpha_b)}\right.\nonumber\\ &\left.+ \mathcal{O}\left(\frac{\alpha_{a,b}^2}{\omega_{a,b}^2}\right) \right], \end{align} \begin{align} \widetilde{J}_2(t)&\equiv \bra{11}g_C(t)\hat n_a \hat n_b\ket{02} \nonumber\\ &= \sqrt{2}J(t)\left[1+\frac{-\alpha_b}{3(2\omega_b+\alpha_b)}+\frac{5\alpha_b}{2(2\omega_b +3\alpha_b)}\right.\nonumber\\ &\left.+\frac{2\alpha_a}{3(2\omega_a+\alpha_a)}+ \mathcal{O}\left(\frac{\alpha_{a,b}^2}{\omega_{a,b}^2}\right) \right], \end{align} \begin{align} \widetilde{J}_3(t)&\equiv\bra{11}g_C(t)\hat n_a \hat n_b\ket{20} \nonumber\\ &= \sqrt{2}J(t)\left[1+\frac{-\alpha_a}{3(2\omega_a+\alpha_a)}+\frac{5\alpha_a}{2(2\omega_a +3\alpha_a)}\right.\nonumber\\ &\left. 
+\frac{2\alpha_b}{3(2\omega_b+\alpha_b)} +\mathcal{O}\left(\frac{\alpha_{a,b}^2}{\omega_{a,b}^2}\right) \right], \end{align} where $ J(t)=(1/2)g_C(t)\left[E_{J,a}E_{J,b}/(64 E_{C,a}E_{C,b}\right)]^{1/4}$. For typical experimental values, these first order corrections represent 5-10\% of the total coupling, and they become smaller as the ratios $\alpha_i/\omega_i$ decrease. As we already mentioned, due to energy mismatch of 2-3 orders of magnitude between the frequency of the transmons $\sim$ GHz and the coupling $\sim$ MHz, states with different number of excitations are well separated in energy and are not affected by the coupling term. For instance, the $\hat a^\dagger\hat b^\dagger$ term coming from Hamiltonian \eqref{H} couples states $\ket{00}$ and $\ket{11}$ with an amplitude $\sim J(t)$. Eliminating this term leads to corrections in the energy of said states of order $J^2(t)/(\omega_a+\omega_b)\ll$ GHz and can be safely neglected. Therefore, in the basis of eigenstates of the uncoupled problem, $\{\ket{00}, \ket{01}, \ket{10}, \ket{02}, \ket{11}, \ket{20}\}$, the Hamiltonian \eqref{H} is very well approximated by a Hamilonian matrix of the form \begin{equation} \label{Heff} \hat H(t) =\left( \begin{matrix} 0 & 0 & 0 & 0 & 0 & 0 \\ 0 & \omega_b & \widetilde{J}_1(t) & 0 & 0 & 0 \\ 0 & \widetilde{J}_1(t) & \omega_a & 0 & 0 & 0 \\ 0 & 0 & 0 & 2 \omega_b+\alpha_b & \widetilde{J}_2(t)& 0 \\ 0 & 0 & 0 & \widetilde{J}_2(t) & \omega_a+\omega_b & \widetilde{J}_3(t) \\ 0 & 0 &0 & 0 & \widetilde{J}_3(t) & 2\omega_a + \alpha_a \end{matrix}\right). \end{equation} With the structure of Eq.\ \eqref{Heff}, we emphasised that the three subspaces $S_1:=\{\ket{00} \}$, $S_2:=\{\ket{01}, \ket{10}\}$, and $S_3:= \{ \ket{02}, \ket{11}, \ket{20}\}$, each of them with different number of excitations, are driven independently. In order to perform a certain operation, the coupling $g_C(t)$ can be engineered to produce the desired dynamics in each of these subspaces. 
Henceforth, we will design $J(t)$ instead of $g_C(t)$ for simplicity (they only differ in a scale factor). \begin{figure} \caption{(a) Numerically calculated lowest eigenenergies of Hamiltonians (\ref{H} \label{fig:Energies} \end{figure} In Fig.\ \ref{fig:Energies} we test the validity of the approximations made to find the Hamiltonian \eqref{Heff} by comparing its eigenenergies with the lowest six numerically calculated eigenenergies of the complete Hamiltonian \eqref{H} as a function of the coupling strength $J$, not taking into account its time dependence. As shown in Fig.\ \ref{fig:Energies}(b), the effective Hamiltonian reproduces the lower energy levels of the two-transmon system with very little deviation, despite having neglected interactions between two-qubit states with a different number of excitations. In the following section, we will derive the different control protocols from \eqref{Heff} to construct a CZ gate, although the full Hamiltonian \eqref{H} will be simulated with the experimental parameters~\cite{Chen2014}: \begin{subequations} \begin{eqnarray} \omega_a &=& 2\pi \times 6.00\text{ GHz,}\\ \omega_b &=& 2\pi \times 5.67\text{ GHz},\\ \alpha_a&=&\alpha_b =-2\pi\times\text{ 0.33 GHz},\\ J_M&=&2\pi\times 16.0\text{ MHz}, \end{eqnarray} \end{subequations} with $J_M$ the maximum coupling. Thus, small deviations such those shown in Fig.\ \ref{fig:Energies}(b), shall be numerically corrected when designing the protocols to achieve maximum fidelity. \section{The {\it CZ} gate}\label{CZgate} The CZ gate is simple to implement and can readily generate controlled-NOT (CNOT) logic. Acting on the computational basis $\{\ket{00}, \ket{01}, \ket{10}, \ket{11}\}$, this gate generates the transformation $\hat U^{CZ}=\mbox{diag}(1,1,1,-1)$, meaning that it leaves the first 3 states of the computational basis unchanged and adds a $\pi$-phase to the last one. 
The CZ gate has been already experimentally demonstrated using transmons qubits where the tunable frequency of one transmon controls the dynamics~\cite{Dicarlo2009,Barends2014,Rol2019}, and with transmons connected through a tunable coupler~\cite{Chen2014,Xu2020,Ye2021}. A recent work~\cite{Garcia-Ripoll2020} showed that, among the different possible protocols that implement the gate, those inspired on invariants and variational methods lead to more experimentally friendly controls such as better properties of finite bandwidth and resilience to discretization, optimal control of leakage outside the computational basis, and a greater robustness against decoherence. Using quantum control, the performance of this gate can be improved independently of the control design by a proper optimization of the waiting time and destination frequency~\cite{Garcia-Ripoll2020}. \begin{figure} \caption{Gate protocols for the CZ gate: two uncoupled transmons are placed at the degeneracy points $\omega_a=\omega_b-\alpha_a$ and the control $J(t)$ is turned on in a time $T$. During a time $t_w$, both transmons interact until the desired gate is implemented. The coupling is symmetrically turned off. The shape of the turn on/off process depends on the design approach. We show sample curves of the FAQUAD and invariant protocols.} \label{fig:control} \end{figure} \subsection{Design of the gate} Here we propose the use control techniques based on invariants to design a CZ gate with tunable-coupling transmons where $J(t)$ acts as the control. It is interesting to point out that the driving of the $\ket{11}$ state is achieved by the interaction with states out of the computational basis ($\ket{02}$ and $\ket{20}$, see Eq.\ \eqref{Heff}). Consequently, the designed protocols have to be particularly robust to avoid leakage. 
The operation will be designed in three steps; {\itshape(i)} the coupling is turned on and engineered from the initial value $J(0)=0$ to $J(T)=J_M$, {\itshape(ii)} the control remains constant during a waiting time $t_w$, {\itshape(iii)} the coupling is symmetrically switched off. As result, the gate is implemented in a time $T_g=2T+t_w$, see Fig.\ \ref{fig:control}. The shape of the switch-on and off ramps will be determined by the non-adiabatic method used to design the operation. If the two transmons are identical (same frequency and anharmonicity), the non-computational states $\ket{02}$ and $\ket{20}$ are degenerate, showing an avoided crossing behaviour when the interaction is ``on". If instead, the frequency of the second qubit is set to $\omega_b=\omega_a+\alpha_a$, the $\ket{11}$ and $\ket{20}$ states are brought to degeneracy while the $\ket{02}$ state is separated by an energy gap $|\alpha_a+\alpha_b|$. As result of such interaction, the $\ket{11}$ computational state acquires an adjustable phase compatible with the performance of a CZ gate~\cite{Barends2014, Dicarlo2009}. Under said condition, the effective matrix Hamiltonian becomes \begin{equation} \label{HeffCZ} \hat H(t) =\left( \begin{matrix} 0 & 0 & 0 & 0 & 0 & 0 \\ 0 & \omega_a+\alpha_a & \widetilde{J}_1(t) & 0 & 0 & 0 \\ 0 & \widetilde{J}_1(t) & \omega_a & 0 & 0 & 0 \\ 0 & 0 & 0 & \Omega_2 & \widetilde{J}_2(t)& 0 \\ 0 & 0 & 0 & \widetilde{J}_2(t) & \Omega_1 & \widetilde{J}_3(t) \\ 0 & 0 &0 & 0 & \widetilde{J}_3(t) & \Omega_1 \end{matrix}\right), \end{equation} where $\Omega_1=2\omega_a + \alpha_a$ and $\Omega_2=2 \omega_a+2\alpha_a+\alpha_b$. The dynamics are solved independently in each subspace. The $S_3$ can be approximately reduced to an effective 2x2 system, since the $\ket{02}$ state decouples from the dynamics due to the $\Delta E=|\alpha_a+\alpha_b|\gtrsim 20J_M$ energy mismatch. 
As result the system Hamiltonian becomes \begin{equation} \label{HeffCZ-2} \hat H(t) \simeq\left( \begin{matrix} 0 & 0 & 0 & 0 \\ 0 & \hat H_2(t) & 0 & 0 \\ 0 & 0 & \Omega_2 & 0 \\ 0 & 0 & 0 & \hat H_3(t) \end{matrix}\right). \end{equation} This means that the dynamics of $\{\ket{20},\ket{11}\}$ are approximately described by \begin{equation} \label{H3} \hat H_3(t)=(2\omega_a+\alpha_a)\mathbb{I}+\widetilde{J}_3(t)\sigma_1, \end{equation} where $\sigma_1$ represents the first Pauli matrix. The unitary transformation produced by Hamiltonian\ \eqref{H3} according to the path depicted in Fig.\ \ref{fig:control} is simply \begin{equation} \label{U3CZ} \hat U_3(T_{g})=e^{-i(2\omega_a+\alpha_a) T_{g}}\exp\left[-i\hat\sigma_1(2\widetilde{J}_{3,T}+\widetilde{J}_3 (T) t_{w})\right], \end{equation} where $\widetilde{J}_{3,T}=\int_{0}^Tdt\widetilde{J}_3(t)$. Independently of the shape of the control $J(t)$, in order to recover the desired final state (times a phase), i.e., $\ket{11}\rightarrow e^{-i(2\omega_a+\alpha_a) T_g}(-\ket{11})$, the waiting time has to be properly adjusted to \begin{equation} \label{tw} t_w=(\pi-2\widetilde{J}_{3,T})/\widetilde{J}_3(T). \end{equation} On the other hand, the $S_2$ subspace is governed by \begin{equation} \label{H2} \hat H_2(t)=(\omega_a+\frac{\alpha_a}{2})\mathbb{I}+\frac{\alpha_a}{2}\hat\sigma_3+\widetilde{J}_1(t)\hat\sigma_1, \end{equation} where $\sigma_1$ and $\sigma_3$ represent the first and third Pauli matrices. As at initial $t=0$ and final $t=T_g$ times the coupling is switched-off ($J=0$), the computational $\ket{01}$ and $\ket{10}$ states correspond to eigenstates of $\hat H_2$, thus an adiabatic evolution would drive this state frictionlessly restoring the original configuration at final time, but at expenses of a very long gate-time to avoid unwanted transitions. 
This limitation can be overcome by applying quasi-adiabatic quantum control techniques, such as the fast quasiadiabatic dynamics (FAQUAD) method~\cite{Martinez-Garaot2015}, or by inverse-engineering the control using invariants of motion~\cite{Torrontegui2013, Guery-Odelin2019}. Finally, the $S_1$ subspace is driven trivially. \subsection{FAQUAD passage} The fast quasiadiabatic dynamics method is a commonly used technique that aims at speeding up a given process while still making it as adiabatic as possible at all times. It relies on making the standard adiabaticity parameter constant such that the transition probability is equally delocalized during the whole process, \begin{equation} \label{FAQUAD} \mu (t)\equiv\bigg\lvert\frac{\bra{+(t)}\dot{\hat H}_2\ket{-(t)}}{\left[E_+(t)-E_-(t)\right]^2}\bigg\rvert=\mu, \end{equation} where the dot represents the time derivative, $\ket{+(t)}$ and $\ket{-(t)}$ are the two instantaneous eigenstates of $\hat H_2(t)$ and $E_+(t)$ and $E_-(t)$ their respective eigenenergies. The process is considered to be adiabatic if $\mu\ll 1$. From Eq.\ \eqref{FAQUAD} it follows (see Appendix\ \ref{APadi}) \begin{equation} \label{FAQUAD2} \mu = \frac{\dot{\widetilde{J}_1}(t)\alpha_a}{\left[4\widetilde{J}_1^2(t)+\alpha_a^2\right]^{3/2}}. \end{equation} Equation\ \eqref{FAQUAD2} can be integrated with the initial and final conditions $J(0)=0$ and $J(T)=J_M$, which set a value for the integration constant and the adiabaticity parameter. The control is thus \begin{equation} \label{FAQUADcontrol} \widetilde{J}_1(t)=\frac{-\alpha_a\widetilde{J}_1(T)t}{T\sqrt{\alpha_a^2+4\widetilde{J}_1^2(T)\left[1-\left(\frac{t}{T}\right)^2\right]}}, \end{equation} which has the same profile for every value of the ramp time $T$, since $\widetilde{J}_1(t)$, as given by Eq.\ \eqref{FAQUADcontrol}, is a function of $t/T$. 
However, it can be seen that the adiabaticity parameter obtained with this control is inversely proportional to $T$ (see Appendix\ \ref{APadi}) meaning that shorter times lead to ``less adiabatic" protocols and a corresponding loss of fidelity. \subsection{Invariants passage} We can address the limitation of shorter operation times by inverse-engineering the control $\widetilde{J}_1(t)$ using invariants of motion~\cite{Torrontegui2013, Guery-Odelin2019}. The $\hat H_2$ Hamiltonian has SU(2) structure, $\hat H_2(t)=\sum_j^3h_j(t)\hat T_j$ where $h_j(t)$ are general controls of the Hamiltonian and $\hat T_j=\hat\sigma_j/2$ are the Lie algebra generators satisfying $[\hat T_j,\hat T_k]=i\epsilon_{jkl}\hat T_l$, with $\epsilon_{jkl}$ the Levi-Civita tensor. Standard formalisms~\cite{Chen2011} would lead to the requirement of a time dependent anharmonicity $\alpha_a(t)$. Instead, due to the constrains imposed on the controls of $\hat H_2$, i.e., $h_2=0$ and $h_3=\alpha_a$ $\forall t$, we will make use of the dynamical algebra~\cite{Torrontegui2014} to engineer the invariant of motion and, consequently, $\widetilde{J}_1(t)$. Associated with a Hamiltonian, there are infinitely many time-dependent Hermitian invariants of motion $\hat I(t)$ that satisfy~\cite{Lewis1969} \begin{equation} \label{inva} \frac{d\hat I}{dt}\equiv\frac{\partial\hat I(t)}{\partial t}-\frac{1}{i\hbar}[\hat H_2(t),\hat I(t)]=0. \end{equation} A wave function $\ket{\Psi(t)}$ which evolves with $\hat H_2(t)$ can be expressed as a linear combination of invariant modes~\cite{Lewis1969} \begin{equation} \label{LR} \ket{\Psi(t)}=\sum_{n}c_ne^{i\beta_n(t)}\ket{\chi_n(t)} \end{equation} where the $c_n$ are constant, and the real phases $\beta_n$ fulfil \begin{equation} \label{LR-phase} \hbar\dot\beta_n(t)=\bra{\chi_n(t)}i\hbar\partial_t-\hat H_2(t)\ket{\chi_n(t)}, \end{equation} and are explicitly computed in Appendix\ \ref{APpha}. 
The eigenvectors $\ket{\chi_n(t)}$ of the invariant are assumed to form a complete set and satisfy $\hat I(t)|\chi_n(t)\rangle=\lambda_n|\chi_n(t)\rangle,$ with $\lambda_n$ the corresponding constant eigenvalues. Assuming that the invariant is also a member of the dynamical algebra, it can be written as $\hat I(t)=\sum_j^3f_j(t)\hat T_j$ where $f_j(t)$ are real, time-dependent functions. Replacing the closed forms of $\hat H_2$ and $\hat I$ into Eq.\ \eqref{inva}, the functions $h_j$ and $f_j$ satisfy \begin{equation} \label{engineer} \dot f_j(t)-\frac{1}{\hbar}\sum_{k}^{3}\sum_{l}^{3}\epsilon_{jkl}h_{k}(t)f_l(t)=0, \quad j=1,2,3. \end{equation} Usually, these coupled equations are interpreted as a linear system of ordinary differential equations for $f_j(t)$ when the $h_j(t)$ components of the Hamiltonian are known~\cite{Kaushal1981, Kaushal1993, Monteoliva1994, Maamache1995, Kaushal1997}. Instead, we use here an inverse perspective, and consider them as an algebraic system to be solved for the $h_j(t)$, when the $f_j(t)$ are given~\cite{Torrontegui2014}. When we inverse engineer the controls, the Hamiltonian is given at initial and final times. According to Eq.\ \eqref{LR} the dynamical wave function is transitionless driven (remember that $\{c_n\}$ are constant) through the eigenstates of the invariant, independently of the duration of the process, $T$. Note that this is exact, in contrast with a quasiadiabatic driving, where transitions from the instantaneous eigenstates of the Hamiltonian appear as soon as the process is shorten. To this end, the invariant $\hat I(t)$ (or, equivalently, $f_1(t)$, see Appendix\ \ref{APinv}) is designed to drive, through its eigenvectors, the initial states of the Hamiltonian $\hat H_2(0)$ to the corresponding states of $\hat H_2(T)$~\cite{Chen2010, Torrontegui2013, Guery-Odelin2019}. 
This is ensured by imposing the “frictionless conditions" at the boundary times~\cite{Chen2010} \begin{equation} \label{BC} [\hat H_2(t_b ), \hat I (t_b )] = 0, \quad t_b=0,\ T, \end{equation} so $\hat H_2$ and $\hat I$ share eigenstates at these boundary times. The system of equations \eqref{engineer} is compatible if the condition $\sum_j^3f_j^2(t)=c^2$ is fulfilled, with $c^2$ an arbitrary constant. Here, we choose $c^2=\sum_j^3h_j^2(0)=\alpha_a^2$ to make $\hat I(0)=\hat H_2(0)$. Under this condition, the system is invertible and has infinite solutions reflecting the fact that, associated with a Hamiltonian, there are infinite invariants and vice-versa~\cite{Ibanez2012}. For the structure of $\hat H_2$, the coupling $\widetilde{J}_1(t)$ takes the form (see Appendix\ \ref{APinv}) \begin{equation} \label{gt} \widetilde{J}_1(t)=\frac{1}{2\sqrt{c^2-f_1^2-\left(\frac{\dot f_1}{\alpha_a}\right)^2}}\bigg(\frac{\ddot f_1}{\alpha_a}+\alpha_a f_1\bigg), \end{equation} where, according to \eqref{BC} and \eqref{gt}, $f_1\equiv f_1(t)$ satisfies the boundary conditions \begin{equation} \label{BC_f1} f_1(t_b)=-h_1(t_b)\sqrt{\frac{c^2}{h_1^2(t_b)+\alpha_a^2}}, \quad \dot f_1(t_b)=\ddot f_1(t_b)=0, \end{equation} to ensure a perfect driving connecting the eigenstates at $\hat H_2(0)$ with the corresponding states at $\hat H_2(T)$. For the ramp-up of the control, $h_1(0)=0$ and $h_1(T)=2\widetilde{J}_1(T)$, whereas $h_1(T+t_w)=2\widetilde{J}_1(T)$ and $h_1(T_g)=0$ when switching the coupling off. We can then interpolate $f_1(t)$ with a polynomial ansatz $f_1(t)=\sum_{m=0}^Ma_mt^m$ and determine the $a_m$ coefficients from Eq.\ \eqref{BC_f1}. In the simplest interpolation, $M=5$ to satisfy all 6 boundary conditions, although additional constrains can be applied~\cite{Levy2018}. The resulting gate using either this method or the previous one is equivalent to a CZ gate up to local rotations in each individual qubit (see Appendix\ \ref{EffectiveGate}). 
However, as it will be shown in the following section, the control based on invariants of motion leads to a better driving of the states of the $S_2$ subspace, enabling higher fidelity gates. \section{Applications}\label{test} \begin{figure} \caption{Performance of the invariants (solid blue line) and FAQUAD (orange dashed line) controls producing a CZ gate as a function of the total duration, with a ramp time ranging from 1 to 8 ns. (a) Average infidelity of the CZ gate. (b) Deviation of the entangling phase $\varphi^{12}$ from its ideal value $\pi/4$. (c) Population loss of each computational state.} \label{fig:CZ_noD} \end{figure} In this section, we numerically simulate the system of two transmons using the full Hamiltonian given by \eqref{H0} and \eqref{H} in order to test the protocols designed for the reduced effective model. Due to the multiple approximations made through the derivations, such as imperfections in the reduced model and higher order energy corrections, the obtained results are slightly different from the ideal CZ gate, even after rectifying local phases. The entangling phase, defined as \begin{equation} \label{phi12} \varphi^{12}=\frac{\phi_{00}-\phi_{01}-\phi_{10}+\phi_{11}}{4}, \end{equation} where $\phi_{ij}$ is the phase accumulated by the $\ket{ij}$ state, might deviate from its expected value for a CZ gate, i.e., $\pi/4$, see Appendix\ \ref{EffectiveGate}. This difference, together with population losses in each state, implies a lower average gate fidelity, $\bar{\mathcal{F}}$, the figure of merit used in this paper to analyse the performance of the engineered controls. The average fidelity is calculated from the entanglement fidelity $\mathcal{F}_e$ as~\cite{Nielsen2002} \begin{equation} \label{fid} \bar{\mathcal{F}}=\frac{N\mathcal{F}_e+1}{N+1}, \end{equation} where $N$ is the size of the Hilbert space ($N<\infty$ in a numerical simulation). 
The entanglement fidelity compares the final states resulting from the evolution $\hat U(T_g)$ of each computational state with the ideal operation $\hat U_{id}$ to be implemented. In our computations, we add an operation $\hat U^\dagger_{loc}$ that eliminates the effect of locally correctable phases, in a way that only the entangling phase is taken into account. Thus, the entanglement fidelity is defined as \begin{equation} \mathcal{F}_e[\hat U_{id},\hat U(T_g)]=\Bigg\lvert\frac{1}{4}\sum_{s=1}^4\bra{s}\hat U^\dagger_{id}\hat U^\dagger_{loc}\hat U (T_g)\ket{s}\Bigg\rvert^2, \end{equation} with $\ket{s=1,2,3,4}=\{\ket{00},\ket{01},\ket{10},\ket{11}\}$. \subsection{Uncorrected gates} We now study the possibility of implementing the CZ gate and compare the performance of the gates obtained using the FAQUAD and invariants approaches. In Fig.\ \ref{fig:CZ_noD}(a) we show the average gate fidelity (or rather, the infidelity, defined as $1-\bar{\mathcal{F}}$) using both designs as a function of the total time taken by the operation. These curves are obtained by varying the ramp time $T$ from 1 to 8 nanoseconds and calculating the corresponding waiting time $t_w$ using Eq.\ \eqref{tw}. Both the FAQUAD and the invariant passages achieve fidelities over 99.9\%, with the invariant protocol giving slightly better results in general. The infidelity saturation at the same value, regardless of the applied control, is due to a common issue of both protocols: the driving of the $\ket{11}$ state is based on a coarse approximation, and corrections coming from a weak interaction with the $\ket{02}$ state lead to a loss of fidelity. The infidelity is the result of {\itshape(i)} the phase $\varphi^{12}$ deviating from its expected value, $\pi/4$, and {\itshape(ii)} the populations of states from the computational basis suffering from losses, such as undesired transfer to other states from the basis or even leakage outside of it, such that $\left|\bra{s}\hat U(T_g)\ket{s}\right|<1$. 
The phase deviation using the two kinds of control is shown in Fig.\ \ref{fig:CZ_noD}(b), while the population loss of each state is shown in Fig.\ \ref{fig:CZ_noD}(c). A common characteristic of controls based on shortcuts to adiabaticity, such as driving the states using dynamical invariants, is that they are minimally affected by leakage. This characteristic can be observed in the minimal population loss suffered by the $\ket{01}$ and $\ket{10}$ states when using the invariants passage. By contrast, the population loss of these states when using the FAQUAD passage represents a limiting factor in the achieved fidelity. The results shown in these figures suggest that both the phase deviation from $\pi/4$ and the population losses are substantial sources of error that shall be mitigated. In particular, the population of the $\ket{11}$ state is saturating the infidelity curves. Since the driving of the $\ket{11}$ state does not depend on the shape of $J(t)$, only on its integral, the saturation value is the same regardless of the protocol. As we will see, this saturation of the infidelity does not correspond to a fundamental limitation and can be attributed to approximations in the reduced model and a Stark-shift produced in the $S_3$ subspace by the interaction between $\ket{11}$ and $\ket{20}$. We shall introduce higher energy corrections and adjust our protocols to increase the average fidelity. \subsection{Stark-shift corrected gates} With the argument $|\alpha_a+\alpha_b|\gtrsim 20J_M$ to decouple the $\ket{02}$ state from the dynamics on the $S_3$ subspace, we found an analytical expression for the waiting time $t_w$, Eq.\ \eqref{tw}. However, this approximation needs to be examined in greater depth in order to improve the performance of the designed protocols. 
From \eqref{HeffCZ}, the $S_3$ subspace Hamiltonian after shifting the zero of energy becomes \begin{equation} \hat H_3(t) =\left( \begin{matrix} \alpha_a+\alpha_b & \widetilde{J}_2(t) & 0\\ \widetilde{J}_2(t) & 0 & \widetilde{J}_3(t) \\ 0 & \widetilde{J}_3(t) & 0 \end{matrix}\right). \end{equation} \begin{figure} \caption{Visual representation of the evolution of the $\ket{11}$ state on the Bloch sphere spanned by the pseudospin states $\ket{11}$ and $\ket{20}$.} \label{fig:Bloch} \end{figure} In order to find the low-energy effective Hamiltonian for the $\ket{11}$ and $\ket{20}$ states, we apply the Schrieffer-Wolff transformation~\cite{Schrieffer1966} (see Appendix\ \ref{APswt} for the full derivation), leading to \begin{equation}\label{H3eff} \hat H_3^{ef}=\left( \begin{matrix} \delta \Omega & \widetilde{J}_3+\delta \widetilde{J}_3 \\[6pt] \widetilde{J}_3+\delta \widetilde{J}_3 & 0 \end{matrix}\right), \end{equation} where the time dependence has been dropped for simplicity, and \begin{equation} \delta\Omega = \dfrac{\widetilde{J}_2^2(\alpha_a+\alpha_b)}{\widetilde{J}_3^2-(\alpha_a+\alpha_b)^2}, \end{equation} \begin{equation} \delta\widetilde{J}_3 =-\frac{1}{2} \dfrac{\widetilde{J}_2^2\widetilde{J}_3}{\widetilde{J}_3^2-(\alpha_a+\alpha_b)^2}. \end{equation} \begin{figure} \caption{Optimized CZ gate with a detuning $\Delta$ in one of the transmon frequencies and an optimal waiting time. (a) Average infidelity of the gate as a function of the total duration, with a ramp time ranging from 1 to 8 ns. (b) Deviation of $\varphi^{12}$ from its ideal value $\pi/4$. (c) Optimal detuning as a function of the gate time.} \label{fig:CZ_D} \end{figure} It is then clear that the interaction between the $\ket{11}$ and $\ket{02}$ states gives rise to a Stark shift $\delta \Omega$ in the energy of $\ket{11}$. The effect of $\hat H^{ef}_3$ acting on the $\ket{11}$ state can be visualized using the Bloch sphere with antipodal points corresponding to the pseudospin states $\ket{11}$ and $\ket{20}$. 
As it can be seen in this representation, the presence of $\delta \Omega$ (magnified in the Bloch sphere representation of Fig.\ \ref{fig:Bloch} for a clearer visualization) misaligns the rotation axis preventing a perfect $\ket{11}\leftrightarrow\ket{20}$ swap. As a result, the Rabi oscillation amplitude attenuates and its period is shortened, since the rotational speed is increased by the $\hat \sigma_3$ term. This, added to the approximations of the reduced Hamiltonian model, produces a slight difference in the waiting time compared to Eq.\ \eqref{tw}, which explains the relatively high leakage of the $\ket{11}$ state in Fig.\ \ref{fig:CZ_noD}(c), independently of the followed protocol. Besides, the accumulated phase of the $\ket{11}$ state is also affected by the Stark shift, resulting in an entangling phase $\varphi^{12}\neq\pi/4$. As inferred from Fig.\ \ref{fig:CZ_noD}(b), corrections of this entangling phase by means of the waiting time alone would lead to very slow CZ gate implementations due to the $\sim\mu$s phase rate change. However, both the population loss of the state $\ket{11}$ and $\varphi^{12}$ can be simultaneously rectified by leaving the second qubit slightly detuned with respect to the resonance, i.e., $\omega_b = \omega_a+\alpha_a+\Delta$, where $|\Delta| \ll \widetilde{J}_2(T),\widetilde{J}_3(T)$. On the one hand, this constant detuning $\Delta$ sets the $\ket{11}$ state to rotate along the time dependent axis $(\widetilde{J}_3+\delta\widetilde{J}_3)\vec{u}_x+(\Delta+\delta\Omega)\vec{u}_z/2$, see Appendix\ \ref{APswt}. Nevertheless, the symmetry of the designed protocols (see Fig.\ \ref{fig:control}) allows us to always perfectly recover the $\ket{11}$ state with an appropriate waiting time, independently of the $\Delta$ value. Furthermore, a numerical adjustment of the waiting times also rectifies the population loss produced by the energy deviations of the effective model with respect to the complete Hamiltonian. 
On the other hand, a fine tuning of $\Delta$ allows the rectification of $\varphi^{12}$, even if $\delta \Omega$ is in fact time-dependent. Notice that this detuning also affects the $S_2$ subspace, and therefore the designed protocols must be corrected by substituting $\alpha_a\rightarrow \alpha_a+\Delta$ in Eqs.\ \eqref{FAQUADcontrol} and \eqref{gt}. As a matter of fact, if $|\Delta|\lll|\alpha_a|$, the controls barely change. In Fig.\ \ref{fig:CZ_D} we show the results of using corrected protocols in which a small detuning $\Delta$ is introduced in the frequency of one of the transmons ($\omega_b = \omega_a+\alpha_a+\Delta$), and both the waiting time and this detuning are found numerically to minimize the gate infidelity -- computed from Eq.\ \eqref{fid} -- for a range of ramp times $T$ from 1 to 8 ns. Figure \ref{fig:CZ_D}(a) shows a drastic improvement of the gate fidelity, especially in the protocol designed using invariants of motion, where the fidelity is not limited by the population loss of states $\ket{01}$ and $\ket{10}$. This figure shows the great potential of nonadiabatic protocols over adiabatic processes, in which $T\rightarrow \infty$ is required for a truly faultless transition between the eigenstates of the instantaneous Hamiltonian. In Fig.\ \ref{fig:CZ_D}(b) we show how the entangling phase deviation is also attenuated by optimally detuning the two transmons, which leaves the population losses as the main source of errors. The optimal detuning is shown in Fig.\ \ref{fig:CZ_D}(c) as a function of the gate time. Notice that, even though $\delta \Omega$ is positive for the chosen range of parameters, the detuning is also positive. This may seem counter-intuitive at first from Eq.\ \eqref{H3eff}, but it is actually compatible with all our derivations. The detuning not only affects the phase of the $\ket{11}$ state, $\phi_{11}$, but also $\phi_{01}$ and $\phi_{10}$ by approximately the same amount. 
Thus, a correction in the $\phi_{11}$ phase leads to approximately the same correction in $\varphi^{12}$ with opposite sign, see Eq.\ \eqref{phi12}. The detuning is about the same order and sign as the mean value of $\overline{\delta \Omega}$ over the whole process, which confirms our assumption $|\Delta|\lll|\alpha_a|$. \section{Conclusions}\label{conclusions} To sum up, we demonstrate the possibility of implementing a CZ gate using a control protocol that exploits dynamical invariants of motion to drive the states frictionlessly in a system of two transmons and a tunable coupler. The designed control takes advantage of having the coupling strength at its maximum value during most of the gate time, accelerating the processes close to their speed limits. This method has been compared with the adiabatic process, showing that the method based on invariants leads to an overall better performance for any gate time. The protocol was found analytically using a simplified effective Hamiltonian that describes the lowest six energy levels of the two-transmon system. We then numerically adjusted the waiting time and the destination frequency of the qubits -- slightly out of the avoided crossing between $\ket{11}$ and $\ket{02}$-- to correct higher order errors coming from the Stark shift produced by the $\ket{20}$ state. With these corrections, the invariants method achieves infidelities 2 to 3 orders of magnitude lower than the adiabatic protocol. The demonstration of such a high fidelity CZ gate shows the viability of the theory of dynamical invariants to construct fast diabatic gates in a tunable-coupling qubit architecture. Short gate times, low losses, and reported greater robustness to decoherence make control protocols designed using the theory of dynamical invariants superior candidates for implementing fast and high-fidelity quantum gates in existing setups. 
\acknowledgments We acknowledge financial support from the Spanish Government through PGC2018-094792-B-I00 (MCIU/AEI/FEDER,UE), CSIC Research Platform PTI-001, and by Comunidad de Madrid-EPUC3M14 and CAM/FEDER Project No. S2018/TCS-4342 (QUITEMAD-CM). H.E. acknowledges the Spanish Ministry of Science, Innovation and Universities for funding through the FPU program (FPU20/03409). E.T. acknowledges the Ramón y Cajal program (RYC2020-030060-I). \appendix \section{Design of FAQUAD passage}\label{APadi} After shifting the zero of energy, Hamiltonian $\hat H_2(t)$ from Eq.\ \eqref{H2} can be written in matrix form as \begin{equation} \hat H_2(t) =\left( \begin{matrix} \dfrac{\alpha_a}{2} & \widetilde{J}_1(t) \\[6pt] \widetilde{J}_1(t) & -\dfrac{\alpha_a}{2} \end{matrix}\right). \end{equation} The instantaneous eigenstates of this Hamiltonian are \begin{equation} \ket{+(t)}= \left(\begin{array}{c} \cos\frac{\theta(t)}{2} \\[6pt] -\sin\frac{\theta(t)}{2} \end{array}\right), \hspace{0.3cm} \ket{-(t)}= \left(\begin{array}{c} \sin\frac{\theta(t)}{2} \\[6pt] \cos\frac{\theta(t)}{2} \end{array}\right), \end{equation} where $\theta (t)=\arctan\frac{\alpha_a}{2\widetilde{J}_1(t)} $. Thus, we find \begin{equation} \bra{+(t)}\dot{\hat H}_2\ket{-(t)}=\dot{\widetilde{J}_1}(t)\cos\theta= \frac{\dot{\widetilde{J}_1}(t)\alpha_a}{2\sqrt{\left(\frac{\alpha_a}{2}\right)^2+\widetilde{J}^2_1(t)}}. \end{equation} The energy associated with each eigenstate is \begin{equation} E_\pm(t)=\pm\sqrt{\left(\frac{\alpha_a}{2}\right)^2+\widetilde{J}^2_1(t)}, \end{equation} and using Eq.\ \eqref{FAQUAD}, we get \begin{equation} \mu = \frac{\dot{\widetilde{J}_1}(t)\alpha_a}{\left[4\widetilde{J}_1^2(t)+\alpha_a^2\right]^{3/2}}. \end{equation} Integrating both sides of the equation, we find \begin{equation} \mu t + c_0= \frac{-\alpha_a}{4\left[4\widetilde{J}_1^2(t)+\alpha_a^2\right]^{1/2}}, \end{equation} where $c_0$ is an integration constant. 
The previous equation can be inverted into \begin{equation} \widetilde{J}_1(t)= \frac{-\alpha_a}{2}\sqrt{\frac{1}{16\left(\mu t+c_0\right)^2}-1}. \end{equation} Finally, using the boundary conditions on $\widetilde{J}_1(t)$ at $t=0$ and $t=T$, we find the value of $c_0$ and $\mu$ \begin{equation} c_0 = \frac{\alpha_a}{4},\hspace{0.5cm}\mu = -\frac{2 \widetilde{J}_1(T)}{T\alpha_a}\frac{1}{\sqrt{16 \widetilde{J}^2_1(T)+ \alpha_a^2}}. \end{equation} Putting it all together leads to \begin{equation} \widetilde{J}_1(t)=\frac{-\alpha_a\widetilde{J}_1(T)t}{T\sqrt{\alpha_a^2+4\widetilde{J}_1^2(T)\left[1-\left(\frac{t}{T}\right)^2\right]}}. \end{equation} \section{Inverse engineering of the controls}\label{APinv} The set of equations \eqref{engineer} can be represented in matrix form \begin{equation} \label{sisSU2} \left(\begin{array}{c} \dot f_1 \\ \dot f_2 \\ \dot f_3 \end{array} \right)= \underbrace{\frac{1}{\hbar}\left(\begin{array}{ccc} 0& f_3 & -f_2 \\ -f_3 & 0& f_1 \\ f_2 & -f_1 & 0 \end{array} \right)}_{=\mathcal{A}} \left(\begin{array}{c} h_1 \\ h_2\\ h_3 \end{array} \right). \end{equation} As $\mathcal{A}=-\mathcal{A}^{\dagger}$ is a real antisymmetric matrix with odd dimensionality, the eigenvalues are conjugate pure imaginary pairs, and zero, $a^{(0)}=0$, $a^{(1)}=-i\sqrt{\gamma}/\hbar$, and $a^{(2)}=i\sqrt{\gamma}/\hbar$ with $\gamma=f_1^2+f_2^2+f_3^2$. Since $a^{(0)}=0$, $\det(\mathcal{A})=0$ and there is no inverse matrix $\mathcal{A}^{-1}$. However, we can still invert the system, for example using Gaussian elimination, to reduce the system to an equivalent one with the same solutions applying elementary operations. These are the multiplication of a row by a non-zero scalar, the interchange of columns or rows, and the addition to a row of the multiple of a different one. 
The augmented matrix of \eqref{sisSU2} is \begin{equation} {\left(\begin{array}{cccc} -f_3 & 0 & f_1 &\hbar\dot f_2\\ 0 & f_3&- f_2 &\hbar\dot f_1\\ f_2 & -f_1 & 0 &\hbar\dot f_3 \end{array} \right)}. \end{equation} After some algebra the system can be written as a lower triangular matrix, \begin{equation} {\left(\begin{array}{cccc} -f_3 & 0 & f_1 &\hbar\dot f_2\\ 0 & f_3 & -f_2 &\hbar\dot f_1\\ 0 & 0 & 0 &\frac{\hbar(f_1\dot f_1+f_2\dot f_2+f_3\dot f_3)}{f_3} \end{array} \right)}. \end{equation} This system is compatible and has infinitely many solutions if and only if $f_1\dot f_1+f_2\dot f_2+f_3\dot f_3=0$ or, equivalently, $f_1^2+f_2^2+f_3^2=c^2$. The solutions satisfy \begin{eqnarray} \hbar\dot f_1&=&f_3h_2-f_2h_3, \nonumber\\ \hbar\dot f_2&=&-f_3h_1+f_1h_3, \end{eqnarray} or in a compact form \begin{equation} \label{hs} h_i=-\hbar\mathcal{E}_{ijk}\frac{\dot f_j}{f_k}+\frac{f_i}{f_k}h_k, \end{equation} with all indices $i,\, j,\, k$ different. $h_k(t)$ is considered a free function chosen for convenience, for example, making it zero if we want to cancel the $h_k$ control of the Hamiltonian. Our aim is to find $\hat H_2(t)=\frac{\alpha_a}{2}\hat\sigma_3+\widetilde{J}_1(t)\hat\sigma_1$ so that the ground and excited states of $\hat H_2(0)$ become the ground and excited states of $\hat H_2(T)$ in an arbitrary time $T$, up to phase factors, in such a way that $h_2(t) = 0, h_3(t)=\alpha_a$ $\forall t$. Choosing $(i,j,k)=(3,1,2)$ in Eq.\ \eqref{hs} and using $\gamma=c^2$ we can express $f_3$ and $f_2$ in terms of $f_1$, \begin{eqnarray} \label{eqF} f_2&=&\frac{\dot f_1}{\alpha_a}, \nonumber\\ f_3&=&\sqrt{c^2-f_1^2-\frac{\dot f_1^2}{\alpha_a^2}}. \end{eqnarray} Substituting this in the other equation of Eq.\ \eqref{hs}, with $(i,j,k)=(1,3,2)$, \begin{equation} \label{control} h_1(t)=\frac{1}{\sqrt{c^2-f_1^2-\frac{\dot f_1^2}{\alpha_a^2}}}\bigg(\frac{\ddot f_1}{\alpha_a}+\alpha_a f_1\bigg), \end{equation} leading to $\widetilde{J}_1(t)=h_1(t)/2$. 
The ``frictionless conditions'' $[\hat H(t_b), \hat I(t_b)]=0$ for a closed Lie algebra of $\hat H$ and $\hat I$ can be reformulated as \begin{equation} \sum_{j,k,l}^{N}\epsilon_{jkl}h_k(t_b)f_l(t_b)\hat T_j=0. \end{equation} Since the $\hat T_j$ generators are independent, the coefficients must satisfy \begin{equation} \sum_{k,l}^{3}\epsilon_{jkl}h_k(t_b)f_l(t_b)=0, \quad j=1,\dots,3,\quad t_b=0,T. \end{equation} For $\hat H_2(t)$, having $h_2(t)=0$ and $h_3(t)=\alpha_a$, these conditions read $f_2(t_b)=0$ and $\alpha_a/h_1(t_b)=f_3(t_b)/f_1(t_b)$ or, equivalently with the help of \eqref{eqF}, \begin{equation} \label{co1} f_1(t_b)=-h_1(t_b)\sqrt{\frac{c^2}{h_1^2(t_b)+\alpha_a^2}},\quad \dot f_1(t_b)=0. \end{equation} In addition, from Eq.\ \eqref{control} at the boundary times $t_b$, \begin{equation} \label{co2} \ddot f_1(t_b)=0. \end{equation} We then interpolate $f_1(t)$ with a simple polynomial $f_1(t)=\sum_{j=0}^5a_jt^j$ where the $a_j$ coefficients are determined from Eqs.\ \eqref{co1} and \eqref{co2}, or following some more sophisticated approach, e.g.,\ to optimize some additional constraint, and construct the control $\widetilde{J}_1(t)$ using Eq.\ \eqref{control}. \section{The Lewis-Riesenfeld phase}\label{APpha} During the ramp-up and down, the Lewis-Riesenfeld phase $\beta_n$ accumulated by the states, see Eq.\ \eqref{LR-phase}, is fully determined by the ramp time $T$ and the particular choice of the invariant auxiliary function $f_1(t)$. Since the ramp-up and down processes are symmetric, meaning that the control at $t_w+T+t$ is identical to itself at $T-t$, the term coming from the time derivative in the Lewis-Riesenfeld phase has the opposite sign at each ramp, meaning that this term gets cancelled when the whole operation is performed. Therefore, \begin{equation} \label{LR-totphase} \beta^{tot}_n\equiv\beta^{up}_n+\beta^{down}_n=-2\int_0^T dt \bra{\chi_n(t)}\hat H(t)\ket{\chi_n(t)}. 
\end{equation} The eigenvectors and eigenvalues of the invariant $\hat I(t)=\sum_{j}^3f_j(t)\frac{\hat\sigma_j}{2}$ with $\sum_{j}^3f_j^2(t)=c^2$ acting on the $S_2=\{\ket{01},\ket{10}\}$ subspace are \begin{align} \label{eigenCZ} \ket{\chi_{\pm}}&=\frac{1}{\sqrt{2c(c\pm f_3)}}\left[\left(f_3\pm c\right)\ket{01}+\left(f_1+if_2\right)\ket{10}\right],\nonumber\\ \lambda_{\pm}&=\pm\frac{c}{2}, \end{align} with $\ket{\chi_{+}}=\ket{\chi_{\ket{10}}}$ and $\ket{\chi_{-}}=\ket{\chi_{\ket{01}}}$. Replacing \eqref{eigenCZ} into Eq.\ \eqref{LR-totphase} with $h_1(t)=2\tilde J_1(t)$, $h_2(t)=0$ and $h_3(t)=\alpha_a$, we finally find \begin{equation} \beta^{tot}_{\pm}=\mp\frac{2}{|\alpha_a|}\int_0^Tdt\left(2f_1\widetilde{J}_1+f_3\frac{\alpha_a}{2}\right). \end{equation} \section{Effective gate}\label{EffectiveGate} In principle, due to different phases accumulated by each of the computational states, with the described procedure we do not get a CZ gate, but instead a transformation that, acting on the computational basis, is given by the unitary matrix \begin{equation} \label{UCZ} \hat U = \left( \begin{matrix} e^{i\phi_{00}} & 0 & 0 & 0 \\ 0 & e^{i\phi_{01}} & 0 & 0 \\ 0 & 0 & e^{i\phi_{10}} & 0 \\ 0 & 0 & 0 & e^{i\phi_{11}} \end{matrix}\right). \end{equation} In fact, this would only correspond to a CZ gate if $\phi_{00}=\phi_{01}=\phi_{10}=0$ and $\phi_{11}=\pi$. An alternative way of writing Eq.\ \eqref{UCZ} is \begin{equation} \label{UCZ2} \hat U= \exp[i(\varphi^0+\varphi^1\hat\sigma_3\otimes\mathbb{I}+\varphi^2\mathbb{I}\otimes\hat\sigma_3+\varphi^{12}\hat\sigma_3\otimes\hat\sigma_3)]. \end{equation} $\varphi^0$ represents a global phase, while $\varphi^1$ and $\varphi^2$ are local phases that can be corrected at each qubit using one-qubit gates. $\varphi^{12}$ is the entangling phase resulting from the interaction between the two qubits. Each of these phases can be written in terms of $\{\phi_{ij}\}$. 
In particular, the entangling phase, \begin{equation} \varphi^{12}=\frac{\phi_{00}-\phi_{01}-\phi_{10}+\phi_{11}}{4}. \end{equation} Equations \eqref{UCZ2} and \eqref{phi12} reveal that the unitary transformation given by Eq.\ \eqref{UCZ} can be turned into a CZ gate by applying additional one qubit gates only if $\varphi^{12}=\pi/4$. Let us explicitly calculate the entangling phase. According to the Hamiltonian \eqref{HeffCZ} the state $\ket{00}$ is left completely unchanged ($\phi_{00}=0$). Independently of the type of passage, FAQUAD or invariants, the states $\ket{01}$ and $\ket{10}$ acquire the following phases \begin{equation} \phi_{01}=-(\omega_a+\frac{\alpha_a}{2})T_g-\Omega't_w-2\int_0^T dt \langle\hat H_2(t)\rangle_{\ket{01}}, \end{equation} \begin{equation} \phi_{10}=-(\omega_a+\frac{\alpha_a}{2})T_g+\Omega't_w-2\int_0^T dt \langle\hat H_2(t)\rangle_{\ket{10}}, \end{equation} where $\Omega'=\sqrt{(\alpha_a/2)^2+\widetilde{J}_1(T)^2}$, and $\langle\hat H_2(t)\rangle_{\ket{nm}}$ is the expectation value of the $S_2$ Hamiltonian at time $t$ when the state is initially $\ket{nm}$. This expectation value depends on the design of the control, since in the FAQUAD passage the states evolve approximately as the instantaneous eigenstates of $\hat H_2(t)$, while in the invariants passage, they coincide with the eigenstates of $\hat I(t)$. However, in both cases $\langle\hat H_2(t)\rangle_{\ket{01}}+\langle\hat H_2(t)\rangle_{\ket{10}}=0$, cancelling their contribution to the entangling phase. Finally, from Eq.\ \eqref{U3CZ}, the state $\ket{11}$ acquires a phase \begin{equation}\label{phi11} \phi_{11}=-(2\omega_a+\alpha_a)T_g+\pi. \end{equation} Thus, inserting these results into Eq.\ \eqref{phi12} we find that $\varphi^{12}$ is precisely equal to $\pi/4$. Consequently, the designed protocols produce a transformation which, after local rotations on each of the qubits, is equivalent to a CZ gate. 
\section{Schrieffer-Wolff transformation}\label{APswt} The Schrieffer–Wolff transformation is a unitary transformation used to diagonalize a given Hamiltonian to first perturbative order in the interaction. It is often used to project out the high (low) energy excitations of a given quantum many-body Hamiltonian in order to obtain an effective low (high) energy model. The transformation is conventionally written as \begin{equation} \hat H'=e^{\hat S}\hat He^{-\hat S}, \end{equation} where $\hat S$ is the generator of the transformation and $\hat H$ is a Hamiltonian that can be written as \begin{equation} \hat H=\hat H_0+\hat V, \end{equation} with $\hat H_0$ being block-diagonal and $\hat V$ purely off-diagonal in the eigenbasis of $\hat H_0$. In our particular case, the starting point is the $S_3$ subspace Hamiltonian that corresponds to the block of Eq.\ \eqref{HeffCZ} \begin{equation} \hat H_3(t) =\left( \begin{matrix} \alpha_a+\alpha_b+2\Delta & \widetilde{J}_2(t) & 0\\ \widetilde{J}_2(t) & \Delta & \widetilde{J}_3(t) \\ 0 & \widetilde{J}_3(t) & 0 \end{matrix}\right), \end{equation} where we have included the second qubit detuning $\omega_b = \omega_a+\alpha_a+\Delta$ and the zero energy has been shifted. We choose \begin{equation}\label{SWH0} \hat H_0 =\left( \begin{matrix} \alpha_a+\alpha_b+2\Delta & 0 & 0\\ 0 & \Delta & \widetilde{J}_3 \\ 0 & \widetilde{J}_3 & 0 \end{matrix}\right), \end{equation} and \begin{equation}\label{SWV} \hat V =\left( \begin{matrix} 0 & \widetilde{J}_2 & 0\\ \widetilde{J}_2 & 0 & 0 \\ 0 & 0 & 0 \end{matrix}\right), \end{equation} with the time-dependence notation dropped for simplicity. The transformation can be expanded in $\hat S$ using the Baker-Campbell-Hausdorff formula \begin{equation} \label{SWcondition} \hat H'=\hat H_0+\hat V+[\hat S,\hat H_0]+[\hat S,\hat V]+\frac{1}{2}[\hat S,[\hat S,\hat H]]+\cdots. 
\end{equation} The Hamiltonian can then be made diagonal to first order in $\hat V$ by choosing the generator $\hat S$ such that \begin{equation} [\hat S,\hat H_0]=-\hat V, \end{equation} so that the off-diagonal terms are cancelled to the first order in the perturbation. The difficult step is the computation of the generator of the Schrieffer-Wolff transformation. The method presented in~\cite{Rukhsan2019} to calculate the generator starts by calculating the commutator $[\hat H_0,\hat V]$ and replacing every non-zero matrix element of the commutator by undetermined coefficients. Then, those coefficients are computed using Eq.\ \eqref{SWcondition}. Using the commutator of \eqref{SWH0} and \eqref{SWV}, the generator takes the form \begin{equation} \hat S =\left( \begin{matrix} 0 & a_1 & a_2\\ -a_1 & 0 & 0 \\ -a_2 &0 & 0 \end{matrix}\right). \end{equation} Using the condition \eqref{SWcondition}, the coefficients are found to be \begin{equation} a_1 = \dfrac{\widetilde{J}_2(\alpha_a+\alpha_b+2\Delta)}{-\widetilde{J}_3^2+(\alpha_a+\alpha_b+\Delta)(\alpha_a+\alpha_b+2\Delta)}, \end{equation} \begin{equation} a_2 = \dfrac{\widetilde{J}_2\widetilde{J}_3}{\widetilde{J}_3^2-(\alpha_a+\alpha_b+\Delta)(\alpha_a+\alpha_b+2\Delta)}. 
\end{equation} Finally, the effective Hamiltonian is calculated using the first order of the BCH formula from the exponential expansion, \begin{equation} \hat H' =\hat H_0+\frac{1}{2}\left[\hat S,\hat V\right], \end{equation} which gives \begin{equation} \hat H'=\left( \begin{matrix} \alpha_a+\alpha_b+2\Delta-\delta \Omega & 0 & 0\\[6pt] 0 &\Delta + \delta \Omega & \widetilde{J}_3+\delta \widetilde{J}_3 \\[6pt] 0 & \widetilde{J}_3+\delta \widetilde{J}_3 & 0 \end{matrix}\right), \end{equation} where \begin{equation} \delta\Omega = \dfrac{\widetilde{J}_2^2(\alpha_a+\alpha_b+2\Delta)}{\widetilde{J}_3^2-(\alpha_a+\alpha_b+\Delta)(\alpha_a+\alpha_b+2\Delta)}, \end{equation} \begin{equation} \delta\widetilde{J}_3 =-\frac{1}{2} \dfrac{\widetilde{J}_2^2\widetilde{J}_3}{\widetilde{J}_3^2-(\alpha_a+\alpha_b+\Delta)(\alpha_a+\alpha_b+2\Delta)}, \end{equation} and generalizes \eqref{H3eff} in the presence of a detuning $\Delta$. \end{document}
\begin{document} \title{On commuting probability of finite rings II} \author{Parama Dutta and Rajat Kanti Nath\footnote{Corresponding author}} \date{} \maketitle \begin{center}\small{\it Department of Mathematical Sciences, Tezpur University,\\ Napaam-784028, Sonitpur, Assam, India.\\ Emails:\, [email protected] and [email protected]} \end{center} \begin{abstract} The aim of this paper is to study the probability that the commutator of an arbitrarily chosen pair of elements, each from two different subrings of a finite non-commutative ring equals a given element of that ring. We obtain several results on this probability including a computing formula, some bounds and characterizations. \end{abstract} \noindent {\small{\textit{Key words:} finite ring, commuting probability, ${\mathbb{Z}}$-isoclinism of rings.}} \noindent {\small{\textit{2010 Mathematics Subject Classification:} 16U70, 16U80.}} \section{Introduction} Throughout this paper $R$ denotes a finite non-commutative ring. The commuting probability of $R$, denoted by $\Pr(R)$, is the probability that a randomly chosen pair of elements of $R$ commute. That is \[ \Pr(R) = \frac{|\lbrace(r, s)\in R\times R : [r, s] = 0 \rbrace|}{|R\times R|} \] where $[r, s] := rs - sr$ is the additive commutator of $r$ and $s$ and $0$ is the zero element of $R$. Clearly $\Pr(R) = 1$ if and only if $R$ is commutative. The study of commuting probability of finite rings was initiated by MacHale \cite{dmachale} in the year 1976 motivated by the commuting probability of finite groups. After Erd\H{o}s \cite{pEpT68}, many authors have worked on the commuting probability of finite groups and its generalizations (cf.\ \cite{Dnp13} and the references therein) but somehow people have neglected commuting probability of finite rings. At this moment, we have very few papers in the literature on commuting probability of finite rings \cite{BM,BMS,jutireka,jutirekha2,dmachale}. In this paper, we study a generalization of $\Pr(R)$. 
Let $S$ and $K$ be two subrings of $R$ and $r \in R$. We define ${\Pr}_r(S, K)$ in the following way \begin{equation}\label{mainformula} {\Pr}_r(S, K) = \frac{|\lbrace(s, k)\in S\times K : [s,k] = r\rbrace|} {|S\times K|}. \end{equation} Thus ${\Pr}_r(S, K)$ is the probability that the additive commutator of a randomly chosen pair of elements, one from $S$ and the other from $K$, is equal to a given element $r$ of $R$. This generalizes $\Pr(R)$ since ${\Pr}_r(S, K) = \Pr(R)$ if $S = K = R$ and $r = 0$. If $r = 0$ then \[ {\Pr}_r(S, K) = \Pr(S, K) = \frac{|\lbrace(s, k) \in S\times K : sk = ks\rbrace|} {|S\times K|}. \] It may be mentioned here that some connections between $\Pr(S, K)$ and generalized non-commuting graph of $R$ can be found in \cite{jutirekha2}. In \cite{jutireka}, $\Pr(S, R)$ is studied extensively. In this paper, we obtain several results on ${\Pr}_r(S, K)$ including a computing formula, some bounds and characterizations. The motivation of this paper lies in \cite{DN10, nY15,PS08} where analogous generalizations of commuting probability of finite groups are studied. For any two subrings $S$ and $K$, we write $[S, K]$ and $[s,K]$ for $s\in S$ to denote the additive subgroups of $(R, +)$ generated by the sets $\lbrace [s, k] : s\in S,k\in K\rbrace$ and $\lbrace [s, k] : k\in K\rbrace$ respectively. It can be seen that any element of $[s,K]$ is of the form $[s, k]$ for some $k \in K$ and so $[s,K] = \{[s, k] : k\in K\}$. Let $Z(S, K) := \{s \in S : sk = ks\, \forall k \in K\}$. Then $Z(S, K) = S\cap Z(K)$ if $S\subseteq K$ and $Z(K, K) = Z(K)$, the center of $K$. Also, for $r \in R$ the set $C_S(r) := \{s\in S : sr = rs\}$ is a subring of $S$ and $\underset{r \in K}{\cap} C_S(r) = Z(S, K)$. We write $R/S$ or $\frac{R}{S}$ to denote the additive factor group, for any subring $S$ of $R$, and $|R : S|$ to denote the index of $(S, +)$ in $(R, +)$. The isomorphisms considered in this paper are the additive group isomorphisms. 
It is easy to see that ${\Pr}_r(S, K) = 1$ if and only if $r = 0$ and $[S,K] = \{0\}$. Also, ${\Pr}_r(S, K) = 0$ if and only if $r\notin \{[s,k]:s\in S,k\in K\}$. Therefore, we consider $r$ to be an element of $\{[s,k]:s\in S,k\in K\}$ throughout the paper. \section{Preliminary results} In this section, we deduce some elementary results on ${\Pr}_r(S, K)$ and derive a computing formula for ${\Pr}_r(S, K)$. We begin with the following result which shows that ${\Pr}_r(S, K)$ is not symmetric with respect to $S$ and $K$. \begin{proposition}\label{symmetricity} Let $S$ and $K$ be two subrings of $R$ and $r \in [S, K]$. Then ${\Pr}_r(S, K) = {\Pr}_{-r}(K, S)$. However, if $2r = 0$ then ${\Pr}_r(S, K) = {\Pr}_{r}(K, S)$. \end{proposition} \begin{proof} Let $X = \{(s, k) \in S \times K : [s, k] = r\}$ and $Y = \{(k, s) \in K \times S : [k, s] = -r\}$. It is easy to see that $(s, k) \mapsto (k, s)$ defines a bijective mapping from $X$ to $Y$. Therefore, $|X| = |Y|$ and the result follows. Second part follows from the fact that $r = -r$ if $2r = 0$. \end{proof} \begin{proposition} Let $S_i$ and $K_i$ be two subrings of finite non-commutative rings $R_i$ for $i = 1, 2$ respectively. If $(r_1, r_2) \in R_1 \times R_2$ then \[ {\Pr}_{(r_1, r_2)}(S_1 \times S_2, K_1\times K_2) = {\Pr}_{r_1}(S_1, K_1){\Pr}_{r_2}(S_2, K_2). \] \end{proposition} \begin{proof} Let $X_i = \{(s_i, k_i) \in S_i\times K_i : [s_i, k_i] = r_i\}$ for $i = 1, 2$ and $Y = \{((s_1, s_2), (k_1, k_2)) \in (S_1\times S_2) \times (K_1\times K_2) : [(s_1, s_2),(k_1, k_2)]= (r_1, r_2)\}$. Then $((s_1, k_1), (s_2, k_2)) \mapsto ((s_1, s_2),(k_1, k_2))$ defines a bijective map from $X_1 \times X_2$ to $Y$. Therefore, $|Y| = |X_1||X_2|$ and hence the result follows. \end{proof} Initially, it was challenging for us to derive a computing formula for ${\Pr}_r(S, K)$ since there is no analogous concept of conjugacy class and no analogous character theoretic results for rings. Finally, we are able to get a formula. 
The following two lemmas play an important role in obtaining our computing formula for ${\Pr}_r(S, K)$. \begin{lemma}\label{lemma1} Let $K$ be any subring of $R$. If $x \in R$ then $ |[x, K]|=\frac {|K|}{|C_K(x)|}. $ \end{lemma} \begin{proof} Note that $[x, k] \mapsto k + C_K(x)$ defines an isomorphism from $[x, K]$ to $\frac {K}{C_K(x)}$. Hence, the lemma follows. \end{proof} \begin{lemma}\label{lemma2} Let $S$ and $K$ be two subrings of $R$ and $T_{s, r} = \{k\in K : [s, k] = r\}$ where $s\in S$ and $r\in R$. Then we have the following: \begin{enumerate} \item If $T_{s, r}\neq \emptyset$ then $T_{s, r} = t + C_K(s)$ for some $t\in T_{s, r}$. \item $T_{s, r} \ne \emptyset$ if and only if $r \in [s, K]$. \end{enumerate} \end{lemma} \begin{proof} Let $t \in T_{s, r}$ and $p \in t + C_K(s)$. Then $[s, p] = r$ and so $p \in T_{s, r}$. Therefore, $t + C_K(s) \subseteq T_{s, r}$. Again, if $k \in T_{s, r}$ then $(k - t) \in C_K(s)$ and so $k \in t + C_K(s)$. Therefore, $T_{s, r}\subseteq t + C_K(s)$. Hence part (a) follows. Part (b) follows from the fact that there exists $k \in K$ with $[s, k] = r$ if and only if $r \in [s, K]$. \end{proof} \noindent Now we state and prove our main result of this section. \begin{theorem}\label{com-thm} Let $S$ and $K$ be two subrings of $R$. Then \[ {\Pr}_r(S,K) = \frac {1}{|S||K|}\underset{r\in [s, K]}{\underset{s\in S}{\sum}}|C_K(s)| = \frac {1}{|S|}\underset{r\in [s, K]}{\underset{s\in S}{\sum}}\frac{1}{|[s, K]|}. \] \end{theorem} \begin{proof} Note that $\{(s, k) \in S\times K : [s, k] = r\} = \underset{s\in S}{\cup}(\{s\}\times T_{s, r})$. Therefore, by \eqref{mainformula} and Lemma \ref{lemma2}, we have \begin{equation} \label{comfor1} |S||K|{\Pr}_r(S,K) = \underset{s \in S}{\sum} |T_{s, r}| = \underset{r\in [s, K]}{\underset{s\in S}{\sum}}|C_K(s)|. \end{equation} The second part follows from \eqref{comfor1} and Lemma \ref{lemma1}. \end{proof} Using Proposition \ref{symmetricity}, we get the following corollary of Theorem \ref{com-thm}. 
\begin{corollary}\label{formula1} Let $S$ and $K$ be two subrings of $R$. Then \[ {\Pr}(K, S) = {\Pr}(S, K) = \frac {1}{|S||K|}\sum_{s\in S}|C_K(s)| = \frac {1}{|S|}\sum_{s\in S}\frac{1}{|[s, K]|}. \] \end{corollary} It is worth mentioning that Equation (2.1) of \cite{BMS} and \cite{jutireka} also follow from Corollary \ref{formula1}. We conclude this section by the following two lemmas. \begin{lemma}\label{lemma02} {\rm \cite[Lemma 2.12]{jutireka}} Let $H$ and $N$ be two subrings of a non-commutative ring $R$ such that $N$ is an ideal of $R$ and $N \subseteq H$. Then \[ \frac{C_H(x) + N}{N} \subseteq C_{H/N}(x + N) \; \text{for all} \; x \in R, \] where $H/N$ is a factor ring. The equality holds if $N \cap [H, R] = \{0\}$. \end{lemma} \begin{lemma}\label{lemma002} Let $S \subseteq K$ be two subrings of $R$. If $S$ is non-commutative then $\frac{S}{Z(S, K)}$ is not cyclic. \end{lemma} \section{Some bounds and characterizations} In this section, we derive some bounds for ${\Pr}_r(S,K)$ and characterizations of subrings $S$ and $K$ in terms of $\Pr(S, K)$. We begin with the following lower bounds. \begin{proposition} Let $S$ and $K$ be two subrings of $R$. If $r \ne 0$ then \begin{enumerate} \item ${\Pr}_r(S,K)\geq \frac{|Z(S,K)||Z(K,S)|}{|S||K|}$. \item If $S \subseteq K$ then ${\Pr}_r(S, K)\geq \frac{2|Z(S, K)||Z(K, S)|}{|S||K|}$. \item ${\Pr}_r(R)\geq \frac{3}{|R : Z(R)|^2}$. \end{enumerate} \end{proposition} \begin{proof} Since $r \ne 0$ we have the set ${\mathcal{C}} := \{(x, y) \in S \times K : [x, y] = r\}$ is non empty. Let $(s, k) \in {\mathcal{C}}$ then $(s, k) \notin Z(S, K)\times Z(K, S)$, otherwise $[s, k] = 0$. Now, for part (a) it is sufficient to note that the coset $(s, k) + \left(Z(S, K)\times Z(K, S)\right)$ is a subset of ${\mathcal{C}}$. If $S \subseteq K$ then $(s, k) + \left(Z(S, K)\times Z(K, S)\right)$ and $(s, k + s) + \left(Z(S, K)\times Z(K, S)\right)$ are two disjoint subsets of ${\mathcal{C}}$. Therefore, part (b) follows. 
For part (c), we consider $S = K = R$ and notice that $(s, k) + \left(Z(R)\times Z(R)\right)$, $(s + k, k) + \left(Z(R)\times Z(R)\right)$ and $(s, k + s) + \left(Z(R)\times Z(R)\right)$ are three disjoint subsets of $\{(x, y) \in R \times R : [x, y] = r\}$. \end{proof} \begin{proposition}\label{ub02} Let $S$ and $K$ be two subrings of $R$. Then ${\Pr}_r(S, K) \leq {\Pr}(S, K)$ with equality if and only if $r = 0$. Further, if $S \subseteq K$ then ${\Pr}(S, K) \leq |K : S|\Pr(K)$ with equality if and only if $S = K$. \end{proposition} \begin{proof} By Theorem \ref{com-thm} and Corollary \ref{formula1}, we have \[ {\Pr}_r(S, K) = \frac {1}{|S||K|}\underset{r \in [s, K]}{\underset{s\in S}{\sum}}|C_K(s)| \leq \frac {1}{|S||K|}\underset{s\in S}{\sum}|C_K(s)| = \Pr(S, K). \] The equality holds if and only if $r = 0$. If $S \subseteq K$ then we have \[ \Pr(S, K) = \frac {1}{|S||K|}\underset{s\in S}{\sum}|C_K(s)| \leq \frac {1}{|S||K|}\underset{s\in K}{\sum}|C_K(s)| = |K : S|\Pr(K). \] The equality holds if and only if $S = K$. \end{proof} \begin{proposition} Let $S$ and $K$ be two subrings of $R$. If $p$ is the smallest prime dividing $|R|$ and $r \ne 0$ then \[ {\Pr}_r(S, K)\leq \frac {|S| - |Z(S, K)|}{p|S|} < \frac {1}{p}. \] \end{proposition} \begin{proof} Since $r \ne 0$ we have $S \ne Z(S, K)$. If $s \in Z(S, K)$ then $r \notin [s, K]$. If $s \in S \setminus Z(S, K)$ then $C_K(s) \ne K$. Therefore, by Lemma \ref{lemma1}, we have $|[s, K]| = |K : C_K(s)| > 1$. Since $p$ is the smallest prime dividing $|R|$ we have $|[s, K]| \geq p$. Hence the result follows from Theorem \ref{com-thm}. \end{proof} \begin{proposition}\label{ub03} If $S_1\subseteq S_2$ and $K_1\subseteq K_2$ are subrings of $R$. Then \[ {\Pr}_r(S_1, K_1) \leq |S_2 : S_1||K_2 : K_1|{\Pr}_r(S_2, K_2). 
\] \end{proposition} \begin{proof} By Theorem \ref{com-thm}, we have \begin{align*} |S_1||K_1|{\Pr}_r(S_1, K_1) = &\underset{r \in [s, K_1]}{\underset{s \in S_1}{\sum}}|C_{K_1}(s)|\\ \leq &\underset{r \in [s, K_2]}{\underset{s\in S_2}{\sum}}|C_{K_2}(s)| = |S_2||K_2|{\Pr}_r(S_2,K_2). \end{align*} Hence the result follows. \end{proof} \noindent Note that equality holds in Proposition \ref{ub03} if and only if $r \notin [s, K_2]$ for all $s\in S_2 \setminus S_1$, $r \notin [s, K_2] \setminus [s, K_1]$ for all $s\in S_1$ and $C_{K_1}(s) = C_{K_2}(s)$ for all $s\in S_1$ with $r \in [s, K_1]$. If $r = 0$ then the condition of equality reduces to $S_1 = S_2$ and $K_1 = K_2$. \begin{corollary} Let $S_1\subseteq S_2$ be two subrings of $R$. Then \[ {\Pr}_r(S_1, R) \leq |S_2 : S_1|{\Pr}(S_2, R). \] The equality holds if and only if $r = 0$ and $S_1 = S_2$. \end{corollary} \begin{proof} Putting $K_1 = K_2 = R$ in Proposition \ref{ub03}, we get \[ {\Pr}_r(S_1, R) \leq |S_2 : S_1|{\Pr}_r(S_2, R). \] Hence the result follows from Proposition \ref{ub02}. \end{proof} \begin{proposition}\label{boundS_1K_1} Let $S, K_1$ and $K_2$ be three subrings of $R$. If $K_1\subseteq K_2$ then \[ {\Pr}(S, K_1) \geq {\Pr}(S, K_2)\geq \frac {1}{|K_2 : K_1|}\left(\Pr(S, K_1) + \frac {|K_2| - |K_1|}{|S||K_1|}\right). \] The first equality holds if and only if $[s, K_1] = [s, K_2]$ for all $s \in S$ and the second equality holds if and only if $C_S(k) = \{0\}$ for all $k \in K_2\setminus K_1$. \end{proposition} \begin{proof} Since $[s, K_1] \subseteq [s, K_2]$ for all $s \in S$, by Corollary \ref{formula1}, we have \[ {\Pr}(S, K_1) = \frac{1}{|S|} \sum_{s \in S}\frac{1}{|[s, K_1]|} \geq \frac{1}{|S|} \sum_{s \in S}\frac{1}{|[s, K_2]|} = {\Pr}(S, K_2) \] with equality if and only if $[s, K_1] = [s, K_2]$ for all $s \in S$. 
By Corollary \ref{formula1}, we also have \begin{align}\label{inequality-1} \Pr(S, K_2) = \Pr(K_2, S)\nonumber = & \frac {1}{|S||K_2|}\underset{k \in K_2}{\sum}|C_S(k)|\nonumber\\ = &\frac {\Pr(S,K_1)}{|K_2 : K_1|} + \frac {1}{|S||K_2|}\underset{k \in K_2\setminus K_1}{\sum}|C_S(k)|. \end{align} We have $|C_S(k)| \geq 1$ for all $k \in K_2\setminus K_1$. Therefore, \begin{equation}\label{inequality-2} \underset{k \in K_2\setminus K_1}{\sum}|C_S(k)| \geq |K_2| - |K_1| \end{equation} the equality holds if and only if $|C_S(k)| = 1$ for all $k\in K_2\setminus K_1$. Hence the result follows from \eqref{inequality-1} and \eqref{inequality-2}. \end{proof} \noindent It is worth mentioning that the second part of \cite[Theorem 2.4]{jutireka} follows from the first part of Proposition \ref{boundS_1K_1}. \begin{proposition} Let $S \subseteq K$ be two subrings of $R$. If $p$ is the smallest prime dividing $|R|$ and $|S : Z(S, K)| = p^n$ then $\Pr(S, K)\leq \frac {p^n + p - 1}{p^{n + 1}}$. Moreover, if $S = K$ then we have $\Pr(S, K) \geq \frac {p^n + p^{n - 1} - 1}{p^{2n - 1}}$. \end{proposition} \begin{proof} If $s\in S\setminus Z(S, K)$ then $C_K(s)\neq K$ and hence $\frac {|K|}{|C_K(s)|}\geq p$. Therefore, by Corollary \ref{formula1}, we have \begin{align*} \Pr(S, K) = &\frac {1}{|S||K|}\underset{s\in Z(S, K)}{\sum}|C_K(s)| + \frac {1}{|S||K|}\underset{s\in S\setminus Z(S, K)}{\sum}|C_K(s)|\\ \leq & \frac{|Z(S, K)|}{|S|} + \frac{|S|-|Z(S, K)|}{p|S|} = \frac {p^n + p - 1}{p^{n + 1}}. \end{align*} If $S = K$ then for $s \in S \setminus Z(S, K)$ we have $Z(S, K) \subsetneq C_K(s) \ne K$ and so $|C_K(s)| \geq p|Z(S, K)|$. Therefore, by Corollary \ref{formula1}, we have \[ \Pr(S, K) \geq \frac{|Z(S, K)|}{|S|} + \frac{p|Z(S, K)|(|S|-|Z(S, K)|)}{|S||K|} = \frac {p^n + p^{n - 1} - 1}{p^{2n - 1}}. \] \end{proof} \begin{theorem}\label{prop1} Let $S$ and $K$ be two subrings of $R$ and $p$ the smallest prime dividing $|R|$. 
Then \begin{align*} \Pr(S, K)\geq & \frac {|Z(S, K)|}{|S|}+\frac {p(|S|-|X_S|-|Z(S, K)|)+|X_S|}{|S||K|}\\ \textup{ and } \Pr(S,K)\leq &\frac {(p-1)|Z(S, K)|+|S|}{p|S|}-\frac {|X_S|(|K|-p)}{p|S||K|} \end{align*} where $X_S = \{s\in S: C_K(s) = \{0\}\}$. Moreover, in each of these bounds, $S$ and $K$ can be interchanged. \end{theorem} \begin{proof} If $[S, K] = \{0\}$ then $\Pr(S, K) = 1$, $Z(S, K) = S$ and $X_S = \{0\}$ or $\emptyset$ according as $K = \{0\}$ or $K \ne \{0\}$. If $K \ne \{0\}$ then both the sides of the above inequalities give $1$. Otherwise, it is routine to see that \[ 1 - \frac{p - 1}{|S|} < \Pr(S, K) < 1 + \frac{p - 1}{p|S|}. \] Now let $[S, K]\neq \{0\}$. Then $X_S\cap Z(S, K) = \emptyset$. Therefore \begin{align}\label{eq0002} \underset{s\in S}{\sum}|C_K(s)|=&\underset{s\in X_S}{\sum}|C_K(s)|+\underset{s\in Z(S,K)}{\sum}|C_K(s)|+\underset{s\in S\setminus (X_S\cup Z(S,K))}{\sum}|C_K(s)|\nonumber\\ =& |X_S| + |K| |Z(S, K)| + \underset{s\in S\setminus (X_S\cup Z(S,K))}{\sum}|C_K(s)|. \end{align} Notice that for all $s\in S\setminus (X_S\cup Z(S,K))$ we have $\{0\}\neq C_K(s)\neq K$ which gives $p\leq |C_K(s)|\leq \frac {|K|}{p}$. Hence \begin{align}\label{eq0003} (|S| - |X_S| - |Z(S,K)|)p \leq &\underset{s\in S\setminus (X_S\cup Z(S,K))}{\sum}|C_K(s)|\nonumber\\ \leq & (|S| - |X_S| - |Z(S,K)|)\frac{|K|}{p}. \end{align} Now the required inequalities can be obtained using Corollary \ref{formula1}, \eqref{eq0002} and \eqref{eq0003}. The last part of the theorem follows from the fact that $\Pr(S, K) = \Pr(K, S)$. \end{proof} Putting $K = R$ in Theorem \ref{prop1} we get an upper bound for $\Pr(S, R)$ which is better than the upper bound obtained in \cite[Theorem 2.5]{jutireka}. \begin{corollary} Let $S$ and $K$ be two subrings of $R$. If $[S, K]\neq \{0\}$ and $p$ is the smallest prime dividing $|R|$ then $\Pr(S, K)\leq \frac {2p-1}{p^2}$. In particular, $\Pr(S,K)\leq \frac {3}{4}$. \end{corollary} \begin{proof} Since $[S, K]\neq \{0\}$ we have $Z(S,K) \ne S$. 
Therefore $|Z(S,K)|\leq \frac {|S|}{p}$. Hence, by the second part of Theorem \ref{prop1}, we have \[ \Pr(S, K)\leq \frac {(p - 1)|Z(S, K)|+|S|}{p|S|} \leq \frac {(p - 1)\frac {|S|}{p} + |S|}{p|S|} = \frac {2p-1}{p^2}. \] The particular case follows from the fact that $p \geq 2$ and $\frac{2p - 1}{p^2} \leq \frac{3}{4}$ for any prime~$p$. \end{proof} We have, for all $s \in S$, \begin{equation}\label{eqlb} |[S, K]| \geq |[s, K]| = |K : C_K(s)|. \end{equation} Therefore, by Corollary \ref{formula1} and \eqref{eqlb}, we have the following lower bound for $\Pr(S, K)$. \begin{proposition}\label{newlb1} Let $S$ and $K$ be two subrings of $R$. Then \[ \Pr(S, K) \geq \frac{1}{|[S, K]|}\left(1 + \frac{|[S, K]| - 1}{|S : Z(S, K)|} \right). \] In particular, if $Z(S, K) \ne S$ then $\Pr(S, K) > \frac{1}{|[S, K]|}$. \end{proposition} \noindent It is worth noting that \cite[Theorem 2.17]{jutireka} follows from Proposition \ref{newlb1}. \begin{theorem}\label{prop2} Let $S$ and $K$ be two subrings of $R$ such that $\Pr(S, K) = \frac{2p - 1}{p^2}$ for some prime $p$. Then $p$ divides $|R|$. If $p$ is the smallest prime dividing $|R|$ then \[ \frac{S}{Z(S,K)}\cong\mathbb Z_p\cong\frac{K}{Z(K,S)} \] and hence $S\neq K$. In particular, if $\Pr(S, K) = \frac{3}{4}$ then \[ \frac{S}{Z(S,K)}\cong\mathbb Z_2\cong\frac{K}{Z(K,S)}. \] \end{theorem} \begin{proof} If $\Pr(S,K) = \frac{2p-1}{p^2}$ then, by Corollary \ref{formula1}, we have $p$ divides $|S||K|$ and hence $p$ divides $|R|$. For the second part we have, by Theorem \ref{prop1}, \[ \frac{2p-1}{p^2} \leq \frac {(p - 1)|Z(S, K)|+|S|}{p|S|} = \frac{p - 1}{p|S : Z(S, K)|} + \frac{1}{p} \] which gives $|S : Z(S, K)|\leq p$. Since $\Pr(S, K) \ne 1$ we have $S \ne Z(S, K)$ and hence $|S : Z(S, K)| = p$. Therefore, $\frac{S}{Z(S,K)}\cong\mathbb Z_p$. Interchanging the roles of $S$ and $K$ we get $\frac{K}{Z(K, S)}\cong\mathbb Z_p$. 
If $S = K$ then $\frac{S}{Z(S,K)}\cong\mathbb Z_p\cong\frac{K}{Z(K,S)}$ implies $S$ and $K$ are commutative (by Lemma \ref{lemma002}). Therefore, $\frac{S}{Z(S,K)}$ and $\frac{K}{Z(K,S)}$ are trivial group, which is a contradiction. Hence, $S \ne K$. The last part follows considering $p = 2$. \end{proof} \noindent The following lemma is useful in the subsequent results. \begin{lemma}\label{newlemma} Let $S$ and $K$ be two subrings of $R$. If $[x, S] \subseteq [x, K]$ for all $x \in S\cup K$ then \[ \Pr(K) \leq \Pr(S,K) \leq \Pr(S). \] \end{lemma} \begin{proof} By Corollary \ref{formula1} we have \[ \Pr(S) = \frac {1}{|S|}\underset{s\in S}{\sum}\frac{|C_S(s)|}{|S|} \geq \frac {1}{|S|}\underset{s\in S}{\sum}\frac {|C_K(s)|}{|K|} = \Pr(S, K) \] and \[ \Pr(S, K) = \frac {1}{|K|}\underset{k \in K}{\sum}\frac{|C_S(k)|}{|S|} \geq \frac {1}{|K|}\underset{k \in K}{\sum}\frac {|C_K(k)|}{|K|} = \Pr(K). \] Hence the lemma follows. \end{proof} It may be mentioned here that \cite[Theorem 2.2]{jutireka} follows from the above lemma. \begin{proposition} Let $S$ and $K$ be two subrings of $R$ such that $[x, S] \subseteq [x, K]$ for all $x \in S$. If $S$ is non-commutative and $p$ is the smallest prime dividing $|S|$ then $\Pr(S,K)\leq \frac {p^2 + p - 1}{p^3}$. \end{proposition} \begin{proof} The result follows from \cite[Theorem 2]{dmachale} and Lemma \ref{newlemma}. \end{proof} \begin{theorem}\label{5/8like} Let $S \subseteq K$ be two non-commutative subrings of $R$ and $\Pr(S, K) = \frac {p^2 + p - 1}{p^3}$ for some prime $p$. Then $p$ divides $|R|$. If $p$ is the smallest prime dividing $|R|$ then \[ \frac {S}{Z(S, K)}\cong \mathbb Z_p\times \mathbb Z_p. \] In particular, if $\Pr(S, K) = \frac {5}{8}$ then $\frac {S}{Z(S, K)}\cong \mathbb Z_2\times \mathbb Z_2$. \end{theorem} \begin{proof} If $\Pr(S, K) = \frac {p^2 + p - 1}{p^3}$ then by Corollary \ref{formula1}, we have $p$ divides $|S||K|$ and hence $p$ divides $|R|$. 
By second part of Theorem \ref{prop1}, we have \[ \frac {p^2 + p - 1}{p^3} \leq \frac{(p - 1)|Z(S, K)| + |S|}{p|S|} = \frac{p - 1}{p|S : Z(S, K)|} + \frac{1}{p} \] which gives $|S : Z(S, K)| \leq p^2$. Since $\Pr(S, K) \ne 1$ we have $S \ne Z(S, K)$. Also $\frac{S}{Z(S, K)}$ is not cyclic as $S$ is non-commutative (by Lemma \ref{lemma002}). Hence $\frac{S}{Z(S, K)} \cong \mathbb Z_p\times \mathbb Z_p$. The particular case follows considering $p = 2$. \end{proof} The following proposition gives partial converse of Theorem \ref{prop2} and Theorem \ref{5/8like}. \begin{theorem} Let $S \subseteq K$ be two subrings of $R$. \begin{enumerate} \item If $\frac {S}{Z(S, K)}\cong \mathbb Z_p$ and $|K : S| = n$ then $\Pr(S, K)\geq \frac {n + p - 1}{np}$. Further, if $p$ is the smallest prime dividing $|R|$ and $|K : S| = p$ then $\Pr(S, K) = \frac {2p - 1}{p^2}$. \item If $\frac {S}{Z(S, K)}\cong \mathbb Z_p\times \mathbb Z_p$ and $|K : S| = n$ then $\Pr(S, K)\geq \frac {(n + 2)p^2 - 2}{np^4}$. Further, if $p$ is the smallest prime dividing $|R|$ and $|K : S| = 1$ then $\Pr(S, K) = \frac {p^2 + p - 1}{p^3}$. \end{enumerate} \end{theorem} \begin{proof} (a) Since $\frac {S}{Z(S, K)}$ is cyclic, we have $S$ is commutative (by Lemma \ref{lemma002}). Therefore, if $s\in S\setminus Z(S, K)$ then $|C_K(s)|\geq |S| = \frac {|K|}{n}$. Now, by Corollary \ref{formula1}, we have \begin{align*} \Pr(S, K) &= \frac {|Z(S,K)|}{|S|} + \frac {1}{|S||K|}\underset{s\in S\setminus Z(S, K)}{\sum}|C_K(s)|\\ &\geq \frac {1}{p} + \frac {|S|-|Z(S,K)|}{n|S|} = \frac{n + p - 1}{np}. \end{align*} If $p$ is the smallest prime dividing $|R|$ and $|K : S| = p$ then $|C_K(s)| = \frac {|K|}{p}$ for $s\in S\setminus Z(S, K)$ and hence $\Pr(S, K) = \frac {2p - 1}{p^2}$. (b) We have $Z(S, K) \subsetneq C_K(s)$ if $s\in S\setminus Z(S, K)$. 
Also, $|Z(S,K)|$ divides $|C_K(s)|$ and so \begin{equation}\label{eqconv1} |C_K(s)| \geq 2|Z(S,K)| = \frac{2|S|}{p^2} = \frac{2|K|}{np^2} \end{equation} for all $s\in S\setminus Z(S, K)$. Now, by Corollary \ref{formula1} and \eqref{eqconv1}, we have \[ \Pr(S, K) \geq \frac{1}{p^2} + \frac{2(|S| - |Z(S, K)|)}{np^2|S|} = \frac {(n + 2)p^2 - 2}{np^4}. \] If $p$ is the smallest prime dividing $|R|$ and $|K : S| = 1$ then $|C_K(s)| \geq p|Z(S,K)| = \frac{|S|}{p}$ for $s\in S\setminus Z(S, K)$. Also, $C_K(s) \subsetneq S$ and so $|C_K(s)| = \frac{|S|}{p}$ for $s\in S\setminus Z(S, K)$. Therefore, Corollary \ref{formula1} gives \[ \Pr(S, K) = \frac{1}{p^2} + \frac{|S| - |Z(S, K)|}{p|S|} = \frac {p^2 + p - 1}{p^3}. \] \end{proof} \noindent The following corollary follows immediately. \begin{corollary} Let $S \subseteq K$ be two subrings of $R$. Then \begin{enumerate} \item If $\frac {S}{Z(S, K)}\cong \mathbb Z_2$ and $|K : S| = 2$ then $\Pr(S, K)= \frac {3}{4}$. \item If $\frac {S}{Z(S, K)}\cong \mathbb Z_2\times \mathbb Z_2$ and $|K : S| = 1$ then $\Pr(S, K)=\frac{5}{8}$. \end{enumerate} \end{corollary} \noindent We also have the following result. \begin{proposition} Let $S$ and $K$ be two subrings of $R$ such that $\frac {S}{Z(S, K)}\cong \mathbb Z_p\times \mathbb Z_p$. If $p$ is the smallest prime dividing $|R|$ and $|[s,K]| = p$ for all $s \in S \setminus Z(S, K)$ then $\Pr(S, K) = \frac {p^2 + p - 1}{p^3}$. \end{proposition} \begin{proof} If $p$ is the smallest prime dividing $|R|$ and $|[s,K]| = p$ for all $s \in S \setminus Z(S, K)$ then by Corollary \ref{formula1}, we have \[ \Pr(S, K) = \frac{|Z(S,K)|}{|S|} + \frac{1}{|S|} \underset{s\in S\setminus Z(S, K)}{\sum}\frac{1}{p} = \frac {p^2 + p - 1}{p^3}. \] \end{proof} We shall conclude this section by the following proposition which is an improvement of \cite[Theorem 2.13]{jutireka}. \begin{proposition} Let $S$ and $K$ be two subrings of $R$ and $I$ be an ideal of $R$ such that $I\subseteq S \cap K$. 
Then $\Pr(S, K)\leq \Pr\left(\frac {S}{I},\frac {K}{I}\right)\Pr(I)$ where $\frac {S}{I}$ and $\frac {K}{I}$ are factor rings. The equality holds if $I \cap [S, R] = \{0\}$. \end{proposition} \begin{proof} By Corollary \ref{formula1} and Lemma \ref{lemma02}, we have \begin{align}\label{factor-eq1} |S||K| \Pr(S,K) = &\underset{P \in \frac{K}{I}}{\sum}\underset{k \in P}{\sum}\frac{|C_S(k)|}{|I \cap C_S(k)|} |C_I(k)| \nonumber\\ = &\underset{P \in \frac{K}{I}}{\sum}\underset{k \in P}{\sum}\left|\frac{C_S(k) + I}{I}\right| |C_I(k)| \nonumber\\ \leq &\underset{P \in \frac{K}{I}}{\sum}\underset{k \in P}{\sum}|C_{\frac{S}{I}}(k+I)||C_I(k)|\nonumber\\ = &\underset{P\in \frac{K}{I}}{\sum} |C_{\frac{S}{I}}(P)| \underset{u \in I}{\sum}|C_K(u) \cap P|. \end{align} Let $a + I = P$ where $a\in K\setminus I$. If $C_K(u)\cap P = \emptyset$ then $|C_K(u)\cap P| = 0$. Therefore, $|C_K(u)\cap P|<|C_I(u)|$ as $|C_I(u)|\geq 1$. On the other hand, if $C_K(u)\cap P\neq \emptyset$ then there exists $x\in C_K(u)\cap P$ and $x = a + v$ for some $v\in I$ which implies $x +I = a + I = P$. Therefore, \begin{align*} C_K(u)\cap P =(x + I)\cap (x + C_K(u)) = x + (I\cap C_K(u)) = x + C_I(u) \end{align*} and so $|C_K(u)\cap P| = |C_I(u)|$. Therefore, in both the cases, $|C_K(u)\cap P|\leq|C_I(u)|$ and so, by \eqref{factor-eq1}, we have \begin{align*} |S||K| \Pr(S,K) \leq &\underset{P \in \frac{K}{I}}{\sum} |C_{\frac{S}{I}}(P)| \underset{u\in I}{\sum}|C_I(u)|\\ =&\left|\frac{S}{I} \right| \left|\frac{K}{I} \right| \Pr \left(\frac{S}{I},\frac{K}{I}\right)|I|^2\Pr(I)\\ =&|S||K|\Pr \left(\frac{S}{I},\frac{K}{I}\right)\Pr(I). \end{align*} Thus, the first part of the result follows. If $I\cap [S, R] = \{0\}$ then equality holds in Lemma \ref{lemma02} and hence equality holds in \eqref{factor-eq1}. Further, if $P = a + I$ and $k\in P$ then $k = a + u$ for some $u\in I$ and $a\in K\setminus I$. Therefore, $k\in K$ and $u\in I\subseteq S$ and hence $[u, k]\in [S, K]$. 
Also $u\in I$ and $k \in K\subseteq R$ gives $uk, ku\in I$ and so $[u, k]\in I.$ Hence, $[u, k]\in [S, R]\cap I = \{0\}$, that is, $[u, k] = 0$ and so $k\in C_K(u)$. Therefore, $C_K(u)\cap P\neq \emptyset$ and $|C_K(u)\cap P|=|C_I(u)|$. Hence the equality holds. \end{proof} \section{Isoclinism and commuting probability} The concept of isoclinism between groups was introduced by Hall \cite{pH40} in 1940. In 1995, Lescot \cite{pL95} showed that the commuting probabilities of two isoclinic finite groups are the same. Recently, Buckley, MacHale and N\'{\i} Sh\'{e}~\cite{BMS} have introduced $\mathbb Z$-isoclinism between two rings and showed that the commuting probabilities of two $\mathbb Z$-isoclinic finite rings are the same. Further, Dutta, Basnet and Nath \cite{jutirekha2} have generalized the concept of $\mathbb Z$-isoclinism between two rings as given in the following definition. \begin{definition} Let $R_1$ and $R_2$ be two rings with subrings $S_1, K_1$ and $S_2, K_2$ respectively such that $S_1\subseteq K_1$ and $S_2\subseteq K_2$. A pair of rings $(S_1,K_1)$ is said to be $\mathbb Z$-isoclinic to $(S_2, K_2)$ if there exist additive group isomorphisms $\phi :\frac {K_1}{Z(S_1, K_1)}\rightarrow \frac {K_2}{Z(S_2, K_2)}$ such that $\phi \left(\frac {S_1}{Z(S_1, K_1)}\right) = \frac {S_2}{Z(S_2, K_2)}$ and $\psi :[S_1, K_1]\rightarrow [S_2, K_2]$ such that $\psi ([u_1, v_1])=[u_2, v_2]$ whenever $u_i \in S_i$, $v_i\in K_i$ for $i = 1, 2$; $\phi (u_1 + Z(S_1, K_1)) = u_2 + Z(S_2, K_2)$ and $\phi (v_1 + Z(S_1, K_1)) = v_2 + Z(S_2, K_2)$. Such a pair of mappings $(\phi,\psi)$ is called a $\mathbb Z$-isoclinism between $(S_1, K_1)$ and $(S_2, K_2)$. \end{definition} Dutta, Basnet and Nath \cite{jutireka} also showed that $\Pr(S_1, R_1) = \Pr(S_2, R_2)$ if the pairs $(S_1,R_1)$ and $(S_2,R_2)$ are $\mathbb Z$-isoclinic (see Theorem 3.3). In this section, we further generalize this result in the following way. 
\begin{theorem} Let $R_1$ and $R_2$ be two non-commutative rings with subrings $S_1, K_1$ and $S_2, K_2$ respectively. If $(\phi,\psi)$ is a $\mathbb Z$-isoclinism between $(S_1, K_1)$ and $(S_2, K_2)$ then \[ {\Pr}_r(S_1, K_1) = {\Pr}_{\psi (r)}(S_2, K_2). \] \end{theorem} \begin{proof} By Theorem \ref{com-thm}, we have \begin{align*} {\Pr}_r(S_1, K_1) =&\frac {|Z(S_1, K_1)|}{|S_1||K_1|}\underset{r \in [s_1, K_1]}{\underset{s_1 + Z(S_1, K_1)\in\frac {S_1}{Z(S_1, K_1)}}{\sum}|C_{K_1}(s_1)|} \end{align*} noting that $r \in [s_1, K_1]$ if and only if $r \in [s_1 + z, K_1]$ and $C_{K_1}(s_1) = C_{K_1}(s_1 + z)$ for all $z \in Z(S_1, K_1)$. Now, by Lemma \ref{lemma1}, we have \[ {\Pr}_r(S_1, K_1) = \frac {|Z(S_1, K_1)|}{|S_1|}\underset{r \in [s_1, K_1]}{\underset{s_1 + Z(S_1, K_1)\in\frac {S_1}{Z(S_1, K_1)}}\sum} \frac {1}{|[s_1,K_1]|}. \] Similarly, it can be seen that \[ {\Pr}_{\psi (r)}(S_2, K_2) = \frac {|Z(S_2, K_2)|}{|S_2|}\underset{\psi (r)\in [s_2, K_2]}{\underset{s_2 + Z(S_2, K_2)\in\frac {S_2}{Z(S_2, K_2)}}\sum} \frac {1}{|[s_2,K_2]|}. \] Since $(\phi,\psi)$ is a $\mathbb Z$-isoclinism between $(S_1, K_1)$ and $(S_2, K_2)$ we have $\frac {|S_1|}{|Z(S_1, K_1)|} = \frac {|S_2|}{|Z(S_2, K_2)|}$, $|[s_1,K_1]| = |[s_2, K_2]|$ and $r \in [s_1, K_1]$ if and only if $\psi (r) \in [s_2, K_2]$. Hence \[ {\Pr}_r(S_1, K_1) = {\Pr}_{\psi (r)}(S_2, K_2). \] \end{proof} We conclude this paper by the following result. \begin{proposition} Let $R_1$ and $R_2$ be two non-commutative rings with subrings $S_1, K_1$ and $S_2, K_2$ respectively. 
If $\phi_1 : \frac {S_1}{Z(S_1, R_1)}\to \frac {S_2}{Z(S_2, R_2)}$, $\phi_2 :\frac {K_1}{Z(K_1, R_1)} \to \frac {K_2}{Z(K_2, R_2)}$ and $\psi : [S_1, K_1]\to [S_2, K_2]$ are additive group isomorphisms such that \[ a_{(S_2, K_2)} \circ (\phi_1 \times \phi_2) = \psi \circ a_{(S_1, K_1)} \] where $a_{(S_i, K_i)}: \frac{S_i}{Z(S_i, R_i)} \times \frac{K_i}{Z(K_i, R_i)} \to [S_i, K_i]$ are well-defined maps given by \[ a_{(S_i, K_i)}(x_i + Z(S_i, R_i), y_i + Z(K_i, R_i)) = [x_i, y_i] \] for all $x_i \in S_i, y_i \in K_i$ and $i = 1, 2$; and \[ (\phi_1 \times \phi_2)(x_1 + Z(S_1, R_1), y_1 + Z(K_1, R_1)) = (x_2 + Z(S_2, R_2), y_2 + Z(K_2, R_2)) \] whenever $\phi_1(x_1 + Z(S_1, R_1)) = x_2 + Z(S_2, R_2)$ and $\phi_2(y_1 + Z(K_1, R_1)) = y_2 + Z(K_2, R_2)$. Then \[ {\Pr}_r(S_1, K_1) = {\Pr}_{\psi (r)}(S_2, K_2). \] \end{proposition} \begin{proof} For $i=1,2$ let $Z_i := Z(S_i,R_i)$ and ${Z_i}^\prime := Z(K_i,R_i)$. Then we have \begin{tiny} \begin{align*} {\Pr}_r(S_1, K_1)& =\frac {1}{|S_1||K_1|}|\{(x_1,y_1)\in S_1\times K_1 : [x_1, y_1] = r\}|\\ &=\frac {|Z_1||{Z_1}^\prime|}{|S_1||K_1|}\left|\{(x_1 + Z_1, y_1 + {Z_1}^\prime)\in \frac {S_1}{Z_1}\times \frac {K_1}{{Z_1}^\prime}:a_{(S_1,K_1)}(x_1 + Z_1, y_1 + {Z_1}^\prime) = r\}\right|\\ &=\frac {|Z_1||{Z_1}^\prime|}{|S_1||K_1|}\left|\{(x_1 + Z_1, y_1 + {Z_1}^\prime) \in \frac {S_1}{Z_1}\times \frac {K_1}{{Z_1}^\prime} : \psi \circ a_{(S_1, K_1)}(x_1 + Z_1, y_1 + {Z_1}^\prime) = \psi(r)\}\right|\\ &=\frac {|Z_1||{Z_1}^\prime|}{|S_1||K_1|}\left|\{(x_1 + Z_1, y_1 + {Z_1}^\prime) \in \frac {S_1}{Z_1}\times \frac {K_1}{{Z_1}^\prime} : a_{(S_2,K_2)}\circ (\phi_1\times \phi_2)(x_1 + Z_1, y_1 + {Z_1}^\prime) = \psi(r)\}\right|\\ &=\frac {|Z_2||{Z_2}^\prime|}{|S_2||K_2|}\left|\{(x_2 + Z_2, y_2 + {Z_2}^\prime)\in \frac {S_2}{Z_2}\times \frac {K_2}{{Z_2}^\prime}:a_{(S_2, K_2)}(x_2 + Z_2, y_2 + {Z_2}^\prime)=\psi(r)\}\right|\\ &=\frac {|Z_2||{Z_2}^\prime|}{|S_2||K_2|}\left|\{(x_2 + Z_2, y_2 + {Z_2}^\prime)\in \frac {S_2}{Z_2}\times 
\frac {K_2}{{Z_2}^\prime} : [x_2 + Z_2, y_2 + {Z_2}^\prime] = \psi (r)\}\right|\\ &=\frac {1}{|S_2||K_2|}|\{(x_2,y_2)\in S_2\times K_2 : [x_2,y_2]=\psi(r)\}|\\ &={\Pr}_{\psi(r)}(S_2, K_2). \end{align*} \end{tiny} \end{proof} \end{document}
\begin{document} \begin{frontmatter} \title{Analysis of 1:1 Matched Cohort Studies and Twin Studies, with Binary Exposures and Binary Outcomes} \runtitle{Matched cohort studies} \begin{aug} \author[a]{\fnms{Arvid} \snm{Sj\"olander}\corref{}\ead[label=e1]{[email protected]}}, \author[b]{\fnms{Anna L. V.} \snm{Johansson}\ead[label=e2]{[email protected]}}, \author[c]{\fnms{Cecilia} \snm{Lundholm}\ead[label=e3]{[email protected]}}, \author[d]{\fnms{Daniel} \snm{Altman}\ead[label=e4]{[email protected]}}, \author[e]{\fnms{Catarina}~\snm{Almqvist}\ead[label=e5]{[email protected]}} \and \author[f]{\fnms{Yudi} \snm{Pawitan}\ead[label=e6]{[email protected]}} \runauthor{A. Sj\"olander et al.} \affiliation{Karolinska Institutet} \address[a]{Arvid Sj\"olander is Ph.D. Student, Department of Medical Epidemiology and Biostatistics, Karolinska Institutet, Solna, Sweden \printead{e1}.} \address[b]{Anna L. V. Johansson is Statistician, Department of Medical Epidemiology and Biostatistics, Karolinska Institutet, Solna, Sweden \printead{e2}.} \address[c]{Cecilia Lundholm is Statistician, Department of Medical Epidemiology and Biostatistics, Karolinska Institutet, Solna, Sweden \printead{e3}.} \address[d]{Daniel Altman is Associate Professor, Department of Medical Epidemiology and Biostatistics, Karolinska Institutet, Solna, Sweden, and Associate Professor, Division of Obstetrics and Gynecology, Department of Clinical Sciences, Danderyd Hospital, Karolinska Institutet, Stockholm, Sweden \printead{e4}.} \address[e]{Catarina Almqvist is Associate Professor, Department of Medical Epidemiology and Biostatistics, Karolinska Institutet, Solna, Sweden, and Associate Professor, Astrid Lindgren Children's Hospital and Department of Woman and Child Health, Karolinska Institutet, Stockholm, Sweden \printead{e5}.} \address[f]{Yudi Pawitan is Professor, Department of Medical Epidemiology and Biostatistics, Karolinska Institutet, Solna, Sweden \printead{e6}.} \end{aug} \begin{abstract} To improve 
confounder adjustments, observational studies are often matched on potential confounders. While matched case-control studies are common and well covered in the literature, our focus here is on matched cohort studies, which are less common and sparsely discussed in the literature. Matched data also arise naturally in twin studies, as a cohort of exposure--discordant twins can be viewed as being matched on a large number of potential confounders. The analysis of twin studies will be given special attention. We give an overview of various analysis methods for matched cohort studies with binary exposures and binary outcomes. In particular, our aim is to answer the following questions: (1) What are the target parameters in the common analysis methods? (2) What are the underlying assumptions in these methods? (3) How do the methods compare in terms of statistical power? \end{abstract} \begin{keyword} \kwd{Cohort studies} \kwd{likelihood} \kwd{matching}. \end{keyword} \end{frontmatter} \section{Introduction}\label{sec:introduction} A common goal of epidemiological research is to estimate the causal effect of a particular exposure on a particular outcome. The common tool is an observational study, utilizing, for example, hospital data, cohort data or health register data. In observational studies, the exposure-outcome association is invariably confounded by factors that induce spurious (i.e., noncausal) associations. For example, age may confound an exposure-outcome association if older people are more often exposed and more likely to develop the outcome. Without adjustment for age, that is, if the confounding influence by age is not accounted for in the analysis, there may be an association of exposure and outcome, even in the absence of a causal effect. Hence, the exposure-outcome association cannot, in general, be given a causal interpretation, unless all confounders are properly adjusted for. 
There are several strategies to adjust for potential confounders in the analysis, for example, stratification or regression modeling. Essentially, these methods solve the problem of confounding by comparing the exposed and unexposed within levels of the confounders, thus balancing the confounders across levels of the exposure and comparing ``like with like.'' If there is a strong association between the confound\-ers and the exposure, or between the confounders and the outcome, these strategies are often inefficient. In particular, some strata may contain few exposed subjects or few cases (i.e., subjects that developed the outcome); the lack of balance may lead to unstable estimates for these strata. One common method to increase the efficiency is to match the study on potential confounders. For example, matched case-control studies are constructed so that for each case, a fixed number of controls are selected, having the same confounder levels as the case. When each case is matched to one control, we say that the study is 1:1 matched. In case-control studies, matching forces the ratio of cases to controls to be constant across all strata of the matched factors, which implies that the association between the confounders and the outcome is broken. Matched case-control studies are commonplace, and well covered in the literature (e.g., \citecs{Breslow}; \citecs{Jewell}; \citecs{Woodward}). A matched cohort study can be constructed in a similar fashion; for each exposed subject, a fixed number of unexposed subjects are selected, having the same confounder levels as the exposed. In cohort studies, matching forces the ratio of exposed to unexposed to be constant across all strata of the matched factors, which implies that the association between the confounders and the exposure is broken. Matched cohort studies are relatively rare, and the literature is sparse and typically rather brief (e.g., Cummings et al., \citeyear{Cummings}). 
The reason, we believe, is mainly due to available data sources. Matched cohort studies are suitable for situations where a researcher has access to large population data sources with exposure information. Matched data also arise naturally in twin studies. By nature, a large number of potential confounders are shared (i.e., having constant levels) within each twin pair, for example, genetic factors, maternal uterine environment, gestational age, etc. It follows that a cohort of\vadjust{\goodbreak} exposure--discordant twin pairs (i.e., pairs in which one of the twins is exposed, and the other twin is unexposed) can be viewed as being 1:1 matched on all shared confounders. In such a cohort there is no association between the shared confounders and the exposure. An attractive feature of twin studies is that the shared confounders often include factors which are normally very difficult to match on, or even to measure. For example, monozygotic twins have identical genes and can thus be viewed~as being matched on the whole genome. However, a~twin study is not simply a special case of a regular 1:1 matched cohort study; whereas the latter only contains exposure--discordant pairs, the former also contains pairs which are concordant in the exposure.~Be\-cause of their unique and attractive properties, twin studies will be given special attention in this paper.\looseness=-1 The aim of this paper is to give a detailed overview of different analysis methods for matched cohort studies with binary exposures and binary outcomes. In particular, our aim is to answer the following questions: (1) What are the target parameters in the common analysis methods? (2) What are the underlying assumptions in these methods? (3) How do the methods compare in terms of statistical power? We illustrate the methods with two examples. The first example is a register-based study on the effect of hysterectomy on the risk for cardiovascular disease (CVD) in Swedish women \citep{Ingelsson}.
The study is matched on birth year, year of hysterectomy and county of residence at year of hysterectomy, so that for each hysterectomized woman (exposed), three nonhysterectomized women at same age and year were selected from the general population. The second study is a population-based twin study of the association between fetal growth and childhood asthma \citep{Ortqvist}. The paper is organized as follows. In Section~\ref{sec:margcondstd} we review the concepts of marginalization, conditioning and standardization. In Section~\ref{sec:matched} we define a matched cohort study. In Section~\ref{sec:analysis} we describe the most common analysis methods for matched cohorts. These methods can also be used to analyze the exposure--discordant pairs in twin studies. In Section~\ref{sec:twins} we demonstrate how these methods can be adapted for inclusion of the exposure--concordant pairs in twin studies as well. In Section~\ref{sec:sim} we carry out a simulation study. In Section~\ref{sec:examples} we provide the two illustrating examples. We will restrict our attention to 1:1 matching, and we will not consider additional covariate adjustments. Extensions to other matching schemes and adjustments for additional covariates are discussed in Section~\ref{sec:discussion}. \section{Marginalization, Conditioning and Standardization} \label{sec:margcondstd} We first establish the notations and briefly review the concepts of marginalization, conditioning and standardization, which are crucial for the understanding of matching and confounder adjustment. More thorough discussions can be found in standard epidemiological textbooks (e.g., Rothman et~al.,\break \citeyear{Rothman}). Let $X$ denote the binary exposure of interest (0/1), let $Y$ denote the binary outcome of interest (0/1) and let $Z$ denote a set of potential confounders for the association between $X$ and $Y$. 
We use $\operatorname{Pr}(\cdot)$ generically for both probabilities (population proportions) and densities, and we use $E(\cdot)$ for expected value (population average). We use $V_1\perp V_2|V_3$ as shorthand for ``$V_1$ and $V_2$ conditionally independent, given $V_3$.'' We use (log) odds ratios to quantify the $X$--$Y$ association. Other possible options would be risk differences or risk ratios. There are two reasons for focusing on odds ratios. First, regression models for odds ratios can be conveniently fitted without restrictions; see Section~\ref{sec:rtrt}. Second, in applied scenarios, it is often desirable to make results comparable with case control studies, in which only odds ratios are estimable. An unadjusted analysis targets the marginal\break (over~$Z$) association between $X$ and $Y$, for example, through the marginal odds ratio \[ \mathit{OR}_m=\frac{\operatorname{Pr}(Y=1|X=1)\operatorname{Pr}(Y=0|X=0)}{\operatorname{Pr}(Y=0|X=1)\operatorname{Pr}(Y=1|X=0)}. \] We define $\psi_m=\log(\mathit{OR}_m)$. In the presence of confounders $Z$, $\mathit{OR}_m$ fails to have a causal interpretation. In particular, it may differ from 1 in the absence of a causal effect. The influence of $Z$ can be eliminated by conditioning on $Z$, as in the conditional odds ratio \[ \mathit{OR}_c(Z)=\frac{\operatorname{Pr}(Y=1|X=1,Z)\operatorname{Pr}(Y=0|X=0,Z)}{\operatorname{Pr}(Y=0|X=1,Z)\operatorname{Pr}(Y=1|X=0,Z)}. \] The conditional odds ratio $\mathit{OR}_c(Z)$ depends, in general, on $Z$. If $Z$ is the only confounder for the $X$--$Y$ association, then $\mathit{OR}_c(Z)$ can be interpreted as the conditional causal effect of $X$ on $Y$, given $Z$, on the odds ratio scale. If there are additional confounders, then $\mathit{OR}_c(Z)$ has no causal interpretation. $\mathit{OR}_c(Z)$ is a subpopulation (i.e., $Z$-specific) effect. The effect for the whole population can be obtained through standardization. 
The standardized probability of $Y=1$ given $X=x$, is given by \begin{equation} \label{eq:std} E_Z\{\operatorname{Pr}(Y=1|X=x,Z)\}, \end{equation} where we have used subindex $Z$ to highlight that the expectation is taken over the marginal distribution $\operatorname{Pr}(Z)$. We emphasize that the expression in (\ref{eq:std}) is not, in general, equal to $E_{Z|X=x}\{\operatorname{Pr}(Y=1|X=x,Z)|X=x\}=\operatorname{Pr}(Y=1|X=x)$, which is the marginal (unadjusted) probability of $Y=1$, given $X=x$. If $Z$ is the only confounder, then $E_Z\{\operatorname{Pr}(Y=1|X=x,Z)\}$ can be interpreted as the hypothetical (counterfactual) probability of $Y=1$, had everybody attained level $X=x$ in the source population \citep{Hernan}. $\operatorname{Pr}(Y=1|X=x,Z)$ can be standardized to any proper distribution $\operatorname{Pr}^{*}(Z)$, not necessarily equal to $\operatorname{Pr}(Z)$. We let $E^*_Z(V)$ denote the expected value of $V$, where the expectation is taken over $\operatorname{Pr}^{*}(Z)$. If $Z$ is the only confounder, then $E^*_Z\{\operatorname{Pr}(Y=1|X=x,Z)\}$ can be interpreted as the hypothetical (counterfactual) probability of $Y=1$, had everybody attained level $X=x$ in the fictitious population where $Z$ follows the distribution $\operatorname{Pr}^{*}(Z)$. A standardized odds ratio is constructed as \begin{eqnarray*} &&\hspace*{-5pt}\mathit{OR}_s \\ &&\hspace*{-5pt}\quad=\frac{E\{\operatorname{Pr}(Y=1|X=1,Z)\}E\{\operatorname{Pr}(Y=0|X=0,Z)\}}{E\{\operatorname{Pr}(Y=0|X=1,Z)\}E\{\operatorname{Pr}(Y=1|X=0,Z)\}}. \end{eqnarray*} We define $\psi_s=\log(\mathit{OR}_s)$. In ($\ref{eq:std}$), $\operatorname{Pr}(Y=1|X=x,Z)$ is standardized to $\operatorname{Pr}(Z)$, that is, the distribution of~$Z$ in the source population. 
In order to keep the notation simple, we use $\mathit{OR}_s$ and $\psi_s$, even if $\operatorname{Pr}(Z)$ is replaced by $\operatorname{Pr}^*(Z)$, and we let it be clear from the context which distribution of $Z$ these parameters are standardized to. If $Z$ is the only confounder, then $\mathit{OR}_s$ can be interpreted as the causal effect of~$X$ on~$Y$ in the source/fictitious population, on the odds ratio scale. We emphasize that although the numerical values of $\mathit{OR}_s$ and $\psi_s$ may depend heavily on which distribution of $Z$ they are standardized to, they are always, by construction, adjusted for $Z$. In general, there is no ordering in the magnitudes of $\mathit{OR}_c(Z)$, and $\mathit{OR}_s$. An interesting special case occurs when $\mathit{OR}_c(Z)$ is constant across levels of $Z$, that is, \begin{equation} \label{eq:const} \log\{\mathit{OR}_c(Z)\}=\psi_c. \end{equation} It can be shown \citep{Neuhaus4} that $|\psi_c|\geq|\psi_s|$. \begin{table*}[b] \vspace*{3pt} \tabcolsep=0pt \caption{Crude summary of matched 1:1 cohort data}\label{tab:tab1} \begin{tabular*}{290pt}{@{\extracolsep{\fill}}lccc@{}} \hline &\multicolumn{2}{@{}c}{\textbf{Unexposed pair member ($\bolds{X=0}$)}}& \textbf{Totals}\\ \ccline{2-3} & \textbf{Event ($\bolds{Y=1}$)} & \textbf{\hspace*{3pt}No event ($\bolds{Y=0}$)\hspace*{-4pt}} &\\ \hline Exposed pair member ($X=1$) & & &\\ \quad Event ($Y=1$) & $T$ & $U$ & $T+U$\\ \quad No event ($Y=0$) & $V$ & $W$ & $V+W$\\ \quad Totals & $T+V$ & $U+W$ & $n$\\ \hline \end{tabular*} \end{table*} In general, there is no ordering in the magnitudes of $\mathit{OR}_m$ and $\mathit{OR}_c(Z)$, or of $\mathit{OR}_m$ and $\mathit{OR}_s$; confounding by $Z$ can both inflate or deflate the association between $X$ and $Y$. There are a few special cases though. 
If $Y\perp Z|X$, then $\operatorname{Pr}(Y=1|X,Z)=\operatorname{Pr}(Y=1|X)$ which implies that $\mathit{OR}_m=\mathit{OR}_c(Z)=\mathit{OR}_s$ for all\vadjust{\goodbreak} $Z$ and all standardization distributions $\operatorname{Pr}^*(Z)$. This would happen if the true causal structure between $X$, $Y$ and $Z$ is as in Figure \ref{fig:d2}. If $X\perp Z$, then $\operatorname{Pr}(Z|X)=\operatorname{Pr}(Z)$ which implies that $\mathit{OR}_m=\mathit{OR}_s$ for the particular distribution $\operatorname{Pr}(Z)$, that is, the distribution of $Z$ in the source population. This would happen if the true causal structure is as in Figure \ref{fig:d1}. \begin{figure} \caption{A causal structure for which $Y\perp Z|X$.} \label{fig:d2} \end{figure} \begin{figure} \caption{A causal structure for which $X\perp Z$.} \label{fig:d1} \end{figure} We note that in Figures \ref{fig:d2} and \ref{fig:d1}, $Z$ is not a confounder, and $\mathit{OR}_m$ can be given a causal interpretation. Thus, for these scenarios, adjusting for $Z$ is not necessary for causal inference. We further note that the structure in Figure \ref{fig:d1} does not render $\mathit{OR}_m$ equal to $\mathit{OR}_c(Z)$, even if $\mathit{OR}_c(Z)$ is constant across levels of $Z$. This is a consequence of the noncollapsibility of the odds ratio. For a more thorough discussion on (non)collapsibility and the special properties of odds ratios, we refer the reader to \citet{Greenland}.\looseness=-1 \section{Matched Cohort Studies} \label{sec:matched} \subsection{Design} A cohort study that is 1:1 matched on $Z$ consists of $n$ pairs of observations, each pair consisting of one exposed subject ($X=1$) and one unexposed subject ($X=0$). The pairs are constructed so that the two subjects within each pair have the same level of confounder $Z$; that is, $Z$ may vary between pairs, but not within pairs. Thus, $Z$ is equally distributed among exposed and unexposed in the matched cohort. 
The outcome $Y$ is assumed to be recorded for each subject. Ignoring $Z$, the paired data can be conveniently represented as in Table \ref{tab:tab1}. In practice, 1:1 matched pairs are typically constructed by first drawing an exposed person from the whole population, then drawing an unexposed person with an equal or similar level of confounder $Z$; we refer to this sampling scheme as \emph{exposure-driven} matching. We note that in twin studies $Z$ is not directly observed, but should be interpreted as all the unobserved factors that are common within a twin pair. \subsection{Likelihood Construction} Before discussing the various analysis methods, we construct the likelihood for the observed data. Let $Z_i$ denote the common value of $Z$ for pair $i$, $i\in\{1,2,\ldots,n\}$. Let $Y^0_i$ and $Y^1_i$ denote the outcome $Y$ for the unexposed ($X=0$) and the exposed ($X=1$) subject in pair $i$, respectively. The matched data consists of $n$ i.i.d. observations ($Y^0_i,Y^1_i,Z_i$). We suppress the index $i$ when not needed, so that $Y^x$ denotes $Y$ for the subject with $X=x$, $x\in (0,1)$, within an arbitrary pair. We use $\operatorname{Pr}(Y=y,X=x,Z=z)$ to denote the population probability of ($Y=y,{X=x},\break Z=z$), and we will use $\operatorname{Pr}^*(Y^0=y^0,Y^1=y^1,Z=z)$ to denote the probability for ($Y^0=y^0,Y^1=y^1,{Z=z}$) induced by the matched sampling scheme. Under exposure-driven matching, the design implies that \begin{subequations} \begin{equation} \label{eq:faca} \operatorname{Pr}^*(Y^x=y^x|Z)=\operatorname{Pr}(Y=y^x|X=x,Z) \end{equation} and \begin{eqnarray}\label{eq:facb} &&\operatorname{Pr}^*(Y^0=y^0,Y^1=y^1|Z)\nonumber \\[-8pt]\\[-8pt] &&\quad=\operatorname{Pr}^*(Y^0=y^0|Z)\operatorname{Pr}^*(Y^1=y^1|Z).\nonumber \end{eqnarray} \end{subequations} Equation (\ref{eq:faca}) ``ties'' the induced distribution to the source population distribution, thus allowing for samples from the former to be used for inference on the latter.
Equation (\ref{eq:facb}) determines the correlation structure of the data, which is crucial for correct\vadjust{\goodbreak} standard error computations. In twin studies, (\ref{eq:faca}) and (\ref{eq:facb}) do not necessarily hold (see Section~\ref{sec:twins}), but are assumed throughout the paper. The induced marginal distribution of $Z$ is determined by the type of matching. Under exposure-driven matching, the induced marginal distribution of $Z$ equals the source population distribution of $Z$ among the exposed, that is, $\operatorname{Pr}^*(Z)=\operatorname{Pr}(Z|X=1)$. In twin studies restricted to the exposure--discordant pairs, we have that $\operatorname{Pr}^*(Z)=\operatorname{Pr}(Z|\mbox{discordant in }X)$. When $Z$ is observed (as in regular matched studies), the likelihood contribution for pair $i$ is \begin{eqnarray*} &&\operatorname{Pr}^*(Y^0_i=y^0_i,Y_i^1=y^1_i,Z_i) \\ &&\quad=\prod_{x=0}^1\operatorname{Pr}(Y=y^x_i|X=x,Z_i)\operatorname{Pr}^*(Z_i), \end{eqnarray*} so that the likelihood for the whole data set becomes equal to \[ \prod_{i=1}^n\prod_{x=0}^1\operatorname{Pr}(Y=y^x_i|X=x,Z_i)\operatorname{Pr}^*(Z_i). \] When $Z$ is unobserved (as in twin studies), the likelihood contribution for pair $i$ is \begin{eqnarray*} &&E^*_{Z_i}\{\operatorname{Pr}^*(Y^0_i=y^0_i,Y_i^1=y^1_i|Z_i)\} \\ &&\quad=E^*_{Z_i} \Biggl\{\prod_{x=0}^1\operatorname{Pr}(Y=y^x_i|X=x,Z_i) \Biggr\}, \end{eqnarray*} so that the likelihood for the whole data set becomes equal to \[ \prod_{i=1}^nE^*_{Z_i} \Biggl\{\prod_{x=0}^1\operatorname{Pr}(Y=y^x_i|X=x,Z_i) \Biggr\}. \] We note that marginally (over $Z$), $Y^0$ and $Y^1$ are associated through the common value of $Z$; the strong\-er~conditional association between $Y$ and $Z$, giv\-en~$X$,~the stronger marginal association between $Y^0$\break and~$Y^1$. \section{Analysis Methods} \label{sec:analysis} In this section we describe and compare the most common analysis methods for matched cohorts.
We emphasize that all these methods can in principle be used to analyze the exposure--discordant pairs in twin studies as well. However, the explicit regression model (Section~\ref{sec:explicit}) requires $Z$ to be observed, which is typically not the case in twin studies. \subsection{Regression Model Explicitly Involving $Z$} \label{sec:explicit} A straightforward way to adjust for $Z$ is to fit a~regression model for $Y$, given $X$ and $Z$, for example, \begin{equation} \label{eq:mod1} \hspace*{14pt}\operatorname{logit}\{\operatorname{Pr}(Y=1|X,Z;\psi_c,\gamma)\}=b(Z;\gamma)+\psi_c X,\hspace*{-14pt} \end{equation} where $b(Z;\gamma)$ is an explicitly specified parametric function of $Z$, typically a linear function $\gamma^TZ$ for continuous $Z$. We refer to a regression model for~$Y$, given $X$ and $Z$, as ``explicit.'' Under model (\ref{eq:mod1}), $\log\{\mathit{OR}_c(Z)\}=\psi_c$, so that the condition in (\ref{eq:const}) is met. This restriction is not crucial though; in principle we can add arbitrary interaction terms between~$X$ and any of the components of $Z$. Maximum likelihood estimates (MLEs) of ($\psi_c,\gamma$) are obtained by maximizing the conditional (given $Z$) likelihood \begin{eqnarray} \label{eq:lik1}\quad &&\prod_{i=1}^n\operatorname{Pr}^*(Y^0_i=y^0_i,Y_i^1=y^1_i|Z_i)\nonumber \\[-8pt]\\[-8pt] &&\quad=\prod_{i=1}^n\prod_{x=0}^1\operatorname{Pr}(Y=y^x_i|X=x,Z_i;\psi_c,\gamma),\nonumber \end{eqnarray} where the equality follows from (\ref{eq:faca}) and (\ref{eq:facb}). If (\ref{eq:facb}) is violated, then $Y^1$ and $Y^0$ are not conditionally independent, given $Z$, and the right-hand side of~(\ref{eq:lik1}) is not a proper likelihood. However, if (\ref{eq:faca}) holds (and model (\ref{eq:mod1}) is correct), then each separate term $\operatorname{Pr}(Y=y^x_i|X=x,Z_i;\psi_c,\gamma)$ in (\ref{eq:lik1}) equals the true marginal (over $Y_i^{1-x}$) likelihood $\operatorname{Pr}(Y_i^x=y_i^x|Z_i)$. 
It follows that the obtained estimate of $\psi_c$ is consistent under (\ref{eq:faca}), regardless of whether (\ref{eq:facb}) holds or not. \subsubsection{Disadvantages} \label{sec:rtrt} \begin{longlist} \item[(1)] If $Z$ is high dimensional, it may be difficult to well specify the function $b(Z;\gamma)$. \item[(2)] If $Z$ is not directly observed, as in twin studies, explicit specification of $b(Z;\gamma)$ is not possible. \item[(3)] In principle, explicit regression models can be adapted for risk differences and risk ratios, by using identity links or the log links, respectively. However, absolute risks and logarithms thereof are, unlike log odds, restricted to ranges $(0,1)$ and $(0,\infty)$, respectively. Thus, models utilizing identity links or log links have to be fitted under these restrictions, which can be rather inconvenient, or they may produce estimates which are outside the supported ranges. \end{longlist} \subsection{Conditional Logistic Regression} \label{sec:cond} Conditional logistic regression mitigates the problems with an explicit specification of $b(Z;\gamma)$. In conditional logistic regression, the function $b(Z;\gamma)$ in~(\ref{eq:mod1}) is replaced with a scalar pair-specific parameter~$b$: \begin{equation} \label{eq:mod2} \operatorname{logit}\{\operatorname{Pr}(Y=1|X,Z)\}=b+\psi_c X. \end{equation} Nothing is assumed about $b$, and thus the risk for model misspecification in $b(Z;\gamma)$ is avoided. A MLE of $\psi_c$ is obtained by conditioning on $Y^0_i+Y^1_i$, for each pair $i$, and maximizing the resulting conditional likelihood, which under (\ref{eq:faca}) and (\ref{eq:facb}) is given by \begin{equation} \label{eq:condlik} \prod_{i:y_i^0\neq y_i^1}\frac{e^{\psi_c y^1_i}}{1+e^{\psi_c }}. \end{equation} Since the conditional likelihood (\ref{eq:condlik}) does not involve~$b$ (or $Z$), it can be used, even if $Z$ is not directly observed, as in twin studies. 
The MLE of $\psi_c$ obtained by maximizing (\ref{eq:condlik}) is given by \begin{equation} \label{eq:condest} \hat{\psi}_{\mathit{c.clr}}=\log(U/V), \end{equation} with standard error $s.e.\{\hat{\psi}_{\mathit{c.clr}}\}=\sqrt{U^{-1}+V^{-1}}$. \subsubsection{Disadvantages} \begin{longlist} \item[(1)] The constant odds ratio assumption (\ref{eq:const}) is crucial in conditional logistic regression. If an interaction term is included between $b$ and $X$ in model~(\ref{eq:mod2}), then $b$ cannot be eliminated by conditioning arguments. If (\ref{eq:const}) is violated, then $\hat{\psi}_{\mathit{c.clr}}$ converges to a~weighted average of the $Z$-specific odds ratios; see Section~\ref{sec:std}. \item[(2)] $\hat{\psi}_{\mathit{c.clr}}$ is generally inconsistent if (\ref{eq:facb}) is violated. There is an important exception. Define the null hypothesis \begin{equation} \label{eq:h0} \mathrm{H}_{0}\dvtx \quad\mbox{(\ref{eq:const}) holds, with }\psi_c=0. \end{equation} In Appendix \ref{sec:app4} we show that $\hat{\psi}_{\mathit{c.clr}}$ converges to 0 under $\mathrm{H}_{0}$ and (\ref{eq:faca}), regardless of whether (\ref{eq:facb}) holds or not. \item[(3)] Conditional logistic regression cannot be used for other measures of association than the log odds ratio, since for other links than the logit link, $b$ cannot be eliminated by conditioning arguments. \end{longlist} \subsection{Mixed Model}\label{sec:mixed} In the mixed model approach, $b$ is assumed to be random, with a specified parametric distribution $\operatorname{Pr}^*(b;\theta)$. 
The MLE of $(\psi_c,\theta)$ is obtained by maximizing the marginal (over $b$) likelihood \begin{eqnarray} \label{eq:mixed} &&\prod_{i=1}^nE^*_{Z_i}\{\operatorname{Pr}^*(Y_i^0=y^0_i,Y_i^1=y^1_i|Z_i)\}\hspace*{-15pt}\nonumber\\[-2pt]\\[-14pt] &&\quad= \prod_{i=1}^nE^*_{b_i} \Biggl[ \Biggl\{ \prod_{x=0}^1\operatorname{Pr}(Y=y^x_i|X=x,b_i;\psi_c) \Biggr\}; \theta \Biggr],\nonumber\hspace*{-15pt} \end{eqnarray} where the equality follows from (\ref{eq:faca}) and (\ref{eq:facb}), and the expectation on the right-hand side is taken over $\operatorname{Pr}^*(b;\theta)$. \citet{Neuhaus2} showed that the mixed model estimate of $\psi_c$ is identical to $\hat{\psi}_{\mathit{c.clr}}$, under mild conditions. This implies that the two methods are equally efficient, and that the mixed model is robust against misspecification of $\operatorname{Pr}^*(b;\theta)$. \subsubsection{Disadvantages} \begin{longlist} \item[(1)] The constant odds ratio assumption (\ref{eq:const}) is crucial in the mixed model. \citet{Neuhaus2} showed that the mixed model is saturated, under mild conditions, so that an interaction term be-\break tween~$b$~and $X$ would lead to identifiability~\mbox{problems}. \item[(2)] The mixed model estimate of $\psi_c$ is generally inconsistent if (\ref{eq:facb}) is violated. \item[(3)] In principle, the mixed model can be adapted for risk differences and risk ratios, by using identity links or the log links, respectively. In practice, these adaptations require that the model is fitted under restrictions, or it may produce estimates outside the supported ranges. \item[(4)] Explicit maximization of the likelihood in (\ref{eq:mixed}) requires numerical techniques. This makes the meth\-od less transparent and relatively computer-intensive. \end{longlist} \subsection{Exposure--Discordant Crude Analysis}\label{sec:std} The methods described in Sections \ref{sec:explicit}--\ref{sec:mixed} all target the conditional odds ratio, $\mathit{OR}_c(Z)$. 
Matched~da\-ta can also be used to estimate a standardized odds ratio. Let $n_{yx}$ denote the number of subjects in the sample with $Y=y$ and $X=x$, so that $n_{00}=U+W$, $n_{01}=V+W$, $n_{10}=V+T$ and $n_{11}=U+T$. Under (\ref{eq:faca}) we have that $\operatorname{Pr}^*(Y^x=y^x)=E^*_Z\{\operatorname{Pr}(Y=y^x|X=x,Z)\}$, that is, $\operatorname{Pr}^*(Y^x=y^x)$ equals the probability of $Y=y^x$ given $X=x$, standardized to $\operatorname{Pr}^*(Z)$. Thus, under (\ref{eq:faca}) a consistent estimate of $\psi_s$ is given by the crude log odds ratio \begin{equation} \label{eq:psiest} \hat{\psi}_{\mathit{s.crude}}=\log \biggl( \frac{n_{11}n_{00}}{n_{01}n_{10}} \biggr). \end{equation} The standard error of $\hat{\psi}_{\mathit{s.crude}}$ (see Appendix \ref{sec:app2}) is given by \begin{equation} \label{eq:se} \hspace*{14pt}\sqrt{n_{11}^{-1}+n_{01}^{-1}+n_{10}^{-1}+n_{00}^{-1}-2n\frac{nT-n_{11}n_{10}}{n_{11}n_{00}n_{01}n_{10}}}.\hspace*{-14pt} \end{equation} The first four terms under the square root sign can be recognized from the usual standard error formula for a log odds ratio, and the fifth term is an adjustment for non-i.i.d. observations. We remind the reader that the interpretation of~$\psi_s$ depends on what distribution\vadjust{\goodbreak} of $Z$ that $\psi_s$ is standardized to. Under exposure-driven matching,\break $\operatorname{Pr}^*(Z)=\operatorname{Pr}(Z|X=1)$ so that $\psi_s$ is standardized to the distribution of $Z$ among the exposed. In a~twin study, $\operatorname{Pr}^*(Z)=\operatorname{Pr}(Z|\mbox{discordant in }X)$ so that $\psi_s$ is standardized to the distribution of $Z$ among the exposure--discordant pairs. \subsubsection{Advantages} One potential disadvantage of the exposure--discordant crude analysis is that it estimates a parameter that is rather nonstandard. 
In the simple scenario that we consider (i.e., 1:1 matching and no additional covariate adjustments) the exposure--discordant crude analysis does not suffer from any of the other disadvantages listed in Sections \ref{sec:explicit}--\ref{sec:mixed}. The relative advantages of the expo\-sure--discordant crude analysis are threefold: \begin{longlist} \item[(1)] The exposure--discordant crude analysis relies on fewer assumptions than the other methods. Specifically, it does not rely on assumptions (\ref{eq:const})\break and~(\ref{eq:facb}). \item[(2)] The exposure--discordant crude analysis is\break computationally simple. \item[(3)] In the exposure--discordant crude analysis, the standardized probabilities $\operatorname{Pr}^*(Y^0=1)$ and\break $\operatorname{Pr}^*(Y^1=1)$ can be estimated separately, and can subsequently be used to construct any standardized measure of the $X$--$Y$ association, for example, risk difference or risk ratio. For this reason, the exposure--discordant crude analysis easily extends to nonbina\-ry outcomes as well. For survival outcomes, for instance, an exposure--discordant crude analysis can~be used to produce standardized Kaplan--Meier curves. \end{longlist} \subsubsection{A closer comparison with conditional logistic regression} Because $\psi_s$ and $\psi_c$ are different parameters, it is not meaningful to compare the methods in Sections \ref{sec:explicit}--\ref{sec:mixed} with the exposure--discordant crude analysis in terms of efficiency of estimates. However, we can make a meaningful comparison in terms of statistical power. Define the null hypothesis \begin{equation} \label{eq:hostar} \mathrm{H}_{0}^*\dvtx \quad\psi_s=0. \end{equation} It is easy to show that $\mathrm{H}_{0}$ in (\ref{eq:h0}) implies $\mathrm{H}_{0}^*$, regardless of whether (\ref{eq:faca}) and (\ref{eq:facb}) hold or not. 
If both (\ref{eq:faca}) and (\ref{eq:facb}) hold, then a Wald test of $\mathrm{H}_{0}$ is based on the statistic $T_c=\hat{\psi}_{\mathit{c.clr}}/s.e.(\hat{\psi}_{\mathit{c.clr}})$. If (\ref{eq:faca}) holds, then a Wald test of $\mathrm{H}_{0}^*$ is based on the statistic $T_s=\hat{\psi}_{\mathit{s.crude}}/s.e.(\hat{\psi}_{\mathit{s.crude}})$. In Appendix \ref{sec:app4} we show that $T_c$ and $T_s$ are asymptotically equal. It immediately follows that the two Wald tests have the same asymptotic power, for any fixed alternative. One potential argument against the exposure--\break discordant crude analysis is that it does not inform us about the exposure effect in the source population. Under exposure-driven matching (and no confounders apart from $Z$), $\psi_s$ is a causal effect in a fictitious population where $Z$ is distributed as among the exposed. In a twin study restricted to the expo\-sure--discordant pairs (and no confounders apart\break from~$Z$), $\psi_s$ is a causal effect in a fictitious population where~$Z$ is distributed as among the exposure--discordant pairs. The effect in these fictitious populations may differ from the effect in the source population, and it is not always obvious whether these fictitious population effects are relevant targets for inference. However, a closer examination shows that a similar argument can be used against the methods that target $\psi_c$ as well, and in particular against conditional logistic regression. Conditional logistic regression relies on the constant odds ratio assumption~(\ref{eq:const}). This is a~very strong assumption, which in any real scenario is most likely violated, to some extent. 
Regardless of whether (\ref{eq:const}) holds or not, $\hat{\psi}_{\mathit{c.clr}}$ converges to\vspace*{-8pt} {\fontsize{9.5}{11.5}{\selectfont{ \begin{eqnarray} \label{eq:or} &&\hspace*{-5pt}\log \biggl\{ \frac{\operatorname{Pr}^*(Y^1=1,Y^0=0)}{\operatorname{Pr}^*(Y^0=1,Y^1=0)} \biggr\}\nonumber\\ &&\hspace*{-5pt}\quad =\log \biggl[\frac{E^*_Z\{\operatorname{Pr}^*(Y^1=1,Y^0=0|Z)\}}{E^*_Z\{\operatorname{Pr}^*(Y^0=1,Y^1=0|Z)\}} \biggr]\nonumber\\ &&\hspace*{-5pt}\quad \stackrel{\mathrm{(\ref{eq:faca}),(\ref{eq:facb})}}{=}\log \biggl[ \frac{E^*_Z\{\operatorname{Pr}(Y=1|X=1,Z)\operatorname{Pr}(Y=0|X=0,Z)\}}{E^*_Z\{\operatorname{Pr}(Y=1|X=0,Z)\operatorname{Pr}(Y=0|X=1,Z)\}} \biggr]\nonumber\\ &&\hspace*{-5pt}\quad =\log[E^*_Z\{W(Z)\mathit{OR}_c(Z)\}],\nonumber\\ \end{eqnarray}}}} where \begin{eqnarray*} &&W(Z) \\ &&\quad=\frac{\operatorname{Pr}(Y=1|X=0,Z)\operatorname{Pr}(Y=0|X=1,Z)}{E^*_Z\{\operatorname{Pr}(Y=1|X=0,Z)\operatorname{Pr}(Y=0|X=1,Z)\}}. \end{eqnarray*} In (\ref{eq:or}), the average is taken over $\operatorname{Pr}^*(Z)$, that is, the same distribution of $Z$ as being standardized to in the exposure--discordant crude analysis. Thus, if (\ref{eq:const}) is violated, then conditional logistic regression does not inform the analyst about exposure effects outside the fictitious population characterized by $\operatorname{Pr}^*(Z)$, to any wider extent than the exposure--discordant crude analysis. Furthermore, whereas $\psi_s$ has a clear interpretation as a population causal effect (when there are no confounders except $Z$), the weighted average in (\ref{eq:or}) does not have any such simple interpretation. An analyst is always at the liberty to assume a priori that (\ref{eq:const}) holds. But equally well, the analyst may assume that the effect in the fictitious population, characterized by $\operatorname{Pr}^*(Z)$, is equal to the effect in the source population, characterized by $\operatorname{Pr}(Z)$. 
Neither of these assumptions is stronger than the other, since neither of them implies the other. Furthermore, with paired data and $Z$ being unobserved (as in twin studies), these assumptions are both untestable. Although our focus is on cohort studies, we end this section by making a comparison with case control studies. A matched case control study is designed analogously to a matched cohort study, but the roles of exposure and outcome are ``switched'' in the sampling scheme; see Section~\ref{sec:introduction}. Thus, in a~match\-ed case control study the crude sample log odds ratio consistently estimates the standardized log odds ratio\vspace*{-8pt} {\fontsize{10.4}{12.4}{\selectfont{ \begin{eqnarray} \label{eq:orstt} &&\hspace*{-4pt}\log \biggl[ \frac{E^*_Z\{\operatorname{Pr}(X=1|Y=1,Z)\}E^*_Z\{\operatorname{Pr}(X=0|Y=0,Z)\}}{E^*_Z\{\operatorname{Pr}(X=0|Y=1,Z)\}E^*_Z\{\operatorname{Pr}(X=1|Y=0,Z)\}} \biggr] ,\nonumber\\&& \end{eqnarray}}}} where $\operatorname{Pr}^*(Z)=\operatorname{Pr}(Z|Y=1)$. In contrast to conditional odds ratios, standardized odds ratios are not symmetrical. That is, the log odds ratio in~(\ref{eq:orstt}), in which $X$ appears to the left of the conditioning sign, cannot be written as $\psi_s$, in which $X$ appears to the right of the conditioning sign. Hence, the log odds ratio in~(\ref{eq:orstt}) has no simple interpretation as a causal effect of $X$ on $Y$ on the log odds ratio scale, even if there are no confounders apart from $Z$. \section{Analysis of Twin Data} \label{sec:twins} In contrast to a regular 1:1 matched cohort study, a twin cohort also contains pairs that are concordant in the exposure. In this section we describe three common methods to incorporate the exposure--con\-cordant pairs in the analysis. To deal with twin studies we extend the notation slightly. Let $X_{ij}$ and $Y_{ij}$ denote $X$ and $Y$ for twin $j$ in pair $i$, $j\in (1,2)$.
We suppress the index $i$ when not needed, so that $X_j$ and $Y_j$ denote $X$ and $Y$ for twin $j$, $j\in (1,2)$, within an arbitrary pair $i$. As before, $Z_i$ represents all the unobserved factors that are common within a twin pair. As discussed in Section~\ref{sec:introduction}, the exposure--discordant pairs in a twin cohort can be viewed as a 1:1 matched cohort. However, some care must be taken. All methods discussed in Section~\ref{sec:analysis} rely on assumption (\ref{eq:faca}), and conditional logistic regression (Section~\ref{sec:cond}) and mixed models\vadjust{\goodbreak} (Section~\ref{sec:mixed}) rely in addition on assumption~(\ref{eq:facb}). For an exposure--discordant twin pair we have that \begin{eqnarray} \label{eq:factwins} &&\hspace*{12pt}\operatorname{Pr}^*(Y^0=y^0,Y^1=y^1|Z)\nonumber\hspace*{-12pt} \\[-8pt]\\[-8pt] &&\hspace*{12pt}\quad=\operatorname{Pr}(Y_j=y^0,Y_{j'}=y^1|X_j=0,X_{j'}=1,Z).\nonumber\hspace*{-12pt} \end{eqnarray} The right-hand side of (\ref{eq:factwins}) can be factorized into $\operatorname{Pr}(Y_j=y^0|X_j=0,Z)\operatorname{Pr}(Y_{j'}=y^1|X_{j'}=1,Z)$ if \begin{subequations} \begin{equation} \label{eq:factwins1} Y_j\perp X_{j'}|(X_j,Z)\end{equation} and \begin{equation} \label{eq:factwins2} Y_1\perp Y_2|(X_1,X_2,Z). \end{equation} \end{subequations} Thus, the analogs to (\ref{eq:faca}) and (\ref{eq:facb}) for twin data are given by (\ref{eq:factwins1}) and (\ref{eq:factwins2}), respectively. Under (\ref{eq:factwins1}), (\ref{eq:faca}) holds, so that the explicit model (Section~\ref{sec:explicit}) and the exposure--discordant crude analysis (Section~\ref{sec:std}) are valid when applied to the exposure--discordant pairs. We note though that it is typically not possible to fit an explicit model to twin data, since $Z$ is typically unobserved. 
If, in addition, (\ref{eq:factwins2}) holds, then~(\ref{eq:facb}) holds as well, and all methods in Section~\ref{sec:analysis} are valid when applied to the exposure--discordant pairs.\looseness=1 Potentially, (\ref{eq:factwins1}) could be violated if $X_{j'}$ has\break a~caus\-al effect on $Y_j$, that is, if the exposure for one twin affects the outcome for the other twin. Similarly, (\ref{eq:factwins2}) could be violated if $Y_{j'}$ has a causal effect on $Y_j$, that is, if the outcome of one twin affects the outcome for the other twin. \subsection{All-Pair Crude Analysis} \label{sec:crude2} Let $r_{yx}$ denote the number of subjects in the full (i.e., both exposure--concordant and exposure--dis\-cordant pairs) sample with $Y=y$ and $X=x$. One simple way to make use of all twin pairs in the analysis is to compute the crude sample log odds ratio \begin{equation} \label{eq:ty} \hat{\psi}_{m.crude}=\log \biggl( \frac{r_{11}r_{00}}{r_{01}r_{10}} \biggr), \end{equation} which consistently estimates the marginal log odds ratio $\psi_m$. Thus, unlike the exposure--discordant crude analysis (Section~\ref{sec:std}), the all-pair crude analysis does not adjust for confounding by $Z$. The standard error of $\hat{\psi}_{m.crude}$ is rather complicated, due to the paired nature of the data. In Appendix \ref{sec:app2} we provide an analytic expression for the standard error. We note that the standard error can also be computed numerically, through Generalized Estimating Equation (GEE) procedures, which are implemented in most common statistical softwares. 
\subsection{Decomposition into Within- and Between-Effects} \label{sec:withinbetween} In twin studies with continuous exposures and outcomes, a popular regression model is \begin{eqnarray} \label{eq:WB} \quad E(Y_j|X_j,X_{j'})&=&\beta_0+\beta_{\mathrm{W}}(X_j-\bar{X})+\beta_{\mathrm{B}}\bar{X}\nonumber\\[-8pt]\\[-8pt] &=&\beta_0+\beta_{\mathrm{W}}X_j+\beta_{\mathrm{B}}'\bar{X},\nonumber \end{eqnarray} with $\bar{X}=\frac{X_1+X_2}{2}$ and $\beta_{\mathrm{B}}'=\beta_{\mathrm{B}}-\beta_{\mathrm{W}}$ \citep{Carlin}. In (\ref{eq:WB}), the pair-specific mean $\bar{X}$ is thought of as conveying information about the confounders~$Z$, which are not observed, but constant within each pair. Thus, the parameter $\beta_{\mathrm{B}}$ is thought of as quantifying the strength of confounding, a ``between effect,'' and the parameter $\beta_{\mathrm{W}}$ is thought of as quantifying the adjusted $X$--$Y$ association, a ``within effect.'' When $X$ and $Y$ are binary, a natural analog to (\ref{eq:WB}) is \begin{eqnarray} \label{eq:WB2} &&\operatorname{logit}\{\operatorname{Pr}(Y_j=1|X_j,X_{j'})\}\nonumber \\[-8pt]\\[-8pt] &&\quad=\beta_0+\beta_{\mathrm{W}}X_j+\beta_{\mathrm{B}}'\bar{X}.\nonumber \end{eqnarray} To see the connection with the methods described in this paper, note that \begin{eqnarray*} \label{eq:betapsi} \beta_{\mathrm{W}}&=&\operatorname{logit}\{\operatorname{Pr}(Y_j=1|X_j=1,X_{j'}=0)\}\\ &&{}-\operatorname{logit}\{\operatorname{Pr}(Y_j=1|X_j=0,X_{j'}=1)\}\\ &=&\operatorname{logit}[E\{\operatorname{Pr}(Y_j=1|X_j=1,X_{j'}=0,Z)| \\ &&\hspace*{99pt}{}X_j=1,X_{j'}=0\}]\\ &&{}-\operatorname{logit}[E\{\operatorname{Pr}(Y_j=1|X_j=0,X_{j'}=1,Z)|\\ &&\hspace*{111pt}{}X_j=0,X_{j'}=1\}]\\ &=&\operatorname{logit}[E^*\{\operatorname{Pr}(Y_j=1|X_j=1,Z)\}]\\ &&{}-\operatorname{logit}[E^*\{\operatorname{Pr}(Y_j=1|X_j=0,Z)\}]\\ &=&\psi_s, \end{eqnarray*} where $\operatorname{Pr}^*(Z)=\operatorname{Pr}(Z|X_1\neq X_2)$, and the third equality follows from assumption 
(\ref{eq:factwins1}). Thus, the within-effect $\beta_{\mathrm{W}}$ is identical to the log odds ratio standardized to the distribution of $Z$ among the exposure--discordant pairs. This argument shows that the decomposition into within- and between-effects is a legitimate method for binary exposures, which was questioned by \citet{Carlin}. When $X$ is binary, $\bar{X}$ can only take values 0, 0.5 and~1. Thus, it is feasible to replace the linear term $\beta_0+\beta_{\mathrm{B}}'\bar{X}$ in (\ref{eq:WB2}) with one parameter for each level of $\bar{X}$, that is, \begin{equation} \label{eq:WB3} \hspace*{14pt}\operatorname{logit}\{\operatorname{Pr}(Y_j=1|X_j,X_{j'})\}=\beta_{\mathrm{W}}X_j+m(\bar{X}),\hspace*{-14pt} \end{equation} with \begin{eqnarray} \label{eq:m} m(\bar{X})&=&\beta_0\mathbf{1}(\bar{X}=0)\nonumber\\[-8pt]\\[-8pt] &&{}+\beta_{0.5}\mathbf{1}(\bar{X}=0.5)+\beta_1\mathbf{1}(\bar{X}=1).\nonumber \end{eqnarray} It is easy to show that the model in (\ref{eq:WB3}) is saturated (i.e., imposes no restrictions on $\operatorname{Pr}(Y_j|X_1,X_2)$), which implies that the MLE of $\beta_{\mathrm{W}}$ based on (\ref{eq:WB3}) is identical to the crude sample log odds ratio in (\ref{eq:psiest}). \subsection{Mixed Model} \label{sec:mixedall} The model in (\ref{eq:mod2}) can be fitted to all pairs, assuming a parametric distribution of $b$ indexed with~$\theta$. 
Parameter estimates are obtained by maximizing the marginal (over $b$) likelihood \begin{eqnarray} \label{eq:mixedfull} &&\hspace*{-3pt}\prod_{i=1}^nE^*_{Z_i|X_{i1},X_{i2}}\{\operatorname{Pr}(Y_{i1}=y_{i1},Y_{i2}=y_{i2}|X_{i1},X_{i2},Z_i)|\nonumber \\ &&\hspace*{189pt}{}X_{i1},X_{i2}\}\nonumber\\[-8pt]\\[-8pt] &&\hspace*{-3pt}\quad =\prod_{i=1}^nE^*_{b_i|X_{i1},X_{i2}} \Biggl[ \Biggl\{ \prod_{j=1}^2\operatorname{Pr}(Y_{ij}=y_{ij}|X_{ij},b_i;\psi_c) \Biggr\}\Big|\nonumber\\ &&\hspace*{167pt}\quad{} X_{i1},X_{i2};\theta \Biggr].\nonumber \end{eqnarray} This approach, however, is associated with a severe problem which is often overlooked. Typically, the distribution of $b$ is specified to not depend on $(X_1,X_2)$, for example, a normal distribution with fixed but unspecified mean and variance. However, from the expression in (\ref{eq:mixedfull}) it is clear that this procedure only produces a proper likelihood under the additional assumption that $b\perp (X_1,X_2)$. In standard textbooks, this assumption is often stated without justification or interpretation (e.g., Fitzmaurice et al., \citeyear{Fitzmaurice}, page~329). Since $b$ is supposed to represent the potential confounders $Z$, we would not generally expect that $b\perp (X_1,X_2)$. Indeed, if $Z$ (and thus~$b$) is independent of $(X_1,X_2)$, it cannot be a~confounder, and there is no need to adjust for $Z$ in the first place. We note that in matched cohort studies, $(X_1,X_2)$ is constant and equal to $(0,1)$ for all pairs, so that an association between $b$ and $(X_1,X_2)$ is ruled out by design. When $b$ is associated with $(X_1,X_2)$, the aforementioned procedure can yield severely biased estimates (\citecs{Neuhaus}; \citecs{Neuhaus3}). In general, the proper marginal likelihood is obtained by averaging over a specified distribution $\operatorname{Pr}(b|X_1,X_2)$ for each pair. This procedure can be very computer intensive, and cannot be carried out with standard software. 
As noted by \citet{Neuhaus} and \citet{Neuhaus3}, there is a simple solution to this problem. Suppose that given $(X_1,X_2)$, $b$ has a normal distribution where the mean, but not the variance, depends on $(X_1,X_2)$. Without loss of generality, we can formulate this as \begin{equation} \label{eq:norm} b=d+m(\bar{X}), \end{equation} where $m(\bar{X})$ is defined in (\ref{eq:m}) and $d|X_1,X_2\sim\break N(0,\sigma^2)$. Under (\ref{eq:norm}), model (\ref{eq:mod2}) translates to \begin{equation} \label{eq:mod3} \hspace*{14pt}\operatorname{logit}\{ \operatorname{Pr}(Y_j=1|X_j,Z)\}=d+\psi_c X_j+m(\bar{X}),\hspace*{-14pt} \end{equation} where $d\perp (X_1,X_2)$ by construction. The model\break in~(\ref{eq:mod3}) can be fitted with standard mixed model software. By comparing the model in~(\ref{eq:mod3}) with the model in~(\ref{eq:WB3}), we see that the solution proposed by \citet{Neuhaus} and \citet{Neuhaus3} can be thought of as combining a mixed model with a within-between decomposition. \begin{table*} \caption{Simulation results for $\psi_c=0$, $\phi=4$} \label{tab:tab2} \begin{tabular}{@{}lcccc@{}} \hline \textbf{Analysis method} & \textbf{Target parameter} & \textbf{Mean est} & \textbf{Emp s.e.}& \textbf{Th s.e.} \\ \hline 1. Explicit & $\psi_c=0$ & 0.00 & 0.13 & 0.13 \\ 2. Cond log reg & $\psi_c=0$ & 0.00 & 0.13 & 0.13 \\ 3. Mixed discordant & $\psi_c=0$ & 0.00 & 0.13 & 0.13 \\ 4. Crude discordant & $\psi_s=0$ & 0.00 & 0.11 & 0.11 \\ 5. Crude all & $\psi_m=1.28$ & 1.28 & 0.08 & 0.08 \\ 6. Mixed all & $\psi_c=0$ & 0.00 & 0.12 & 0.12 \\ \hline \end{tabular} \end{table*} \begin{figure*} \caption{Simulation results for $\psi_c\in (0,0.6)$, $\phi=4$.} \label{fig:powerpsi} \end{figure*} \citet{Neuhaus} and \citet{Neuhaus3} observed that for various scenarios, the estimate of $\psi_c$ obtained by combining a~mixed model with a within-between decomposition is nearly identical to $\hat{\psi}_{\mathit{c.clr}}$. 
\citet{Neuhaus3} gave a theoretical motivation for this observation. We note that there are situations when the two estimates may differ; see \citet{Brumback} for an example. \section{Simulations} \label{sec:sim} \subsection{Part I: Efficiency and Power} \label{sec:eff} In this section we compare the performance of the methods described in Sections \ref{sec:analysis} and \ref{sec:twins}, in terms of efficiency and power. To enable a fair comparison, we analyze the simulated data so that all assumptions hold, for each method respectively. In these simulations, twin pairs were generated. We emphasize that this simulation scheme covers matched data as well, since the exposure--discordant twin pairs can be viewed as a matched cohort. For each twin pair, the random variables ($X_1,X_2,b,Y_1,Y_2$) were generated from the model \fontsize{10pt}{\baselineskip}\selectfont \makeatletter \def\tagform@#1{\normalsize\maketag@@@{(\ignorespaces#1\unskip\@@italiccorr)}} \makeatother \begin{equation} \label{eq:sim} \hspace*{12pt}\cases{ \displaystyle\frac{\operatorname{Pr}(X_1=1|X_2=0)}{\operatorname{Pr}(X_1=0|X_2=0)}=\frac{\operatorname{Pr}(X_2=1|X_1=0)}{\operatorname{Pr}(X_2=0|X_1=0)}\cr \displaystyle\quad=\rho=\frac{1}{2},\cr \displaystyle\frac{\operatorname{Pr}(X_1=1,X_2=1)\operatorname{Pr}(X_1=0,X_2=0)}{\operatorname{Pr}(X_1=1,X_2=0)\operatorname{Pr}(X_1=0,X_2=1)}=\phi,\cr \displaystyle b|X_1,X_2 \sim N \{\theta\bar{X},1 \},\cr \displaystyle Y_1\perp Y_2 |(X_1,X_2,b),\cr Y_j\perp X_{j'} | (X_j,b),\cr \displaystyle \operatorname{logit}\{\operatorname{Pr}(Y_j|X_j,b)\}=b+\psi_cX_j. }\hspace*{-12pt} \end{equation} \normalsize We highlight a few aspects of the model in (\ref{eq:sim}): \begin{longlist} \item[(1)] Under model (\ref{eq:sim}), assumptions (\ref{eq:const}), (\ref{eq:factwins1}), (\ref{eq:factwins2}) and (\ref{eq:norm}) all hold. 
\item[(2)] The restriction $\operatorname{Pr}(X_1=1|X_2=0)=\operatorname{Pr}(X_2=1|X_1=0)$ in the first row of (\ref{eq:sim}) follows by symmetry. \item[(3)] It may appear natural to first specify a mar\-ginal distribution of $b$, then specify a conditional distribution of ($X_1,X_2$), given $b$. The reason for doing it the other way around is twofold. First, it allows us to directly control the rate of exposure-discordance through~$\phi$. Second, it allows us to easily formulate the distribution of $b$ given ($X_1,X_2$) in such a way that (\ref{eq:norm}) holds. \item[(4)] It follows from results in \citet{Chen} that the joint distribution of $(X_1,X_2)$ is completely defined by $\rho$ and $\phi$. It also follows that $\rho$ and $\phi$ are variation independent (i.e., the value of $\rho$ does not restrict the value of $\phi$, and vice versa). \item[(5)] The values of $\phi$ and $\theta$ determine the degree of conditional association of $X_1$ and $X_2$, given $b$. It can be shown (see Appendix \ref{sec:app1}) that for $\theta=2\sqrt{\log(\phi)}$, $X_1\perp X_2|b$. For convenience, we have used $\theta=2\sqrt{\log(\phi)}$ throughout. We note though that none of the methods presented relies on this restriction. \end{longlist} In the first set of simulations, we used $\phi=4$ and $\psi_c=0$, that is, the data were generated under $\mathrm{H}_{0}$ in~(\ref{eq:h0}). For these values, $\psi_s=0$ and $\psi_m=1.28$, which implies a severe degree of confounding. Further,\break $\operatorname{Pr}(X_1\neq X_2)=0.33$, and $\operatorname{Pr}(X_1\neq X_2,Y_1\neq Y_2)= 0.11$. We generated 5000 samples, each of size $n=2000$. Each sample was analyzed with 6 different methods: \begin{longlist} \item[(1)] Explicit regression model $\operatorname{logit}\{\operatorname{Pr}(Y=1|\break X, b)\}=\gamma_0+\gamma_1b+\psi_cX$ (Section~\ref{sec:explicit}). We remind the reader that for twin data, $b$ (or rather, $Z$) is typically unobserved, which rules out the use of an explicit model. 
For a regular matched cohort, the explicit model is a viable choice. Thus, the model was only fitted to the exposure--discordant pairs. \item[(2)] Conditional logistic regression (Section~\ref{sec:cond}). \item[(3)] Mixed model fitted to the exposure--discordant pairs (Section~\ref{sec:mixed}). We used the model $\operatorname{Pr}(Y=1|\break X, b)=b+\psi_cX$, with $b|X_1\neq X_2 \sim N(\theta,\sigma^2)$. \item[(4)] Exposure--discordant crude analysis (Sec-\break tion~\ref{sec:std}). \item[(5)] All pair crude analysis (Section~\ref{sec:crude2}). \item[(6)] Mixed model fitted to all pairs (Section~\ref{sec:mixedall}). We used the model $\operatorname{Pr}(Y=1|X,b)=b+\psi_cX$, with $b|X_1,X_2 \sim N(\theta\bar{X},\sigma^2)$. \end{longlist} Table \ref{tab:tab2} displays the mean (over samples) point estimate, the empirical standard error and the mean theoretical standard error for each analysis, respectively. We note that all methods yield virtually unbiased estimates of their target parameters. For all methods the mean theoretical standard error is identical to the empirical standard error, to the second decimal. To compare the methods in terms of their power to reject $\mathrm{H}_{0}$, we carried out a second set of simulations. We used $\phi=4$ and varied $\psi_c$ over the range $(0,0.6)$. For each value of $\psi_c$, we drew 5000 samples of 2000 pairs each. Each sample was analyzed using methods 1, 2, 3, 4, 6. Figure \ref{fig:powerpsi} displays the empirical rejection probability (i.e., the power) for a Wald test at 5\% significance level, for each method as a~function of~$\psi_c$. We observe that all methods have almost identical power, for the simulated scenarios.\looseness=1 In a third set of simulations, we used $\psi_c=0.4$ and varied $\phi$ over the range $(4,22)$. These values correspond to the range $(0.33,0.13)$ for $\operatorname{Pr}(X_1\neq X_2)$, and the range $(0.11,0.03)$ for $\operatorname{Pr}(X_1\neq X_2,Y_1\neq Y_2)$. 
For each value of $\phi$, we drew 5000 samples of 2000 pairs each. Each sample was analyzed using methods 1, 2, 3, 4, 6. Figure \ref{fig:powerphi} displays the power for each method as a function of $\phi$. Again, we observe that there is almost no difference between the methods, in terms of power, even when the discordance rate is very low. \begin{figure*} \caption{Simulation results for $\psi_c=0.4$, $\phi\in (4,22)$.} \label{fig:powerphi} \end{figure*} Some care must be taken when interpreting power curves. In small samples, parameter estimates can be biased, which may lead to an increased probability of rejection, both under the alternative hypothesis and under the null hypothesis. Thus, an increased power under the alternative hypothesis may come at the cost of a violated significance level under the null hypothesis. Figure \ref{fig:powerpsi} shows that the nominal significance level ($=$~5\% at $\psi_c=0$) is preserved for all methods when $\phi=4$. To confirm that the nominal significance level is preserved across the range $\phi\in (4,22)$, which generated the power curves in Figure \ref{fig:powerphi}, we carried out a fourth set of simulations, using $\psi_c=0$ and varying $\phi$ over the range $(4,22)$. For each value of $\phi$, we drew 5000 samples of 2000 pairs each. Each sample was analyzed using methods 1, 2, 3, 4, 6. Figure \ref{fig:powerphi2} displays the rejection probability for each method as a function of $\phi$. We observe that the rejection probability is close to 0.05, for all methods and all values of $\phi$ in the simulated range.\looseness=1 \begin{figure*} \caption{Simulation results for $\psi_c=0$, $\phi\in (4,22)$.} \label{fig:powerphi2} \end{figure*} Table \ref{tab:tab2} and Figure \ref{fig:powerpsi} indicate that methods 1--4, and 6 are unbiased under the null hypothesis. Additional simulations have confirmed that the methods are unbiased under various alternative hypotheses as well (data not shown). 
\subsection{Part II: Sensitivity to Underlying Assumptions} \label{sec:rob} In this section we demonstrate through examples that the explicit model, conditional logistic regression and the mixed model, can yield biased estimates, if their underlying assumptions are violated. We first consider the assumption that $b\perp (X_1,X_2)$, which is often made for mixed models; see Section~\ref{sec:mixedall}. Toward this end we reanalyzed the 5000 simulated samples which generated Table \ref{tab:tab2}, now fitting the mixed model $\operatorname{Pr}(Y=1|X,b)=b+\psi_cX$ to all pairs, with $b|X_1,X_2 \sim N(\theta,\sigma^2)$. We obtained a mean estimate of $\psi_c$ equal to 1.32, which is indeed biased as an estimate of the true value $\psi_c=0$. We note that this mean estimate is very close to the $\hat{\psi}_{m.crude}$ ($=1.28$) in Table \ref{tab:tab2}. This further demonstrates that ignoring the association between $b$ and ($X_1,X_2$) produces an estimate which is not adjusted for $Z$. Next, we consider the independence assumption (\ref{eq:facb})/(\ref{eq:factwins2}), which is a prerequisite for conditional logistic regression and mixed models. Toward this end we consider a simple scenario for which \begin{equation} \label{eq:ii} (Y_1,Y_2)\perp Z|(X_1,X_2), \end{equation} so that $\psi_c=\psi_s=\psi_m$; see Section~\ref{sec:margcondstd}. We define \begin{equation} \label{eq:sim2} \hspace*{12pt}\cases{ \displaystyle\operatorname{Pr}(Y_j=1|Y_{j'}=0,X_j=1,X_{j'}=0)=p,\cr \displaystyle\operatorname{Pr}(Y_j=1|Y_{j'}=0,X_j=0,X_{j'}=1)=q,\cr \displaystyle \operatorname{Pr}(Y_j=1,Y_{j'}=1|X_j=1,X_{j'}=0)\cr \displaystyle\quad{}\cdot\operatorname{Pr}(Y_j=0,Y_{j'}=0|X_j=1,X_{j'}=0)\cr \displaystyle\quad{}/\bigl(\operatorname{Pr}(Y_j=0,Y_{j'}=1|X_j=1,X_{j'}=0)\cr \displaystyle\hphantom{/}\quad {}\cdot\operatorname{Pr}(Y_j=1,Y_{j'}=0|X_j=1,X_{j'}=0)\bigr)=c. 
}\hspace*{-12pt} \end{equation} It follows from results in \citet{Chen} that the joint distribution of $Y_j$ and $Y_{j'}$ among the exposure--dis\-cordant pairs, $\operatorname{Pr}(Y_j,Y_{j'}|X_j=1,X_{j'}=0)$, is completely defined by the variation independent parameters $p$, $q$ and $c$. $c$ quantifies the degree of deviation from~(\ref{eq:factwins2}); in particular, (\ref{eq:factwins2}) is violated when $c\neq 1$. It is easy to show that assumption (\ref{eq:factwins1}) is logically compatible with all joint values of ($p,q,c$). Thus, we proceed by assuming that (\ref{eq:factwins1}) holds, so that the exposure--discordant crude analysis consistently estimates $\psi_s=\psi_c$. Combining (\ref{eq:ii}) and (\ref{eq:sim2}), and using results in \citet{Chen}, gives that $\hat{\psi}_{\mathit{c.clr}}$ converges to \begin{equation} \label{eq:conv} \log \biggl\{\frac{p(1-q)}{q(1-p)} \biggr\}, \end{equation} whereas the true value of $\psi_c(=\psi_s=\psi_m)$ is given by \begin{equation} \label{eq:trueval} \log \biggl\{\frac{p(1-q)}{q(1-p)} \biggr\}+\log \biggl\{\frac{1-q+qc}{1-p+pc} \biggr\}. \end{equation} Thus, the true value of $\psi_c$ depends on the association between $Y_1$ and $Y_2$ through the second term in~(\ref{eq:trueval}), whereas the asymptotic limit of $\hat{\psi}_{\mathit{c.clr}}$ does not. We used $p=0.3$, $q=0.1$, and $c=4$. For these values, $\psi_c=0.97$, whereas the asymptotic limit of $\hat{\psi}_{\mathit{c.clr}}$ equals 1.35, for conditional logistic regression. We generated 5000 samples, each consisting of $n=2000$ exposure--discordant twin pairs. For each pair, the random variables ($Y_1,Y_2$) were generated from the model in~(\ref{eq:sim2}). Each sample was analyzed with conditional logistic regression (method~2), the mixed model (method~3) and the exposure--discordant crude analysis (method~4). For these methods, we obtained an average estimate of $\psi_c$ equal to 1.35, 1.26 and 0.97, respectively. 
Thus, both conditional logistic regression and the mixed model produced biased estimates, whereas the exposure--discordant crude analysis estimate was unbiased. Next, we consider misspecification of the function $b(Z;\gamma)$, in the explicit model. We generated 5000 samples, each consisting of $n=2000$ twin pairs. For each\vadjust{\goodbreak} twin pair, the random variables $(Z,X_1,X_2,\break Y_1,Y_2)$ were generated from the model \begin{equation} \label{eq:sim5} \hspace*{12pt}\cases{ \displaystyle Z=(V,W),\cr \displaystyle V\perp W,\cr \displaystyle V\sim N(0,1),\cr \displaystyle W\sim \operatorname{Ber}(0.5),\cr \displaystyle X_1\perp X_2 | Z,\cr \displaystyle \operatorname{logit}\{\operatorname{Pr}(X_j=1|Z)\}\cr \displaystyle\quad=\alpha_0+\alpha_1V+\alpha_2W+\alpha_3VW,\cr \displaystyle Y_1\perp Y_2 | (X_1,X_2,b),\cr \displaystyle Y_j\perp X_{j'} | (X_j,b),\cr \displaystyle \operatorname{logit}\{\operatorname{Pr}(Y_j=1|X_j,Z)\}=b(Z;\gamma)+\psi_cX_j,\cr \displaystyle b(Z;\gamma)=\gamma_0+\gamma_1V+\gamma_2W+\gamma_3VW ,}\hspace*{-12pt} \end{equation} with $\alpha_0=2$, $\alpha_1=\alpha_2=1$, $\alpha_3=-1.5$, $\gamma_0=-2$, $\gamma_1=\gamma_2=-1$, $\gamma_3=1.5$, $\psi_c=1.3$. Each sample was analyzed with the misspecified explicit model\break $\operatorname{logit}\{\operatorname{Pr}(Y_j=1|X_j,Z)\}=\gamma_0+\gamma_1V+\gamma_2W+\psi_cX_j$. We obtained an average estimate of $\psi_c$ equal to 0.69, which is severely biased. Finally, we consider the assumption that the random effect $b(Z;\gamma)$ is normally distributed, which is commonly made for mixed models. Toward this end we reanalyzed the 5000 samples generated from model (\ref{eq:sim5}), now fitting the mixed model $\operatorname{Pr}(Y=1|X,b)=b+\psi_cX$ to the exposure--discordant pairs, with $b|X_1\neq X_2 \sim N(\theta,\sigma^2)$. Under the data generating model, the conditional distribution of $b(Z;\gamma)$, given $X_1\neq X_2$ is rather complicated, and, in particular, not normal. 
We obtained an average estimate of $\psi_c$ equal to 1.30, which is identical to the true value, to the second decimal. This finding supports the theoretical results in \citet{Neuhaus2}, which state that the mixed model is robust against the normal random effect assumption. \section{Real Data Examples} \label{sec:examples} \subsection{Matched Cohort Data} The first example is taken from a matched cohort study that aimed to investigate the effect of hysterectomy on risk for CVD \citep{Ingelsson}. A common surgery among perimenopausal women, hysterectomy is often performed on benign indications, but its long-term consequences are not fully understood. The study is based on the Swedish Inpatient Register, where all women who underwent hysterectomy between January 1973 and December 2003 (227,389 individuals) were identified. For each hysterectomized woman, three women who never had hysterectomy were randomly selected from the Register of Total Population. The three unexposed wom\-en were individually matched to the exposed woman by birth year, year of hysterectomy, and county of residence at year of hysterectomy. Information on CVD status was obtained from the Inpatient Register and information of follow up through record linkage to the Cause of Death Register, Emigration Register and Cancer Register. To avoid bias from CVD events occurring in relation to the hysterectomy surgery, the exposed women started their risk time from 30 days after hysterectomy; they were then followed until CVD, heart failure, cervical, corpus or ovarian cancer, death, emigration or end of study (Dec 31, 2003). Similarly, unexposed women started their risk time 30 days after the date of matching, that is, the date of hysterectomy of the corresponding exposed woman. For further details on the study, see \citet{Ingelsson}. In the current analysis we focus on 1:1 matched studies with binary outcomes. 
We constructed a binary outcome by defining $Y=1$ for women who developed CVD during follow-up, and $Y=0$ for the remaining women. We constructed a 1:1 matched sample by matching each exposed woman to one unexposed woman, which was randomly selected from the three unexposed women in the same set. After the exclusions described above, we ended up with 52,814 1:1 matched pairs, of which 6712 were discordant in both the exposure and the outcome. The data were analyzed with methods 1--4 described in Section~\ref{sec:sim}. For method 1 we used the explicit model $\operatorname{logit}\{\operatorname{Pr}(Y=1|Z,X)\}=\gamma_0+\gamma_1[\mbox{birth year}]+\gamma_2[\mbox{year at hysterectomy}]+\gamma_3[\mbox{county}]+\psi_cX$,\break where $\gamma_3$ is a factor parameter with one level for each county. \begin{table}[b] \tabcolsep=0pt \caption{Analysis results for the 1:1 matched subset of the hysterectomy-CVD data} \label{tab:tab4} \begin{tabular*}{\columnwidth}{@{\extracolsep{\fill}}lccc@{}} \hline \textbf{Analysis method} & \textbf{Target parameter} & \textbf{Point est} & \textbf{95\% CI} \\ \hline 1. Explicit & $\psi_c$ & 0.03 & $-$0.02, 0.08\\ 2. Cond log reg & $\psi_c$ & 0.03 & $-$0.02, 0.08\\ 3. Mixed discordant & $\psi_c$ & 0.03 & $-$0.02, 0.08\\ 4. Crude discordant & $\psi_s$ & 0.03 & $-$0.02, 0.07\\ \hline \end{tabular*} \end{table} Table \ref{tab:tab4} displays the results. For all four methods, the point estimate equals 0.03 and the 95\% confidence interval narrowly covers zero, so no significant (at 5\% level) association between hysterectomy and CVD is found in the 1:1 matched subset. According to theory (Neuhaus et al., \citeyear{Neuhaus2}) we would expect the mixed model estimate to be identical\vadjust{\goodbreak} to the estimate obtained from conditional logistic regression. 
Indeed, methods 1--4 all give identical estimates to the second decimal.\looseness=1 Although our focus is on 1:1 matching, all methods in this paper generalize directly to $m$:$n$ matching (see Section~\ref{sec:discussion}). Table \ref{tab:tab8} displays the results when the whole 1:3 matched data is analyzed, using methods 1--4 described in Section~\ref{sec:sim}. \begin{table} \tabcolsep=0pt \caption{Analysis results for the full 1:3 matched hysterectomy-CVD~data} \label{tab:tab8} \begin{tabular*}{\columnwidth}{@{\extracolsep{\fill}}lccc@{}} \hline \textbf{Analysis method} & \textbf{Target parameter} & \textbf{Point est} & \textbf{95\% CI} \\ \hline 1. Explicit & $\psi_c$ & 0.06 & 0.02, 0.09\\ 2. Cond log reg & $\psi_c$ & 0.06 & 0.02, 0.09\\ 3. Mixed discordant & $\psi_c$ & 0.06 & 0.02, 0.09\\ 4. Crude discordant & $\psi_s$ & 0.05 & 0.02, 0.09\\ \hline \end{tabular*} \vspace*{3pt} \end{table} \subsection{Twin Data} The second example is from a twin study of the association between fetal growth and asthma \citep{Ortqvist}. Several studies have shown that there is an association between asthma and low birth weight. This association could potentially be explained by a~causal effect of impaired fetal growth on asthma, but may also be explained by confounding factors. In particular, gestational age is correlated with both birth weight and asthma, and may confound the birth weight-asthma association ({\"O}rtqvist et~al.,\break \citeyear{Ortqvist}). Twins provide an excellent opportunity to separate the causal effect of birth weight from the confounding effect of gestational age, and at the same time adjust for other shared familial factors. All twins born in Sweden in June 1992 to June 1998 were identified through the Swedish Twin Register at the age of 9 or 12 years. Information on asthma and zygosity was collected in telephone interviews with their parents. Birth weight was retrieved from the Medical Birth Register (MFR). 
Of the 15,808 eligible twins 69\% (10,918 individuals) had information on asthma and could also be securely linked to the MFR. In total, there were 3107 MZ pairs. 1087 pairs were discordant in birth weight (exposure), where discordance was defined as a difference greater than 400 grams or 15\%, and 175 pairs were discordant on both birth weight and asthma (outcome). The data were analyzed using methods 2--6 described in Section~\ref{sec:sim}. Table \ref{tab:tab3} displays the results. The estimates obtained from conditional logistic\vadjust{\goodbreak} regression and the exposure--discordant crude analysis are both smaller than the estimate obtained from the all-pair crude analysis. This finding suggests that the birth weight-asthma association is inflated by shared confounding. Methods 2, 3 and 6 gave very similar results, as predicted by theory (Neuhaus et~al., \citeyear{Neuhaus2}; \citecs{Neuhaus}). \begin{table} \tabcolsep=0pt \caption{Analysis results for the birth weight-asthma twin data} \label{tab:tab3} \begin{tabular*}{\columnwidth}{@{\extracolsep{\fill}}lccc@{}} \hline \textbf{Analysis method} & \textbf{Target parameter} & \textbf{Point est} & \textbf{95\% CI} \\ \hline 2. Cond log reg & $\psi_c$ & 0.29 & $-$0.01, 0.59 \\ 3. Mixed discordant & $\psi_c$ & 0.29 & $-$0.01, 0.59 \\ 4. Crude discordant & $\psi_s$ & 0.18 & $-$0.01, 0.37 \\ 5. Crude all & $\psi_m$ & 0.33 & \phantom{$-$}0.16, 0.50 \\ 6. Mixed all & $\psi_c$ & 0.30 & \phantom{$-$}0.00, 0.60 \\ \hline \end{tabular*} \vspace*{3pt} \end{table} \section{Discussion} \label{sec:discussion} We have given an overview of the most common analysis methods for matched cohort studies. We have identified the target parameters in each method, outlined the underlying assumptions and compared the methods in terms of statistical power. 
The analysis methods that we have considered do not estimate the same parameter; the exposure--discordant crude analysis and the within--between model estimate a standardized odds ratio, whereas the explicit method, conditional logistic regression, and the mixed model \mbox{estimate} a conditional odds ratio. Thus, the choice between these methods should primarily be guided by the research question being asked. In addition, it is also important to consider the statistical power, underlying assumptions, computer intensity and flexibility of the methods. Theoretical arguments suggest that when all underlying assumptions hold, all methods that we have considered have the same statistical power. This was confirmed in our simulation study. In terms of underlying assumptions, the methods differ significantly. The exposure--discordant crude analysis relies on few\-er assumptions than the other methods. In terms of computer intensity, the mixed model requires numerical optimization, and is far more time consuming than the other methods. In terms of flexibility, all methods, except the expo\-sure--discordant crude analysis, most naturally target odds ratios. The expo\-sure--discordant crude analysis, however, can easily be used to target any measure of the exposure-out\-come association. We have considered 1:1 matching. Frequently, $m$:$n$ matching is employed, that is, each set is constructed by matching $m$ exposed subjects to $n$ unexposed subjects. All methods in this paper generalize directly to $m$:$n$ matching. Specifically, the underlying assumptions and the interpretation of the target parameters remain the same under $m$:$n$ matching. We conjecture that many of the theoretical properties that we have derived for 1:1 matching carry over to $m$:$n$ matching as well, for example, the asymptotic equivalence in terms of power. However, a stringent treatment of $m$:$n$ matching is more difficult. 
For instance, under violation of (\ref{eq:const}) the probability limit of $\hat{\psi}_{\mathit{c.clr}}$ no longer has an analytic expression, which hampers a theoretical comparison with the exposure--discordant crude analysis. Comparing the methods under $m$:$n$ matching is a topic for future research.\looseness=1 In practice, it is often desirable to adjust the analysis for additional covariates which are not matched on. In the model-based methods (i.e., all methods except the exposure--discordant crude analysis), adjustment for additional covariates can easily be accomplished by adding the covariates as a regressor in the model. It is not obvious, though, how to adjust for additional covariates in the exposure--discordant crude analysis. Extensions of the exposure--discordant crude analysis for additional covariate adjustments are a topic for future research. \begin{appendix} \def\arabic{equation}{\arabic{equation}} \section{} \label{sec:app2} \setcounter{equation}{31} Define $p^x=\operatorname{Pr}(Y\!=\!1|X\!=\!x)$, $q=\operatorname{Pr}(X\!=\!1)$, $q^{00}=\operatorname{Pr}(X_1\!=\!X_2\!=\!0)$, $q^{11}=\operatorname{Pr}(X_1\!=\!X_2\!=\!1)$, $q^d=\operatorname{Pr}(X_1\!\neq\! X_2)$, $c^{00}=\operatorname{cov}(Y_1,Y_2|X_1\!=\!X_2\!=\!0)$, $c^{11}=\operatorname{cov}(Y_1,Y_2|\break X_1\!=\!X_2\!=\!1)$, $c^d=\operatorname{cov}(Y_1, Y_2|X_1\!\neq\! X_2)$, $\psi_0=\operatorname{logit}(p^0)$, $\psi_m=\operatorname{logit}(p^1)- \operatorname{logit}(p^0)$ and $\psi=(\psi_0,\psi_m)^T$. $\hat{\psi}_{m.crude}$ in (\ref{eq:ty}) can be expressed as the second element of the solution to $\sum_iU_i(\psi)=0$, where \begin{eqnarray*} &&U_i(\psi)\\ &&\quad= \left\{\matrix{ (1-X_{i1})(Y_{i1}-p^0)+(1-X_{i2})(Y_{i2}-p^0)\cr X_{i1}(Y_{i1}-p^1)+X_{i2}(Y_{i2}-p^1)} \right\}. 
\end{eqnarray*} It follows from standard theory that $n^{1/2}(\hat{\psi}-\psi)$ is asymptotically normal with mean 0 and variance \[\label{eq:var} \biggl[ E \biggl\{\frac{\partial U_i(\psi)}{\partial\psi^T} \biggr\} \biggr] ^{-1}\operatorname{var}\{U_i(\psi)\} \biggl[ \biggl[ E \biggl\{\frac{\partial U_i(\psi)}{\partial\psi^T} \biggr\} \biggr] ^{-1} \biggr] ^T, \] where, after some algebra, \[ E \biggl\{\frac{\partial U_i(\psi)}{\partial\psi^T} \biggr\}= \pmatrix{ -2p^0(1-p^0) & 0\cr -2p^1(1-p^1) & -2p^1(1-p^1)} \] and{\fontsize{9}{11}{\selectfont{ \begin{eqnarray*} &&\hspace*{-5pt}\operatorname{var}\{U_i(\psi)\}\\ &&\hspace*{-5pt}\quad= \pmatrix{ 2(1-q)p^0(1-p^0)+q^{00}c^{00} & q^dc^d\cr q^dc^d & 2qp^1(1-p^1)+q^{11}c^{11} }. \end{eqnarray*}}}} After additional algebra, the asymptotic variance for $n^{1/2}(\hat{\psi}_{m.crude}-\psi_m)$ is obtained as \begin{eqnarray} \label{eq:vm} &&\frac{1}{2(1-q)p^0(1-p^0)}+\frac{1}{2qp^1(1-p^1)}\nonumber\\ && \quad{}+\frac{q^{00}c^{00}}{4\{p^0(1-p^0)\}^2} +\frac{q^{11}c^{11}}{4\{p^1(1-p^1)\}^2}\\ &&\quad{}-\frac{q^dc^d}{2q(1-q)p^0(1-p^0)p^1(1-p^1)}.\nonumber \end{eqnarray} Replacing the population parameters in (\ref{eq:vm}) with their sample counterparts gives the standard error for $\hat{\psi}_{m.crude}$. To derive the standard error formula in (\ref{eq:se}) we note that a regular 1:1 matched cohort can be obtained by setting $q=0.5$, $q^{00}=q^{11}=0$ and $q^d=1$. The expression in (\ref{eq:vm}) then simplifies to \begin{eqnarray} \label{eq:v} &&\frac{1}{p^0(1-p^0)}+\frac{1}{p^1(1-p^1)}\nonumber \\[-8pt]\\[-8pt] &&\quad{}-\frac{2c^d}{p^0(1-p^0)p^1(1-p^1)}.\nonumber \end{eqnarray} Replacing the population parameters in (\ref{eq:v}) with their sample counterparts gives the standard error formula in~(\ref{eq:se}). 
\section{} \label{sec:app4} Define $\psi_c^{\dagger}=\log \{ \frac{\operatorname{Pr}^*(Y^1=1,Y^0=0)}{\operatorname{Pr}^*(Y^0=1,Y^1=0)} \}$, $\mathrm{H}_c^{\dagger}\dvtx \psi_c^{\dagger}=0$, $\psi_s^{\dagger}=\log \{\frac{\operatorname{Pr}^*(Y^1=1)\operatorname{Pr}^*(Y^0=0)}{\operatorname{Pr}^*(Y^1=0)\operatorname{Pr}^*(Y^0=1)} \}$, $\mathrm{H}_s^{\dagger}\dvtx \psi_s^{\dagger}=0$. $\mathrm{H}_c^{\dagger}$ can be tested using the likelihood ratio test (LRT) statistic \[ T_{c,LR}^{\dagger}=-2\log \biggl \{\frac{ \sup _{\mathrm{H}_c^{\dagger}}(p_{00}^{W}p_{01}^Up_{10}^Vp_{11}^T)}{\sup(p_{00}^{W}p_{01}^Up_{10}^Vp_{11}^T)} \biggr\}, \] and $\mathrm{H}_s^{\dagger}$ can be tested using the LRT statistic \[ T_{s,LR}^{\dagger}=-2\log \biggl\{\frac{ \sup _{\mathrm{H}_s^{\dagger}}(p_{00}^{W}p_{01}^Up_{10}^Vp_{11}^T)}{\sup(p_{00}^{W}p_{01}^Up_{10}^Vp_{11}^T)} \biggr\}, \] where $p_{y^0y^1}\hspace*{-0.5pt}=\hspace*{-0.5pt}\operatorname{Pr}^*(Y^0\hspace*{-0.5pt}=\hspace*{-0.5pt}y^0,Y^1\hspace*{-0.5pt}=\hspace*{-0.5pt}y^1)$, and the suprema are taken under the restrictions $0<p_{y^0y^1}<1$ and $\sum _{y^0y^1}p_{y^0y^1}=1$. Regardless of whether (\ref{eq:const}), (\ref{eq:faca}) and (\ref{eq:facb}) hold or not, $\hat{\psi}_{\mathit{c.clr}}$ and $\hat{\psi}_{\mathit{s.crude}}$ are the nonparametric MLEs of $\psi_c^{\dagger}$ and $\psi_s^{\dagger}$, respectively. Thus, $T_{c,LR}^{\dagger}$ and $T_c$ are asymptotically equal, and $T_{s,LR}^{\dagger}$ and\vadjust{\goodbreak} $T_s$ are asymptotically equal. It is easy to show that $\mathrm{H}_c^{\dagger}$ and $\mathrm{H}_s^{\dagger}$ are equivalent (i.e., $\mathrm{H}_c^{\dagger}$ holds if and only if~$\mathrm{H}_s^{\dagger}$ holds), which implies that $T_{c,LR}^{\dagger}$ and $T_{s,LR}^{\dagger}$ are identical, which then in turn implies that $T_c$ and $T_s$ are asymptotically equal. It is easy to show that $\mathrm{H}_0$ and (\ref{eq:faca}) together imply~$\mathrm{H}_s^{\dagger}$, and thus also $\mathrm{H}_c^{\dagger}$. 
Because $\hat{\psi}_{\mathit{c.clr}}$ converges to~$\psi_c^{\dagger}$, it then follows that $\hat{\psi}_{\mathit{c.clr}}$ converges to 0 under~$\mathrm{H}_0$ and~(\ref{eq:faca}). \section{} \label{sec:app1} Under (\ref{eq:sim}), we have that \begin{eqnarray*} &&\operatorname{Pr}(X_1,X_2,b) \\ &&\quad=\frac{1}{\sqrt{2\pi}}e^{-\{b-\theta\bar{X}\}^2/2}\operatorname{Pr}(X_1,X_2)\\ &&\quad=h(X_1,b)h(X_2,b)e^{-\theta^2X_1X_2/4}\operatorname{Pr}(X_1,X_2), \end{eqnarray*} for some function $h(\cdot,\cdot)$. $X_1\perp X_2|b$ now implies that \[ e^{-\theta^2X_1X_2/4}\operatorname{Pr}(X_1,X_2)=k(X_1)k(X_2) \] for some function $k(\cdot)$, which in turn implies that $\theta=2\sqrt{\log(\phi)}$. \end{appendix} \section*{Acknowledgment} Arvid Sj\"olander acknowledges financial support from The Swedish Research Council (2008-5375). \end{document}
\begin{document} \title{Skew products and crossed products by coactions} \author[Kaliszewski]{S.~Kaliszewski} \address{Department of Mathematics \\ Arizona State University\\ Tempe, AZ 85287} \email{[email protected]} \author[Quigg]{John Quigg} \email{[email protected]} \author[Raeburn]{Iain Raeburn} \address{Department of Mathematics \\ University of Newcastle \\ NSW 2308 \\ Australia} \email{[email protected]} \thanks{Research partially supported by National Science Foundation Grant DMS9401253 and the Australian Research Council} \keywords{$C^*$-algebra, coaction, skew product, directed graph, groupoid, duality} \subjclass{Primary 46L55} \begin{abstract} Given a labeling $c$ of the edges of a directed graph $E$ by elements of a discrete group $G$, one can form a skew-product graph $E\times_c G$. We show, using the universal properties of the various constructions involved, that there is a coaction $\delta$ of $G$ on $C^*(E)$ such that $C^*(E\times_c G)$ is isomorphic to the crossed product $C^*(E)\times_\delta G$. This isomorphism is equivariant for the dual action $\widehat\delta$ and a natural action $\gamma$ of $G$ on $C^*(E\times_c G)$; following results of Kumjian and Pask, we show that $$ C^*(E\times_c G)\times_\gamma G \cong C^*(E\times_c G)\times_{\gamma,r}G \cong C^*(E)\otimes{\slashstyle K}(\ell^2(G)), \phantom{xxxxxxxxxxxxx}$$ and it turns out that the action $\gamma$ is always amenable. We also obtain corresponding results for $r$-discrete groupoids $Q$ and continuous homomorphisms $c\colon Q\to G$, provided $Q$ is amenable. Some of these hold under a more general technical condition which obtains whenever $Q$ is amenable or second-countable. 
\end{abstract} \maketitle \section{Introduction} \label{intro-sec} The $C^*$-algebra of a directed graph $E$ is the universal $C^*$-algebra $C^*(E)$ generated by a family of partial isometries which are parameterized by the edges of the graph and satisfy relations of Cuntz-Krieger type reflecting the structure of the graph. A labeling $c$ of the edges by elements of a discrete group $G$ gives rise to a skew-product graph $E\times_c G$, and the natural action of $G$ by translation on $E\times_c G$ lifts to an action $\gamma$ of $G$ by automorphisms of $C^*(E\times_c G)$. Kumjian and Pask have recently proved (\cite[Corollary 3.9]{KP-CD}) that \begin{equation} \label{KP-eq} C^*(E\times_c G)\times_\gamma G\cong C^*(E)\otimes{\slashstyle K}(\ell^2(G)). \end{equation} {From} this they obtained an elegant description of the crossed product $C^*(F)\times_\beta G$ arising from a free action of $G$ on a graph $F$ (\cite[Corollary 3.10]{KP-CD}). Kumjian and Pask studied $C^*(E\times_c G)$ by observing that the groupoid model for $E\times_c G$ is a skew product of the groupoid model for $E$, and establishing an analogous stable isomorphism for the $C^*$-algebras of skew-product groupoids. They also mentioned that one could obtain these stable isomorphisms from duality theory and a result of Masuda (see \cite[Note 3.7]{KP-CD}). This second argument raises some interesting issues, which are settled in this paper. We begin in \secref{skew-graph-sec} by tackling graph $C^*$-algebras directly. We show that $C^*(E\times_c G)$ can be realized as the crossed product $C^*(E)\times_\delta G$ by a coaction $\delta$ of $G$ (see Theorem~\ref{eqvt-isom}), and apply the duality theorem of Katayama \cite{Kat-TD} to deduce that \begin{equation}\label{kpiso} C^*(E\times_c G)\times_{\gamma,r}G\cong(C^*(E)\times_\delta G)\times_{\hat\delta,r}G\cong C^*(E)\otimes{\slashstyle K}(\ell^2(G)) \end{equation} (see Corollary~\ref{red-stable-cor}). 
Since Katayama's theorem involves the reduced crossed product, the result in (\ref{kpiso}) is slightly different from Kumjian and Pask's (\ref{KP-eq}) concerning full crossed products. Together, these two results suggest that the action of $G$ on $C^*(E\times_c G)$ should be amenable; we prove this in Section 3 by giving a new proof of the Kumjian-Pask theorem which allows us to see directly that the regular representation of $C^*(E\times_c G)\times_\gamma G$ is faithful. Our proof of the Kumjian-Pask theorem is elementary in the sense that it uses only the universal properties of graph $C^*$-algebras, and avoids groupoid and other models. It is therefore slightly more general, and will appeal to those who are primarily interested in graph $C^*$-algebras. Aficionados of groupoids, however, will naturally ask if we can produce similar results for the $C^*$-algebra $C^*(Q\times_c G)$ of a skew-product groupoid $Q\times_c G$. We do this (at least for $r$-discrete groupoids $Q$) in the second half of the paper. Masuda has already identified the groupoid algebra $C^*(Q\times_c G)$ as a crossed product by a coaction, in the context of spatially-defined groupoid $C^*$-algebras, coactions and crossed products (\cite[Theorem 3.2]{Mas-GD}). Nowadays, one would prefer to use full coactions and crossed products, and to give arguments which exploit their universal properties. The result we obtain this way, \thmref{gpdiso}, is more general than could be deduced from \cite{Mas-GD}, and highlights an intriguing technical question: does the $C^*$-algebra of the subgroupoid $c^{-1}(e)=\{q\in Q \mid c(q)=e\}$ embed faithfully in $C^*(Q)$? We answer this in the affirmative for $Q$ amenable (\lemref{amen-cond-lem}) or second countable (\thmref{faith}). In Section 5, we establish the amenability of the canonical action of $G$ on $C^*(Q\times_c G)$ when $Q$ is amenable. 
The results of Section 5 are analogous to those of Section 3, but here we show directly that the action is amenable (\propref{amenact}) using the theory of \cite{AR-AG} and \cite{MRW-EI}, and deduce the original version of \cite[Theorem 3.7]{KP-CD} for full crossed products. \subsection*{Conventions.} A \emph{directed graph} is a quadruple $E=(E^0,E^1,r,s)$ consisting of a set $E^0$ of vertices, a set $E^1$ of edges and maps $r,s\colon E^1\to E^0$ describing the range and source of edges. (This notation is becoming standard because one can then write $E^n$ for the set of paths of length $n$, and think of vertices as paths of length $0$.) The graph $E$ is \emph{row-finite} if each vertex emits at most finitely many edges. Our graphs may have sources and sinks. All groups in this paper are discrete. A \emph{coaction} of a group $G$ on a $C^*$-algebra $A$ is an injective nondegenerate homomorphism $\delta$ of $A$ into the spatial tensor product $A\otimes C^*(G)$ such that $(\delta\otimes\id)\circ\delta=(\id\otimes\delta_G)\circ \delta$. The \emph{crossed product} $A\times_\delta G$ is the universal $C^*$-algebra generated by a covariant representation $(j_A,j_G)$ of $(A,G,\delta)$. In general, we use the conventions of \cite{QuiDC}. We shall write $\lambda$ for the left regular representation of a group $G$ on $\ell^2(G)$, $\rho$ for the right regular representation, and $M$ for the representation of $C_0(G)$ by multiplication operators on $\ell^2(G)$. The characteristic function of a set $K$ will be denoted by~$\chi_K$. \section{Skew-product graphs and duality} \label{skew-graph-sec} Let $E=(E^0,E^1,r,s)$ be a row-finite directed graph. 
Following \cite{KPR-CK}, a \emph{Cuntz-Krieger $E$-family} is a collection $\{ {t}_f, {q}_v\mid f\in E^1, v\in E^0\,\}$ of partial isometries ${t}_f$ and mutually orthogonal projections ${q}_v$ in a $C^*$-algebra $B$ such that \begin{equation*} {t}_f^*{t}_f = {q}_{r(f)} \midtext{and} {q}_v =\sum_{s(f)=v} {t}_f{t}_f^* \end{equation*} for each $f\in E^1$ and every $v\in E^0$ which is not a sink. By \cite[Theorem~1.2]{KPR-CK}, there is an essentially unique $C^*$-algebra $C^*(E)$, generated by a Cuntz-Krieger $E$-family $\{\,{s}_f, {p}_v\,\}$, which is universal in the sense that for any Cuntz-Krieger $E$-family $\{\, {t}_f, {q}_v\,\}$ in a $C^*$-algebra $B$, there is a homomorphism $\Phi=\Phi_{{t},{q}}\colon C^*(E)\to B$ such that $\Phi({s}_f)={t}_f$ and $\Phi({p}_v)={q}_v$ for $f\in E^1, v\in E^0$. If $\sum_v {q}_v\to 1$ strictly in $M(B)$, we say that $\{\, {t}_f, {q}_v\,\}$ is a \emph{nondegenerate} Cuntz-Krieger $E$-family, and the homomorphism $\Phi_{{t},{q}}$ is then nondegenerate. Products $s_e^*s_f$ cancel (see \cite[Lemma~1.1]{KPR-CK}), so $C^*(E)$ is densely spanned by the projections ${p}_v$ and products of the form ${s}_\mu {s}_\nu^*={s}_{e_1}{s}_{e_2}\dots {s}_{e_n}{s}_{f_m}^*\dots{s}_{f_1}^*$, where $\mu$ and $\nu$ are finite paths in the graph $E$. For each $z\in {\b T}$, $\{\, z{s}_f, {p}_v\,\}$ is a Cuntz-Krieger $E$-family, so there is an automorphism $\alpha_z$ of $C^*(E)$ such that $\alpha_z({s}_f) = z{s}_f$ and $\alpha_z({p}_v) = {p}_v$. For each pair of paths $\mu,\nu$ the map $z\mapsto \alpha_z({s}_\mu {s}_\nu^*)$ is continuous, and it follows from a routine $\epsilon/3$-argument that $\alpha$ is a strongly continuous action of $\b T$ on $C^*(E)$. It was observed in \cite{aHR-IS} that the existence of this \emph{gauge action} $\alpha$ characterizes the universal $C^*$-algebra $C^*(E)$. 
The following extension of \cite[Theorem~2.3]{aHR-IS} will appear in \cite{BPR}; it is proved by modifying the proof of \cite[Theorem~2.3]{aHR-IS} to allow for infinite graphs and the possibility of sinks. \begin{lem}\label{gauge-lem} Let $E$ be a row-finite directed graph, and suppose $B$ is a $C^*$-algebra generated by a Cuntz-Krieger $E$-family $\{\, {t}_f, {q}_v\,\}$. If all the ${t}_f$ and ${q}_v$ are non-zero and there is a strongly continuous action $\beta$ of $\b T$ on $B$ such that $\beta_z({t}_f) = z{t}_f$ and $\beta_z({q}_v)=q_v$, then the canonical homomorphism $\Phi_{{t},{q}}\colon C^*(E)\to B$ is an isomorphism. \end{lem} A \emph{labeling} of $E$ by a group $G$ is just a function $c\colon E^1\to G$. The \emph{skew-product} graph $E\times_c G$ is the directed graph with vertex set $E^0\times G$, edge set $E^1\times G$, and range and source maps defined by \[ r(f,t) = (r(f),t)\midtext{and} s(f,t) = (s(f),c(f)t) \righttext{for} (f,t)\in E^1\times G. \] Since $s^{-1}(v,t) = \{\,(f,c(f)t)\mid f\in s^{-1}(v)\,\}$, the vertex $(v,t)\in(E\times_cG)^0$ emits the same number of edges as $v\in E^0$; thus $E\times_cG$ is row-finite if and only if $E$ is, and $(v,t)$ is a sink if and only if $v$ is. \begin{rem} Our skew product $E\times_cG$ is not quite the same as the versions $E(c)$ in \cite{KP-CD} and $E^c$ in \cite{GT-TG}; however, there are isomorphisms $\phi\colon E(c)\to E\times_cG$ and $\psi\colon E^c\to E\times_cG$ given by \[ \phi(t,v) = \psi(v,t) = (v,t^{-1}) \midtext{and} \phi(t,f) = \psi(f,t) = (f,c(f)^{-1}t^{-1}). \] Our conventions were chosen to make the isomorphism of \thmref{eqvt-isom} more natural. \end{rem} \begin{lem}\label{is-delta} Let $c$ be a labeling of a row-finite directed graph $E$ by a discrete group $G$. Then there is a coaction $\delta$ of $G$ on $C^*(E)$ such that \begin{equation}\label{delta-eq} \delta({s}_f) = {s}_f\otimes c(f)\midtext{and} \delta({p}_v) = {p}_v\otimes 1\righttext{for} f\in E^1, \mbox{ } v\in E^0. 
\end{equation} \end{lem} \begin{proof} Straightforward calculations show that $\{\, {s}_f\otimes c(f),\, {p}_v\otimes 1\,\}$ is a nondegenerate Cuntz-Krieger $E$-family, so the universal property gives a nondegenerate homomorphism $\delta\colon C^*(E)\to C^*(E)\otimes C^*(G)$ which satisfies \eqref{delta-eq}. \lemref{gauge-lem} implies that $\delta$ is injective: take $\beta=\alpha\otimes\id$, where $\alpha$ is the gauge action of $\b T$ on $C^*(E)$. It follows from \eqref{delta-eq} that the coaction identity $(\delta\otimes\id)\circ\delta = (\id\otimes\delta_G)\circ\delta$ holds on generators ${s}_f$ and ${p}_v$, and it extends by algebra and continuity to all of $C^*(E)$. \end{proof} The group $G$ acts on the graph $E\times_cG$ by right translation, so that $t\cdot(v,s)= (v,st^{-1})$ and $t\cdot(f,s)= (f,st^{-1})$; this induces an action $\gamma:G\to\Aut C^*(E\times_c G)$ such that \begin{equation}\label{gamma-eq} \gamma_t({s}_{(f,s)}) = {s}_{(f,st^{-1})}\midtext{and} \gamma_t({p}_{(v,s)}) = {p}_{(v,st^{-1})}. \end{equation} \begin{thm}\label{eqvt-isom} Let $c$ be a labeling of a row-finite directed graph $E$ by a discrete group $G$, and let $\delta$ be the coaction from \lemref{is-delta}. Then $$C^*(E)\times_\delta G\cong C^*(E\times_cG),$$ equivariantly for the dual action $\widehat\delta$ and the action $\gamma$ of Equation~\eqref{gamma-eq}. \end{thm} \begin{proof} We use the calculus of \cite{EQ-IC} to handle elements of the crossed product $C^*(E)\times_\delta G$. For each $t\in G$, let $C^*(E)_t = \{\, a\in C^*(E)\mid \delta(a) = a\otimes t\,\}$ denote the corresponding spectral subspace; we write $a_t$ to denote a generic element of $C^*(E)_t$. (This subscript convention conflicts with the standard notation for Cuntz-Krieger families: each partial isometry ${s}_f$ is in $C^*(E)_{c(f)}$, and each projection ${p}_v$ is in $C^*(E)_e$, where $e$ is the identity element of $G$. We hope this does not cause confusion.) 
Then $C^*(E)\times_\delta G$ is densely spanned by the set $\{\, (a_t,u)\mid a_t\in C^*(E)_t;\, t,u\in G\,\}$, and the algebraic operations are given on this set by \[ (a_r,s)(a_t,u) = (a_ra_t,u)\case{s}{tu}, \mbox{\ and\quad} (a_t,u)^* = (a_t^*, tu). \] (If $(j_{C^*(E)},j_G)$ is the canonical covariant homomorphism of $(C^*(E),C_0(G))$ into $M(C^*(E)\times_\delta G)$, then $(a_t,u)$ is by definition $j_{C^*(E)}(a_t)j_G(\chi_{\{u\}})$.) The dual action $\widehat\delta$ of $G$ on $C^*(E)\times_\delta G$ is characterized by $\widehat\delta_s(a_t,u) = (a_t,us^{-1})$. We aim to define a Cuntz-Krieger $E\times_cG$-family $\{{t}_{(f,t)},{q}_{(v,t)}\}$ in $C^*(E)\times_\delta G$ by putting \[ {t}_{(f,t)} = ({s}_f,t)\midtext{and} {q}_{(v,t)} = ({p}_v,t) \] for $(f,t)\in (E\times_cG)^1$ and $(v,t)\in (E\times_cG)^0$. To see that this is indeed a Cuntz-Krieger family, note first that ${p}_v\in C^*(E)_e$ for all vertices $v$, so the ${q}_{(v,t)}$ are mutually orthogonal projections. Next note that ${s}_f\in C^*(E)_{c(f)}$, so \[ {t}_{(f,t)}^*{t}_{(f,t)} =({s}_f^*,c(f)t)({s}_f,t)= ({s}_f^*{s}_f,t)=({p}_{r(f)},t)={q}_{r(f,t)}; \] if $(v,t)$ is not a sink, then $v$ is not a sink in $E$, so \begin{eqnarray*} {q}_{(v,t)} & = & ({p}_v,t)=\sum_{s(f)=v}({s}_f{s}_f^*,t)= \sum_{s(f)=v}({s}_f,c(f)^{-1}t)({s}_f^*,t)\\&=& \sum_{s(f)=v}({s}_f,c(f)^{-1}t)({s}_f,c(f)^{-1}t)^*= \sum_{s(f,r)=(v,t)}{t}_{(f,r)}{t}_{(f,r)}^*. \end{eqnarray*} This shows that $\{{t}_{(f,t)},{q}_{(v,t)}\}$ is a Cuntz-Krieger $E\times_cG$-family. The universal property of the graph algebra now gives a homomorphism $\Phi=\Phi_{{t},{q}}\colon C^*(E\times_cG)\to C^*(E)\times_\delta G$ such that $\Phi({s}_{(f,t)}) = {t}_{(f,t)}$ and $\Phi({p}_{(v,t)}) = {q}_{(v,t)}$; we shall prove that it is an isomorphism using \lemref{gauge-lem}. 
The gauge action $\alpha$ of $\b T$ on $C^*(E)$ commutes with the coaction $\delta$, in the sense that $\delta(\alpha_z(a))= \alpha_z\otimes\id(\delta(a))$ for each $z\in {\b T}$ and $a\in C^*(E)$; it therefore induces an action $\alpha\times \id$ of $\b T$ on $C^*(E)\times_\delta G$ such that \[ (\alpha\times \id)_z({t}_{(f,t)})=(\alpha\times \id)_z({s}_f,t) = (z{s}_f,t) = z{t}_{(f,t)}\midtext{and} (\alpha\times \id)_z( {q}_{(v,t)})= q_{(v,t)}. \] One can see that the elements of $\{{t}_{(f,t)},{q}_{(v,t)}\}$ are all non-zero by fixing a faithful representation $\pi$ of $C^*(E)$ and considering the regular representation $\Ind\pi=((\pi\otimes\lambda)\circ\delta)\times(1\otimes M)$ of $C^*(E)\times_\delta G$ induced by $\pi$: the operator $\Ind\pi({t}_{(f,t)})$, for example, is just $(\pi({s}_f)\otimes \lambda_{c(f)})(1\otimes M(\chi_{\{t\}}))$, which has non-zero initial projection $\pi({p}_{r(f)})\otimes M(\chi_{\{t\}})$. Since $({s}_e, c(f)t)({s}_f, t) = ({s}_e{s}_f, t)$ and $({s}_e,c(f)^{-1}t)({s}_f^*, t) = ({s}_e{s}_f^*, t)$, the range of $\Phi$ contains the generating family $\{j_{C^*(E)}({s}_\mu{s}_\nu^*)j_G(\chi_{\{t\}}), j_{C^*(E)}({p}_v)j_G(\chi_{\{t\}})\}$, and hence is all of $C^*(E)\times_\delta G$. Thus \lemref{gauge-lem} applies, and $\Phi$ is an isomorphism of $C^*(E\times_cG)$ onto $C^*(E)\times_\delta G$. Finally, we check that $\Phi$ intertwines $\gamma$ and $\widehat\delta$: \[ \Phi(\gamma_r({s}_{(f,t)})) = \Phi({s}_{(f,tr^{-1})})=({s}_f,tr^{-1})=\widehat\delta_r({s}_f,t)= \widehat\delta_r(\Phi({s}_{(f,t)})), \] and this completes the proof. \end{proof} \begin{cor}\label{red-stable-cor} Let $c$ be a labeling of a row-finite directed graph $E$ by a discrete group $G$, and let $\gamma$ be the action of Equation~\eqref{gamma-eq}. Then $$C^*(E\times_cG)\times_{\gamma,r}G \cong C^*(E)\otimes{\slashstyle K}(\ell^2(G)).$$ \end{cor} \begin{proof} The corollary follows from \thmref{eqvt-isom} and Katayama's duality theorem \cite[Theorem~8]{Kat-TD}. 
(Even though we are using full coactions, Katayama's theorem still applies: the regular representation is an isomorphism of $C^*(E)\times_\delta G$ onto the (reduced) crossed product by the reduction of $\delta$; see \cite[Corollary~2.6]{NilDF}). \end{proof} \section{Skew-product graphs: the full crossed product} \label{full-graph-sec} \begin{thm}\label{direct-isom-thm} Let $c$ be a labeling of a row-finite directed graph $E$ by a discrete group $G$, and let $\gamma$ be the action of $G$ defined by Equation~\eqref{gamma-eq}. Then $$C^*(E\times_cG)\times_\gamma G\cong C^*(E)\otimes{\slashstyle K}(\ell^2(G)).$$ \end{thm} \begin{proof} Since $G$ is discrete, $C^*(E\times_cG)\times_\gamma G$ is generated by the set of products $\{\, {s}_{(f,r)}u_t,\, {p}_{(v,r)}u_t\,\}$, where $\{\,{s}_{(f,r)},\, {p}_{(v,r)}\,\}$ is a nondegenerate Cuntz-Krieger $E\times_cG$-family and $u$ is the canonical homomorphism of $G$ into $U\!M(C^*(E\times_cG)\times_\gamma G)$ satisfying \begin{equation}\label{univ1-eq} u_t {s}_{(f,r)} = {s}_{(f,rt^{-1})}u_t\midtext{and} u_t {p}_{(v,r)} = {p}_{(v,rt^{-1})}u_t\righttext{for}t\in G. \end{equation} Moreover, the crossed product is universal in the sense that for any nondegenerate Cuntz-Krieger $E\times_cG$-family $\{\,{t}_{(f,r)}, {q}_{(v,r)}\,\}$ in a $C^*$-algebra $B$ and any homomorphism $v$ of $G$ into $U\!M(B)$ satisfying the analogue of \eqref{univ1-eq}, there is a unique nondegenerate homomorphism $\Theta =\Theta_{{t},{q},v}$ of $C^*(E\times_cG)\times_\gamma G$ into $B$ which takes each generator to its counterpart in $B$. We now construct such a family $\{\, {t}_{(f,r)}, {q}_{(v,r)}, v_t\,\}$ in $C^*(E)\otimes {\slashstyle K}(\ell^2(G))$. With $\{{s}_f,{p}_v\}$ denoting the canonical generators of $C^*(E)$ and writing $\chi_r$ for $M(\chi_{\{r\}})$, we set \begin{equation*} {t}_{(f,r)} = {s}_f\otimes \lambda_{c(f)}\chi_r\midtext{and} {q}_{(v,r)} = {p}_v\otimes \chi_r. 
\end{equation*} Then the ${q}_{(v,r)}$ are clearly mutually orthogonal projections, and $\sum_{v,r} {q}_{(v,r)}\to 1$ strictly in $M(C^*(E)\otimes{\slashstyle K}(\ell^2(G)))$. Further, we have \[ {t}_{(f,r)}^*{t}_{(f,r)} ={s}_f^*{s}_f\otimes \chi_r^*\lambda_{c(f)}^*\lambda_{c(f)}\chi_r={s}_f^*{s}_f\otimes \chi_r={p}_{r(f)}\otimes \chi_r= {q}_{r(f,r)}, \] and \begin{eqnarray*} {q}_{(v,r)} & = & \sum_{s(f)=v}{s}_f{s}_f^*\otimes \chi_r\\ & = & \sum_{s(f)=v}({s}_f\otimes \chi_r\lambda_{c(f)})({s}_f\otimes \chi_r\lambda_{c(f)})^*\\ & = & \sum_{s(f)=v}({s}_f\otimes \lambda_{c(f)}\chi_{c(f)^{-1}r}) ({s}_f\otimes \lambda_{c(f)}\chi_{c(f)^{-1}r})^*\\ & = & \sum_{s(f)=v}{t}_{(f,c(f)^{-1}r)}{t}_{(f,c(f)^{-1}r)}^*\\ & = & \sum_{s(f,t)=(v,r)} {t}_{(f,t)}{t}_{(f,t)}^*, \end{eqnarray*} so $\{{t}_{(f,r)},{q}_{(v,r)}\}$ is a Cuntz-Krieger $E\times_c G$-family. The unitary elements $1\otimes \rho_t$ of $M(C^*(E)\otimes {\slashstyle K}(\ell^2(G)))$ satisfy \begin{eqnarray*} (1\otimes \rho_t){t}_{(f,r)} = {s}_f\otimes \rho_t\lambda_{c(f)}\chi_r &=&{s}_f\otimes \lambda_{c(f)}\chi_{rt^{-1}}\rho_t = {t}_{(f,rt^{-1})}(1\otimes \rho_t), \ \mbox{ and}\\ (1\otimes \rho_t){q}_{(v,r)} = {p}_v\otimes \rho_t\chi_r &=& {p}_v\otimes \chi_{rt^{-1}}\rho_t = {q}_{(v,rt^{-1})}(1\otimes \rho_t); \end{eqnarray*} thus we get a nondegenerate homomorphism $\Theta=\Theta_{{t},{q},1\otimes\rho}\colon C^*(E\times_cG)\times_\gamma G\to C^*(E)\otimes {\slashstyle K}(\ell^2(G))$ such that \begin{equation}\label{defTheta} \Theta({s}_{(f,r)}) = {t}_{(f,r)},\ \ \Theta({p}_{(v,r)}) = {q}_{(v,r)},\ \mbox{ and } \ \Theta(u_t) = 1\otimes \rho_t. \end{equation} To construct the inverse for $\Theta$, we use a universal property of $C^*(E)\otimes {\slashstyle K}(\ell^2(G))$. Let $\sigma$ denote the action of $G$ on $C_0(G)$ by right translation: $\sigma_s(f)(t)=f(ts)$. 
The regular representation $M\times \rho$ is an isomorphism of the crossed product $C_0(G)\times_\sigma G$ onto ${\slashstyle K}(\ell^2(G))$, so we can view ${\slashstyle K}(\ell^2(G))$ as the universal $C^*$-algebra generated by the set of products $\{\chi_r\rho_t\mid r,t\in G\}$, where $\rho$ is a unitary homomorphism of $G$ and $\{\,\chi_r\,\}$ is a set of mutually orthogonal projections satisfying \begin{equation}\label{yu-eq} \rho_t\chi_r = \chi_{rt^{-1}}\rho_t. \end{equation} Thus to get a homomorphism defined on $C^*(E)\otimes {\slashstyle K}(\ell^2(G))$ we need a Cuntz-Krieger $E$-family $\{{t}_f,{q}_v\}$ and a family $\{y_r,u_t\}$ analogous to $\{\chi_r, \rho_t\}$ which commutes with the Cuntz-Krieger family. We begin by constructing a family $\{y_r,u_t\}$ in $M(C^*(E\times_cG)\times_\gamma G)$. We claim that, for fixed $r\in G$, the sum $\sum_v {p}_{(v,r)}$ converges strictly in $M(C^*(E\times_cG)\times_\gamma G)$. Because the canonical embedding $j_{C^*(E\times_cG)}$ has a strictly continuous extension, it is enough to check that the sum converges strictly in $M(C^*(E\times_c G))$. Because all the finite sums are projections, they have norm uniformly bounded by $1$, and it is enough to check that $\left(\sum_v {p}_{(v,r)}\right){s}_\mu {s}_\nu^*$ and ${s}_\mu {s}_\nu^*\left(\sum_v {p}_{(v,r)}\right)$ converge for each pair of paths $\mu,\nu$ in $E\times_c G$; and that $\left(\sum_v {p}_{(v,r)}\right){p}_{(u,t)}$ and ${p}_{(u,t)}\left(\sum_v {p}_{(v,r)}\right)$ converge for each vertex $(u,t)$ in $E\times_c G$. But in each case these sums reduce to a single term, so this is trivially true. Thus we may put $y_r = \sum_v {p}_{(v,r)}\in M(C^*(E\times_cG)\times_\gamma G)$. Now $\{\,y_r\mid r\in G\,\}$ is a mutually orthogonal family of projections, and $\sum_{v,r}{p}_{(v,r)}\to 1$ strictly in $M(C^*(E\times_cG))$, so $\sum_s y_s\to 1$ strictly in $M(C^*(E\times_cG)\times_\gamma G)$. 
Moreover, if $u$ is the canonical homomorphism of $G$ into $M(C^*(E\times_cG)\times_\gamma G)$, then $$u_ty_r = u_t\sum_v {p}_{(v,r)} = \sum_v {p}_{(v,rt^{-1})} u_t = y_{rt^{-1}}u_t;$$ thus the family $\{\,y_r,u_t\,\}$ satisfies the analogue of (\ref{yu-eq}), and therefore gives a nondegenerate homomorphism $y\times u\colon {\slashstyle K}(\ell^2(G))\to M(C^*(E\times_cG)\times_\gamma G)$. This homomorphism extends to ${\slashstyle B}(\ell^2(G))=M({\slashstyle K}(\ell^2(G)))$, and we can define unitaries $w_t=y\times u(\lambda_t)$ which satisfy $w_ty_r = y_{tr}w_t$ and $w_tu_r = u_rw_t$ for each $r,t\in G$. Arguing as for the $y_r$ shows that, for each fixed $v$ and $f$, the sums $\sum_r{p}_{(v,r)}$ and $\sum_r {s}_{(f,r)}$ converge strictly in $M(C^*(E\times_cG))$. Thus we may define ${t}_f$ and ${q}_v$ in $M(C^*(E\times_cG)\times_\gamma G)$ by \[ {t}_f = \left(\sum_r {s}_{(f,r)}\right)w_{c(f)^{-1}} \midtext{and} {q}_v = \sum_r{p}_{(v,r)}. \] Now $\{{q}_v\}$ is a family of mutually orthogonal projections; to check the Cuntz-Krieger relations for $\{\, {t}_f,\,{q}_v\,\}$, first note that \[ \biggl(\sum_r{s}_{(f,r)}\biggr)^* \biggl(\sum_t{s}_{(f,t)}\biggr) =\sum_{r,t}{s}_{(f,r)}^*{s}_{(f,t)} =\sum_r {s}_{(f,r)}^*{s}_{(f,r)} =\sum_r {p}_{r(f,r)} ={q}_{r(f)}, \] so that \begin{equation}\label{twq-eq} {t}_f^*{t}_f = w_{c(f)}\biggl(\sum_r{s}_{(f,r)}\biggr)^* \biggl(\sum_t{s}_{(f,t)}\biggr)w_{c(f)}^* = w_{c(f)}{q}_{r(f)}w_{c(f)}^*. \end{equation} Easy calculations show that $y_t{q}_v = q_vy_t$ and $u_tq_v=q_vu_t$, so each ${q}_v$ commutes with everything in the range of $y\times u$ in $M(C^*(E\times_cG)\times_\gamma G)$, and in particular with each $w_t$; thus Equation (\ref{twq-eq}) implies that $t^*_f{t}_f={q}_{r(f)}$. 
We also have \begin{eqnarray*} {q}_v & = & \sum_r \sum_{s(f,t)=(v,r)} {s}_{(f,t)}{s}_{(f,t)}^*\\ & = & \sum_r \sum_{s(f)=v} {s}_{(f,c(f)^{-1}r)}{s}_{(f,c(f)^{-1}r)}^*\\ & = & \sum_{s(f)=v} \sum_r {s}_{(f,r)}{s}_{(f,r)}^*\\ & = & \sum_{s(f)=v} \biggl(\sum_r {s}_{(f,r)}w_{c(f)^{-1}}\biggr) \biggl(\sum_t {s}_{(f,t)}w_{c(f)^{-1}}\biggr)^*\\ & = & \sum_{s(f)=v} {t}_f{t}_f^*, \end{eqnarray*} and $\sum_v{q}_v = \sum_{v,r}{p}_{(v,r)}\to 1$ strictly in $M(C^*(E\times_cG)\times_\gamma G)$, so the set $\{\, {t}_f,{q}_v\,\}$ is a nondegenerate Cuntz-Krieger $E$-family. We have already observed that each ${q}_v$ commutes with the range of $y\times u$. Further calculations show that \begin{eqnarray} y_s{t}_f & = & \biggl(\sum_v {p}_{(v,s)}\biggr) \biggl(\sum_r {s}_{(f,r)}\biggr) w_{c(f)^{-1}}\notag\\ & = & {s}_{(f,c(f)^{-1}s)}w_{c(f)^{-1}}\label{yt=ty}\\ & = & \biggl(\sum_r {s}_{(f,r)}\biggr)\biggl(\sum_v {p}_{(v,c(f)^{-1}s)}\biggr)w_{c(f)^{-1}}\notag\\ & = & \biggl(\sum_r{s}_{(f,r)}\biggr)y_{c(f)^{-1}s}w_{c(f)^{-1}}\notag\\ & = & \biggl(\sum_r{s}_{(f,r)}\biggr)w_{c(f)^{-1}}y_s\notag\\ & = & {t}_fy_s\notag \end{eqnarray} and \[ u_t{t}_f =u_t\biggl(\sum_r {s}_{(f,r)}\biggr)w_{c(f)^{-1}} =\biggl(\sum_r {s}_{(f,rt^{-1})}\biggr)u_tw_{c(f)^{-1}} =\biggl(\sum_r {s}_{(f,r)}\biggr)w_{c(f)^{-1}}u_t={t}_fu_t. \] Thus the homomorphisms $\Phi_{{t},{q}}$ of $C^*(E)$ and $y\times u$ of ${\slashstyle K}(\ell^2(G))$ into $M(C^*(E\times_cG)\times_\gamma G)$ have commuting ranges, and combine to give a homomorphism ${\slashstyle U}psilon$ of $C^*(E)\otimes{\slashstyle K}(\ell^2(G))$ into $M(C^*(E\times_cG)\times_\gamma G)$ such that ${\slashstyle U}psilon({s}_f\otimes 1) = {t}_f$, ${\slashstyle U}psilon({p}_v\otimes 1) = {q}_v$, ${\slashstyle U}psilon(1\otimes \chi_r) = y_r$, and ${\slashstyle U}psilon(1\otimes \rho_t) = u_t$. 
{From} (\ref{yt=ty}) we deduce that \begin{equation}\label{into-calc1} \Upsilon({s}_f\otimes \chi_r\rho_t) = {t}_fy_ru_t ={s}_{(f,c(f)^{-1}r)}w_{c(f)^{-1}}u_t = {s}_{(f,c(f)^{-1}r)}u_tw_{c(f)^{-1}}; \end{equation} since this and $\Upsilon({p}_v\otimes \chi_r\rho_t) = {q}_vy_ru_t= {p}_{(v,r)}u_t$ belong to $C^*(E\times_cG)\times_\gamma G$, it follows that $\Upsilon$ maps $C^*(E)\otimes \c K(\ell^2(G))$ into $C^*(E\times_cG)\times_\gamma G$. We shall show that $\Theta$ and $\Upsilon$ are inverses of one another by checking that $\Upsilon\circ\Theta$ is the identity on the generating set $\{{s}_{(f,r)}u_t,\, {p}_{(v,r)}u_t\}$ for $C^*(E\times_c G)\times_\gamma G$, and that $\Theta\circ \Upsilon$ is the identity on a generating set for $C^*(E)\otimes \c K(\ell^2(G))$. First we note that $T\mapsto \Upsilon(1\otimes T)$ is just $y\times u$ on products $\chi_r\rho_t\in \c K(\ell^2(G))$, so $\Upsilon(1\otimes \lambda_t)=w_t$ by definition of $w_t$. And since $\Theta$ extends to a strictly continuous map on $M(C^*(E\times_cG)\times_\gamma G)$, we have \[ \Theta(y_ru_t) = \Theta\biggl(\sum_v{p}_{(v,r)}u_t\biggr) = \sum_v{p}_v\otimes \chi_r\rho_t = 1\otimes \chi_r\rho_t, \] which implies that $\Theta(w_t) = 1\otimes \lambda_t$ for $t\in G$.
We can now compute: \begin{eqnarray*} \Upsilon\circ\Theta({s}_{(f,s)}u_t) & = & \Upsilon({s}_f\otimes \lambda_{c(f)}\chi_s\rho_t)\\ & = & \biggl(\sum_r{s}_{(f,r)}\biggr)w_{c(f)^{-1}}w_{c(f)} \biggl( \sum_v{p}_{(v,s)}\biggr)u_t\\ \end{eqnarray*} \begin{eqnarray*} \phantom{\Upsilon\circ\Theta({s}_{(f,s)}u_t)} & = & \biggl(\sum_r{s}_{(f,r)}\biggr)\biggl( \sum_v{p}_{(v,s)}\biggr)u_t\\ & = & {s}_{(f,s)}u_t \end{eqnarray*} and \[ \Upsilon\circ\Theta({p}_{(v,s)}u_t) =\Upsilon({p}_v\otimes \chi_s\rho_t) =\biggl(\sum_r{p}_{(v,r)}\biggr)\biggl(\sum_w {p}_{(w,s)}\biggr)u_t ={p}_{(v,s)}u_t, \] which shows that $\Upsilon\circ\Theta$ is the identity. Using \eqref{into-calc1} gives \begin{eqnarray*} \Theta\circ\Upsilon({s}_f\otimes \chi_r\rho_t) & = & \Theta({s}_{(f,c(f)^{-1}r)}u_tw_{c(f)^{-1}})\\ & = & {s}_f\otimes \lambda_{c(f)}\chi_{c(f)^{-1}r}\lambda_{c(f)^{-1}}\rho_t\\ & = & {s}_f\otimes \chi_r\rho_t \end{eqnarray*} and \begin{equation*} \Theta\circ\Upsilon({p}_v\otimes \chi_r\rho_t) = \Theta({p}_{(v,r)}u_t) = {p}_v\otimes \chi_r\rho_t, \end{equation*} which shows that $\Theta\circ\Upsilon$ is the identity. \end{proof} \thmref{direct-isom-thm} and \corref{red-stable-cor} imply that $C^*(E\times_cG)\times_\gamma G$ and $C^*(E\times_cG)\times_{\gamma,r}G$ are isomorphic $C^*$-algebras, so it is natural to ask if the action $\gamma$ is amenable in the sense that the regular representation of the crossed product is faithful.
To see that it is, consider the following diagram: \begin{equation}\label{reg-diag} \begin{diagram} \node{C^*(E\times_cG)\times_\gamma G} \arrow[2]{e,t}{\rm\thmref{eqvt-isom}} \arrow{se,t}{\rm\thmref{direct-isom-thm}} \arrow[2]{s,l}{\begin{smallmatrix}{\rm regular}\\ {\rm representation}\end{smallmatrix}} \node[2]{C^*(E)\times_\delta G\times_{\what\delta}G} \arrow[2]{s,r}{\begin{smallmatrix}{\rm regular}\\ {\rm representation}\end{smallmatrix}}\\ \node[2]{C^*(E)\otimes\c K}\\ \node{C^*(E\times_cG)\times_{\gamma,r}G} \arrow[2]{e,b}{\rm\thmref{eqvt-isom}} \node[2]{C^*(E)\times_\delta G\times_{\what\delta,r}G.} \arrow{nw,t}{{\rm Katayama}} \end{diagram} \end{equation} Let $(j_{C^*(E)},j_G)\colon (C^*(E),C_0(G))\to M(C^*(E)\times_\delta G)$ and $(i_{C^*(E)\times G},i_G)\colon (C^*(E)\times_\delta G,G)\to M(C^*(E)\times_\delta G\times_{\what\delta}G)$ be the canonical maps. Inspection of the formulas on page~768 of \cite{LPRS-RC} shows that composing the regular representation of $C^*(E)\times_\delta G\times_{\what\delta}G$ with Katayama's isomorphism (\cite[Theorem~8]{Kat-TD}) gives \begin{gather*} i_{C^*(E)\times G}(j_{C^*(E)}(a)) \mapsto \id\otimes\lambda(\delta(a)), \mbox{ }i_{C^*(E)\times G}(j_G(g)) \mapsto 1\otimes M(g), \mbox{ } i_G(t)\mapsto 1\otimes\rho_t \end{gather*} for $a\in C^*(E)$, $g\in C_c(G)$, and $t\in G$. Thus chasing generators in $C^*(E\times_cG)\times_\gamma G$ round the outside of the upper right-hand triangle in Diagram~\eqref{reg-diag} yields \begin{gather*} {s}_{(f,r)}\mapsto i_{C^*(E)\times G}(j_{C^*(E)}({s}_f)j_G(\chi_r)) \mapsto \id\otimes\lambda(\delta({s}_f))1\otimes \chi_r = {t}_{(f,r)},\\ {p}_{(v,r)}\mapsto i_{C^*(E)\times G}(j_{C^*(E)}({p}_v)j_G(\chi_r))\mapsto \id\otimes\lambda(\delta({p}_v))1\otimes \chi_r = {q}_{(v,r)},\\ u_t\mapsto i_G(t)\mapsto 1\otimes \rho_t.
\end{gather*} Since this is exactly what the isomorphism $\Theta$ from \thmref{direct-isom-thm} does (see Equation (\ref{defTheta})), the upper right-hand corner of Diagram~\eqref{reg-diag} commutes. But the outside rectangle commutes by general nonsense, so the lower left-hand corner commutes too. This proves: \begin{cor}\label{amen-cor} Let $c$ be a labeling of a row-finite directed graph $E$ by a discrete group $G$. Then the action $\gamma$ of $G$ from Equation~\eqref{gamma-eq} is amenable in the sense that the regular representation of $C^*(E\times_cG)\times_\gamma G$ is faithful. \end{cor} \begin{cor}\label{reducedKP2} Let $G$ be a discrete group acting freely on a row-finite directed graph $F$, and let $\beta$ be the action of $G$ on $C^*(F)$ determined by $\beta_t({s}_f)={s}_{t\cdot f}$ and $\beta_t({p}_v)={p}_{t\cdot v}$. Then the regular representation of $C^*(F)\times_\beta G$ is faithful, and $$C^*(F)\times_\beta G\cong C^*(F)\times_{\beta,r}G\cong C^*(F/G)\otimes{\slashstyle K}(\ell^2(G)).$$ \end{cor} \begin{proof} Since $G$ acts freely, there is a labeling $c\colon (F/G)^1\to G$ and an isomorphism of $F$ onto $(F/G)\times_cG$ which carries the given action to the action of $G$ by right translation (\cite[Theorem~2.2.2]{GT-TG}). Thus this corollary follows by applying Corollaries~\ref{red-stable-cor} and~\ref{amen-cor} to $E=F/G$. \end{proof} \section{Skew-product groupoids and duality} \label{gpd} We will now give groupoid versions of the results in \secref{skew-graph-sec}. Throughout, we consider a discrete group $G$, and a groupoid $Q$ which is $r$-discrete in the modern sense that the range map $r$ is a local homeomorphism (so that counting measures on the sets $Q^u=r^{-1}(u)$ for $u$ in the unit space $Q^0$ give a Haar system on $Q$). In several of the following arguments, we use the fact that the $C^*$-algebra of an $r$-discrete groupoid $Q$ is the enveloping $C^*$-algebra of $C_c(Q)$; this follows from \cite[Theorems~7.1 and~8.1]{QS-CA}. 
Let $c\colon Q\to G$ be a continuous homomorphism. The \emph{skew-product groupoid} $Q\times_c G$ is the set $Q\times G$ with the product topology and operations given for $(x,y)\in Q^2$ and $s\in G$ by \[ (x,c(y)s)(y,s)=(xy,s)\midtext{and} (x,s)^{-1}=(x^{-1},c(x)s). \] Since the range map on the skew-product groupoid is thus given by $r(x,s) = (r(x),c(x)s)$, $Q\times_cG$ is $r$-discrete whenever $Q$ is. The formula \begin{equation} \label{gpdact} s\cdot (x,t)=(x,ts^{-1}) \righttext{for}s,t\in G,\mbox{ }x\in Q \end{equation} defines an action of $G$ by automorphisms of the topological groupoid $Q\times_c G$. We let $\beta$ denote the induced action on $C^*(Q\times_c G)$, which satisfies \begin{equation} \label{beta} \beta_s(f)(x,t)=f\bigl(s^{-1}\cdot (x,t)\bigr)=f(x,ts) \righttext{for}s,t\in G,\mbox{ }f\in C_c(Q\times_c G),\mbox{ }x\in Q. \end{equation} \begin{rem} It is easily checked that the map $(x,s)\mapsto(x,c(x)^{-1}s^{-1})$ gives a topological isomorphism of Renault's skew product \cite[Definition~I.1.6]{RenGA} onto ours, which transports Renault's action (also used in \cite[Proposition~3.7]{KP-CD}) into $\beta$. Our conventions were chosen to make the isomorphism of \thmref{gpdiso} more natural. \end{rem} For $s\in G$ define \begin{equation} \label{bundle} C_s=\{f\in C_c(Q)\mid\supp f\subseteq c^{-1}(s)\}, \end{equation} and put $\c C=\bigcup_{s\in G}C_s$. Then with the operations from $C_c(Q)$, $\c C$ becomes a $^*$-algebraic bundle (with incomplete fibers) over $G$ in the sense that $C_sC_t\subseteq C_{st}$ and $C_s^*=C_{s^{-1}}$. Since $Q$ is the disjoint union of the open sets $\{c^{-1}(s)\}_{s\in G}$, we have $\spn_{s\in G}C_s = C_c(Q)$, which we identify with the space $\Gamma_c(\c C)$ of finitely supported sections of $\c C$. \begin{lem} \label{gpdcoact} Let $c$ be a continuous homomorphism of an $r$-discrete Hausdorff groupoid $Q$ into a discrete group $G$.
Then there is a coaction $\delta$ of $G$ on $C^*(Q)$ such that \[ \delta(f_s)=f_s\otimes s\righttext{for}s\in G,\mbox{ }f_s\in C_s. \] \end{lem} \begin{proof} The above formula extends uniquely to a $^*$-homomorphism of $C_c(Q)$ into $C^*(Q)\otimes C^*(G)$. Since $C^*(Q)$ is the enveloping $C^*$-algebra of $C_c(Q)$, $\delta$ further extends uniquely to a homomorphism of $C^*(Q)$ into $C^*(Q)\otimes C^*(G)$. The coaction identity obviously holds on the generators (that is, the elements of the bundle $\c C$), hence on all of $C^*(Q)$. The homomorphism $\delta$ is nondegenerate, that is, \[ \clsp\{\delta(C^*(Q))(C^*(Q)\otimes C^*(G))\} =C^*(Q)\otimes C^*(G), \] since $\delta(f_s)(1\otimes s^{-1}t)=f_s\otimes t$. To see that $\delta$ is injective, let $1_G$ denote the trivial one-dimensional representation of $G$, and check on the generators that $(\id\otimes 1_G)\circ\delta=\id$. \end{proof} Let $N=c^{-1}(e)$ be the kernel of the homomorphism $c$, which is an open subgroupoid of $Q$. Since the restriction of a Haar system to an open subgroupoid gives a Haar system, counting measures give a Haar system on $N$, so $N$ is an $r$-discrete groupoid. The inclusion of $C_c(N)$ in $C_c(Q)$ extends to the enveloping $C^*$-algebras to give a natural homomorphism $i$ of $C^*(N)$ into $C^*(Q)$. For our next results we will need to require that $i$ be faithful. We have been unable to show that this holds in general, although it does hold when $Q$ is amenable (\lemref{amen-cond-lem}), and when $Q$ is second countable (\thmref{faith}). \begin{thm} \label{gpdiso} Let $c$ be a continuous homomorphism of an $r$-discrete Hausdorff groupoid $Q$ into a discrete group $G$, let $N=c^{-1}(e)$, and let $\delta$ be the coaction from \lemref{gpdcoact}. Assume that the natural map $i\colon C^*(N)\to C^*(Q)$ is faithful. Then \[ C^*(Q)\times_\delta G\cong C^*(Q\times_c G), \] equivariantly for the dual action $\widehat\delta$ and the action $\beta$ of Equation~\eqref{beta}. 
\end{thm} \begin{proof} Let $\c C$ be the $^*$-algebraic bundle over $G$ defined by Equation \eqref{bundle}, let $\c C\times G$ be the product bundle over $G\times G$ whose fiber over $(s,t)$ is $C_s\times\{t\}$, and give $\c C\times G$ the algebraic operations \[ (f_s,tu)(g_t,u)=(f_sg_t,u)\midtext{and} (f_s,t)^*=(f_s^*,st). \] Then the space $\Gamma_c(\c C\times G)$ of finitely supported sections becomes a $^*$-algebra, which can be identified with a dense $^*$-subalgebra of the crossed product $C^*(Q)\times_\delta G$; the dual action is characterized by $\widehat\delta_s(f,t) = (f,ts^{-1})$, for $s,t\in G$ and $f\in \c C$. We claim that $C^*(Q)\times_\delta G$ is the enveloping $C^*$-algebra of $\Gamma_c(\c C\times G)$. Since $C^*(Q)$ is the enveloping $C^*$-algebra of $\Gamma_c(\c C) = C_c(Q)$, by \cite[Theorem~3.3]{EQ-IC} it suffices to show that the unit fiber algebra $C^*(Q)_e = \{\, f\in C^*(Q)\mid \delta(f) = f\otimes e\,\}$ of the Fell bundle associated to $\delta$ is the enveloping $C^*$-algebra of $C_e$. To see this, first note that $C^*(Q)_e$ is the closure of $C_e$ in $C^*(Q)$, which in turn is just $i(C^*(N))$ because $i$ maps $C_c(N)$ onto $C_e$. But $C^*(N)$ is the enveloping $C^*$-algebra of $C_c(N)$. Since $i$ is assumed to be faithful, it follows that $C^*(Q)_e = i(C^*(N))$ is the enveloping $C^*$-algebra of $C_e = i(C_c(N))$, and this proves the claim. Now for each $s,t\in G$ put $D_{s,t} =\{f\in C_c(Q\times_c G)\mid\supp f\subseteq c^{-1}(s)\times\{t\}\}$, so $C_c(Q\times_c G)=\spn_{s,t\in G}D_{s,t}$. For $f\in \c C$ and $t\in G$ define $\Psi(f,t)\in C_c(Q\times_c G)$ by \begin{equation} \Psi(f,t)(x,u)=f(x) \qquad\case{t}{u}. \end{equation} Then $\Psi$ extends uniquely to a linear bijection $\Psi$ of $\Gamma_c(\c C\times G)$ onto $C_c(Q\times_c G)$, since it gives a linear bijection of each fiber $C_s\times\{t\}$ onto the corresponding fiber $D_{s,t}$.
In fact, $\Psi$ is a homomorphism of $^*$-algebras. It is enough to show $\Psi$ preserves multiplication and involution. For $s,t,u,v,z\in G$, $f_s\in C_s$, $g_u\in C_u$, and $x\in Q$, \begin{align*} &\bigl(\Psi(f_s,t)\Psi(g_u,v)\bigr)(x,z) \\&\quad=\sum_{r(y,w)=r(x,z)}\Psi(f_s,t)(y,w) \Psi(g_u,v)\bigl((y,w)^{-1}(x,z)\bigr) \\&\quad=\sum_{\begin{smallmatrix}{r(y)=r(x)}\\ {c(y)w=c(x)z}\end{smallmatrix}}f_s(y) \Psi(g_u,v)\bigl((y^{-1},c(y)w)(x,z)\bigr) &&\case{t}{w} \\&\quad=\sum_{r(y)=r(x)}f_s(y)\Psi(g_u,v)(y^{-1}x,z) &&\case{t}{c(y^{-1}x)z} \\&\quad=\sum_{r(y)=r(x)}f_s(y)g_u(y^{-1}x) &&\twocase{t}{uz}{v}{z} \\&\quad=(f_sg_u)(x) &&\twocase{t}{uv}{v}{z} \\&\quad=\Psi(f_sg_u,v)(x,z) &&\case{t}{uv} \\&\quad=\Psi\bigl((f_s,t)(g_u,v)\bigr)(x,z), \end{align*} and for $s,t,u\in G$, $f_s\in C_s$, and $x\in Q$, \begin{align*} \Psi(f_s,t)^*(x,u) &=\overline{\Psi(f_s,t)\bigl((x,u)^{-1}\bigr)} \\&=\overline{\Psi(f_s,t)(x^{-1},c(x)u)} \\&=\overline{f_s(x^{-1})} &&\case{t}{c(x)u} \\&=f_s^*(x) &&\case{st}{u} \\&=\Psi(f_s^*,st)(x,u) \\&=\Psi\bigl((f_s,t)^*\bigr)(x,u). \end{align*} It follows that $\Psi$ extends to an isomorphism of $C^*(Q)\times_\delta G$ onto $C^*(Q\times_cG)$, since these are enveloping $C^*$-algebras. A straightforward calculation shows that $\Psi$ intertwines the actions $\widehat\delta$ and $\beta$. \end{proof} \begin{rem} For $Q$ amenable, the isomorphism of \thmref{gpdiso} can be deduced from \cite[Theorem~3.2]{Mas-GD}, although Masuda does everything spatially, with reduced coactions, reduced groupoid $C^*$-algebras, and crossed products represented on Hilbert space. To see this, note that the amenability of the skew product $Q\times_c G$ follows from that of $Q$ by \cite[Proposition~II.3.8]{RenGA}, and that $C^*(Q)\times_\delta G$ is isomorphic to the spatial crossed product by the reduction of $\delta$ according to results in \cite{QuiFR} and \cite{RaeOCP}.
\end{rem} \begin{cor}\label{duality} With the same hypotheses as \thmref{gpdiso}, \[ C^*(Q\times_c G)\times_{\beta,r}G\cong C^*(Q)\otimes\c K(\ell^2(G)). \] \end{cor} \begin{proof} This follows immediately from \thmref{gpdiso} and Katayama's duality theorem \cite[Theorem~8]{Kat-TD}. (See also the parenthetical remark in the proof of \corref{red-stable-cor}.) \end{proof} \section{Skew-product groupoids: the full crossed product} \label{gpd-full-sec} In this section we prove a version of \corref{duality} for full crossed products, from which we can recover Proposition~3.7 of \cite{KP-CD}. For this, we shall want to relate semidirect-product groupoids to crossed products. In general, if a discrete group $G$ acts on a topological groupoid $R$, the \emph{semidirect-product groupoid} $R \rtimes G$ is the product space $R\times G$ with the structure \[ (x,s)(y,t)=(x(s\cdot y),st) \midtext{and} (x,s)^{-1}=(s^{-1}\cdot x^{-1},s^{-1}) \] whenever this makes sense. (This is readily seen to coincide with Renault's version in \cite[Definition~I.1.7]{RenGA}.) If $R$ is $r$-discrete and Hausdorff then so is $R \rtimes G$. The following result is presumably folklore, but it never hurts to record groupoid facts. \begin{prop} \label{semi cross} Let $G$ be a discrete group acting on an $r$-discrete Hausdorff groupoid $R$, and let $\beta$ denote the associated action on $C^*(R)$. Then \[ C^*(R \rtimes G) \cong C^*(R)\times_\beta G. \] \end{prop} \begin{proof} For $f\in C_c(R \rtimes G)$ define $\Phi(f)\in C_c(G,C_c(R)) \subseteq C_c(G,C^*(R))$ by \[ \Phi(f)(s)(x)=f(x,s) \righttext{for}s\in G,x\in R. 
\] Then $\Phi$ is a $^*$-homomorphism, since for $f,g\in C_c(R \rtimes G)$ we have \begin{align*} \bigl( \Phi(f)\Phi(g) \bigr)(s)(x) &=\sum_t \bigl( \Phi(f)(t) \beta_t(\Phi(g)(t^{-1}s)) \bigr)(x) \\&=\sum_t \sum_{r(y)=r(x)} \Phi(f)(t)(y) \beta_t(\Phi(g)(t^{-1}s))(y^{-1}x) \\&=\sum_{r(y)=r(x)} \sum_t f(y,t) g(t^{-1}\cdot (y^{-1}x),t^{-1}s) \\&=\sum_{r(y,t)=r(x,s)} f(y,t) g((y,t)^{-1}(x,s)) \\&=(fg)(x,s) =\Phi(fg)(s)(x) \end{align*} and \begin{gather*} \Phi(f)^*(s)(x) =\beta_s(\Phi(f)(s^{-1})^*)(x) =\Phi(f)(s^{-1})^*(s^{-1}\cdot x) =\overline{\Phi(f)(s^{-1})(s^{-1}\cdot x^{-1})}\\ =\overline{f(s^{-1}\cdot x^{-1},s^{-1})} =\overline{f((x,s)^{-1})} =f^*(x,s) =\Phi(f^*)(s)(x). \end{gather*} Since $C^*(R \rtimes G)$ is the enveloping $C^*$-algebra of $C_c(R \rtimes G)$, $\Phi$ extends uniquely to a homomorphism $\Phi$ of $C^*(R \rtimes G)$ into $C^*(R)\times_\beta G$. To show $\Phi$ is an isomorphism, it suffices to find an inverse for the map $\Phi\colon C_c(R \rtimes G)\to C_c(G,C_c(R))$, since $C^*(R)\times_\beta G$ is the enveloping $C^*$-algebra of the $^*$-algebra $C_c(G,C_c(R))$ (see, for example, \cite[Lemma~3.3]{EQ-IC}). Given $f\in C_c(G,C_c(R))$ define $\Psi(f)\in C_c(R \rtimes G)$ by \[ \Psi(f)(x,s)=f(s)(x). \] Since the support of $\Psi(f)$ in $R\times G$ is just the finite union of compact sets $\supp f(s)\times\{s\}$ as $s$ runs through $\supp f$, $\Psi(f)$ has compact support. Moreover, it is obvious that $\Psi$ is the required inverse for $\Phi$ at the level of $C_c$-functions. \end{proof} To show that the isomorphism $\Phi$ of \propref{semi cross} is suitably compatible with regular representations, we use two lemmas. For the first, consider an action $\beta$ of a discrete group $G$ on a $C^*$-algebra $A$. For any invariant closed ideal $I$ of $A$, let $q\colon A\to A/I$ be the quotient map, and let $\tilde\beta$ be the associated action of $G$ on $A/I$.
Let $\ind q\colon A\times_\beta G\to A/I\times_{\tilde\beta,r} G$ be the unique homomorphism such that \[ \ind q(f)=q\circ f\righttext{for} f\in C_c(G,A). \] Then standard techniques from \cite[Th\'eor\`eme~4.12]{ZelPC} yield the following: \begin{lem}\label{ind q} With the above assumptions and notation, there is a unique conditional expectation $P_{A\times G}$ of $A\times_\beta G$ onto $A$ such that $P_{A\times G}(f)=f(e)$ for $f\in C_c(G,A)$. The composition $q \circ P_{A \times G}$ is a conditional expectation of $A\times_\beta G$ onto $A/I$ such that for $b\in A\times_\beta G$, \[ \ind q(b)=0 \midtext{if and only if} q\circ P_{A\times G}(b^*b)=0. \] \end{lem} Now let $G$ act on an $r$-discrete Hausdorff groupoid $R$, and let $\beta$ denote the action of $G$ on $C^*(R)$ such that \[ \beta_s(f)(x)=f(s^{-1}\cdot x) \righttext{for} f\in C_c(R),\mbox{ }s\in G,\mbox{ }x\in R. \] Also let $\lambda_R\colon C^*(R)\to C^*_r(R)$ be the regular representation, viewed as a quotient map, and let $P_R$ be the conditional expectation of $C^*(R)$ onto $C_0(R^0)$ such that \[ P_R(f)=f|_{R^0} \righttext{for} f\in C_c(R). \] Then it follows from \cite[Proposition~II.4.8]{RenGA} that for $b\in C^*(R)$, $\lambda_R(b)=0$ if and only if $P_R(b^*b)=0$. \begin{lem}\label{ker-lem} With the above assumptions and notation, the kernel of the regular representation $\lambda_R$ is a $\beta$-invariant ideal of $C^*(R)$. \end{lem} \begin{proof} It suffices to show that for $b\in C^*(R)$ and $s\in G$, $P_R(b)=0$ {if and only if} $P_R\circ\beta_s(b)=0$. Let $f\in C_c(R)$. Then \begin{align*} \norm{P_R\circ\beta_s(f)} &=\sup_{u\in R^0}\abs{\beta_s(f)(u)} =\sup_{u\in R^0}\abs{f(s^{-1}\cdot u)} =\sup_{u\in R^0}\abs{f(u)} =\norm{P_R(f)}. \end{align*} Hence $\norm{P_R\circ\beta_s(b)}=\norm{P_R(b)}$ for all $b\in C^*(R)$, which proves the lemma. \end{proof} Note that \lemref{ker-lem} ensures that the map $\ind\lambda_R$ is well-defined.
\begin{prop} \label{red semi cross} Let $G$ be a discrete group acting on an $r$-discrete Hausdorff groupoid $R$, let $\beta$ denote the associated action on $C^*(R)$, and let $\Phi$ be the isomorphism of \propref{semi cross}. Then there is an isomorphism $\Phi_r$ such that the following diagram commutes: \begin{equation*} \begin{diagram} \node{C^*(R \rtimes G)} \arrow{e,t}{\Phi} \arrow{s,l}{\lambda_{R \rtimes G}} \node{C^*(R)\times_\beta G} \arrow{s,r}{\ind \lambda_R} \\ \node{C^*_r(R \rtimes G)} \arrow{e,b}{\Phi_r} \node{C^*_r(R)\times_{\beta,r} G.} \end{diagram} \end{equation*} \end{prop} \begin{proof} We need only show that $\ker\bigl( \ind \lambda_R \circ \Phi \bigr) =\ker \lambda_{R\rtimes G}$. Take a positive element $b$ of $C^*(R \rtimes G)$. By \lemref{ind q}, $\ind \lambda_R \circ \Phi(b)=0$ {if and only if} $\lambda_R \circ P_{C^*(R)\times G} \circ \Phi(b)=0$ (because $\Phi(b)$ is positive in $C^*(R)\times_\beta G$), so that $\ind \lambda_R \circ \Phi(b)=0$ {if and only if} $P_R \circ P_{C^*(R)\times G} \circ \Phi(b)=0$ (because $P_{C^*(R)\times G} \circ \Phi(b)$ is positive in $C^*(R)$). On the other hand, $\lambda_{ R \rtimes G }(b) = 0$ if and only if $P_{ R \rtimes G }(b)=0$. Thus, it suffices to show that for all $b\in C^*(R \rtimes G)$, \[ \norm{ P_{R \rtimes G}(b) } = \norm{ P_R \circ P_{C^*(R)\times G} \circ \Phi(b) }, \] and for this it suffices to take $b\in C_c(R \rtimes G)$: \begin{gather*} \norm{ P_R \circ P_{ C^*(R)\times G } \circ \Phi(b) } = \norm{ P_{ C^*(R)\times G } \circ \Phi(b) |_{R^0} } = \norm{ \Phi(b)(e) |_{R^0} } = \sup_{ u \in R^0 } \abs{ \Phi(b)(e)(u) } \\ = \sup_{ u \in R^0 } \abs{ b(u,e) } = \norm{ b |_{( R^0 \times \{e\} )} } = \norm{ b |_{( R \rtimes G )^0} } = \norm{ P_{ R \rtimes G }(b) }. \end{gather*} This completes the proof. \end{proof} We write $Q\times_c G\rtimes G$ for the semidirect product of $G$ acting on $Q\times_c G$, and we write the elements as triples.
\begin{prop} \label{equiv} Let $c$ be a continuous homomorphism of an $r$-discrete Hausdorff groupoid $Q$ into a discrete group $G$, and let $G$ act on the skew product $Q\times_c G$ as in Equation~\eqref{gpdact}. Then the semidirect-product groupoid $Q\times_c G\rtimes G$ is equivalent to $Q$. \end{prop} \begin{proof} We will show that the space $Q\times_c G$ implements a groupoid equivalence (in the sense of \cite[Definition~2.1]{MRW-EI}) between $Q\times_c G\rtimes G$ (acting on the left) and $Q$ (acting on the right). For the right action we need a continuous open surjection $\sigma$ from $Q\times_c G$ onto the unit space of $Q$. For $(x,s)\in Q\times_c G$ define $\sigma(x,s)=s(x)$. Then $\sigma$ is a continuous and open surjection onto $Q^0$. Now put \[ (Q\times_c G)*Q= \{((x,s),y)\in (Q\times_c G)\times Q\mid \sigma(x,s)=r(y)\}, \] and define a map $((x,s),y)\mapsto (x,s)y$ from $(Q\times_c G)*Q$ to $Q\times_c G$ by \[ (x,s)y=(xy,c(y)^{-1}s). \] The continuity and algebraic properties of this map are easily checked, so we have a right action of $Q$ on the space $Q\times_c G$. For the left action we need a continuous and open surjection $\rho$ from $Q\times_c G$ onto the unit space of $Q\times_c G\rtimes G$. Note that this unit space is $Q^0\times G\times \{e\}$, and the range and source maps in $Q\times_c G\rtimes G$ are given by \[ r(x,s,t)=(r(x),c(x)s,e) \midtext{and} s(x,s,t)=(s(x),s,e). \] For $(x,s)\in Q\times_c G$ define $\rho(x,s)=(r(x),c(x)s,e)$. Then $\rho$ is a continuous surjection onto $Q^0\times G\times \{e\}$, and $\rho$ is open since $r$ is and $G$ is discrete. Now put \begin{multline*} (Q\times_c G\rtimes G)*(Q\times_c G) \\= \{((x,s,t),(y,r))\in (Q\times_c G\rtimes G)\times (Q\times_c G)\mid s(x,s,t)=\rho(y,r)\}, \end{multline*} and define a map $((x,s,t),(y,r))\mapsto (x,s,t)(y,r)$ from $(Q\times_c G\rtimes G)*(Q\times_c G)$ to $Q\times_c G$ by \[ (x,s,t)(y,r)=(xy,rt^{-1}).
\] The continuity and algebraic properties of this map are also easily checked, so we have a left action of $Q\times_c G\rtimes G$ on the space $Q\times_c G$. Next we must show that both actions are free and proper, and that they commute. If $(x,s,t)(y,r)=(y,r)$, then $xy=y$ and $rt^{-1}=r$, so $x$ is a unit and $t=e$, hence $(x,s,t)$ is a unit; thus the left action is free. For properness of the left action, it is enough to show that if $L$ is compact in $Q$ and $F$ is finite in $G$, then there is some compact set in $(Q\times_c G\rtimes G)*(Q\times_c G)$ containing all pairs $((x,s,t),(y,r))$ for which \[ ((x,s,t)(y,r),(y,r))\in (L\times F)\times (L\times F). \] But the above condition forces $x\in LL^{-1}$, $s\in c(L)FF^{-1}F$, $t\in F^{-1}F$, $y\in L$, and $r\in F$, so the left action is proper. Freeness and properness of the right action is checked similarly (but more easily), and it is straightforward to verify that the actions commute. To show $Q\times_c G$ is a $(Q\times_c G\rtimes G)$-$Q$ equivalence, it remains to verify that the map $\rho$ factors through a bijection of $(Q\times_c G)/Q$ onto $(Q\times_c G\rtimes G)^0$, and similarly that the map $\sigma$ factors through a bijection of $(Q\times_c G\rtimes G)\backslash (Q\times_c G)$ onto $Q^0$. Since $\rho$ and $\sigma$ are surjective and the actions commute, it suffices to show that $\rho(x,s)=\rho(y,t)$ implies $(x,s)\in (y,t)Q$, and $\sigma(x,s)=\sigma(y,t)$ implies $(x,s)\in (Q\times_c G\rtimes G)(y,t)$. For the first, if $\rho(x,s)=\rho(y,t)$ then $r(x)=r(y)$ and $c(x)s=c(y)t$. Put $z=y^{-1}x$; then $x=yz$ and $c(z)^{-1}t=c(x)^{-1}c(y)t=s$, so $(x,s)=(y,t)z$. For the second, if $\sigma(x,s)=\sigma(y,t)$ then $s(x)=s(y)$. Put $z=xy^{-1},r=c(y)s,\text{ and }q=s^{-1}t$; then $x=zy$, $s=tq^{-1}$, and $c(y)tq^{-1}=c(y)s=r$, so $(x,s)=(z,r,q)(y,t)$.
\end{proof} \begin{prop} \label{amenact} Let $c$ be a continuous homomorphism of an $r$-discrete Hausdorff groupoid $Q$ into a discrete group $G$, and suppose $Q$ is amenable. Then the action $\beta$ of $G$ on $C^*(Q\times_c G)$ defined by Equation \eqref{beta} is amenable in the sense that the regular representation of $C^*(Q\times_cG)\times_\beta G$ is faithful. \end{prop} \begin{proof} First note that \cite[Proposition~6.1.7]{AR-AG}, for example, implies that the full and reduced $C^*$-algebras of an amenable groupoid coincide. Since $Q$ is amenable so is the skew product $Q \times_c G$, by \cite[Proposition~II.3.8]{RenGA}; hence $C^*( Q \times_c G ) = C^*_r( Q \times_c G )$ and $\ind \lambda_{ Q \times_c G }$ is just the regular representation $\lambda_{ C^*( Q \times_c G ) \times G }$. The semidirect-product groupoid $Q \times_c G \rtimes G$ is also amenable, by \propref{equiv}, since groupoid equivalence preserves amenability (\cite[Theorem~2.2.13]{AR-AG}). Thus, \propref{red semi cross} gives a commutative diagram \begin{equation*} \begin{diagram} \node{ C^*( Q \times_c G \rtimes G ) } \arrow{e,t}{ \Phi } \arrow{se,b}{ \Phi_r } \node{ C^*( Q \times_c G ) \times_\beta G } \arrow{s,r}{ \lambda_{ C^*( Q \times_c G ) \times G } } \\ \node[2]{ C^*( Q \times_c G ) \times_{\beta,r} G } \end{diagram} \end{equation*} in which $\Phi$ and $\Phi_r$ are isomorphisms. This proves the proposition. \end{proof} \begin{rem} The above result could also be proved using \cite[Th\'eor\`eme 4.5 and Proposition 4.8]{AnaSD}, since both $C^*(Q \times_c G)$ and $C^*(Q \times_c G)\times_{\beta,r} G$ are nuclear (by \cite[Proposition~3.3.5 and Corollary~6.2.14]{AR-AG} and \cite[Proposition II.3.8]{RenGA}). \end{rem} \begin{lem}\label{amen-cond-lem} Let $c$ be a continuous homomorphism of an $r$-discrete Hausdorff groupoid $Q$ into a discrete group $G$, and put $N=c^{-1}(e)$. Assume that $Q$ is amenable. Then the canonical map $i\colon C^*(N)\to C^*(Q)$ is faithful. 
\end{lem} \begin{proof} Since $Q$ is amenable, so is $N$ \cite[Proposition~5.1.1]{AR-AG}. Let $P_Q\colon C^*(Q)\to C_0(Q^0)$ denote the unique conditional expectation extending the map $f\mapsto f|_{Q^0}$ at the level of $C_c$-functions. Since $Q$ is amenable, the regular representation of $C^*(Q)$ onto $C^*_r(Q)$ is faithful \cite[Proposition~6.1.7]{AR-AG}. By \cite[Proposition~II.4.8]{RenGA}, this implies $P_Q$ is faithful in the sense that $a\in C^*(Q)$ and $P_Q(a^*a)=0$ imply $a=0$, and similarly for $P_N$ (Renault assumes $Q$ is principal, but this is not used in showing his conditional expectation is faithful on the reduced $C^*$-algebra $C^*_r(Q)$). It is easy to see by checking elements of $C_c(N)$ that $P_N=P_Q\circ i$. If $a\in \ker i$ then so is $a^*a$, thus $P_N(a^*a)=0$, so $a^*a=0$ since $N$ is amenable, hence $a=0$. \end{proof} It is easy to check that roughly the same argument as above would work if we only assume $N$ itself is amenable. \begin{thm} \label{full-gpd-thm} Let $c$ be a continuous homomorphism of an amenable $r$-discrete Hausdorff groupoid $Q$ into a discrete group $G$, and let $\beta$ be the action of Equation~\eqref{beta}. Then \[ C^*(Q\times_c G)\times_\beta G\cong C^*(Q)\otimes\c K(\ell^2(G)). \] \end{thm} \begin{proof} \lemref{amen-cond-lem} ensures that the hypotheses of \corref{duality} are satisfied, which gives \[ C^*(Q\times_c G)\times_{\beta,r} G\cong C^*(Q)\otimes\c K(\ell^2(G)). \] The theorem now follows from \propref{amenact}. \end{proof} \section{Embedding $C^*(N)$ in $C^*(Q)$} \label{embed} In this section we fulfill the promise made just before \thmref{gpdiso} by showing the map $i\colon C^*(N)\to C^*(Q)$ is faithful when $Q$ is second countable. But first we need the following elementary lemma, which we could not find in the literature. 
\begin{lem} Let $Q$ be an $r$-discrete Hausdorff groupoid, and let $\pi$ be a $^*$-homomorphism from $C_c(Q)$ to the $^*$-algebra of adjointable linear operators on an inner product space $\c H$. Then for all $a\in C_c(Q)$, the operator $\pi(a)$ is bounded and $\norm{\pi(a)}\le\norm{a}$, where $C_c(Q)$ is given the largest $C^*$-norm. \end{lem} \begin{proof} Let $a\in C_c(Q)$. Since $C_c(Q)$ has the largest $C^*$-norm, it suffices to show $\pi(a)$ is bounded. Choose open bisections (``$Q$-sets'', in Renault's terminology) $\{U_i\}_1^n$ of $Q$ such that $\supp a\subseteq\bigcup_1^n U_i$, and a partition of unity $\{\phi_i\}_1^n$ subordinate to the open cover $\{U_i\}_1^n$ of $\supp a$. Then $a=\sum_1^n a\phi_i$, and $\supp a\phi_i\subseteq U_i$. Conclusion: without loss of generality there exists an open bisection $U$ of $Q$ such that $\supp a\subseteq U$. Then $\supp a^*a\subseteq U^{-1}U$, a relatively compact subset of the unit space $Q^0$. Choose an open set $V\subseteq Q^0$ such that $\overline{U^{-1}U}\subseteq V$ and $\overline V$ is compact. Then $a^*a\in C_0(V)$, which is a $C^*$-subalgebra of the commutative $^*$-subalgebra $C_c(Q^0)$ of $C_c(Q)$. Since $\pi$ restricts to a $^*$-homomorphism from $C_0(V)$ to the adjointable linear operators on $\c H$, $\pi(a^*a)$ is bounded. Since $\pi(a)^*\pi(a)=\pi(a^*a)$, $\pi(a)$ is bounded as well. \end{proof} \begin{thm} \label{faith} Let $c$ be a continuous homomorphism of an $r$-discrete Hausdorff groupoid $Q$ into a discrete group $G$, and put $N=c^{-1}(e)$. Assume that $Q$ is second countable. Then the canonical map $i\colon C^*(N)\to C^*(Q)$ is faithful. \end{thm} \begin{proof} For notational convenience, throughout this proof we suppress the map $i$, and {identify} $C_c(N)$ and $C^*(N)$ with their images in $C^*(Q)$. Our strategy is to find a $C^*$-seminorm on $C_c(Q)$ which restricts to the greatest $C^*$-norm on $C_c(N)$. 
This suffices, for then \emph{a fortiori} the greatest $C^*$-norm on $C_c(Q)$ restricts to the greatest $C^*$-norm on $C_c(N)$, which is what we need to prove. To get this $C^*$-seminorm on $C_c(Q)$, we make $C_c(Q)$ into a pre-Hilbert $C_c(N)$-module, and show that by left multiplication $C_c(Q)$ acts by bounded adjointable operators. We do this by showing that the space $Q$ implements a groupoid equivalence in the sense of \cite[Definition~2.1]{MRW-EI} between $N$ (acting on the right) and a suitable groupoid $H$ (acting on the left); then the construction of \cite{MRW-EI} shows that $C_c(Q)$ is a pre-imprimitivity bimodule, and in particular a right pre-Hilbert $C_c(N)$-module. We define \[ H=\{(x,c(y))\mid x,y\in Q,s(x)=r(y)\}, \] which is a subgroupoid of the skew product $Q\times_c G$. We claim that $H$ is open in $Q\times_c G$. Let $(x,t)\in H$. There exists $y\in Q^{s(x)}$ such that $c(y)=t$, and then there exists a neighborhood $V$ of $y$ such that $c(V)\subseteq\{t\}$. Then $r(V)$ is a neighborhood of $r(y)=s(x)$, so there exists a neighborhood $U$ of $x$ such that $s(U)\subseteq r(V)$. By construction, for all $z\in U$ there exists $w\in V$ such that $r(w)=s(z)$, and then $c(w)=t$. Therefore, the open subset $U\times\{t\}$ of $Q\times G$ is contained in $H$, so $(x,t)$ is an interior point of $H$. This proves the claim. Since the restriction of a Haar system to an open subgroupoid gives a Haar system, counting measures give a Haar system on $H$. Since $Q$ is second countable, the image of the homomorphism $c$ in $G$ is countable, hence the groupoid $H$ is second countable. Since the skew-product groupoid $Q\times_c G$ is $r$-discrete, so is the open subgroupoid $H$. The subgroupoid $N$ acts on the right of $Q$ by multiplication. We want to define a left action of the groupoid $H$ on the space $Q$. For this we need a continuous and open surjection $\rho$ from $Q$ onto the unit space of $H$.
We have $H^0=\{(u,t)\in Q^0\times G\mid t\in c(Q^u)\}$, and the range and source maps in $H$ are given by \[ r(x,t)=(r(x),c(x)t)\midtext{and}s(x,t)=(s(x),t). \] For $y \in Q$ define \[ \rho(y)=(r(y),c(y)). \] Then $\rho$ is a continuous surjection onto $H^0$, and is open since $r$ is and $G$ is discrete. Now put \[ H*Q=\{((x,t),y)\in H\times Q\mid s(x,t)=\rho(y)\}, \] and define a map $((x,t),y)\mapsto (x,t)y$ from $ H*Q$ to $Q$ by \[ (x,t)y=xy. \] The continuity and algebraic properties of this map are easily checked, so we have an action of $H$ on $Q$. Next we must show that both actions are free and proper, and that the actions commute. Since $(x,t)y=y$ implies that $x$, hence $(x,t)$, is a unit, the left action is free. For properness of the left action, let $K$ be a compact subset of $Q\times Q$. We must show that the inverse image of $K$ under the map $((x,t),y)\mapsto ((x,t)y,y)$ from $ H*Q$ to $Q\times Q$ is compact. Without loss of generality suppose $K=L\times L$ for some compact subset $L$ of $Q$. For all $((x,t),y)\in H*Q$, if $((x,t)y,y)\in L\times L$ then $x\in LL^{-1}$, $t\in c(L)$, and $y\in L$, so the inverse image of $K$ is contained in \[ (LL^{-1}\times c(L))*L, \] which is compact in $(Q\times_c G)*Q$. It is easier to see that the right $N$-action is free and proper, and straightforward to check that the actions commute. To show $Q$ is an $H$--$N$ equivalence, it remains to verify that the map $\rho$ factors through a bijection of $Q/N$ onto $H^0$, and similarly that the map $s\colon Q\to N^0$ factors through a bijection of $ H\backslash Q$ onto $N^0$. Since $\rho$ and $s$ are surjective and the actions commute, it suffices to show that $\rho(y)=\rho(z)$ implies $z\in yN$ and $s(y)=s(z)$ implies $z\in Hy$. For the first, if $\rho(y)=\rho(z)$ then $r(y)=r(z)$ and $c(y)=c(z)$. Put $n=y^{-1}z$. Then $c(n)=c(y)^{-1}c(z)=e$, so $n\in N$, and $z=yn$. For the second, if $s(y)=s(z)$, put $x=zy^{-1}$. Then $(x,c(z))\in H$ and $z=xy=(x,c(z))y$. 
Now the theory of \cite{MRW-EI} tells us $C_c(Q)$ becomes a pre-Hilbert $C_c(N)$-module, where $C_c(N)$ is given the $C^*$-norm from $C^*(N)$. {From} the formulas in \cite{MRW-EI} the right module multiplication is given by \begin{equation*} ac(x)=\sum_{r(n)=s(x)}a(xn)c(n^{-1}), \end{equation*} where $a\in C_c(Q)$ and $c\in C_c(N)$, and the inner product is \begin{equation} \label{inner} \rip{a,b}{C_c(N)}(n)=\sum_{r(x,s)=\rho(y)} \overline{a((x,s)^{-1}y)}b((x,s)^{-1}yn), \end{equation} where $a,b\in C_c(Q)$ and $y$ is any element of $Q$ with $s(y)=r(n)$. The right module action is just right multiplication by the subalgebra $C_c(N)$ inside the algebra $C_c(Q)$. The inner product also simplifies in our situation: let $a,b\in C_c(Q)$, and write $a=\sum_{t\in G}a_t$ and $b=\sum_{t\in G}b_t$ with $a_t,b_t\in C_t = \{f\in C_c(Q)\mid \supp f\subseteq c^{-1}(t)\}$. We claim that \[ \rip{a,b}{C_c(N)}=\sum_{t\in G}a_t^*b_t. \] Of course, we are identifying $a_t^*b_t$ with $a_t^*b_t|_N$, but this causes no harm since $a_t^*b_t$ is supported in $N$. In Equation~\eqref{inner} we can take $y=r(n)$, so that $\rho(y)=(r(n),e)$. Then the condition $r(x,s)=\rho(y)$ becomes $r(x)=r(n)$ and $c(x)s=e$, so that \begin{align*} \rip{a,b}{C_c(N)}(n) &=\sum_{\substack{r(x)=r(n)\\c(x)s=e}} \overline{a((x^{-1},c(x)s)r(n))} b((x^{-1},c(x)s)n)\\ &=\sum_{\substack{r(x)=r(n)\\c(x)s=e}} \overline{a(x^{-1})}b(x^{-1}n)\\ &=\sum_{\substack{r(x)=r(n)\\c(x)s=e}} a^*(x)b(x^{-1}n)\\ &=\sum_{t,r\in G}\sum_{\substack{r(x)=r(n)\\c(x)s=e}} a^*_t(x)b_r(x^{-1}n)\\ &=\sum_{t\in G}\sum_{r(x)=r(n)}a^*_t(x)b_t(x^{-1}n). \end{align*} Since in this last expression we need only consider terms with $c(x)=t^{-1}$ and $c(x^{-1}n)=r$, which forces $t=r$, and then $s=t$ in the inner sum, this gives \[ \rip{a,b}{C_c(N)}(n) =\sum_{t\in G}a^*_tb_t(n). \] This proves the claim. 
Now we show that for fixed $a\in C_c(Q)$, the map $b\mapsto ab$ is a bounded adjointable operator on the pre-Hilbert $C_c(N)$-module $C_c(Q)$, with adjoint $b\mapsto a^*b$. This will give a representation of $C_c(Q)$ in $\c L_{C_c(N)}(C_c(Q))$, hence a $C^*$-seminorm on $C_c(Q)$. We first handle the adjointability. Without loss of generality let $a\in C_s$ and take $b=\sum_tb_t,c=\sum_tc_t\in C_c(Q)$ with $b_t,c_t\in C_t$. Then \begin{align*} \rip{ab,c}{C_c(N)} &=\rip{\textstyle\sum_t ab_t,\sum_t c_t}{C_c(N)} =\textstyle\sum_t (ab_t)^*c_{st} \righttext{(since $ab_t\in C_{st}$)}\\ &=\textstyle\sum_t b^*_ta^*c_{st} =\rip{\textstyle\sum_t b_t,\sum_t a^*c_{st}}{C_c(N)} \righttext{(since $a^*c_{st}\in C_t$)}\\ &=\rip{b,a^*\textstyle\sum_t c_{st}}{C_c(N)} =\rip{b,a^*c}{C_c(N)}. \end{align*} For the boundedness, let $\omega$ be a state on $C^*(N)$, and let $\rip{\cdot,\cdot}\omega =\omega(\rip{\cdot,\cdot}{C_c(N)})$ be the associated semi-inner product on $ C_c(Q)$. Let $\c H$ be the corresponding inner product space, and let $\Theta\colon C_c(Q)\to \c H$ be the quotient map. Then left multiplication defines a $^*$-homomorphism $\pi$ from $C_c(Q)$ to the $^*$-algebra of adjointable linear operators on $\c H$ via $\pi(a)\Theta(b)=\Theta(ab)$. As we showed in the general lemma above, for all $a\in C_c(Q)$, the operator $\pi(a)$ is bounded and $\norm{\pi(a)}\le\norm{a}$. Hence, for all $a\in C_c(Q)$ and $b\in C_c(Q)$, \begin{multline*} \omega\bigl(\rip{ a b,a b}{C_c(N)}\bigr) =\rip{\pi(a)\Theta(b),\pi(a)\Theta(b)}\omega\\ \le\norm{\pi(a)}^2\rip{\Theta(b),\Theta(b)}\omega \le\norm{a}^2\omega\bigl(\rip{b,b}{C_c(N)}\bigr). \end{multline*} Since the state $\omega$ was arbitrary, \[ \rip{a b,a b}{C_c(N)}\le\norm{a}^2\rip{b,b}{C_c(N)}, \] as required. We can now define a $C^*$-seminorm $\norm{\cdot}_*$ on $ C_c(Q)$ by letting $\norm{a}_*$ be the norm of the operator $b\mapsto ab$ in $\c L_{C_c(N)}(C_c(Q))$. 
To finish, we need to know that for $a\in C_c(N)$ the norm $\norm{a}_*$ agrees with the greatest $C^*$-norm $\norm{a}$, and it suffices to show $\norm{a}\le \norm{a}_*$: \[ \norm{a}^2=\norm{a^*a}\le\norm{a^*}_*\norm{a}=\norm{a}_*\norm{a}, \] since $a^*a$ is a value of the operator $c\mapsto a^*c$, and then canceling $\norm{a}$ gives the desired inequality. This completes the proof. \end{proof} \providecommand{\bysame}{\leavevmode\hbox to3em{\hrulefill}\thinspace} \end{document}
\begin{document} \title{Dimers, webs, and local systems} \abstract{ For a planar bipartite graph $\mathcal{G}$ equipped with an $\mathrm{SL}_n$-local system, we show that the determinant of the associated Kasteleyn matrix counts ``$n$-multiwebs" (generalizations of $n$-webs) in $\mathcal{G}$, weighted by their web-traces. We use this fact to study random $n$-multiwebs in graphs on some simple surfaces.} \section{Introduction} \subsection{Multiwebs} Let $\mathcal{G}=(V,E)$ be a bipartite graph (which is not necessarily simple, that is, we allow multiple edges between two vertices). We always assume there are the same number $N$ of white vertices as black vertices, $N=|B|=|W|$. An \emph{$n$-multiweb $m$ of $\mathcal{G}$} is a multiset of edges with degree $n$ at each vertex, that is, a mapping $m:E\to\{0,1,2,\dots\}$ such that for each vertex $v\in\mathcal{G}$ we have $\sum_{u\sim v} m_{uv}=n$; that is, each vertex is an endpoint of exactly $n$ edges of $m$, counted with multiplicity. See Figure \ref{3multiweb} for a $3$-multiweb in a grid. The notion of $n$-multiweb is a generalization of the notion of \emph{$n$-web}, which is, by definition, an $n$-valent bipartite graph. We let $\Omega_n(\mathcal{G})$ be the set of $n$-multiwebs of $\mathcal{G}$. We often call an $n$-multiweb simply a web when the context is clear. \begin{figure} \center{\includegraphics[width=2in]{3multiweb}} \caption{\label{3multiweb}A $3$-multiweb in the $6\times6$ grid.} \end{figure} When $n=1$, an $n$-multiweb of $\mathcal{G}$ is a \emph{dimer cover} of $\mathcal{G}$, also known as a \emph{perfect matching} of $\mathcal{G}$. When $n=2$ an $n$-multiweb of $\mathcal{G}$ is a \emph{double dimer cover}. 
Dimer covers and double dimer covers are classical combinatorial objects studied starting in the 1960's by Kasteleyn \cite{Kasteleyn61Physica}, Temperley/Fisher \cite{Temperley61PhilosMag}, and many others, see e.g. \cite{Kenyon09Statisticalmechanics} for a survey. Our goal here is to study $n$-fold dimer covers, or equivalently, $n$-multiwebs, for $n\ge 3$. In \cite{Kenyon14CMP}, $\text{SL}_2$-local systems were used to study topological properties of double dimer covers on planar graphs. We extend this here to $\mathrm{SL}_n$-local systems for $n\ge 2$. On a bipartite graph $\mathcal{G}$ on a surface with an $\mathrm{SL}_n$-local system, we define the \emph{trace} of an $n$-multiweb, a small generalization of the trace of an $n$-web. Traces of $n$-webs are used in the study of tensor networks, representation theory, cluster algebras, and knot theory \cite{Jaeger92DiscreteMath, Kuperberg96CommMathPhys, SikoraTrans01, Morse10Involve, FominAdvMath16, Fraser19TransAmerMathSoc}. In this paper we study these traces from a probabilistic and combinatorial point of view. To distinguish our trace from the trace of a matrix we should in principle refer to it as a \emph{web trace}. However we say ``trace" when there is no risk of confusion (we need to be careful precisely when the web is a loop, because the web trace is not generally equal to the trace of the associated monodromy around the loop; see Section \ref{loop}). Our main result computes the determinant of a certain operator $K(\Phi)$, the \emph{Kasteleyn matrix} for the planar bipartite graph $\mathcal{G}$ in the presence of an $\mathrm{SL}_n$-local system $\Phi$, as a sum of traces of webs: \noindent{\bf Theorem. } Up to a global sign, $ \det \tilde K(\Phi) = \sum_{n\text{-multiwebs } m \in \Omega_n(\mathcal{G})} \mathbb{T}r(m)$. Here $\tilde K \in \mathrm{M}_{nN}(\mathbb{R})$ is obtained from $K \in \mathrm{M}_N(\mathrm{M}_n(\mathbb{R}))$ in the obvious way. 
For the definitions and precise statement, see below and Theorem \ref{main}. If we ignore gauge invariance, which is not necessary for the statement, this theorem holds more generally for $M_n$-connections (connections with parallel transports in $M_n(\mathbb{R}) = \text{End}(\mathbb{R}^n)$). In the case $n=1$, we have $\mathbb{T}r(m) = 1$ for any $1$-multiweb for an $\text{SL}_1$-local system, or simply the product of edge weights for an $M_1$-connection; in this case $K$ is the usual Kasteleyn matrix. In this sense our result generalizes Kasteleyn's theorem from \cite{Kasteleyn61Physica}. In the case $n=2$, we give a new proof of (a slightly more general version of) a theorem of \cite{Kenyon14CMP} enumerating double-dimer covers, see Section \ref{ddsection}. As another application of the theorem, in the case $n=3$ we show how to enumerate isotopy classes of ``reduced" $3$-multiwebs (see below), on either the annulus or the pair of pants (see Sections \ref{annulussection} and \ref{pantssection}). \subsection{Colorings} For the \emph{identity} connection, the trace of an $n$-multiweb has a simple combinatorial interpretation. The trace for the identity connection is a signed count of the number of edge-$n$-colorings (see Proposition \ref{tracecolor} below), and in fact for planar webs, it is the number of edge-$n$-colorings (see Proposition \ref{tracecolorplanar} below). Here, an \emph{edge-$n$-coloring} of an $n$-multiweb $m$ is a coloring of the edges of $m$ with colors from $\mathcal{C}=\{1,2,\dots,n\}$ so that at each vertex all $n$ colors are present. More precisely, an edge-$n$-coloring is a map from the edges of $m$ into $2^{\mathcal{C}}$, the set of subsets of $\mathcal{C}$, with the property that, first, an edge of multiplicity $k$ maps to a subset of $\mathcal{C}$ of size $k$, and secondly, the union of the color sets over all edges at a vertex is $\mathcal{C}$. 
For example, the trace of the multiweb appearing in Figure \ref{colored3mw} is $\mathbb{T}r(m)=48\cdot24\cdot1\cdot1\cdot1$, thus $m$ has $1152$ edge-$3$-colorings. \begin{figure} \center{\includegraphics[width=2in]{colored3mw}} \caption{\label{colored3mw}An edge-$3$-coloring of the multiweb of Figure \protect{\ref{3multiweb}}.} \end{figure} For a planar graph $\mathcal{G}$, we define the \emph{partition function of $n$-multiwebs} to be \begin{equation}\label{Zndtrivial}Z_{nd}(I) := \sum_{m\in\Omega_n(\mathcal{G})} \mathbb{T}r_+(m),\end{equation} where we use the identity connection $I$. (The $+$ subscript corresponds to a choice of positive cilia, see Section \ref{planarwebscn} below and comments after Corollary \ref{poscilia}). We also define $Z_d$ to be the number of single dimer covers of $\mathcal{G}$. \begin{prop}\label{thm1} $Z_{nd}(I) = (Z_d)^n$. \end{prop} \begin{proof} There is a natural map from ordered $n$-tuples of single dimer covers to $n$-multiwebs, obtained by taking the union and recording multiplicity over each edge. The fiber over a fixed $n$-fold dimer cover is the number of its edge-$n$-colorings, which is $\mathbb{T}r_+(m)$. \end{proof} Associated to $Z_{nd}(I)$ is therefore a natural probability measure $\mu_n$ on $n$-multiwebs of $\mathcal{G}$, where a multiweb $m$ has probability $\Pr(m) = \frac{\mathbb{T}r_+(m)}{Z_{nd}(I)}.$ One of our motivations is to analyze this measure; as a tool to probe this measure we use $\mathrm{SL}_n$-local systems and our main theorem, Theorem \ref{main}. \subsection{The case \texorpdfstring{$n=3$}{n=3}} In the case $n=3$, on a graph on a surface with a flat $\mathrm{SL}_3$-connection $\Phi$, one can use \emph{skein relations} (see Section \ref{ssec:skeinrelations}) to write any $3$-multiweb as a linear combination of \emph{reduced} (or \emph{non-elliptic}) multiwebs. 
These are multiwebs where each contractible face has $6$ or more sides. We can thus rewrite the right-hand side of Theorem \ref{main} as a sum over isotopy classes $\lambda$ of reduced multiwebs: $$Z_{3d}(\Phi) = \sum_{\lambda\in\Lambda_3}C_\lambda\mathbb{T}r(\lambda).$$ Here $\Lambda_3$ is the set of isotopy classes of reduced $3$-multiwebs. By a theorem of Sikora and Westbury \cite{SikoraAlgGeomTop07} the coefficients $C_\lambda$ can be extracted from $Z_{3d}(\Phi)$ as $\Phi$ varies over flat connections. See Sections \ref{annulussection} and \ref{pantssection} for applications. \section{Background} \subsection{\texorpdfstring{$\mathrm{SL}_n$-local systems}{SLn-local systems}} \label{ssec:slnlocalsystems} Let $\Phi=\{\phi_e\}_{e\in E}$ be an \textit{$\mathrm{SL}_n(\mathbb{R})$-local system} on $\mathcal{G}$. This is the data of, for each edge $e=bw$ of $\mathcal{G}$, a matrix $\phi_{bw}\in\mathrm{SL}_n(\mathbb{R})$, with $\phi_{wb} = (\phi_{bw})^{-1}$. A coordinate-free definition can be given as follows. Let $V$ be an $n$-dimensional vector space, fixed once and for all, and associate to each vertex $v$ of $\mathcal{G}$ a copy $V_v=V$; we call $\mathcal{V} := \bigoplus_v V_v$ a \emph{$V$-bundle} on $\mathcal{G}$. A \emph{$\mathrm{GL}(V)$-connection} on $\mathcal{V}$ is a choice of isomorphisms $\phi_{bw}:V_b\to V_w$ along edges $e=bw$, with $\phi_{wb}=\phi_{bw}^{-1}$. Similarly we can talk about an \textit{$\mathrm{SL}(V)$-connection}. Choosing a basis $\beta = \left\{v_i\right\}$ of $V$ allows us to talk about an \textit{$\mathrm{SL}_n(\mathbb{R})$-connection}, also called an $\mathrm{SL}_n(\mathbb{R})$-local system. (In practice, one simply takes $V=\mathbb{R}^n$ at each vertex.) 
More generally, we define an \textit{$\mathrm{End(V)}$-connection} to be the assignment of a linear map $\phi_{bw}: V_b \to V_w$ to each edge $e=bw$, not requiring invertibility (and we do not define any linear map from $w$ to $b$). Choosing a basis $\beta$ of $V$ allows us to talk about a \textit{$\mathrm{M}_n(\mathbb{R})$-connection.} Two $\mathrm{End}(V)$-connections $\Phi=\{\phi_e\},\Phi'=\{\phi'_e\}$ on the same graph are \emph{$\mathrm{GL}(V)$-gauge equivalent} (resp. \textit{$\mathrm{SL}(V)$-gauge equivalent}) if there are $A_v\in\mathrm{GL}(V)$ (resp. $A_v \in \mathrm{SL}(V)$) such that for all edges $bw$, we have $\phi'_{bw} = A_w^{-1}\phi_{bw}A_b$. Similarly, we can talk about $\mathrm{M}_n(\mathbb{R})$-connections being $\mathrm{GL}_n(\mathbb{R})$- or $\mathrm{SL}_n(\mathbb{R})$-gauge equivalent. Given a local system $\Phi$ and a closed oriented loop $\gamma$ on $\mathcal{G}$ with a base vertex $v\in\gamma$, the monodromy of $\Phi$ around $\gamma$ is the composition of the isomorphisms around $\gamma$ starting at $v$. The conjugacy class of the monodromy is well-defined independently of gauge and is independent of the base point $v$. If $\mathcal{G}$ is embedded on a surface $\Sigma$, a \emph{flat} connection is a connection for which the monodromy around any null-homotopic loop is the identity. \subsection{Dimer model} For background on the dimer model see \cite{Kenyon09Statisticalmechanics}. Let $\mathcal{G}=(W\cup B,E) $ be a planar bipartite graph with the same number of white vertices as black vertices: $N = |W|=|B|$. Let $\nu:E\to\mathbb{R}_{>0}$ be a positive weight function on its edges. A dimer cover of $\mathcal{G}$ is a perfect matching: a collection of edges covering each vertex exactly once. A dimer cover $m$ has weight $\nu_m$ given by the product of its edge weights: $\nu_m = \prod_{e\in m}\nu_e$. 
Let $\Omega_1$ be the set of dimer covers and let $Z=\sum_{m\in\Omega_1}\nu_m$ be the sum of weights of all dimer covers. Kasteleyn showed \cite{Kasteleyn61Physica} that $Z= |\det K|$, where the matrix $K=(K_{wb})_{w\in W,b\in B}$, the (small) Kasteleyn matrix, satisfies $$K_{wb} = \begin{cases} \varepsilon_{wb} \nu_{wb}& w\sim b\\0&\text{else}.\end{cases}$$ Here the $\varepsilon_{wb}\in\{\pm1\}$ are signs chosen using the ``Kasteleyn rule": faces of length $l$ have $(l/2+1)\bmod 2$ minus signs. This condition determines $K$ uniquely up to gauge, that is, up to left- and right-multiplication by a diagonal matrix of $\pm 1$'s (see \cite{Kenyon09Statisticalmechanics}). Note that in our definition, $K$ has rows indexing white vertices and columns indexing black vertices. Some references define the (big) Kasteleyn matrix $K$ indexed by all vertices, in which case $Z=|\det K|^{1/2}$. \subsection{Double dimer model}\label{ddsection} A double-dimer configuration on $\mathcal{G}$ is another name for a $2$-multiweb on $\mathcal{G}$. This is a decomposition of the graph into a collection of disjoint (unoriented) loops and doubled edges. If $\Phi=\{\phi_{bw}\}_{bw\in E}$ is an $\text{SL}_2$-connection on $\mathcal{G}$, and $m$ is a $2$-multiweb, we can compute the web trace by $$\mathbb{T}r(m) = \prod_{\text{loops $\gamma$}} \mathrm{Tr}(\phi_{\gamma})$$ where $\mathrm{Tr}$ is the matrix trace, and where the product is over loops $\gamma$ of $m$ (each with some chosen orientation). Note that $\text{SL}_2$ has the special property that $\mathrm{Tr}(A)=\mathrm{Tr}(A^{-1})$, so $\mathrm{Tr}(\phi_{\gamma})$ is independent of the choice of orientation of $\gamma$. 
In this setting we can construct an associated Kasteleyn matrix $K=(K_{wb})_{w\in W,b\in B}$ as for single dimer covers, but with entries $K_{wb} = \varepsilon_{wb}\phi_{bw}$ in $\mathrm{M}_N(\mathrm{M}_2(\mathbb{R}))$, if $wb$ are adjacent, where $\varepsilon_{wb}$ are Kasteleyn signs as before. Note that we use $\varepsilon_{wb}\phi_{bw}$ for the $K_{wb}$ entry, and not $\varepsilon_{wb}\phi_{wb}$. This is because, having chosen $K$ to have white rows and black columns (rather than the reverse), now both $\phi_{bw}$ and $K$ are maps from functions on black vertices to functions on white vertices. Let $\tilde K$ be the $2|W|\times 2|B|$ matrix obtained by replacing each entry with its $2\times 2$ block of numbers. In \cite{Kenyon14CMP} it was shown that \begin{equation}\label{detK2}\det\tilde K=\sum_{m\in\Omega_2}\mathbb{T}r(m).\end{equation} The main result of the present paper is to generalize this to $n\ge 3$. However even in the case $n=2$ our main theorem generalizes (\ref{detK2}) to $\mathrm{M}_2$-connections, where the trace of a loop is no longer the trace of its monodromy. \section{Web traces} \subsection{Tensor network definition} An \textit{$n$-web} is a bipartite graph, regular of degree $n$ (we allow parallel edges but each edge has multiplicity $1$, unlike a multiweb), embedded on an oriented surface. We consider its edges to be oriented from black to white. We let $\Phi=\{\phi_{bw}\}$ be an $M_n$-connection on $\mathcal{G}$. This is just the assignment of an $n\times n$ matrix $\phi_{bw}$ to each edge of $\mathcal{G}$, not requiring invertibility. The trace of a web is a scalar function of $\Phi$ associated to the web. We define the trace of a web as follows (the definition of trace for a multiweb is given in Section \ref{multiplicity} below). This is a standard tensor network definition; see for example \cite{SikoraTrans01}. 
We fix, for each vertex, a linear order of the edges, compatible with the counterclockwise cyclic order at black vertices, and compatible with the clockwise cyclic order at white vertices. We can record this choice of linear order by placing a mark in the wedge at $v$ between the first and last edge. This mark is called the \emph{cilium} at $v$, see \cite{FockAmerMathSocTransl}. For $n$ odd the trace will be well-defined independently of the cilia, but for $n$ even the trace is only defined up to a sign which depends on the locations of the cilia. To record this dependence we write $\mathbb{T}r_L$ where $L$ is the choice of cilia. Let $m$ be a web. Let $V=\mathbb{R}^n$ with fixed basis $e_1,\dots,e_n$, and $V^*$ its dual with dual basis $f_1,\dots,f_n$. We associate to each edge $e=bw$ of $m$ a copy $V_e,V_e^*$ of $V$ and $V^*$. Here $V_e$ is associated to the end of $e$ near $b$, and $V_e^*$ to the end near $w$. At each black vertex of $m$ with adjacent vector spaces $V_1,\dots, V_n$ in the given linear order we associate a canonical vector $v_b\in V_1\otimes \dots \otimes V_n$: $$v_b = \sum_{\sigma\in S_n} (-1)^{\sigma} e_{\sigma(1)}\otimes \dots\otimes e_{\sigma(n)},$$ called the \emph{codeterminant}. Likewise at each white vertex with adjacent vector spaces $V_1^*,\dots,V_n^*$ in order we associate a canonical codeterminant $v_w\in V_1^*\otimes \dots\otimes V_n^*$: $$v_w = \sum_{\sigma\in S_n} (-1)^{\sigma} f_{\sigma(1)}\otimes \dots\otimes f_{\sigma(n)}.$$ Note that changing the cilium at a black (resp. white) vertex by one ``notch" will change the sign of $v_b$ (resp. $v_w$) if $n$ is even, but not if $n$ is odd. Now along each edge $e=bw$ we have a contraction of tensors using $\phi_{bw}$. That is, we take the tensor product $$X=\bigotimes_{b} v_b \in \bigotimes_{e} V_e$$ of $v_b$ over all black vertices, and the tensor product $$Y=\bigotimes_{w} v_w \in \bigotimes_e V^*_e$$ of $v_w$ over all white vertices. 
Then we contract component by component along edges: a simple tensor $\bigotimes_{e=bw} v_b$ and a simple tensor $\bigotimes_{e=bw} v_w$ contract to give $\prod_e v_w(\phi_{bw}v_b),$ or, in a more symmetric notation $\prod_e\langle v_w|\phi_{bw}|v_b\rangle$. Summing we have $$\mathbb{T}r_L(m) := \left\langle \bigotimes_{w} v_w \Big|\bigotimes_{e=bw}\phi_{bw}\Big|\bigotimes_bv_b\right\rangle.$$ The above definition of trace \emph{a priori} depends on the choice of basis for the $V_e,V_e^*$, but in fact does not depend on these choices, in the following sense: \begin{prop}\label{indep} The above definition of the trace of a web (with multiplicity 1 on each edge) is independent of $\mathrm{SL}_n$-change of basis at any vertex. That is, for any $\mathrm{M}_n$-connection the trace of a web is $\mathrm{SL}_n$-gauge invariant (Section \ref{ssec:slnlocalsystems}). \end{prop} \begin{proof} An $\mathrm{SL}_n$-change of basis at a black vertex $b$ preserves $v_b$. Likewise at a white vertex. \end{proof} \subsubsection{Coordinate-free description} There is an equivalent, coordinate-free definition of the trace $\mathbb{T}r_L(m)$ of a web, as follows. Recall from Section \ref{ssec:slnlocalsystems} that the vector spaces $V_v = V$ assigned to the vertices $v$ are copies of a fixed vector space $V$. Let $\Phi$ be an $\mathrm{End}(V)$-connection on $\mathcal{G}$. 
Then the trace of $m$ is the composition $$ \mathbb{C} \overset{\mathbf{codet}}{\longrightarrow} \bigotimes_{\text{half edges at black vertices}} V\overset{\Phi}{\longrightarrow} \bigotimes_{\text{half edges at white vertices}} V \overset{\mathbf{det}}{\longrightarrow} \mathbb{C},$$ evaluated at $1 \in \mathbb{C}$, where the first map $\mathbf{codet}$ is the ``codeterminant", the second $\Phi$ is $\bigotimes_{e}\phi_{e}$, and the third $\mathbf{det}$ is the ``determinant'' map taking a tensor product of $n$ vectors in $V$ to their wedge product in $\wedge^n V\cong\mathbb{C}$; see below for explicit formulas. Both $\mathbf{codet}$ and $\mathbf{det}$ depend on a choice $\beta$ of an isomorphism between $\mathbb{C}$ and $\wedge^n V$, that is, on a volume form on $V$. Note that, although $\mathbf{codet}$ and $\mathbf{det}$ depend on this choice of a basis $\beta$ of $V$, the composition $\mathbf{det}\circ\Phi\circ\mathbf{codet}$ does not; thus, the trace is well-defined. Explicitly, for $\mathrm{dim}(V)=n$, if $\beta = \left\{ v_i \right\}$ is the chosen basis for $V$, then $\mathbf{codet} : \mathbb{C} \to V^{\otimes n}$, at each black vertex, is the linear map defined by the property $1 \mapsto \sum_{\sigma \in \mathfrak{S}_n} (-1)^\sigma v_{\sigma(1)} \otimes v_{\sigma(2)} \otimes \cdots \otimes v_{\sigma(n)}$, and $\mathbf{det} : V^{\otimes n} \to \mathbb{C}$, at each white vertex, is the map defined by $v_{i_1} \otimes v_{i_2} \otimes \cdots \otimes v_{i_n} \mapsto (-1)^{(i_1, i_2, \dots, i_n)}$ where the latter sign is zero if $(i_1, i_2, \dots, i_n)$ does not define a permutation. 
The web trace is analogous to the definition of the usual trace $\mathrm{Tr}(\varphi)$ of an endomorphism $\varphi \in \mathrm{End}(V)$ as the composition of ``coevaluation'' and ``evaluation'' maps, respectively, $\mathbf{coeval} : \mathbb{C} \to V \otimes V$ and $\mathbf{eval} : V \otimes V \to \mathbb{C}$, depending on the choice of basis $\beta$ but whose composition does not. For the usual trace, the intermediate map is $\varphi \otimes \mathrm{id_V} : V \otimes V \to V \otimes V$. \subsection{Multiwebs and their traces}\label{multiplicity} As discussed above, a multiweb is a web allowing edge multiplicities (including zero). More precisely, an \emph{$n$-multiweb} $m$ is an assignment of edge multiplicities $m_e\ge 0$ to $\mathcal{G}$, which is regular of degree $n$: $\sum_{u\sim v}m_{uv}=n$. We define the trace of a multiweb $\mathcal{G}$ as follows. Split each edge $e$ of multiplicity $m_e$ into $m_e$ parallel edges, removing edges of multiplicity $0$; let $\mathcal{G}'$ be the resulting $n$-regular graph. If the original edge has parallel transport $\phi_{bw}$, put $\phi_{bw}$ on each of the new edges. The cilia for $\mathcal{G}'$ are in the same location as they were in $\mathcal{G}$: the cilia remain outside of the multiple edges. We now define \begin{equation}\label{mwdef}\mathbb{T}r_L(\mathcal{G}) := \frac{\mathbb{T}r_L(\mathcal{G}')}{\prod_e \left( m_e! \right)}.\end{equation} That is, we divide the trace of the web $\mathcal{G}'$ by the factorials of the multiplicities. Proposition \ref{tracecolor0} below justifies this definition. \subsection{Trace examples} \subsubsection{Theta graph} \label{sssec:thetagraph} Consider the following example for $\mathrm{SL}_3$-connections (as opposed to $\mathrm{M}_3$-connections). 
Let $m$ be a ``theta'' graph consisting of two vertices $w,b$ with three edges between them $e^1,e^2,e^3$, each of multiplicity $1$, in counterclockwise order at $b$ and clockwise order at $w$ (with respect to a planar embedding of the graph). Let $A,B,C$ be the parallel transports from $b$ to $w$ along edges $e^1,e^2,e^3$ respectively (recall that we always orient edges from black to white). Then $$v_b=e_r\otimes e_g\otimes e_b - e_r\otimes e_b\otimes e_g+ \dots -e_b\otimes e_g\otimes e_r$$ and $$v_w=f_r\otimes f_g\otimes f_b - f_r\otimes f_b\otimes f_g+ \dots -f_b\otimes f_g\otimes f_r.$$ The contraction contains $36$ terms; for example, contracting the first terms of $v_b$ and $v_w$ gives $A_{rr}B_{gg}C_{bb}$, and contracting the first term of $v_b$ and the second term of $v_w$ gives $-A_{rr} B_{bg} C_{gb}$. Summing, the trace can be written compactly as $$\mathbb{T}r(m) = \mathrm{Tr}(AB^{-1})\mathrm{Tr}(CB^{-1})- \mathrm{Tr}(AB^{-1}CB^{-1})$$ (see Section \ref{reductions} below) or, more symmetrically, as \begin{equation}\label{xyz}[xyz]\det(xA+yB+zC),\end{equation} that is, the coefficient of $xyz$ in the expansion of the determinant of $xA+yB+zC$ as a polynomial in $x,y,z$ (see (\ref{xyzthm}) below); note \eqref{xyz} is valid for any $\mathrm{M}_3$-connection. Also notice that when $A=B=C$ then the trace $\mathbb{T}r(m)=+6$; in this case the $\mathrm{SL}_3$-local system is trivializable, and the trace is thus the number of edge-$3$-colorings (see Proposition \ref{tracecolorplanar} below). More generally, for the $\mathrm{M}_n$-connection on the $n$-theta graph $m$ (two vertices with $n$ edges) consisting of the same matrix $A \in \mathrm{M}_n$ attached to each edge, by Proposition \ref{tracecolor0} below the trace $\mathbb{T}r(m) = n! \mathrm{det}(A)$. 
As one last variation, again in the case $n=3$, note that if the cyclic ordering of the vertices is that induced by the nonplanar embedding of the theta graph in the torus, then its trace $\mathbb{T}r(m)$ with respect to the identity connection is equal to $-6$ (since we just reverse the cyclic order at one of the two vertices); contrast this with the calculation above and Proposition \ref{tracecolorplanar}. \subsubsection{Loop}\label{loop} As another example, let $m$ consist of a cycle $b_1w_1b_2w_2$ with edges $b_{i}w_{i}$ of multiplicity $1$, and edges $b_iw_{i-1}$ of multiplicity $2$, as in Figure \ref{4cycle}. \begin{figure}[htbp] \center{\includegraphics[width=1in]{4cycle}} \caption{\label{4cycle}A $4$-cycle in a $3$-multiweb.} \end{figure} At the black vertex $b_1$, say, $$v_{b_1} = \sum_\sigma(-1)^{\sigma} e_{\sigma(1)}\otimes e_{\sigma(2)}\otimes e_{\sigma(3)}= (e_r\wedge e_g)\otimes e_b +(e_g\wedge e_b)\otimes e_r +(e_b\wedge e_r)\otimes e_g.$$ Here we have used the wedge notation $e\wedge e':= e\otimes e' - e'\otimes e \in \mathbb{R}^3 \wedge \mathbb{R}^3$. Choosing in addition a $\mathrm{M}_3$-connection as in the figure, starting at $b_1$ this leads to \begin{equation}\label{eq:looptrace} \mathbb{T}r(m) = \mathrm{Tr}((D \wedge D)\cdot C\cdot(B\wedge B)\cdot A).\end{equation} For an $\mathrm{SL}_3$-connection, this becomes $\mathbb{T}r(m)=\mathrm{Tr}(D^{-1}CB^{-1}A)$, namely the trace of the monodromy when the cycle is oriented clockwise in the figure. In particular, note that even though the cycle is not naturally oriented, the $3$-web-trace nevertheless picks out an orientation: the one determined by following from black to white along the non-doubled edges. 
Equation \eqref{eq:looptrace} needs to be properly interpreted, where $B \wedge B \in \mathrm{M}_3(\mathbb{R})$ is defined as follows: Let $\widetilde{B \wedge B} \in \mathrm{M}_3(\mathbb{R})$ be the matrix of the linear map $\mathbb{R}^3 \wedge \mathbb{R}^3 \to \mathbb{R}^3 \wedge \mathbb{R}^3$ induced by $B$ written in the basis $\left\{ e_r \wedge e_g, e_r \wedge e_b, e_g \wedge e_b \right\}$, and let $\varphi = \left( \begin{smallmatrix} 0&0&1\\0&-1&0\\1&0&0 \end{smallmatrix} \right)$. Then $B \wedge B := ( \varphi \circ \widetilde{B \wedge B} \circ \varphi^{-1} )^T$. Conceptually speaking, $\varphi$ induces an isomorphism (of $\mathrm{SL}_3$-representations) $\mathbb{R}^3 \wedge \mathbb{R}^3 \cong (\mathbb{R}^3)^*$, and the transpose in the formula for $B \wedge B$ allows one to pass from the dual $(\mathbb{R}^3)^*$ to $\mathbb{R}^3$. It also corresponds to the transpose in the formula for the inverse of $B$ in terms of its cofactor matrix, so that $B \wedge B = B^{-1}$ when $B \in \mathrm{SL}_3(\mathbb{R})$; hence, we have the formula above for the trace $\mathbb{T}r(m)$ for $\mathrm{SL}_3$-connections. For general $n$, consider a cycle $b_1w_1b_2w_2\dots b_\ell w_\ell$ with edges $b_{i}w_{i}$ of multiplicity $k$, and edges $b_iw_{i-1}$ of multiplicity $n-k$, and an $M_n$-connection with parallel transport $A_i$ from $b_i$ to $w_{i}$, and $B_{i-1}$ from $b_{i}$ to $w_{i-1}$. Then, we have $\mathbb{T}r_L(m) = \pm\mathrm{Tr}(C)$, where $C$ is the product $$C=(\wedge^{n-k}B_\ell)(\wedge^{k} A_\ell) \cdots (\wedge^{n-k} B_2) (\wedge^{k}A_2)(\wedge^{n-k}B_1)(\wedge^{k}A_1)$$ appropriately interpreted, similar to the case $n=3$. 
For an $\mathrm{SL}_n$-connection with total monodromy $M=B_\varepsilonll^{-1} A_\varepsilonll \cdots B_1^{-1} A_1^{-1}$ clockwise around the loop, we thus have $$\mathbb{T}r_L(m) = \pm\mathrm{Tr}(\wedge^k M).$$ Indeed, by first taking advantage of the $\mathrm{SL}_n$-gauge invariance of the web trace (Proposition \ref{indep}) to concentrate the connection $M$ entirely on the edge $b_1 w_1$, this is then an immediate consequence of the above equation. We remind the reader that, for $n$ odd the sign is $+$ but for $n$ even the sign depends on the choice of cilia~$L$. Note that when $k=n$, corresponding to when $m$ is the $n$-multiedge, this last calculation is consistent with the calculation for the $n$-theta graph of Section \ref{sssec:thetagraph}: in this case, $\mathbb{T}r_L(m)=+\mathrm{det}(M)=1$. \subsubsection{Nonplanar example} Consider the complete bipartite graph on three vertices $m=K_{3,3}$. This is nonplanar. For the cyclic ordering on the vertices induced by the embedding of $m$ in the torus, one computes $\mathbb{T}r(m)=0$; for example, one can use the skein relations of Section \ref{reductions} together with the calculation at the very end of Section \ref{sssec:thetagraph}. On the other hand, $m$ has $12$ edge-colorings. This demonstrates that Proposition \ref{tracecolor} is special to planar graphs. \subsection{Traces in terms of edge colorings} The trace of a web or multiweb $m$ can be given a more combinatorial definition as follows. At each vertex $v$ of $m$, choose a coloring of the half-edges at $v$ with $n$ colors $\mathbb{C}C=\{1,\dots,n\}$, using each color once, and so that multiple edges get a subset of colors. In other words at a vertex $v$ with edge multiplicities $M_1,\dots,M_k$, we partition $\mathbb{C}C$ into $k$ disjoint subsets of sizes $M_1,\dots,M_k$, one for each half-edge at $v$. Note that there are $\binom{n}{M_1,\dots,M_k}$ possible such colorings at $v$. To such a coloring at $v$ we associate a sign $c_v$, defined as follows. 
List the colors according to the linear order on edges at $v$ and for multiple edges, list the colors in their natural order. Then this list is a permutation of $\mathbb{C}C$ and $c_v$ is its signature. Now on each edge $e$ of multiplicity $m_e$ there are two sets of colors, each of size $m_e$, one associated with the black vertex and one at the white vertex. We associate a corresponding matrix element, using the bijection of colors with indices: if edge $e$ has parallel transport $A\in SL_n$ from black to white, and is colored with set $S$ at the white vertex and $T$ at the black vertex, then the corresponding matrix element is $\det A_{S,T}$, that is, the determinant of the $S,T$-minor of $A$ (the submatrix of $A$ with rows $S$ and columns $T$). To each coloring of all the half-edges we take the product of the associated matrix elements, and multiply this by the sign $\prod_v c_v$. Then this quantity is summed over all possible colorings to define the trace: \begin{equation}gin{prop}\lambdaabel{tracecolor0} The above procedure computes the trace of the $n$-multiweb $m$, that is, \begin{equation}\lambdaabel{trline0}\mathbb{T}r_L(m) = \sum_{\textnormal{colorings $c$}} (-1)^{\sum_v c(v)}\prod_{e=bw}\det(\phi_{bw})_{S_e,T_e}.\varepsilone \varepsilonnd{prop} \begin{equation}gin{proof} If $\mathcal{G}$ is a web, that is, if all edges have multiplicity $1$, then a coloring of the half-edges at $b$ corresponds to a single term in $v_b$, and its sign is the coefficient in front of this term. So a coloring of all the half-edges at all vertices corresponds to a single term in the expansion of \begin{equation}\lambdaabel{trdef}\mathbb{T}r_L(\mathcal{G}) = \lambdaangle \bigotimes_wv_w |\bigotimes_{e=bw}\phi_{bw}|\bigotimes_{b} v_b\rangle\varepsilone when we expand $ \otimes_{b} v_b$ and $ \otimes_{w} v_w$ over all permutations. Thus the two formulas (\ref{trdef}) and (\ref{trline0}) agree for webs. 
Now suppose $\mathcal{G}$ is a multiweb, obtained from a web $\mathcal{G}'$ by collapsing $k$ parallel edges, each with parallel transport $\phi_{bw}$, into a single edge $e=bw$ of multiplicity $k$, with parallel transport $\phi_{bw}$. In $\mathcal{G}$ a coloring assigns subsets $S_e,T_e\subset\mathbb{C}C$ of size $k$ to $e$, and the contribution from this edge is $\det (\phi_{bw})_{S_e,T_e}$ for the multiweb $\mathcal{G}$. The corresponding contribution in $\mathbb{T}r_L(\mathcal{G}')$ from this set of colorings involves all possible ways of distributing the colors $S_e$ to the $k$ half edges at $w$, and likewise for $T_e$. There are $k!$ bijections of $S_e$ with $e_1,\dots,e_k$, and $k!$ bijections of $T_e$ with $e_1,\dots,e_k$. Each such choice is a term in $\det (\phi_{bw})_{S_e,T_e}$, and there are $k!$ choices corresponding to each term; moreover the signs agree. Thus the Proposition holds for $\mathcal{G}$, using the definition $\mathbb{T}r_L(\mathcal{G}') = k!\mathbb{T}r_L(\mathcal{G})$ from (\ref{mwdef}). Splitting any other multiple edges, we argue analogously. This completes the proof. \varepsilonnd{proof} If the connection is \varepsilonmph{trivial}, we have a nonzero contribution to the trace only when for each edge $e$, $S_e=T_e$, that is, the subsets of colors for the two half-edges are equal. In this case the matrix element is exactly $1$ for each edge. The trace is thus the signed number of edge-$n$-colorings, where the sign is $\prod_v c_v$. \begin{equation}gin{prop}\lambdaabel{tracecolor} The trace of an $n$-multiweb for the identity connection is the signed number of its edge-$n$-colorings. \varepsilonnd{prop} A similar proposition holds for diagonal connections, where now the trace is a weighted, signed number of edge-$n$-colorings. 
\subsection{Planar webs}\lambdaabel{planarwebscn} Proposition \ref{tracecolor} simplifies for planar webs: \begin{equation}gin{prop}\lambdaabel{tracecolorplanar} The trace of a planar $n$-multiweb for the identity connection is equal to $\pm1$ times the number of edge-$n$-colorings (with sign $+$ if $n$ is odd). \varepsilonnd{prop} \begin{equation}gin{proof} First note that given an edge-$n$-coloring $C$ of a multiweb $m$, the set of edges containing color $i$ is a dimer cover of $m$: each vertex has exactly one adjacent edge containing color $i$. We will use the fact (originally due to Thurston \cite{Thurston90AmerMathMonthly}) that two dimer covers on a planar bipartite graph can be connected by a sequence of ``face moves" as in Figure \ref{facemove}. \begin{equation}gin{figure} \begin{equation}gin{center}\includegraphics[width=3in]{facemove} \varepsilonnd{center} \caption{\lambdaabel{facemove} Suppose every other edge of a face is occupied by a dimer; one can shift all dimers around that face to get a new dimer cover. This is the \varepsilonmph{face move}. Such moves connect the set of all dimer covers for any planar bipartite graph.} \varepsilonnd{figure} We now show that two edge-$n$-colorings $C,C'$ have the same sign. The idea is to move the color-$1$ edges of $C$ to those of $C'$ using face moves. Then move the color-$2$ edges using face moves, and so on, until $C=C'$. The proof is different when $n$ is odd and $n$ is even. Suppose first that $n$ is odd. When we rotate color-$1$ edges around a face $f$, keeping the other colors fixed, we create a new multiweb with different edge multiplicities. Let us compute how the sign changes. Suppose $f$ has vertices $b_1,w_1,\dots,b_k,w_k$ in counterclockwise order, and edges $b_iw_i$ have multiplicities $M_i$, and edges $w_{i}b_{i+1}$ have multiplicities $M_i'$. 
Since $n$ is odd, we can rotate the cilia at vertices of $f$ so that at each vertex of $f$ the cilia lies in $f$, that is, the linear order starts inside the face $f$. Then the first edge out of $b_i$ is $b_iw_{i-1}$ and the first edge out of $w_{i-1}$ is $w_{i-1}b_{i}$. Suppose in $C$ edge $b_iw_{i-1}$ contains color $1$. When we rotate this color so that edge $b_iw_i$ now has color $1$, in the linear ordering of colors at $b_i$, color $1$ has moved from the beginning to position $M_i-1$ from the end. Likewise at $w_{i-1}$, color $1$ has moved from the beginning to position $M_{i-1}-1$ from the end. The net sign change after the face move, which involves all color-$1$ edges of the face, is $(-1)^{\sum_{i=1}^k(M_i+M_{i-1}-2)}=1$. The sign is thus preserved. Likewise for the other colors: change the ordering of the colors does not change the sign (since $\mathcal{G}$ has an even number of vertices) so any other color can be considered to be the first color, and then the same argument holds. The global sign can be determined by transforming all dimers covers to the same dimer cover, so that the multiweb is a union of $n$-fold edges. In this case the sign is $+$, since at each vertex the sign is $+$. This completes the proof in the case $n$ is odd. Now assume $n$ is even. The argument works as for the $n$ odd case if all cilia are in the face $f$. However we also pick up a sign change from rotating the cilia. When we rotate the cilium at a vertex on $f$ to move it into face $f$, we pick up a sign change depending on the parity of the multiplicity of the edges we cross (since $n$ is even, moving it clockwise or counterclockwise results in the same sign change). Then, after rotating color $1$ around $f$, we rotate the cilium back to its original position; we get another sign change. However the multiplicities of edges being crossed have changed by exactly $1$, so the new sign change is the opposite of the first sign change, and so the net sign change is $-1$. 
Thus rotating color $1$ at face $f$ gives a net sign change of $(-1)^{\text{out cilia}}$, that is, $-1$ per cilia of vertices on $f$ which are not pointing into $f$. Now given two colorings of the same multiweb, we claim that when we transform one to the other doing face moves, each face move is performed an even number of times, that is, each face is toggled an even number of times. To see this, take a path $\gamma_f$ in the dual graph from $f$ to the exterior face. Compute the $\mathbb{Z}/2\mathbb{Z}$-intersection number $X_f=\lambdaangle\gamma_f,m\rangle$ of $\gamma_f$ with the multiweb $m$. Each $f$-face move changes $X_f$ by $1\bmod 2$, and each face move by any other faces changes $X_f$ by $0\bmod 2$. Any sequence of face moves changing $m$ back to $m$ necessarily does not change $X_f$. This completes the proof. \varepsilonnd{proof} From the above proof we see that we can arrange the cilia so that (for $n$ even and the identity connection) all traces are positive: \begin{equation}gin{cor}\lambdaabel{poscilia} For $n$ even and the identity connection, if cilia $L$ are chosen so that each face of $\mathcal{G}$ has an even number of cilia, then for any multiweb $m$ in $\mathcal{G}$, $Tr_L(m)$ is equal to the number of edge-$n$-colorings. \varepsilonnd{cor} We call a choice of cilia \varepsilonmph{positive} if it yields positive traces, and we write $\mathbb{T}r_+$ instead of $\mathbb{T}r_L$. One way to choose cilia so that each face has an even number (and is thus positive) is as follows. Let $m_1$ be a dimer cover of $\mathcal{G}$. For each edge of $m_1$, choose the cilia at the vertices at its endpoints to be both on the same face containing one of the two sides of $e$. \subsubsection{Finding an edge-\texorpdfstring{$n$}{n}-coloring of a multiweb} There is a simple algorithm, communicated to us by Charlie Frohman, for finding an explicit edge-$n$-coloring of an $n$-multiweb of a planar graph, as follows; see also \cite{FrohmanJKnotTheoryRam19}. 
Define a height function on faces taking values in $\mathbb{Z}/n\mathbb{Z}$ as follows. On a fixed face $f_0$ define $h(f_0)=0$. For any other face $f$ let $\gamma$ be a path in the dual graph from $f_0$ to $f$. The height change along every edge of $\gamma$ is given by the algebraic intersection number of the common edge with $\gamma$ (considering edges oriented from black to white, and taking multiplicity into account). In other words given two adjacent faces $f_1,f_2$ with edge $bw$ between them of multiplicity $k$, where $f_1$ is on the left, then $h(f_1)-h(f_2) = k$. Note that $h$ is well-defined since it is well-defined around each vertex. Given $h$, the color set of an edge of multiplicity $k$ is the interval of colors $\{a,a+1,\dots,a+k-1\}$ if the face to its right has height $a$. \section{Kasteleyn matrix} Let $\mathcal{G}$ be a bipartite \textit{planar} graph, with the same number of black vertices and white vertices. We fix a choice of positive cilia $L$ for $\mathcal{G}$, in the sense of Corollary \ref{poscilia} and its subsequent paragraph. Let $\mathbb{P}hi=\{\phi_e\}_{e\in E}$ be an $\mathrm{M}_n(\mathbb{R})$-connection on $\mathcal{G}$. Let $K\in \mathrm{M}_N(\mathrm{M}_n(\mathbb{R}))$ be the associated Kasteleyn matrix: $K$ has rows indexing white vertices and columns indexing black vertices, with entries $K(w,b)=0_n$ (the $n\times n$ zero matrix) if $w,b$ are not adjacent and otherwise $K(w,b) = \varepsilonps_{wb} \phi_{bw}$, where the signs $\varepsilonps_{wb}\in\{-1,+1\}$ are given by the Kasteleyn rule. We let $\tilde K$ be the $nN\times nN$ matrix obtained from $K$ by replacing each entry with its $n\times n$ array of real numbers. In the case that we have an $\mathrm{SL}_n$-local system on $\mathcal{G}$, rather than an $\mathrm{M}_n$-connection, then $\tilde K$ depends on a particular choice of gauge. 
The gauge group $(\mathrm{SL}_n)^V$ acts on $K$ by left- and right-multiplication by diagonal matrices with entries in $\mathrm{SL}_n$, or equivalently, acts on $\tilde K$ by left- and right-multiplication by block-diagonal matrices with blocks in $\mathrm{SL}_n$. These gauge equivalences do not change the determinant of $\tilde K$. Note there is also the obvious coordinate-free description. Given a $\mathrm{End}(V)$-connection $\mathbb{P}hi$ on $\mathcal{G}$, the Kasteleyn determinant $\det \tilde{K}(\mathbb{P}hi)$, depending on a choice of Kasteleyn sign for $\mathcal{G}$ and an ordering of the black and white vertices, is defined as the determinant of the induced linear endomorphism $V^{|B|} \to V^{|W|}$. Recall that $\Omega_n(\mathcal{G})$ is the set of $n$-multiwebs in $\mathcal{G}$. We define $$Z_{nd}(\mathbb{P}hi):=\sum_{m\in\Omega_n(\mathcal{G})}\mathbb{T}r_{L}(m).$$ This generalizes the case of the identity connection from (\ref{Zndtrivial}). Our main theorem is \begin{equation}gin{thm}\lambdaabel{main} Let $\mathbb{P}hi$ be an $\mathrm{End}(V)$-connection on the bipartite graph $\mathcal{G}$. For $n$ even, and a choice of positive cilia $L$ as discussed above, \begin{equation}gin{equation*} \det \tilde K(\mathbb{P}hi) = \sum_{m \in \Omega_n(\mathcal{G})} \mathbb{T}r_L(m); \varepsilonnd{equation*} for $n$ odd, \begin{equation}gin{equation*} \pm \det \tilde K(\mathbb{P}hi) = \sum_{m \in \Omega_n(\mathcal{G})} \mathbb{T}r(m). \varepsilonnd{equation*} \varepsilonnd{thm} Note that the theorem implies that for trivial connections, and $n$ even, the determinant of the Kasteleyn matrix of a graph admitting a dimer cover is always strictly greater than zero. Also note that for $n=2$ the theorem gives a new and different proof of (a slightly more general version of) Theorem 2 of \cite{Kenyon14CMP}. 
For our definition of $\tilde K$ the sign ambiguity is inevitable when $n$ is odd since the sign of $\det\tilde K$ depends on an arbitrary choice of order for both white vertices and black vertices, and a choice of Kasteleyn signs. \begin{equation}gin{proof} We assume for notational simplicity that $\mathcal{G}$ is simple. If $\mathcal{G}$ has multiple edges between pairs of vertices, a slight variation of the following proof will hold. Let $\mathcal{G}_n$ be the graph obtained from $\mathcal{G}$ by replacing each vertex $v$ with $n$ copies $v^1,\dots,v^n$, and replacing each edge $bw$ with the complete bipartite graph $K_{n,n}$ connecting the $b^j$ and $w^i$. See Figure \ref{proof} below. For edge $e=bw$ of $\mathcal{G}$, let $A_{bw}=\varepsilonps_{wb}\phi_{bw}$ be the parallel transport $\phi_{bw}$ times the Kasteleyn sign $\varepsilonps_{wb}$. For $i,j\in\mathbb{C}C$ put weight $A_{bw}^{ij}:=(A_{bw})_{ij}$ on the edge $b^jw^i$ of $\mathcal{G}_n$ lying over $e$. If any entry $A_{bw}^{ij}$ is zero, we remove that edge from $\mathcal{G}_n$. Now when expanded over the symmetric group $S_{nN}$, nonzero terms in $\det\tilde K$ are in bijection with single-dimer covers $\sigma$ of $\mathcal{G}_n$: a single dimer cover $\sigma$ is a bijection from black vertices $b^j$ to adjacent white vertices $w^i$, and has ``weight" $(\tilde K)_\sigma := (-1)^\sigma\prod(A_{bw})_{ij}$ where the product is over dimers in the cover. \begin{equation}gin{figure}[t] \center{\includegraphics[scale=1.5]{proof_alt}} \caption{\lambdaabel{proof} Example in the case $n=3$. The graph $\mathcal{G}$ is equipped with a $3$-multiweb $m$ (bottom), and one of many possible lifts of $m$ to $\mathcal{G}_n$ is chosen (top). Note that only part of $\mathcal{G}$ is shown. 
There are two possible lifts of $m$ over this part of the graph; the other lift connects G to R, and B to G, over the doubled edge.} \varepsilonnd{figure} Each single-dimer cover $\sigma$ of $\mathcal{G}_n$ projects to an $n$-multiweb $m$ of $\mathcal{G}$. We group all single dimer covers of $\mathcal{G}_n$ according to their corresponding $n$-multiweb $m$. That is $$\det\tilde K = \sum_{m\in\Omega_n(\mathcal{G})}\sum_{\sigma\in m} (\tilde K)_\sigma.$$ We claim that the interior sum is, up to a global sign, $\mathbb{T}r_L(m)$; this will complete the proof. That is, we need to prove, for a constant sign $s$ (independent of $m$) and for $m\in\Omega_n(\mathcal{G})$, \begin{equation}\lambdaabel{det1}s\mathbb{T}r_L(m) = \sum_{\sigma\in m}(-1)^\sigma\prod_{e=\tilde b\tilde w}(A_{bw})_{\tilde w\tilde b},\varepsilone where the product is over edges of the dimer cover $\sigma$ of $\mathcal{G}_n$, or in other words, $\tilde b=\sigma(\tilde w).$ In this product, $\tilde w\tilde b\in\mathcal{G}_n$ lies over an edge $wb$ of the multiweb $m$; the vertex $\tilde w$ corresponds to a choice of color of the half edge at $w$ and $\tilde b$ corresponds to a choice of color of the half edge at $b$. Now we group the $\sigma\in S_{nN}$ according to colorings of the edges of $\mathcal{G}$. An edge $e=wb$ of $\mathcal{G}$ of multiplicity $k$ is colored by two sets $S_e,T_e$ of colors, both of size $k$, where $S_e$ is associated to the white vertex and $T_e$ to the black vertex. There are then $k!$ corresponding dimer covers of $\mathcal{G}_n$ lying over that edge, one for each bijection $\pi_e$ from $S_e$ to $T_e$. We group the $\sigma$ into colorings $c$ with the same sets of colors $S_e,T_e$ on each edge. Each permutation $\sigma$ corresponds to a choice of such a coloring $c$ of $m$ and a choice, for each edge $e$ of multiplicity $k$, of a bijection $\pi_e$ between the sets $S_e$ and $T_e$. 
After this grouping we can write the RHS of (\ref{det1}) as a sum over colorings $c$ of $m$: \begin{equation}gin{align}\nonumber&= \sum_{c}\sum_{\sigma\in c}(-1)^\sigma\prod\varepsilonps_{\tilde w\tilde b}\prod_{e=\tilde w\tilde b}(\phi_{bw})_{\tilde w\tilde b}\\ \nonumber&= \sum_{c}\lambdaeft(\prod\varepsilonps_{\tilde w\tilde b}\right)\sum_{\sigma\in c}(-1)^\sigma\prod_{e=\tilde w\tilde b}(\phi_{bw})_{\tilde w\tilde b} \varepsilonnd{align} where we used the fact that, once the multiplicities are fixed, $\prod\varepsilonps_{\tilde w\tilde b}$ is independent of $\sigma$. Now $\sigma\in S_{nN}$ is a composition $\sigma=(\prod\pi_e)\sigma_0$ of a permutation $\sigma_0$ (which depends only on $c$, and is the permutation matching each element of each $S_e$ with the corresponding element of $T_e$ when both sets are taken in their natural order) and the individual $\pi_e$. More precisely, we should write $\sigma = (\prod_e \pi'_e)\sigma_0$ where $\pi'_e$ is the bijection from $S_e$ to $S_e$ which, when composed with the natural-order bijection from $S_e$ to $T_e$, gives $\pi_e$. Thus \begin{equation}gin{align} \nonumber&= \sum_{c}\lambdaeft(\prod\varepsilonps_{\tilde w\tilde b}\right)(-1)^{\sigma_0}\prod_{e=bw}\sum_{\pi'_e}(-1)^{\pi'_e}\prod_{s\in S_e} (\phi_{bw})_{s,\pi'_e(s)}\\ &= \sum_{c}\lambdaeft(\prod\varepsilonps_{\tilde w\tilde b}\right)(-1)^{\sigma_0}\prod_{e=bw}\det(\phi_{bw})_{S_e,T_e}.\lambdaabel{detline} \varepsilonnd{align} Recalling (see Proposition \ref{tracecolor0}) the definition of trace of a multiweb we have \begin{equation}\lambdaabel{trline}\mathbb{T}r_L(m) = \sum_{c} (-1)^{\sum_v c(v)}\prod_{e=bw}\det(\phi_{bw})_{S_e,T_e}\varepsilone where the sum is over colorings $c$ of the half-edges, and the product is over edges of $m$. There is a one-to-one correspondence between the terms of (\ref{detline}) and those of (\ref{trline}); it remains to compare their signs. 
Let us take a \varepsilonmph{pure} dimer cover of $\mathcal{G}_n$: a dimer covering which matches like colors. Then each $\pi_e$ is the identity map, and $\sigma=\sigma_0$. This dimer cover projects to an edge-$n$-coloring of $\mathcal{G}$. By Kasteleyn's theorem, $(-1)^{\sigma_0} \prod\varepsilonps_{\tilde w\tilde b}$ is constant, that is, the signature $(-1)^\sigma=(-1)^{\sigma_0}$ of a pure dimer cover exactly cancels the product $\prod\varepsilonps_{\tilde w\tilde b}$ of the Kasteleyn signs (up to a global sign choice). Likewise $(-1)^{\sum_v c(v)}$ is a constant for edge-$n$-colorings, by Proposition \ref{tracecolorplanar}. So we just need to compare the sign of an arbitrary coloring with an edge-$n$-coloring. Suppose we change a coloring by transposing two colors at a single, without loss of generality white, vertex $w$. For the purposes of computing the sign change we can assume $m$ is simple: all multiplicities are $1$, by splitting multiple edges into parallel edges. Now in this case under a transposition of colors at $w$ both $\sigma_0$ and $c(w)$ change sign, so both (\ref{detline}) and (\ref{trline}) change sign. Since transpositions at vertices connect the set of all colorings we see that (\ref{detline}) and (\ref{trline}) agree up to a global sign for all colorings. \varepsilonnd{proof} A small modification of Theorem \ref{main} allows us to put positive real edge weights $\{\nu_e\}_{e\in E}$ on the edges of $\mathcal{G}$: we simply multiply the entries in $K$ by the corresponding weight $\nu_e$. Then $$Z_{nd}(\mathbb{P}hi,\nu):=\sum_{m\in\Omega_n(\mathcal{G})}\lambdaeft(\prod_{e\in m}\nu_e\right)\mathbb{T}r_L(m) = \pm\det\tilde K.$$ This allows us to compute $\mathbb{T}r_L(m)$ in practice for any particular web $m$ as follows. 
Put variable edge weights $x_e$ on edges of $m$, and extract the coefficient of $\prod_{e\in m}x_e$ from $\det \tilde K$: \begin{equation}\lambdaabel{xyzthm}\mathbb{T}r_L(m) = \pm \lambdaeft[\prod_{e\in m}x_e\right]\det \tilde K.\varepsilone \section{Reductions for \texorpdfstring{$\mathrm{SL}_nth$}{SL3}}\lambdaabel{reductions} The trace of a large web is mysterious and hard to compute; the method outlined at the end of the previous section is only an exponential-time (in the size of the web) algorithm. While we cannot improve generally on the exponential nature of this computation, we can make it conceptually simpler in the case of $\mathrm{SL}_3$ by applying certain reductions, or \textit{skein relations}, as described in \cite{Jaeger92DiscreteMath, Kuperberg96CommMathPhys}, which simplify its computation. (Skein relations also exist for $\mathrm{SL}_n$ for $n>3$ \cite{SikoraTrans01}, but it is not clear how to use them to ``reduce" a web, as in Section \ref{reducedwebs} below.) Throughout this section, we assume the (not necessarily planar) graph $\mathcal{G}$ is embedded in a surface $\Sigma$. \subsection{Skein relations} \lambdaabel{ssec:skeinrelations} \begin{equation}gin{figure}[htbp] \center{\includegraphics[width=3.7in]{skeinsmall}} \caption{\lambdaabel{skeinsmall}Basic skein relation for $\mathrm{SL}_nth$. Note that we write composition of parallel transports in the natural order, that is, in the composition order, orienting edges from black to white. Each term is part of a larger, possibly nonplanar, web. } \varepsilonnd{figure} The skein relations we consider are relations between formal linear combinations of webs. They correspond to and arise from algebraic relations between the traces of webs. They also work on the level of multiwebs, see Section \ref{mwskein} below. Let us start with any $\mathrm{SL}_3$-connection $\mathbb{P}hi$ on the graph $\mathcal{G}$. 
The basic skein relation involves replacing two adjacent vertices of a web (adjacent via an edge of multiplicity $1$), and their adjacent edges, with either two parallel edges or two crossing edges as shown in Figure \ref{skeinsmall}. This replaces a single web with a linear combination of two webs, one of which is locally nonplanar. The trace of the first web is the signed sum of the traces of the resulting two webs. We have drawn the skein relation along with the parallel transports on its edges, and indicated how they transform under the relation. In our applications however we will only apply the skein relation when the parallel transports on the relevant edges are the identity. (Note that we can locally trivialize the connection on these edges, since the relevant subgraph is contractible). Using only this basic relation, each web can be reduced to a linear combination of (possibly nonplanar) unions of loops. This shows that the trace of a $3$-web is a linear combination of traces of matrices formed from certain loops running through it. The type-I skein relation, for a connection which is (locally) trivial around a loop, removes the loop and multiplies the coefficient of the web by $3$, as shown in Figure \ref{loopremoval3}. The type-II skein relation, for a connection which is (locally) trivial around a bigon, is depicted in Figure \ref{loopremoval} and is a consequence of the basic skein relation. It removes a degree-$2$ face as shown, and replaces it with a single edge, and multiplying the coefficient of the web by $2$. Note this relation preserves planarity. The type-III skein relation is a relation for a connection which is (locally) trivial around a square. It is shown in Figure \ref{skeinsquare}; this is also a consequence of the basic relation. Note that this relation also preserves planarity. 
\begin{equation}gin{figure}[htb] \center{\includegraphics[width=1in]{loopskeinrelation}} \caption{\lambdaabel{loopremoval3} Type-I: loop removal (locally trivial connection).} \varepsilonnd{figure} \begin{equation}gin{figure}[htb] \center{\includegraphics[width=2.5in]{bigonskein}} \caption{\lambdaabel{loopremoval} Type-II: bigon removal (locally trivial connection).} \varepsilonnd{figure} \begin{equation}gin{figure}[htb] \center{\includegraphics[width=4in]{skeinsquare}} \caption{\lambdaabel{skeinsquare}Type-III: square removal (locally trivial connection).} \varepsilonnd{figure} When the connection is trivial, these skein relations also work at the level of colorings, see Figure \ref{skeincolor}. That is, there is a bijection between ``before" colorings and ``after" colorings. \begin{equation}gin{figure} \center{\includegraphics[width=4in]{colorskein1}} \center{\includegraphics[width=2in]{colorskein2}} \center{\includegraphics[width=4in]{colorskein3}} \center{\includegraphics[width=3in]{edgecontract}} \caption{\lambdaabel{skeincolor}The skein relations for edge colorings: if all four diagonal edges of a square are the same color (color $2$ in this illustration), there are two ways to color the edges of the central square face with the two remaining colors, and likewise two possible reductions on the right-hand side. If the diagonal edges have two colors, the like colors must be adjacent, as shown; then there is only one way to complete the coloring on the edges of the central square face, and one reduction on the right-hand side. A $2$-gon can be colored in two ways, and can be replaced with a single edge (joining its neighboring edges into a single edge as shown) with a factor $2$. Finally, a doubled edge can be removed, joining its neighboring edges into a single edge as shown.} \varepsilonnd{figure} These skein relations are special to $\mathrm{SL}_3$-connections. 
For instance, one does not obtain a relation for $\mathrm{M}_3$-connections by using $C \wedge C$ in place of $C^{-1}$ in Figure \ref{skeinsmall}, in contrast to the phenomenon we saw in Section \ref{loop}. This is analogous to the $n=2$ case, where the Kauffman bracket skein relation corresponds (after twisting by signs) to the classical trace relation $\mathrm{Tr}(A)\mathrm{Tr}(B)=\mathrm{Tr}(AB)+\mathrm{Tr}(AB^{-1})$ specific to matrices $A, B \in \mathrm{SL}_2$. \subsubsection{Multiweb skein relations}\lambdaabel{mwskein} \begin{equation}gin{figure} \center{\includegraphics[width=4.2in]{multiwebskein2}} \caption{\lambdaabel{multiwebskein2} Removing a bigon, and replacing with a path in two different ways.} \varepsilonnd{figure} \begin{equation}gin{figure} \center{\includegraphics[width=4.2in]{multiwebskein1}} \caption{\lambdaabel{multiwebskein1} Removing a square.} \varepsilonnd{figure} We can also define a notion of skein relation for multiwebs in a fixed graph $\mathcal{G}$. \old{ More precisely, (for any fixed $n$) if we let $m$ denote a multiweb in $\mathcal{G}$, then a \textit{submultiweb} $m^\prime$ of $m$ is a multiweb in a subgraph $\mathcal{G}^\prime \subset \mathcal{G}$ that is also contained in $m$ (but not necessarily respecting the multiplicities, see e.g. Figure \ref{loopremoval}). Below, we will also need to make use of the \textit{empty} submultiweb $\varepsilonmptyset$. } A \varepsilonmph{multiweb skein relation} is an operation that takes a $3$-multiweb $m$ to a formal linear combination of other $3$-multiwebs in $\mathcal{G}$. The type-I, loop-removal, skein relation has a multiweb version which replaces a loop (consisting in an alternating sequence of single and double edges) with a sequence of tripled edges, by increasing the multiplicity of the doubled edges and decreasing the multiplicity of the single edges along the loop; the resulting web has coefficient multiplied by $3$. 
The skein relations of type-II and type-III above also have multiweb versions as shown in Figure \ref{multiwebskein2},\ref{multiwebskein1}. For the type-II, we take two vertices which have two disjoint paths joining them as shown, not necessarily of the same length, and so that this bigon is topologically trivial. We replace this ``bigon" by the two webs shown: each is obtained by increasing the multiplicity on every other edge by $1$ and decreasing the multiplicity on the remaining edges. For type-III, we have four vertices $a,b,c,d$ connected into a topologically trivial cycle by four paths (which may have lengths larger than $1$, unlike as shown). This is replaced by two webs, each again obtained by increasing the multiplicity of every other edge of the cycle by $1$ and decreasing the multiplicity of the other edges of the cycle. \old{can be thought as planar skein relations, where we only allow ourselves to perform these topological relations if the bigon or square involved is contractible in the surface $\Sigma$. Note the basic skein relation is not planar. For these two kinds of relations, the submultiwebs involved are for $\mathcal{G}^\prime = \mathcal{G}$. We define two more, somewhat tautological, planar skein relations, that we will need below. These are the type-0 and type-I relations, shown in Figures \ref{tripleedgeremoval} and \ref{loopremoval2}. Again, the loop involved is assumed to be contractible in $\Sigma$. For these relations, the submultiwebs involved are for a proper subset $\mathcal{G}^\prime \subset \mathcal{G}$, since the components are removed from the web. (Of course, these skein relations also correspond to algebraic trace relations for the trivial connection, in which case it is not necessary that the involved face be topologically trivial.) It is worth briefly comparing to the $\mathrm{SL}_2$ case. 
There are only two planar skein relations: a type-I relation that removes a contractible loop at the cost of multiplying the web by 2; and, a type-0 relation that removes a doubled edge for free. Note that loops are unoriented for $\mathrm{SL}_2$ (as opposed to $\mathrm{SL}_3$!). } \subsection{Reduced webs}\lambdaabel{reducedwebs} A $3$-multiweb $m^\prime$ in $\mathcal{G} \subset \Sigma$ is said to be \varepsilonmph{reduced} (or \varepsilonmph{nonelliptic}) if it has no topologically trivial loops, bigons or quadrilateral faces. Given an unreduced multiweb $m$ in $\mathcal{G}$, we can apply reductions to $m$ to reduce it to a formal linear combination of reduced multiwebs: $m \mapsto \sum c' m'$. Such a reduction is not unique, due to the type-III skein relation; indeed, it is easy to produce examples of $m$ having more than one square, where different choices of sequences of resolutions of the squares give different end ``states'' $\sum c' m'$. For a reduced web $m'$ we denote by $[m']$ its isotopy type as a map from an abstract trivalent graph into $\Sigma$, where the image is reduced as a subset of $\Sigma$. Let $\Lambda_3$ denote the set of isotopy classes of reduced webs $[m']$ coming from multiwebs $m$ in $\mathcal{G}$. It turns out \cite{Jaeger92DiscreteMath, Kuperberg96CommMathPhys} that, although the end states depend on the sequence of resolutions, the formal linear combination of isotopy classes $\sum c' [m']$ does not. Thus we can say that the collection of isotopy classes of multiwebs in $m$ is canonical. Note that if the entire multiweb $m$ is topologically trivial on $\Sigma$, for example if $m$ consists only of tripled edges and contractible loops, then its reduction will be a positive integer times the isotopy class $[\varepsilonmptyset] \in \Lambda_3$ of the ``empty'' web consisting only in tripled edges. 
At the level of traces, let us assume $\mathcal{G}$ is equipped with a flat $\mathrm{SL}_3$-connection $\Phi$, in the sense of Section~\ref{ssec:slnlocalsystems}. Then, by the flatness property, we see that this reduction of a multiweb $m$ into reduced multiwebs applies as well at the level of traces with respect to $\Phi$. We gather that \[\mathbb{T}r(m) = \sum_{\lambda} C_{m,\lambda}\mathbb{T}r(\lambda)\] where $C_{m,\lambda}$ is the number of reduced isotopy classes $\lambda$ ``contained'' in $m$, that is, resulting from the reduction of $m$. Now, for $\mathcal{G}$ planar, Theorem~\ref{main} writes the determinant $\det\tilde K$ as a sum over unreduced multiwebs $m$. We can further reduce each multiweb to write $\det\tilde K$ as a sum of traces of isotopy classes of reduced multiwebs: \begin{thm}\label{mainred} For a graph $\mathcal{G}$ embedded in a genus-$0$ surface minus $k$ disjoint open disks, $k \geq 0$, with a flat $\mathrm{SL}_3$ local system, we have \[\pm \det\tilde K = \sum_{\lambda\in\Lambda_3}C_{\lambda}\mathbb{T}r(\lambda)\] where the sum is over isotopy classes of reduced $3$-webs $\lambda$ of $\mathcal{G}$, and the $C_\lambda$ are positive integers. \end{thm} The most interesting cases are for $k \geq 2$. Indeed, when $k=0,1$ then $\Lambda_3$ consists only of the isotopy class of the empty web, $[\emptyset]$. We describe how to extract the coefficients $C_\lambda$ in some simple cases, for $k=2$ and $k=3$, in the next section. Due to the dependence on choices when resolving multiwebs, we do not have (at present) a canonical probability measure $\mu_3$ on the set of reduced webs in $\mathcal{G}$. However, reduction does induce a probability measure $\mu'_3$ on the set of isotopy classes of reduced webs, $\Lambda_3$. 
It is defined by (recalling \eqref{Zndtrivial}) \[\Pr(\lambda) = \frac{C_{\lambda}}{Z_{3d}(I)}.\] This is in contrast to the case of $\mathrm{SL}_2$, where $\mu_2$ is defined on the set of $2$-multiwebs (loops and doubled edges) in $\mathcal{G}$. Here, the only planar $\mathrm{SL}_2$ skein relation resolves a topologically trivial loop, in two different ways, into a sequence of doubled edges. \section{Multiwebs on simple surfaces} We continue studying the case $n=3$. Sikora and Westbury \cite[Theorem 9.5]{SikoraAlgGeomTop07} showed that reduced webs form a basis for the ``$\mathrm{SL}_3$-skein algebra'' for any surface. That is, using skein relations any web can be reduced to a unique linear combination of non-elliptic webs. Thus, in the classical setting \cite[Theorem 3.7]{SikoraTrans01}, the traces for nonelliptic webs form a basis for the algebra of invariant regular functions on the space of flat $\mathrm{SL}_3$-connections modulo gauge (the ``$\mathrm{SL}_3$-character variety''). \subsection{Annulus}\label{annulussection} We consider here the case where the graph $\mathcal{G}$ is embedded on an annulus. The result of \cite{SikoraAlgGeomTop07} for the annulus can be stated as follows. \begin{prop}\label{planarred} By use of skein relations 2 and 3, any web on an annulus with a flat $\mathrm{SL}_3$-local system can be reduced to a unique positive integer linear combination of collections of disjoint noncontractible oriented cycles. \end{prop} \begin{proof} For a bipartite connected trivalent graph on a sphere we have (by Euler characteristic) \begin{equation}\label{EC}6=2n_2+n_4-n_8+\dots+(3-k)n_{2k}+\dots\end{equation} where $n_k$ is the number of faces of degree $k$ (the degree-$6$ term vanishes). When $\mathcal{G}$ is embedded on an annulus at most two faces contain boundary components, so there is at least one contractible degree-$2$ face or $2$ contractible degree-$4$ faces. 
We first perform type-$2$ skein relations to remove any contractible degree-$2$ faces. Then there remain at least $2$ faces of degree $4$, upon which we can perform a type-$3$ skein reduction. This reduces the number of vertices in $\mathcal{G}$ and possibly disconnects $\mathcal{G}$; continue with each component until each component is a loop. \end{proof} Let $W_{j,k}$ be the set of isotopy classes of reduced webs on $\Sigma$ with $j$ loops of homology class $+1$ and $k$ loops of homology class $-1$. (We orient a $3$-multiweb which is a loop so that the single edges are oriented from black to white, and the doubled edges are oriented from white to black.) Suppose $\Phi$ is a flat connection with monodromy $A\in\mathrm{SL}_3$ around the generator of $\pi_1$. A noncontractible simple loop has trace $\mathrm{Tr}(A)$ or $\mathrm{Tr}(A^{-1})$ depending on orientation. For a general web $m$, by Proposition~\ref{planarred}, $\mathbb{T}r(m)$ will be a polynomial in $\mathrm{Tr}(A)$ and $\mathrm{Tr}(A^{-1})$ whose coefficients are nonnegative integers: \[\mathbb{T}r(m) = \sum_{j,k\ge0}M_{j,k}\mathrm{Tr}(A)^j\mathrm{Tr}(A^{-1})^k\] where $M_{j,k}$ counts reduced subwebs in $W_{j,k}$. \begin{figure} \centering \includegraphics[width=2.5in]{sqweb} \caption{\label{sqweb} A web on an annulus with flat connection having monodromy $A$. We can for example put parallel transports $A, A^{-1}$ as marked and $I$ on the remaining edges.} \end{figure} For example for the web of Figure~\ref{sqweb} the trace is \[\mathbb{T}r(m) = 15+\mathrm{Tr}(A)\mathrm{Tr}(A^{-1}).\] By Theorem~\ref{mainred} for a graph $\mathcal{G}$ on an annulus we have \[\det \tilde K(A) = \sum_{j,k\ge0} C_{j,k}\mathrm{Tr}(A)^j\mathrm{Tr}(A^{-1})^k.\] We can compute $C_{j,k}$ concretely as follows. Assume $A$ has eigenvalues $x,y,z$ (with $xyz=1$). Let $u=\mathrm{Tr}(A)$, $v=\mathrm{Tr}(A^{-1})$. Then $x,y,z$ are roots of $x^3-ux^2+vx-1=0$. 
We can write \begin{equation}\label{tripleK}\det\tilde K = \det K(x) \det K(y)\det K(z)\end{equation} where the $K(\cdot)$ are the corresponding scalar matrices. This is a symmetric polynomial of $x,y,z$ and so can be written as a polynomial in $u,v$. As a concrete example, take a $2m\times n$ square grid $\mathcal{G}_{2m,n}$ on an annulus (with circumference $2m$), see Figure~\ref{gridannulus}. By Proposition~\ref{anndet} in the appendix, for $n$ even, \[\det K_{2m,n}(x) = \pm \prod_{j=1}^{n/2}\frac{(x+\alpha_j^{2m})(x+\alpha_j^{-2m})}{x}\] where $\alpha_j=-\cos\theta+\sqrt{1+\cos^2\theta}$ and $\theta=\frac{\pi j}{n+1}$. Note $(x+r)(y+r)(z+r)= 1+ur+vr^2+r^3.$ Hence using \eqref{tripleK}, \begin{equation}\label{Kprod1}\det\tilde K = \pm\prod_{j=1}^{n/2}(1+u\alpha_j^{2m}+v\alpha_j^{4m}+\alpha_j^{6m})(1+u\alpha_j^{-2m}+v\alpha_j^{-4m}+\alpha_j^{-6m}).\end{equation} Writing $\frac{\det\tilde K(u,v)}{\det\tilde K(3,3)}=\sum_{p,q} C_{p,q}u^pv^q$, we can interpret \eqref{Kprod1} as the probability generating function for an $n$-step random walk in $\mathbb{Z}^2$ starting from $(0,0)$: \[(X,Y)=(X_1,Y_1)+(X_{-1},Y_{-1}) + (X_2,Y_2) +\dots+(X_{-n/2},Y_{-n/2})\] where for $j>0$ the step $(X_j,Y_j)$ is $(0,0),(1,0),(0,1)$ with probabilities respectively \[\frac{1+\alpha_j^{6m}}{1+3\alpha_j^{2m}+3\alpha_j^{4m}+\alpha_j^{6m}}, \frac{3\alpha_j^{2m}}{1+3\alpha_j^{2m}+3\alpha_j^{4m}+\alpha_j^{6m}}, \frac{3\alpha_j^{4m}}{1+3\alpha_j^{2m}+3\alpha_j^{4m}+\alpha_j^{6m}}\] and a similar formula for $j<0$ (with the $(1,0)$ and $(0,1)$ probabilities reversed). 
We have \[\bar X=\bar Y= \sum_{j=1}^{n/2} \frac{3\alpha_j^{2m}+3\alpha_j^{4m}}{1+3\alpha_j^{2m}+3\alpha_j^{4m}+\alpha_j^{6m}}= \sum_{j=1}^{n/2} \frac{3\alpha_j^{2m}}{(1+\alpha_j^{2m})^2}.\] Now let $m,n$ tend to $\infty$ with $m/n\to\tau$. Let us estimate $\bar X,\bar Y$ for $m,n$ large. We only get a nontrivial contribution if $|\alpha_j|\approx 1$, that is, when $j\approx n/2$. We can write (changing $j$ to $n/2-j$) \[\alpha_j= 1+\frac{\pi(j+\frac12)}{n} + O\Bigl(\frac{j^2}{n^2}\Bigr)\] and so \[\alpha_j^{2m} = q^{-2j-1}(1+o(1))\] where $q=e^{-\pi\tau}$. We thus have, up to errors tending to zero as $m,n\to\infty$, \[\bar X = \bar Y = \sum_{j=0}^{\infty}\frac{3q^{2j+1}}{(1+q^{2j+1})^2}.\] Now suppose $\tau$ is large: we have a long thin annulus. Then $q$ is small. Then from \eqref{Kprod1} we have \[\frac{\det\tilde K(u,v)}{\det\tilde K(3,3)}=1+o(1).\] With high probability there are zero crossings. The probability of a $(j,k)$-crossing for $j+k\ge0$ is to leading order $q^{\delta_{j,k}}3^{j+k}$ where the ``crossing exponent'' is $\delta_{j,k} = \lceil \frac{2(j^2+jk+k^2)}{3}\rceil$. This can be seen by expanding \eqref{Kprod1} and extracting the appropriate term to leading order (see the calculation in the appendix, Section~\ref{extract}). A similar computation can be done for an $n$-multiweb on an annulus, for $n>3$, since \cite{DanandTom} shows that an $n$-multiweb on an annulus can be reduced to a collection of loops (of $n-1$ types: with multiplicities $(k,n-k)$ for $k=1,\dots,n-1$). \subsection{Pair of pants}\label{pantssection} \begin{figure} \centering \includegraphics[width=4cm]{vartheta} \caption{\label{pantswebs} Non-elliptic webs on a pants. Shown is $W_\eta$ for $\eta=2+\omega$.} \end{figure} Let $\Sigma$ be a pair of pants, that is, a sphere with three holes. 
Let $\omega=e^{\pi i/3}$ and let $\eta\in\mathbb{C}$ have the form $\eta=a+b\omega$ where $a,b\in\mathbb{Z}_{\ge0}$. We construct a reduced web $W_{\eta}$ on $\Sigma$ as follows; see Figure~\ref{pantswebs}. We take the rhombus with sides $\eta\omega^{-1}$ and $\eta\omega$ in $\mathbb{C}$, and glue sides as shown, putting punctures at the corners, to form a $3$-punctured sphere $\Sigma$. The image of the standard honeycomb graph (dual to the regular triangulation of $\mathbb{C}$) descends to a reduced web on $\Sigma$. \begin{prop} A reduced web on a pair of pants is a union of a collection of loops and at most one of the webs $W_\eta$. \end{prop} \begin{proof} If the web has a bigon or quad face not containing a boundary component, it is not reduced. If any boundary face is not a bigon, there must be (by \eqref{EC}) a non-boundary quad face, so it is not reduced. So if reduced, all boundary faces are bigons, and all other faces are hexagons, again by \eqref{EC}. The dual of such a web is a triangulation with all vertices of degree $6$ except for three vertices of degree $2$. Geometrically (replacing triangles with equilateral triangles) it is a $(3,3,3)$-orbifold. Such a space has a $3$-fold branched cover (over the vertices of degree $2$) which is an equilateral torus. It is a quotient of the regular hexagonal triangulation by a hexagonal sublattice. So these triangulations are indexed by Eisenstein integers $\eta=a+be^{\pi i/3}$, where $a,b\in\mathbb{Z}_{\ge0}$, and $a^2+ab+b^2$ is the number of white vertices (or black vertices). \end{proof} Note that there are two possible orientations of each $W_\eta$ (obtained from switching the colors), the other denoted $W_\eta^*$, but at most one can occur in any reduced web. 
Letting $A,B,C=AB^{-1}$ be the monodromies around the punctures of a flat connection on $\Sigma$, we have \[\det\tilde K = \sum_{i_1,i_2,j_1,j_2,k_1,k_2,\eta}C_{\vec{i}}\,\mathrm{Tr}(A)^{i_1}\mathrm{Tr}(A^{-1})^{i_2}\mathrm{Tr}(B)^{j_1}\mathrm{Tr}(B^{-1})^{j_2}\mathrm{Tr}(C)^{k_1}\mathrm{Tr}(C^{-1})^{k_2}\mathbb{T}r(W_\eta)\] for some integers $C_{\vec{i}}=C_{i_1,i_2,j_1,j_2,k_1,k_2,\eta}$. While extracting the coefficients $C_{\vec{i}}$ can be done in principle (by the result of Sikora--Westbury mentioned at the beginning of this section), in practice it is not easy. Let us only consider one simplified situation. Suppose a bipartite graph $\mathcal{G}$ is embedded on $\Sigma$, and two of the three boundary components of the pants are in \emph{adjacent} faces of $\mathcal{G}$. Then subwebs of $\mathcal{G}$ of type $W_\eta$ can only occur if $\eta=1$, that is, except for loop components a reduced web $W$ in $\mathcal{G}$ can only be a $W_1$ (or a $W_1^*$, depending on the orientation of the edge between the adjacent faces); such a web is homeomorphic to a theta graph, see Section~\ref{sssec:thetagraph}. For the identity connection we then have $\det\tilde K = Z_0+Z_1\mathbb{T}r(W_1)$, where $Z_0$ is the weighted sum of multiwebs not containing $W_1$ in their reduction, and $Z_1$ is the weighted sum of reduced subwebs containing a component of type $W_1$. We can compute $Z_0,Z_1$ as follows. Suppose we impose a flat connection with monodromy $A,B$ around the generators of $\pi_1$, where $A,B$ are chosen so that traces of simple loops are $3$ and the trace of a $W_1$ is $x$. For example $A=\begin{pmatrix}1&a&1\\0&1&1\\0&0&1\end{pmatrix}$ and $B=\begin{pmatrix}1&0&0\\1&1&0\\-a&-a^2&1\end{pmatrix}$ so that $\mathrm{Tr}(AB)=3-a^2$. Note that $\mathbb{T}r(W_1)=\mathrm{Tr}(A)\mathrm{Tr}(B)-\mathrm{Tr}(AB)=6+a^2 =: x$. So $Z_1$ is the coefficient of $a^2$ in $\det \tilde K$. 
\section{Appendix: annulus determinant} \begin{prop}\label{anndet} For the grid graph $\mathcal{G}_{2m,n}$ on an annulus as in Figure~\ref{gridannulus} with $n$ even and $m$ odd we have \[\det K_{2m,n}(z) = \pm\prod_{k=1}^{n/2}\frac{(z+\alpha_k^{2m})(z+\alpha_k^{-2m})}{z}\] where $\alpha_k=-\cos\theta+\sqrt{1+\cos^2\theta}$ with $\theta=\frac{\pi k}{n+1}$. If $n$ is odd and $m$ is odd the result is \[\det K_{2m,n}(z) = \pm\Bigl(\sqrt{z}+\frac1{\sqrt{z}}\Bigr)\prod_{k=1}^{(n-1)/2}\frac{(z+\alpha_k^{2m})(z+\alpha_k^{-2m})}{z}\] with $\alpha$ as above. \end{prop} \begin{figure}[t] \centering \includegraphics[width=3in]{gridannulus} \caption{\label{gridannulus} $\mathcal{G}_{6,4}$ is obtained by gluing the left and right sides of this figure. To make a Kasteleyn matrix with flat connection having monodromy $z$ around the annulus, take $\zeta$ such that $\zeta^{2m}=z$ and put connection $\zeta$ on all east-going edges (and $1$ on vertical edges).}\end{figure} \begin{proof} Recall that the Kasteleyn matrix $K$ has rows indexing white vertices and columns indexing black vertices. We consider here the \emph{large} Kasteleyn matrix $\hat K$, with rows (and columns) indexing \emph{all} vertices, both black and white. We have $\hat K(\zeta) = \begin{pmatrix}0&K(\zeta)\\K^t(1/\zeta)&0\end{pmatrix}$ and, by symmetry, $\det K(\zeta) = \det K(1/\zeta)$ so $\det K(\zeta)= \pm \det\hat K(\zeta)^{1/2}.$ (The sign depends on choice of gauge and vertex order.) We put Kasteleyn ``signs'' $i=\sqrt{-1}$ on vertical edges and $1$ on horizontal edges, as in \cite{Kenyon09Statisticalmechanics}. 
Indexing vertices by their $x,y$-coordinates, where $(x,y)\in[0,2m-1]\times[1,n]$, the eigenvectors of $\hat K$ are \[f_{k,j}(x,y) = e^{2\pi ijx/(2m)}\sin\frac{\pi ky}{n+1}\] where $j\in\{0,\dots,2m-1\}$ and $k\in\{1,2,\dots,n\}$. The corresponding eigenvalues are \[\lambda_{j,k} = \zeta e^{2\pi ij/(2m)}+\zeta^{-1}e^{-2\pi ij/(2m)} + 2i\cos\frac{\pi k}{n+1}.\] Thus \begin{align*}\det K &= \pm\left[\prod_{j=0}^{2m-1}\prod_{k=1}^{n}\left(\zeta e^{2\pi ij/(2m)}+\zeta^{-1}e^{-2\pi ij/(2m)} + 2i\cos\frac{\pi k}{n+1}\right)\right]^{1/2}\\ &= \pm\left[\prod_{k=1}^{n}\prod_{\zeta^{2m}=z}\left(\zeta +\zeta^{-1} + 2i\cos\frac{\pi k}{n+1}\right)\right]^{1/2}\\ &= \pm\left[\prod_{k=1}^{n}\prod_{\zeta^{2m}=z}\frac{(\zeta-\beta_k)(\zeta-\gamma_k)}{\zeta}\right]^{1/2}\\ &= \pm\left[\prod_{k=1}^{n}\frac{(z-\beta_k^{2m})(z-\gamma_k^{2m})}{-z}\right]^{1/2} \end{align*} where $\beta_k,\gamma_k$ are the roots $\zeta$ of $\zeta +\zeta^{-1} + 2i\cos\frac{\pi k}{n+1}=0$, that is, $\beta_k,\gamma_k = i(-\cos\theta\pm\sqrt{1+\cos^2\theta})$ with $\theta=\frac{\pi k}{n+1}$. If $n$ is even we can pair the $k$ and $n+1-k$ terms which are identical, to get \[\det K=\pm\prod_{k=1}^{n/2}\frac{(z-\beta_k^{2m})(z-\gamma_k^{2m})}{z}.\] If $n$ is odd the $k=(n+1)/2$ term is $-(z+1/z+2)$, yielding \[\det K=\pm\Bigl(\sqrt{z}+\frac1{\sqrt{z}}\Bigr)\prod_{k=1}^{(n-1)/2}\frac{(z-\beta_k^{2m})(z-\gamma_k^{2m})}{z}.\] Letting $\alpha_k=-i\beta_k$ (and noting that $\gamma_k=\beta_k^{-1}$, and $m$ is odd) gives the result. \end{proof} \section{Appendix: coefficient extraction}\label{extract} We compute the leading-order term of the coefficient of $u^j v^k$ in the expansion \[\prod_{i=0}^\infty (1+q^{2i+1}u+q^{4i+2}v)(1+q^{2i+1}v+q^{4i+2}u).\] To accomplish this, we need to take the ``$u$'' term from $j$ factors and the ``$v$'' term from $k$ factors. 
Let $A=\prod_{i=0}^\infty (1+q^{2i+1}u+q^{4i+2}v)$ and $B=\prod_{i=0}^\infty(1+q^{2i+1}v+q^{4i+2}u).$ From $A$ we take $\ell_1$ of the $u$ terms, with leading-order coefficients $q^x$ for $x\in\mathcal{L}_1$ for some subset $\mathcal{L}_1\subset\{1,3,5,\dots\}$ of cardinality $\ell_1$. Likewise from $B$ we take $\ell_2=j-\ell_1$ of the $u$ terms, with coefficients $q^{2x}$ for $x\in\mathcal{L}_2$ for some subset $\mathcal{L}_2\subset\{1,3,5,\dots\}$ of cardinality $\ell_2$. Likewise we take $m_1$ of the $v$ terms from $A$ and $m_2=k-m_1$ of the $v$ terms from $B$, corresponding to subsets $\mathcal{M}_1,\mathcal{M}_2\subset\{1,3,5,\dots\}$ of cardinalities $m_1,m_2$, and the corresponding coefficients are to leading order $q^{2x}$ for $x\in \mathcal{M}_1$ and $q^x$ for $x\in \mathcal{M}_2$. We require $\mathcal{L}_1\cap\mathcal{M}_1=\emptyset = \mathcal{L}_2\cap\mathcal{M}_2$. Let $L_1=\sum_{i\in\mathcal{L}_1}i$ and likewise define $L_2,M_1,M_2$. We need to make these choices to minimize the exponent of the leading-order term of $u^jv^k$ which is $L_1+2L_2+2M_1+M_2$. Note that \[L_1+M_1 = 1+3+5+\dots+(2\ell_1+2m_1-1) = (\ell_1+m_1)^2\] and \[L_2+M_2 = 1+3+5+\dots+(2\ell_2+2m_2-1) = (\ell_2+m_2)^2= (j+k-\ell_1-m_1)^2.\] So \[L_1+2L_2+2M_1+M_2=(\ell_1+m_1)^2+(j+k-\ell_1-m_1)^2 + L_2+M_1.\] The first two terms here are independent of the choices of individual terms (just depending on $\ell_1,m_1$), so we can just choose the individual terms to minimize $L_2$ and $M_1$ separately, that is, $\mathcal{L}_2 = \{1,3,\dots,(2j-2\ell_1-1)\}$ and $\mathcal{M}_1=\{1,3,\dots,2m_1-1\}$, giving $L_2=(j-\ell_1)^2$ and $M_1=m_1^2$. We are left with minimizing, for fixed $j,k$, \[L_1+2L_2+2M_1+M_2=(\ell_1+m_1)^2+(j+k-\ell_1-m_1)^2 +(j-\ell_1)^2 + m_1^2,\] subject to the constraints that $\ell_1,m_1\ge0$. 
Suppose $j\ge k$. Then the minimum for $\ell_1\ge0$ occurs when $m_1=0$, and we have (setting $m_1=0$ and differentiating with respect to $\ell_1$) $\ell_1=\frac23j+\frac13k$, which yields the minimum exponent of $\frac23(j^2+jk+k^2)$. This is the minimum for real $\ell_1$; taking into account the fact that the minimum must be an integer leads to the exponent $\lceil\frac23(j^2+jk+k^2)\rceil$. The argument for $j<k$ is symmetric. \end{document}
\begin{document} \title{Universal controlled-phase gate with cat-state qubits in circuit QED} \author{Yu Zhang$^{1}$, Xiong Zhao$^{1}$, Li Yu$^{1,2}$, Qi-Ping Su$^{1}$} \author{Chui-Ping Yang$^{1}$} \email{[email protected]} \address{$^1$Department of Physics, Hangzhou Normal University, Hangzhou, Zhejiang 310036, China} \address{$^2$CAS Key Laboratory of Quantum Information, University of Science and Technology of China, Hefei 230026, China} \date{\today} \begin{abstract} Cat-state qubits (qubits encoded with cat states) have recently drawn intensive attention due to their enhanced life times with quantum error correction. We here propose a method to implement a universal controlled-phase gate of two cat-state qubits, via two microwave resonators coupled to a superconducting transmon qutrit. During the gate operation, the qutrit remains in the ground state; thus decoherence from the qutrit is greatly suppressed. This proposal requires only two basic operations and neither classical pulse nor measurement is needed; therefore the gate realization is simple. Numerical simulations show that high-fidelity implementation of this gate is feasible with current circuit QED technology. The proposal is quite general and can be applied to implement the proposed gate with two microwave resonators or two optical cavities coupled to a single three-level natural or artificial atom. \end{abstract} \pacs{03.67.Bg, 42.50.Dv, 85.25.Cp, 76.30.Mi} \maketitle \date{\today} \begin{center} \textbf{I. INTRODUCTION} \end{center} Circuit quantum electrodynamics (QED), composed of superconducting (SC) qubits and microwave resonators or cavities, has developed fast in the past decade. The circuit QED is considered as one of the most feasible candidates for quantum information processing (QIP) [1-4]. Due to controllability of their level spacings, scalability of the circuits, and improvement of coherence times [5-12], SC qubits are of great importance in QIP. 
The strong coupling and ultrastrong coupling between a SC qubit and a microwave resonator have been demonstrated in experiments [13,14]. In addition, a coplanar waveguide microwave resonator with a (loaded) quality factor $ Q=10^{6}$ [15,16] and a three-dimensional microwave resonator with a (loaded) quality factor $Q\sim3.5 \times 10^{7}$ [17] have been reported in experiments. A microwave resonator or cavity with a high quality factor can act as a quantum data bus [18-20] and be used as a quantum memory [21,22], because it contains microwave photons whose life times are much longer than that of a SC qubit [23]. Recently, quantum state engineering and QIP with microwave fields or photons have attracted considerable interest. Many theoretical proposals have been presented for preparation of Fock states, squeezed states, coherent states, schr$\ddot{o}$dinger cat states, and an arbitrary superposition of Fock states of a single microwave resonator [24-27]. Also, a Fock state and a superposition of Fock states of a single microwave resonator has been created experimentally [21,28,29] For two microwave resonators, theoretical proposals have been proposed for generation of nonclassical microwave field in two resonators [30-33], construction of two-qubit controlled-phase gates with microwave photons in two resonators [34], and implementation of quantum state transfer between microwave photons in two resonators [35-37]. Experimentally, the creation of N-photon NOON states in two microwave resonators has been reported [38]. A complete quantum state transfer of a microwave photon qubit between two resonators can be experimentally realized, by combination of two previous experiments [39,40] which employed the transfer protocol proposed in Ref.~[36]. 
Moreover, schemes have been proposed for generation of multipartite entangled states of microwave photons in multiple resonators [41] and creation of entangled coherent states of microwave fields in many resonators or cavities [42]. The focus of this work is on QIP with cat-state qubits (qubits encoded with cat states). Cat-state qubits have drawn much attention due to their enhanced life time with quantum error correction (QEC). For instance, Ofek \textit{et al}. have made the lifetime of a cat-state qubit up to 320 $\mu$s with QEC [43]. Recently, there is an increasing interest in QIP with cat-state encoding qubits. Mirrahimi \textit{et al.} have presented approaches to realize a set of universal gates on a single cat-state qubit as well as an entangling gate for creating a Bell state of two cat-state qubits [44]. Nigg has proposed a method for a deterministic Hadamard gate on a single cat-state qubit [45]. Heeres \textit{et al}. have experimentally implemented a set of universal gate on a single cat-state qubit [46]. Yang \textit{et al}. have proposed a scheme for implementing a SWAP gate of two cat-state qubits [47]. Moreover, Wang \textit{et al.} have experimentally generated an entangled Bell state with two cat-state qubits [48]. However, after a deep search of literature, we found that how to realize a controlled-phase gate of two \textit{cat-state} qubits has not been investigated so far. As is well known, a two-qubit controlled phase gate is \textit{universal}, because two-qubit controlled phase gates, together with single-qubit gates, form the building blocks of quantum information processors. In this paper, we propose a method to realize a universal two-qubit controlled-phase gate with cat-state qubits, via two microwave resonators coupled to a SC transmon qutrit (a three-level artificial atom) (Fig.~1). During the gate operation, the qutrit stays in the ground state; thus decoherence from the qutrit is greatly suppressed. 
The gate implementation is simple because only two basic operations are needed and no classical pulse or measurement is required. Our numerical simulations show that high-fidelity implementation of this gate is feasible with current circuit QED technology. This paper is organized as follows. In Sec. II, we explicitly show how to realize a universal controlled-phase gate of two cat-state qubits. In Sec. III, we numerically calculate the fidelity and briefly discuss the experimental feasibility. We end up with a conclusion in Sec. IV. \begin{center} \textbf{II. CONTROLLED-PHASE GATE OF CAT-STATE QUBITS} \end{center} Consider a system consisting of two microwave resonators coupled to a transmon qutrit (Fig.~1). The three levels of the qutrit are labeled as $ |g\rangle $, $|e\rangle $ and $|f\rangle $, as shown in Fig.~2. It is worth noting that for an ideal transmon, the $|g\rangle $ $\leftrightarrow $ $ |f\rangle $ coupling is theoretically zero due to the selection rule [49]; however in practice, there exists a weak coupling between these two states [50]. Suppose that resonator $a$ is off-resonantly coupled to the $ |g\rangle \leftrightarrow |e\rangle $ transition of the qutrit with coupling constant $g$ while resonator $b$ is off-resonantly coupled to the $|e\rangle \leftrightarrow |f\rangle $ transition of the qutrit with coupling constant $ \mu$ (Fig.~2). In addition, assume that resonator $a$ is highly detuned (decoupled) from the $|e\rangle \leftrightarrow |f\rangle $ transition of the qutrit and resonator $b$ is highly detuned (decoupled) from the $|g\rangle \leftrightarrow |e\rangle $ transition of the qutrit (Fig.~3). Note that these conditions can be achieved by prior adjustment of the level spacings of the qutrit and/or the resonator frequency. 
Under these considerations, the Hamiltonian of the whole system, in the interaction picture and after making the rotating-wave approximation (RWA), can be written as (in units of $\hbar =1$) \begin{equation} H_{\mathrm{I,1}}=g(e^{i\delta _{a}t}\hat{a}\sigma _{eg}^{+}+h.c.)+\mu (e^{i\delta _{b}t}\hat{b}\sigma _{fe}^{+}+h.c.), \end{equation} where $\sigma _{eg}^{+}=|e\rangle \langle g|$, $\sigma _{fe}^{+}=|f\rangle \langle e|$, $\delta _{a}=\omega _{eg}-\omega _{a}<0$ and $\delta _{b}=\omega _{fe}-\omega _{b}>0.$ The detunigs $\left\vert \delta _{a}\right\vert $ and $\left\vert \delta _{b}\right\vert $ in Fig. 2 are given by $\left\vert \delta _{a}\right\vert =\omega _{a}-\omega _{eg}$ and $ \left\vert \delta _{b}\right\vert =\omega _{fe}-\omega _{b}$. Here, $\hat{a} ^{+}$ ($\hat{b}^{+}$) is the photon creation operator of resonator $a$ $(b)$ , $\omega _{fe}$ $(\omega _{eg})$ is the $|e\rangle \leftrightarrow |f\rangle (|g\rangle \leftrightarrow |e\rangle )$ transition frequency of the qutrit, while $\omega _{a}$ $(\omega _{b})$ is the frequency of resonator $a$ $(b)$. \begin{figure} \caption{(Color online) (a) Diagram of two microwave resonators $a$ and $b$ coupled to a transmon qutrit ($T_{q} \label{fig:1} \end{figure} \begin{figure} \caption{(Color online) Resonator $a$ is far-off resonant with the $ |g\rangle\leftrightarrow|e\rangle$ transition of the qutrit with coupling strength $g$ and detuning $\left\vert \protect\delta _{a} \label{fig:2} \end{figure} \begin{figure} \caption{(Color online) Illustration of resonator $a$ ($b$) is highly detuned (decoupled) from the $|e\rangle\leftrightarrow|f\rangle$ $ (|g\rangle\leftrightarrow|e\rangle)$ transition of the qutrit. 
The high detuning (or decoupling) can be made by prior adjustment of the level spacings of the transmon qutrit or/and the frequency of resonator $a$ ($b$), such that $\left\vert \protect\delta _{a} \label{fig:3} \end{figure} Under the large-detuning conditions $\left\vert \delta _{a}\right\vert \gg g$ and $\left\vert \delta _{b}\right\vert \gg \mu $, the Hamiltonian (1) becomes [46] \begin{align} H_{\mathrm{e}}=& -\lambda _{a}(\hat{a}^{+}\hat{a}|g\rangle \langle g|-\hat{a} \hat{a}^{+}|e\rangle \langle e|) \notag \\ & -\lambda _{b}(\hat{b}^{+}\hat{b}|e\rangle \langle e|-\hat{b}\hat{b} ^{+}|f\rangle \langle f|) \notag \\ & +\lambda (e^{-i\bigtriangleup t}\hat{a}^{+}\hat{b}^{+}\sigma _{fg}^{-}+h.c.), \end{align} where $\lambda _{a}=g^{2}/\delta _{a}$, $\lambda _{b}=\mu ^{2}/\delta _{b}$, $\lambda =\left( g\mu /2\right) (1/|\delta _{a}|+1/|\delta _{b}|)$, $ \bigtriangleup =\left\vert \delta _{b}\right\vert -|\delta _{a}|$, and $ \sigma _{fg}^{-}=|g\rangle \langle f|$. The first four terms of Eq.(2) describe the photon-number dependent stark shifts of the energy levels $ |g\rangle $, $|e\rangle $ and $|f\rangle $, while the last two terms describe the $|f\rangle $ $\leftrightarrow $ $|g\rangle $ coupling caused due to the two-resonator cooperation. For $|\bigtriangleup |\gg \{\lambda _{a},\lambda _{b},\lambda \}$, the effective Hamiltonian $H_{\mathrm{e}}$ changes to [51] \begin{align} H_{\mathrm{e}}=& -\lambda _{a}(\hat{a}^{+}\hat{a}|g\rangle \langle g|-\hat{a} \hat{a}^{+}|e\rangle \langle e|) \notag \\ & -\lambda _{b}(\hat{b}^{+}\hat{b}|e\rangle \langle e|-\hat{b}\hat{b} ^{+}|f\rangle \langle f|) \notag \\ & +\chi (\hat{a}\hat{a}^{+}\hat{b}\hat{b}^{+}|f\rangle \langle f|-\hat{a}^{+} \hat{a}\hat{b}^{+}\hat{b}|g\rangle \langle g|), \end{align} where $\chi =\lambda ^{2}/\Delta $. From Eq.~(3) one can see that each term is associated with the level $|g\rangle $, $|e\rangle $, or $|f\rangle $. 
When the levels $|e\rangle $ and $|f\rangle $ are not occupied, they will remain unpopulated under the Hamiltonian (3). In this case, the effective Hamiltonian (3) reduces to \begin{equation} H_{\mathrm{e}}=H_{0}+H_{\mathrm{int}}, \end{equation} with \begin{eqnarray} H_{0} &=&-\lambda _{a}\hat{a}^{+}\hat{a}|g\rangle \langle g|=-\lambda _{a} \hat{n}_{a}|g\rangle \langle g|, \notag \\ H_{\mathrm{int}} &=&-\chi \hat{a}^{+}\hat{a}\hat{b}^{+}\hat{b}|g\rangle \langle g|=-\chi \hat{n}_{a}\hat{n}_{b}|g\rangle \langle g|, \end{eqnarray} where $\hat{n}_{a}=\hat{a}^{+}\hat{a}$ $(\hat{n}_{b}=\hat{b}^{+}\hat{b})$ is the photon number operator for resonator $a$ $(b)$. Because of $[H_{0},H_{ \mathrm{int}}]=0$, the unitary operator $U_{1}=e^{-iH_{e}t}$ can be written as \begin{equation} U_{1}=e^{-iH_{0}t}e^{-iH_{\mathrm{int}}t}=\exp \left( i\lambda _{a}\hat{n} _{a}|g\rangle \langle g|t\right) \exp \left( i\chi \hat{n}_{a}\hat{n} _{b}|g\rangle \langle g|t\right) . \end{equation} The two logical states $|0\rangle $ and $|1\rangle $ of a cat-state qubit are encoded with cat states of a resonator, i.e., $|0\rangle =M_{\alpha }^{+}(|\alpha \rangle +|-\alpha \rangle )$ and $|1\rangle =M_{\alpha }^{-}(|\alpha \rangle -|-\alpha \rangle )$, respectively. Here, $M_{\alpha }^{\pm }=1/\sqrt{2(1\pm e^{-2|\alpha |^{2}})}$ are normalization coefficients. In terms of $|\alpha \rangle =e^{-|\alpha |^{2}/2}\sum\limits_{n=0}^{\infty }\frac{\alpha ^{n}}{\sqrt{n!}}|n\rangle $ and $|-\alpha \rangle =e^{-|\alpha |^{2}/2}\sum\limits_{n=0}^{\infty }\frac{ (-\alpha )^{n}}{\sqrt{n!}}|n\rangle $, we have \begin{equation} |0\rangle =\sum\limits_{m=0}^{\infty }C_{2m}|2m\rangle ,\ \ |1\rangle =\sum\limits_{n=0}^{\infty }C_{2n+1}|2n+1\rangle , \end{equation} where $C_{2m}=2M_{\alpha }^{+}e^{-|\alpha |^{2}/2}\alpha ^{2m}/\sqrt{(2m)!}$ and $C_{2n+1}=2M_{\alpha }^{-}e^{-|\alpha |^{2}/2}\alpha ^{2n+1}/\sqrt{ (2n+1)!}$. 
From Eq.~(7), one can see that the state $|0\rangle $ is orthogonal to the state $|1\rangle $, which is independent of $\alpha $ (except for $\alpha =0$). The four logical states of two cat-state qubits are $|00\rangle _{ab}$, $ |01\rangle _{ab}$, $|10\rangle _{ab}$ and $|11\rangle _{ab}$, where the left 0 and 1 are encoded with cat states of resonator $a$ while the right 0 and 1 are encoded with cat states of resonator $b$. Suppose that the qutrit is initially in the ground state $|g\rangle $. For an interaction time $t=t_{1}$ , the unitary operation $U_{1}$ leads to the following state transformations (see Appendix for details) \begin{align} U_{1}|00\rangle _{ab}|g\rangle & =\sum\limits_{m,m^{\prime }=0}^{\infty }F_{1}(m,m^{\prime },t_{1})C_{2m}C_{2m^{\prime }}|2m\rangle _{a}|2m^{\prime }\rangle _{b}|g\rangle , \notag \\ U_{1}|01\rangle _{ab}|g\rangle & =\sum\limits_{m,n^{\prime }=0}^{\infty }F_{2}(m,n^{\prime },t_{1})C_{2m}C_{2n^{\prime }+1}|2m\rangle _{a}|2n^{\prime }+1\rangle _{b}|g\rangle , \notag \\ U_{1}|10\rangle _{ab}|g\rangle & =\sum\limits_{n,m^{\prime }=0}^{\infty }F_{3}(n,m^{\prime },t_{1})C_{2n+1}C_{2m^{\prime }}|2n+1\rangle _{a}|2m^{\prime }\rangle _{b}|g\rangle , \notag \\ U_{1}|11\rangle _{ab}|g\rangle & =\sum\limits_{n,n^{\prime }=0}^{\infty }F_{4}(n,n^{\prime },t_{1})C_{2n+1}C_{2n^{\prime }+1}|2n+1\rangle _{a}|2n^{\prime }+1\rangle _{b}|g\rangle , \end{align} with \begin{eqnarray} F_{1}(m,m^{\prime },t_{1}) &=&\exp (i\lambda _{a}2mt_{1})\exp \left[ i(2m)(2m^{\prime })\chi t_{1}\right] , \notag \\ F_{2}(m,n^{\prime },t_{1}) &=&\exp (i\lambda _{a}2mt_{1})\exp [i(2m)(2n^{\prime }+1)\chi t_{1}], \notag \\ F_{3}(n,m^{\prime },t_{1}) &=&\exp [i\lambda _{a}(2n+1)t_{1}]\exp [i(2n+1)(2m^{\prime })\chi t_{1}], \notag \\ F_{4}(n,n^{\prime },t_{1}) &=&\exp [i\lambda _{a}(2n+1)t_{1}]\exp [i(2n+1)(2n^{\prime }+1)\chi t_{1}]. 
\end{eqnarray} We now adjust the frequency of resonator $a$ such that resonator $a$ is far-off resonant with the $|g\rangle \leftrightarrow |e\rangle $ transition of the qutrit with coupling strength $\widetilde{g}$ and detuning $| \widetilde{\delta }_{a}|$ (Fig. 4), while it is highly detuned (decoupled) from the $|e\rangle \leftrightarrow |f\rangle $ transition (Fig. 5). Here, $| \widetilde{\delta }_{a}|=\omega _{eg}-\tilde{\omega}_{a}$ (Fig. 4), with $ \widetilde{\omega }_{a}$ being the adjusted frequency of resonator $a$. In addition, adjust the frequency of resonator $b$ such that resonator $b$ is decoupled from the qutrit. Note that the frequency of a microwave resonator can be rapidly adjusted with a few nanoseconds [52,53]. Under these considerations, the Hamiltonian in the interaction picture and after making the RWA is given by \begin{figure} \caption{(Color online) Resonator $a$ is far-off resonant with the $ |g\rangle\leftrightarrow|e\rangle$ transition of the qutrit with coupling strength $\widetilde{g} \label{fig:4} \end{figure} \begin{figure} \caption{(Color online) Illustration of resonator $a$ is highly detuned (decoupled) from the $|e\rangle\leftrightarrow|f\rangle$ transition of the qutrit. The decoupling can be made as long as the condition $\left\vert \widetilde{\protect\delta } \label{fig:5} \end{figure} \begin{equation} H_{\mathrm{I,2}}=\tilde{g}(e^{i\widetilde{\delta }_{a}t}\hat{a}\sigma _{eg}^{+}+h.c.), \end{equation} where $\tilde{\delta}_{a}=|\widetilde{\delta _{a}}|=\omega _{eg}-\tilde{ \omega}_{a}>0.$ For $\tilde{\delta}_{a}\gg \tilde{g}$ and the level $|e\rangle $ being not occupied, we have \begin{equation} \tilde{H}_{\mathrm{e}}=-\widetilde{\lambda }_{a}\hat{n}_{a}|g\rangle \langle g|, \end{equation} with $\widetilde{\lambda }_{a}=\tilde{g}^{2}/\tilde{\delta}_{a}$. 
Then, performing a unitary transformation $U_{2}=\exp (i\widetilde{\lambda }_{a} \hat{n}_{a}|g\rangle \langle g|t_{2})$ for an interaction time $t=t_{2}$, we obtain from Eqs.~(8) and (9) \begin{align} U_{2}U_{1}|00\rangle _{ab}|g\rangle & =\sum\limits_{m,m^{\prime }=0}^{\infty }\widetilde{F}_{1}(m,m^{\prime },t_{1})C_{2m}C_{2m^{\prime }}|2m\rangle _{a}|2m^{\prime }\rangle _{b}|g\rangle , \notag \\ U_{2}U_{1}|01\rangle _{ab}|g\rangle & =\sum\limits_{m,n^{\prime }=0}^{\infty }\widetilde{F}_{2}(m,n^{\prime },t_{1})C_{2m}C_{2n^{\prime }+1}|2m\rangle _{a}|2n^{\prime }+1\rangle _{b}|g\rangle , \notag \\ U_{2}U_{1}|10\rangle _{ab}|g\rangle & =\sum\limits_{n,m^{\prime }=0}^{\infty }\widetilde{F}_{3}(n,m^{\prime },t_{1})C_{2n+1}C_{2m^{\prime }}|2n+1\rangle _{a}|2m^{\prime }\rangle _{b}|g\rangle , \notag \\ U_{2}U_{1}|11\rangle _{ab}|g\rangle & =\sum\limits_{n,n^{\prime }=0}^{\infty }\widetilde{F}_{4}(n,n^{\prime },t_{1})C_{2n+1}C_{2n^{\prime }+1}|2n+1\rangle _{a}|2n^{\prime }+1\rangle _{b}|g\rangle , \end{align} with \begin{eqnarray} \widetilde{F}_{1}(m,m^{\prime },t_{1}) &=&\exp [i2m(\lambda _{a}t_{1}+ \widetilde{\lambda }_{a}t_{2})]\exp [i\left( 2m\right) \left( 2m^{\prime }\right) \chi t_{1}], \notag \\ \widetilde{F}_{2}(m,n^{\prime },t_{1}) &=&\exp [i2m(\lambda _{a}t_{1}+ \widetilde{\lambda }_{a}t_{2})]\exp [i(2m)(2n^{\prime }+1)\chi t_{1}], \notag \\ \widetilde{F}_{3}(n,m^{\prime },t_{1}) &=&\exp [i(2n+1)(\lambda _{a}t_{1}+ \widetilde{\lambda }_{a}t_{2})]\exp [i(2n+1)(2m^{\prime })\chi t_{1}], \notag \\ \widetilde{F}_{4}(n,n^{\prime },t_{1}) &=&\exp [i(2n+1)(\lambda _{a}t_{1}+ \widetilde{\lambda }_{a}t_{2})]\exp [i(2n+1)(2n^{\prime }+1)\chi t_{1}]. \end{eqnarray} Note that the index factors $\left( 2m\right) \left( 2m^{\prime }\right) $, $ (2m)(2n^{\prime }+1)$, and $(2n+1)(2m^{\prime })$ of Eq.~(13) are even numbers, while the index factor $(2n+1)(2n^{\prime }+1)$ is an odd number. 
By setting $ \lambda _{a}=-\widetilde{\lambda }_{a}$ (i.e., $g^{2}/\delta _{a}=-\tilde{g} ^{2}/\tilde{\delta}_{a}$) and $t_{2}=t_{1}=\pi /\left\vert \chi \right\vert $ , we have $\widetilde{F}_{1}(m,m^{\prime },t_{1})=\widetilde{F} _{2}(m,n^{\prime },t_{1})=\widetilde{F}_{3}(n,m^{\prime },t_{1})=1$ but $ \widetilde{F}_{4}(n,n^{\prime },t_{1})=-1.$ Hence, the states (12) become \begin{align} U_{2}U_{1}|00\rangle _{ab}|g\rangle & =|00\rangle _{ab}|g\rangle , \notag \\ U_{2}U_{1}|01\rangle _{ab}|g\rangle & =|01\rangle _{ab}|g\rangle , \notag \\ U_{2}U_{1}|10\rangle _{ab}|g\rangle & =|10\rangle _{ab}|g\rangle , \notag \\ U_{2}U_{1}|11\rangle _{ab}|g\rangle & =-|11\rangle _{ab}|g\rangle , \end{align} which shows that the above two basic operations (i.e., $U_{1}$ and $U_{2}$) have completed a universal controlled-phase gate of two cat-state qubits, described by $|00\rangle _{ab}\rightarrow |00\rangle _{ab}$, $|01\rangle _{ab}\rightarrow |01\rangle _{ab}$, $|10\rangle _{ab}\rightarrow |10\rangle _{ab}$, and $|11\rangle _{ab}\rightarrow -|11\rangle _{ab}$. After this gate, an arbitrary pure state of two cat-state qubits, given by $|\phi \rangle _{ab}=\alpha |00\rangle _{ab}+\beta |01\rangle _{ab}+\gamma |10\rangle _{ab}+\zeta |11\rangle _{ab}$, is transformed as follows \begin{equation} |\phi \rangle _{ab}\rightarrow \alpha |00\rangle _{ab}+\beta |01\rangle _{ab}+\gamma |10\rangle _{ab}-\zeta |11\rangle _{ab}. \end{equation} From description given above, one can see that the qutrit remains in the ground state during the entire operation. Hence, decoherence from the qutrit is greatly suppressed. As shown above, the Hamiltonian (11) for the second unitary operation ($ U_{2} $) was constructed by tuning cavity frequency. However, we point out that tuning cavity frequency is unnecessary. Alternatively, one can obtain the Hamiltonian (11) by adjusting the level spacings of the qutrit to meet the conditions required for constructing this Hamiltonian (11). 
Note that for a SC qutrit, the level spacings can be rapidly (within 1-3 ns) adjusted by varying external control parameters (e.g., magnetic flux applied to the superconducting loop of a SC phase, transmon [54], Xmon [10], or flux qubit/qutrit [55]). We should mention that the Hamiltonian (4) was previously proposed to realize a controlled-phase gate of two \textit{discrete-variable} qubits [56], for which the two logic states of a qubit are encoded with \textit{the vacuum state and a single-photon state} of a cavity mode. In stark contrast, the present work aims at implementing a controlled-phase gate of two \textit{ continuous-variable} qubits, for which the two logic states of a qubit are encoded with \textit{cat states} of a resonator or cavity. \begin{center} \textbf{III. POSSIBLE EXPERIMENTAL IMPLEMENTATION} \end{center} In above, we have explicitly shown how to realize a controlled-phase gate of two cat-state qubits. We now give a brief discussion on the experimental feasibility by considering a setup of a SC transmon qutrit coupled to two 3D microwave resonators or cavities. From the description given above, one can see that the gate implementation involves the following two basic operations: (i) The first operation is described by the Hamiltonian (1). In reality, the inter-resonator crosstalk between the two resonators is inevitable [57], and there exist the unwanted coupling of resonator $a$ with the $|e\rangle \leftrightarrow |f\rangle $ transition and the unwanted coupling of resonator $b$ with the $|g\rangle \leftrightarrow |e\rangle $ transition of the qutrit. When these factors are taken into account, the Hamiltonian (1) becomes \begin{eqnarray} \widetilde{H}_{\mathrm{I,}1} &=&g(e^{i\delta _{a}t}\hat{a}\sigma _{eg}^{+}+h.c.)+\mu (e^{i\delta _{b}t}\hat{b}\sigma _{fe}^{+}+h.c.) \notag \\ &&+g^{\prime }(e^{i\delta _{a}^{\prime }t}\hat{a}\sigma _{fe}^{+}+h.c.)+\mu ^{\prime }(e^{i\delta _{b}^{\prime }t}\hat{b}\sigma _{eg}^{+}+h.c.) 
\notag \\ &&+g_{ab}(e^{-i\bigtriangleup _{ab}t}\hat{a}\hat{b}^{+}+h.c.), \end{eqnarray} where the first bracket term represents the interaction of resonator $a$ with the $|g\rangle \leftrightarrow |e\rangle $ transition, the second bracket term represents the interaction of resonator $b$ with the $|e\rangle \leftrightarrow |f\rangle $ transition, the third bracket term represents the unwanted coupling between resonator $a$ and the $|e\rangle \leftrightarrow |f\rangle $ transition with coupling strength $g^{\prime }$ and detuning $\delta _{a}^{\prime }=\omega _{fe}-\omega _{a}<0$ (Fig. 3), and the fourth bracket term represents the unwanted coupling between resonator $b$ and the $|g\rangle \leftrightarrow |e\rangle $ transition of the qutrit with coupling strength $\mu ^{\prime }$ and detuning $\delta _{b}^{\prime }=\omega _{eg}-\omega _{b}>0$ (Fig. 3). In addition, the last bracket term of Eq. (16) represents the inter-resonator crosstalk, where $ g_{ab}$ is the coupling strength between the two resonators while $ \bigtriangleup _{ab}=\omega _{a}-\omega _{b}$ is the difference between the two-resonator frequencies. (ii) The second operation is described by the Hamiltonian (10). In practice, the inter-resonator crosstalk between the two resonators and the unwanted coupling of resonator $a$ with the $|e\rangle \leftrightarrow |f\rangle $ transition should be considered. Note that for the second operation, the frequency of resonator $b$ was far detuned such that resonator $b$ is decoupled from the qutrit. When these factors are taken into account, the Hamiltonian (10) becomes \begin{eqnarray} \widetilde{H}_{\mathrm{I,}2} &=&\tilde{g}(e^{i\widetilde{\delta }_{a}t}\hat{a }\sigma _{eg}^{+}+h.c.)+\tilde{g}^{\prime }(e^{i\widetilde{\delta } _{a}^{\prime }t}\hat{a}\sigma _{fe}^{+}+h.c.) 
\notag \\ &&+\widetilde{g}_{ab}(e^{-i\widetilde{\bigtriangleup }_{ab}t}\hat{a}\hat{b} ^{+}+h.c.), \end{eqnarray} where the first bracket term represents the interaction of resonator $a$ with the $|g\rangle \leftrightarrow |e\rangle $ transition, while the second bracket term represents the unwanted coupling between resonator $a$ and the $ |e\rangle \leftrightarrow |f\rangle $ transition with coupling strength $ \widetilde{g}^{\prime }$ and detuning $\widetilde{\delta }_{a}^{\prime }=\omega _{fe}-\widetilde{\omega }_{a}<0$ (Fig. 5). The last bracket term of Eq. (17) represents the inter-resonator crosstalk, where $\widetilde{g}_{ab}$ is the coupling strength between the two resonators while $\widetilde{ \bigtriangleup }_{ab}=\widetilde{\omega }_{a}-\widetilde{\omega }_{b}$ is the difference between the two-resonator frequencies. The dynamics of the lossy system is determined by \begin{align} \frac{d\rho }{dt}=& -i[\widetilde{H}_{\mathrm{I,}i},\rho ]+\kappa _{a} \mathcal{L}[a]+\kappa _{b}\mathcal{L}[b] \notag \\ & +\gamma _{eg}\mathcal{L}[\sigma _{eg}^{-}]+\gamma _{fe}\mathcal{L}[\sigma _{fe}^{-}]+\gamma _{fg}\mathcal{L}[\sigma _{fg}^{-}] \notag \\ & +\sum\limits_{j=e,f}\{\gamma _{\varphi j}(\sigma _{jj}\rho \sigma _{jj}-\sigma _{jj}\rho /2-\rho \sigma _{jj}/2)\}, \end{align} where $\widetilde{H}_{\mathrm{I,}i}$ is the full Hamiltonian given above ($ i=1,2$), $\sigma _{eg}^{-}=|g\rangle \langle e|$, $\sigma _{fe}^{-}=|e\rangle \langle f|$, $\sigma _{fg}^{-}=|g\rangle \langle f|$, $ \sigma _{jj}=|j\rangle \langle j|(j=e,f)$; and $\mathcal{L}[\xi ]=\xi \rho \xi ^{\dag }-\xi ^{\dag }\xi \rho /2-\rho \xi ^{\dag }\xi /2$, with $\xi =a,b,\sigma _{eg}^{-},\sigma _{fe}^{-},\sigma _{fg}^{-}$. Here, $\kappa _{a}(\kappa _{b})$ is the photon decay rate of resonator $a$ $(b)$. 
In addition, $\gamma _{eg}$ is the energy relaxation rate for the level $ |e\rangle $ of the qutrit, $\gamma _{fe}(\gamma _{fg})$ is the energy relaxation rate of the level $|f\rangle $ of the qutrit for the decay path $ |f\rangle \longrightarrow |e\rangle (|g\rangle )$, and $\gamma _{\varphi j}$ is the dephasing rate of the level $|j\rangle (j=e,f)$ of the qutrit. \newline The fidelity of the operations is given by \begin{equation} \mathcal{F}=\sqrt{\langle \psi _{\mathrm{id}}|\rho |\psi _{\mathrm{id} }\rangle }, \end{equation} where $|\psi _{\mathrm{id}}\rangle $ is the output state of an ideal system without dissipation, dephasing and crosstalk \textit{etc}.; while $\rho $ is the final practical density operator of the system when the operation is performed in a realistic situation. For simplicity, choose $\alpha =\cos \theta \cos \varphi ,$ $\beta =\cos \theta \sin \varphi ,$ $\gamma =\sin \theta \cos \varphi ,$ and $\zeta =\sin \theta \sin \varphi ,$ which satisfy the normalization condition $\left\vert \alpha \right\vert ^{2}+\left\vert \beta \right\vert ^{2}+\left\vert \gamma \right\vert ^{2}+\left\vert \zeta \right\vert ^{2}=1.$ The initial state of the qutrit-resonator system is thus written as $|\psi _{\mathrm{in}}\rangle =\left( \cos \theta \cos \varphi |00\rangle _{ab}+\cos \theta \sin \varphi |01\rangle _{ab}+\sin \theta \cos \varphi |10\rangle _{ab}+\sin \theta \sin \varphi |11\rangle _{ab}\right) |g\rangle $. The output state is $|\psi _{\mathrm{id}}\rangle =\left( \cos \theta \cos \varphi |00\rangle _{ab}+\cos \theta \sin \varphi |01\rangle _{ab}+\sin \theta \cos \varphi |10\rangle _{ab}-\sin \theta \sin \varphi |11\rangle _{ab}\right) |g\rangle $. In the following, we will consider the cases: (i) $\theta =\varphi =\pi /4;$ (ii) $\theta =\varphi =\pi /3;$ (iii) $\theta =\pi /4,\varphi =\pi /3;$ and (iv) $\theta =\pi /3,\varphi =\pi /4;$ which correspond to four initial states. 
\begin{figure} \caption{(Color online) Fidelity versus $\protect\kappa^{-1} \label{fig:6} \end{figure} For a transmon qutrit, the typical transition frequency between two neighboring levels can be varied from 3 to 10 GHz. In addition, the anharmonicity of the level spacings for a transmon qutrit can be made to be within $100\sim 500$ MHz [14]. As an example, we thus consider $\omega _{eg}/2\pi =6.5$ GHz and $\omega _{fe}/2\pi =6$ GHz. By choosing $\delta _{a}/2\pi =-1.0$ GHz and $\delta _{b}/2\pi =1.1$ GHz, we have $\omega _{a}/2\pi =7.5$ GHz and $\omega _{b}/2\pi =4.9$ GHz, for which we have $ \triangle _{ab}/2\pi =2.6$ GHz. We set $\widetilde{\delta }_{a}/2\pi =1.0$ GHz, for which we have $\widetilde{\omega }_{a}/2\pi =5.5$ GHz. By choosing $ \widetilde{\omega }_{b}/2\pi =3.5$ GHz, we have $\widetilde{\triangle } _{ab}/2\pi =2$ GHz. In addition, we have $\delta _{a}^{\prime }/2\pi =-1.5$ GHz, $\delta _{b}^{\prime }/2\pi =1.6$ GHz, and $\widetilde{\delta } _{a}^{\prime }/2\pi =-0.5$ GHz. Other parameters used in the numerical simulation are: (i) $\gamma _{eg}^{-1}=60$ $\mu $s, $\gamma _{fg}^{-1}=150$ $ \mu $s [58], $\gamma _{fe}^{-1}=30$ $\mu $s, $\gamma _{\phi e}^{-1}=\gamma _{\phi f}^{-1}=20$ $\mu $s, (ii) $g/2\pi =\mu /2\pi =95$ MHz (available in experiments [14]), and (iii) $\alpha =0.5$. Here, we consider a rather conservative case for decoherence time of transmon qutrits because energy relaxation time with a range from 65 $\mu $s to 0.1 ms and dephasing time from 25 $\mu $s to 70 $\mu $s have been experimentally reported for a 3D superconducting transmon device [7,11,48]. The value of $\widetilde{g}$ is determined according to $g^{2}/\delta _{a}=-\tilde{g}^{2}/\tilde{\delta}_{a}$ , given $g$, $\delta _{a}$, and $\widetilde{\delta }_{a}$. 
For a transmon qutrit [49], one has $g^{\prime }\sim \sqrt{2}g,$ $\mu ^{\prime }\sim \mu / \sqrt{2},$ and $\widetilde{g}^{\prime }\sim \sqrt{2}\widetilde{g}.$ We set $ g_{ab}=0.01g$, which can be readily achieved in experiments [33]. For simplicity, assume $\kappa _{a}=\kappa _{b}=\kappa .$ By solving the master equation (18), we numerically calculate the fidelity versus $\kappa ^{-1}$, as shown in Fig.~6. Fig. 6(a) is plotted for $\theta =\varphi =\pi /4.$ Fig. 6(b) is for $\theta =\varphi =\pi /3.$ Fig. 6(c) is for $\theta =\pi /4,\varphi =\pi /3.$ Fig. 6(d) is for $\theta =\pi /3,\varphi =\pi /4$. The red curves in Fig. 6 are drawn by numerical simulations, which are based on the full Hamiltonians $\widetilde{H}_{\mathrm{I,}1}$ in Eq. (16)\ and $ \widetilde{H}_{\mathrm{I,}2}$ in Eq. (17) and take decoherence and the inter-resonator crosstalk into account. The red curves illustrate that when $\kappa ^{-1}\geq 300$ $\mu $s, the fidelity exceeds: (i) 0.9918 for $\theta =\varphi =\pi /4;$ (ii) 0.9854 for $\theta =\varphi =\pi /3;$ (iii) 0.9910 for $\theta =\pi /4,\varphi =\pi /3;$ and (iv) 0.9868 for $\theta =\pi /3,\varphi =\pi /4.$ These results imply that the fidelity depends on the choice of the initial state of the two resonators and a high fidelity can be obtained when the gate is performed in a realistic situation. To see how good the approximations are, we have calculated the fidelity based on the effective Hamiltonians given in Eq. (4) and Eq. (11) and by considering decoherence and the inter-resonator crosstalk (see the blue curves in Fig. 6). From the red curves and the blue curves depicted in Fig. 6, one can see that compared to the case of the gate being performed based on the effective Hamiltonians, the fidelity for the gate performed in a realistic situation is slightly decreased by $0.9\%-1.5\%.$ This implies that the approximations made for the effective Hamiltonians are reasonable. 
Lifetime $\sim 1$ ms of microwave photons has been experimentally demonstrated in a coaxial resonator [17,48]. For $\kappa ^{-1}=300$ $\mu $s, we have $Q_{a}=1.4\times 10^{7}$ for $\omega _{a}/2\pi =7.5$ GHz, $ \widetilde{Q}_{a}=1.0\times 10^{7}$ for $\widetilde{\omega }_{a}/2\pi =5.5$ GHz, $Q_{b}=9.2\times 10^{6}$ for $\omega _{b}/2\pi =4.9$ GHz, and $ \widetilde{Q}_{b}=6.6\times 10^{6}$ for $\widetilde{\omega }_{b}/2\pi =3.5$ GHz. Note that a high quality factor $Q=3.5\times 10^{7}$ of a 3D superconducting resonator has been experimentally demonstrated [17]. The analysis here implies that the high-fidelity implementation of the proposed gate is feasible within the current circuit QED technology. \begin{center} \textbf{IV. CONCLUSIONS} \end{center} We have proposed a method to realize a universal controlled-phase gate of two cat-state qubits, via two microwave resonators coupled to a superconducting transmon qutrit. This method can be extended to a wide range of physical systems such as two microwave or optical cavities coupled to a single three-level natural or artificial atom. As shown above, this proposal has these features. During the gate operation, the qutrit remains in the ground state; thus decoherence from the qutrit is greatly suppressed. Because only two basic operations are needed and neither classical pulse nor measurement is required, the gate realization is simple. Our numerical simulations show that high-fidelity implementation of the proposed gate is feasible with current circuit QED technology. To the best of our knowledge, this work is the first to demonstrate the implementation of a controlled-phase gate with cat-state qubits based on cavity- or circuit-QED. We hope that this work will stimulate experimental activities in the near future. \begin{center} \textbf{ACKNOWLEDGMENTS} \end{center} This work was supported by Ministry of Science and Technology of China (No. 
2016YFA0301802); National Natural Science Foundation of China (11504075, 11074062, 11247008, 11374083); Zhejiang Natural Science Foundation (LZ13A040002); Hangzhou-City Quantum Information and Quantum Optics Innovation Research Team; The open project funding from CAS Key Laboratory of Quantum Information, University of Science and Technology of China (project number KQI201710). \begin{center} \textbf{APPENDIX } \end{center} Under the unitary operation $U_{1}$ and for an interaction time $t=t_{1}$, the state transformations for the four logical states $|00\rangle _{ab},|01\rangle _{ab},|10\rangle _{ab}$ and $|11\rangle _{ab}$ of the two cat-state qubits are listed below in details. \begin{align} & U_{1}|00\rangle _{ab}|g\rangle \notag \\ & =\exp (i\lambda _{a}\hat{n}_{a}|g\rangle \langle g|t_{1})\exp (i\chi \hat{n }_{a}\hat{n}_{b}|g\rangle \langle g|t_{1})\sum\limits_{m=0}^{\infty }C_{2m}|2m\rangle _{a}\sum\limits_{m^{\prime }=0}^{\infty }C_{2m^{\prime }}|2m^{\prime }\rangle _{b}|g\rangle \notag \\ & =\exp (i\lambda _{a}\hat{n}_{a}|g\rangle \langle g|t_{1})\exp (i\chi \hat{n }_{a}\hat{n}_{b}|g\rangle \langle g|t_{1})\sum\limits_{m=0}^{\infty }\sum\limits_{m^{\prime }=0}^{\infty }C_{2m}C_{2m^{\prime }}|2m\rangle _{a}|2m^{\prime }\rangle _{b}|g\rangle \notag \\ & =\exp (i\lambda _{a}\hat{n}_{a}t_{1})\exp (i\chi \hat{n}_{a}\hat{n} _{b}t_{1})\sum\limits_{m=0}^{\infty }\sum\limits_{m^{\prime }=0}^{\infty }C_{2m}C_{2m^{\prime }}|2m\rangle _{a}|2m^{\prime }\rangle _{b}|g\rangle \notag \\ & =\sum\limits_{m=0}^{\infty }\sum\limits_{m^{\prime }=0}^{\infty }C_{2m}C_{2m^{\prime }}\exp (i\lambda _{a}\hat{n}_{a}t_{1})\exp (i\chi \hat{n }_{a}\hat{n}_{b}t_{1})|2m\rangle _{a}|2m^{\prime }\rangle _{b}|g\rangle \notag \\ & =\sum\limits_{m,m^{\prime }=0}^{\infty }F_{1}\left( m,m^{\prime },t_{1}\right) C_{2m}C_{2m^{\prime }}|2m\rangle _{a}|2m^{\prime }\rangle _{b}|g\rangle , \end{align} \begin{align} & U_{1}|01\rangle _{ab}|g\rangle \notag \\ & =\exp \left( i\lambda 
_{a}\hat{n}_{a}|g\rangle \langle g|t_{1}\right) \exp (i\chi \hat{n}_{a}\hat{n}_{b}|g\rangle \langle g|t_{1})\sum\limits_{m=0}^{\infty }C_{2m}|2m\rangle _{a}\sum\limits_{n^{\prime }=0}^{\infty }C_{2n^{\prime }+1}|2n^{\prime }+1\rangle _{b}|g\rangle \notag \\ & =\exp (i\lambda _{a}\hat{n}_{a}|g\rangle \langle g|t_{1})\exp (i\chi \hat{n }_{a}\hat{n}_{b}|g\rangle \langle g|t_{1})\sum\limits_{m=0}^{\infty }\sum\limits_{n^{\prime }=0}^{\infty }C_{2m}C_{2n^{\prime }+1}|2m\rangle _{a}|2n^{\prime }+1\rangle _{b}|g\rangle \notag \\ & =\exp (i\lambda _{a}\hat{n}_{a}t_{1})\exp (i\chi \hat{n}_{a}\hat{n} _{b}t_{1})\sum\limits_{m=0}^{\infty }\sum\limits_{n^{\prime }=0}^{\infty }C_{2m}C_{2n^{\prime }+1}|2m\rangle _{a}|2n^{\prime }+1\rangle _{b}|g\rangle \notag \\ & =\sum\limits_{m=0}^{\infty }\sum\limits_{n^{\prime }=0}^{\infty }C_{2m}C_{2n^{\prime }+1}\exp (i\lambda _{a}\hat{n}_{a}t_{1})\exp (i\chi \hat{n}_{a}\hat{n}_{b}t_{1})|2m\rangle _{a}|2n^{\prime }+1\rangle _{b}|g\rangle \notag \\ & =\sum\limits_{m,n^{\prime }=0}^{\infty }F_{2}\left( m,n^{\prime },t_{1}\right) C_{2m}C_{2n^{\prime }+1}|2m\rangle _{a}|2n^{\prime }+1\rangle _{b}|g\rangle , \end{align} with \begin{eqnarray} F_{1}\left( m,m^{\prime },t_{1}\right) &=&\exp (i\lambda _{a}2mt_{1})\exp [i(2m)(2m^{\prime })\chi t_{1}], \notag \\ F_{2}\left( m,n^{\prime },t_{1}\right) &=&\exp (i\lambda _{a}2mt_{1})\exp [i(2m)(2n^{\prime }+1)\chi t_{1}]. 
\end{eqnarray} Similarly, one can easily find that \begin{align} U_{1}|10\rangle _{ab}|g\rangle & =\exp (i\lambda _{a}\hat{n}_{a}|g\rangle \langle g|t_{1})\exp (i\chi \hat{n}_{a}\hat{n}_{b}|g\rangle \langle g|t_{1})\otimes \notag \\ & \;\;\,\sum\limits_{n=0}^{\infty }C_{2n+1}|2n+1\rangle _{a}\sum\limits_{m^{\prime }=0}^{\infty }C_{2m^{\prime }}|2m^{\prime }\rangle _{b}|g\rangle \notag \\ & =\sum\limits_{n,m^{\prime }=0}^{\infty }F_{3}\left( n,m^{\prime },t_{1}\right) C_{2n+1}C_{2m^{\prime }}|2n+1\rangle _{a}|2m^{\prime }\rangle _{b}|g\rangle , \end{align} \begin{align} U_{1}|11\rangle _{ab}|g\rangle & =\exp (i\lambda _{a}\hat{n}_{a}|g\rangle \langle g|t_{1})\exp (i\chi \hat{n}_{a}\hat{n}_{b}|g\rangle \langle g|t_{1})\otimes \notag \\ & \;\;\,\sum\limits_{n=0}^{\infty }C_{2n+1}|2n+1\rangle _{a}\otimes \sum\limits_{n^{\prime }=0}^{\infty }C_{2n^{\prime }+1}|2n^{\prime }+1\rangle _{b}|g\rangle \notag \\ & =\sum\limits_{n,n^{\prime }=0}^{\infty }F_{4}\left( n,n^{\prime },t_{1}\right) C_{2n+1}C_{2n^{\prime }+1}|2n+1\rangle _{a}|2n^{\prime }+1\rangle _{b}|g\rangle , \end{align} with \begin{eqnarray} F_{3}\left( n,m^{\prime },t_{1}\right) &=&\exp [i\lambda _{a}(2n+1)t_{1}]\exp [i(2n+1)(2m^{\prime })\chi t_{1}], \notag \\ F_{4}\left( n,n^{\prime },t_{1}\right) &=&\exp [i\lambda _{a}(2n+1)t_{1}]\exp [i(2n+1)(2n^{\prime }+1)\chi t_{1}]. \end{eqnarray} \end{document}
\begin{document} \title{Cliques in Squares of Graphs with\\ Maximum Average Degree less than 4} \maketitle \abstract{ Hocquard, Kim, and Pierron constructed, for every even integer $D\geqslant 2$, a 2-degenerate graph $G_D$ with maximum degree $D$ such that $\omega(G_D^2)=\frac52D$. They asked whether (a) there exists $D_0$ such that every 2-degenerate graph $G$ with maximum degree $D\geqslant D_0$ satisfies $\chi(G^2)\leqslant \frac52D$ and (b) whether this result holds more generally for every graph $G$ with $\textrm{mad}(G)<4$. In this direction, we prove upper bounds on the clique number $\omega(G^2)$ of $G^2$ that match the lower bound given by this construction, up to small additive constants. We show that if $G$ is 2-degenerate with maximum degree $D$, then $\omega(G^2)\leqslant \frac52D+72$ (with $\omega(G^2)\leqslant \frac52D+60$ when $D$ is sufficiently large). And if $G$ has $\textrm{mad}(G)<4$ and maximum degree $D$, then $\omega(G^2)\leqslant \frac52D+532$. Thus, the construction of Hocquard et al.~is essentially best possible. } \section{Introduction} The \Emph{square, $G^2$}, of a graph $G$ is formed from $G$ by adding an edge between each pair of vertices at distance 2 in $G$. Over the past 40 years tremendous effort has gone into proving upper bounds on the chromatic number of squares of graphs; for a recent survey, see~\cite{squares-survey}. If a graph $G$ has maximum degree $\Delta$, then trivially $\chi(G^2)\geqslant \Delta+1$ since, for every vertex $v$, the closed neighborhood $N[v]$ is a clique in $G^2$. A graph $G$ is \Emph{$k$-degenerate} if each subgraph contains a vertex of degree at most $k$. Equivalently, we require a vertex order $\sigma$ such that each vertex has at most $k$ neighbors later in $\sigma$. By greedily coloring vertices in the order that is the reverse of $\sigma$, we see that $\chi(G)\leqslant k+1$. 
It is easy to check that $G^2$ is $\Delta^2$-degenerate; thus $\chi(G^2)\leqslant \Delta^2+1$. This bound holds with equality for the 5-cycle and the Petersen graph (but only for 1 or 2 connected graphs besides these). In general, the upper bound $\chi(G^2)\leqslant \Delta^2$ cannot be improved much, as witnessed by incidence graphs of finite projective planes. Whenever $q$ is a power of a prime, there exists such a plane with $q^2+q+1$ points and $q^2+q+1$ lines. So its incidence graph $G$ is $(q+1)$-regular with $\omega(G^2)=q^2+q+1=\Delta^2-\Delta+1$. To prove better upper bounds on $\chi(G^2)$, researchers have focused on more structured classes of graphs. For brevity, we mention only two types of these. {Erd\H{o}s} and {Ne\v{s}et\v{r}il} introduced the notion of strong edge-coloring, which is equivalent to coloring the square of the line graph $L(G)$. A blow-up $B_D$ of the 5-cycle replaces each vertex with an independent set of size $D/2$ and each edge with a copy of $K_{D/2,D/2}$. It is easy to check that $L(B_D)^2$ is a complete graph with order $\frac54D^2$. {Erd\H{o}s} and {Ne\v{s}et\v{r}il} conjectured that if $G$ has $\Delta\leqslant D$, then its strong edge-chromatic number, denoted $\chi'_s(G)$, is at most $\frac54D^2$. That is, the worst case is when $L(G)^2$ is complete. The best results in this direction all come from randomized coloring procedures~\cite{BruhnJ,BPP,HdjdVK}. The current strongest bound~\cite{HdjdVK} is $\chi'_s(G)\leqslant 1.772D^2$. Another direction of work has focused on bounding the clique number $\omega(G^2)$. Initial efforts in this vein~\cite{ChungGTT90} showed that if $L(G)^2$ is a complete graph, then its order is at most $\frac54D^2$. Later, \'{S}leszy\'{n}ska-Nowak~\cite{SN} showed that $\omega(L(G)^2)\leqslant \frac32D^2$. More recently, Faron and Postle~\cite{FP} strengthened this bound to $\omega(L(G)^2)\leqslant \frac43D^2$. 
The other class of graphs that we discuss in detail is planar graphs. In 1977, Wegner conjectured that if $G$ is planar with $\Delta\geqslant 8$, then $\chi(G^2)\leqslant \lfloor\frac32\Delta\rfloor+1$. (His conjecture is a chief reason for much of the interest in bounding $\chi(G^2)$.) To see that this conjecture is sharp, consider a so-called ``fat triangle'' formed from $K_3$ by replacing each edge with $s$ parallel edges. Subdivide once all edges of the fat triangle except for two non-parallel edges. The resulting graph $G$ has $\Delta=2s$ and $G^2$ is a complete graph of order $3s+1$. (For smaller $\Delta$, Wegner conjectured weaker upper bounds on $\chi(G^2)$. In each case, the conjectured upper bound is sharp, as witnessed by a graph whose square is complete.) Wegner's Conjecture has been proved asymptotically~\cite{HavetHMR}: $\chi(G^2)\leqslant \frac32\Delta(1+o(1))$, where the $o(1)$ is as $\Delta\to \infty$. In fact, the same bound holds for list coloring~\cite{HavetHMR} and for graphs embedded in an arbitrary fixed surface~\cite{AEvdH}. But for exact bounds, the problem remains wide open except for the case $\Delta=3$ (where 7 colors are both sufficient and often necessary), which was confirmed by two groups~\cite{HJT,thomassen-wegner3}. This state of affairs motivates interest in bounds on $\omega(G^2)$. Amini et al.~\cite{AEvdH} proved that $\omega(G^2)\leqslant \frac32\Delta+O(1)$ for graphs embedded in each fixed surface. For the plane, they proved $\omega(G^2)\leqslant \frac32\Delta+76$ when $\Delta\geqslant 11616$.\footnote{This paper attributes to Cohen and van den Heuvel the bound $\omega(G^2)\leqslant \lfloor \frac32\Delta\rfloor+1$ for all planar graphs with $\Delta\geqslant 41$. However, we contacted Cohen and he informed us that he and van den Heuvel have neither written a proof of this statement, nor have plans to do so. 
Thus, the problem should be considered open.} Now we reach the specific paper that inspired our present work. The \EmphE{maximum average degree}{-4mm} of a graph $G$, denoted \EmphE{$\textrm{mad}(G)$}{3mm}, is defined as $\max_{H\subseteq G}2|E(H)|/|V(H)|$. Hocquard, Kim, and Pierron~\cite{HKP} studied $\chi(G^2)$ when $\textrm{mad}(G)<4$. They showed that $G^2$ is $3\Delta$-degenerate; thus, $\chi(G^2)\leqslant 3\Delta+1$. They also constructed such a graph $G$ with $\omega(G^2)=\frac52\Delta$, whenever $\Delta$ is even; see Example~\ref{example1}. As a possible strengthening, they asked the two questions below. \begin{enumerate} \item[(i)] Is there $D_0$ such that every 2-degenerate graph $G$ with $\Delta(G)\geqslant D_0$ has $\chi(G^2)\leqslant \frac52\Delta(G)$? \item[(ii)] Is there $D_0$ such that every $G$ with $\Delta(G)\geqslant D_0$ and $\textrm{mad}(G)<4$ has $\chi(G^2)\leqslant \frac52\Delta(G)$? \end{enumerate} A natural approach to these questions is to try to strengthen the bound on the degeneracy of $G^2$ whenever $G$ is 2-degenerate. Unfortunately, this is not really possible. In Example~\ref{example2} below, for each integer $D\geqslant 2$, we construct a family of graphs $G$ such that $G$ is 2-degenerate, $\Delta(G)=D$, and $G^2$ is \emph{not} $(3D-5)$-degenerate. \begin{figure} \caption{A graph $G_D$ with maximum degree $D$. In $G_D^2$, the black vertices form a clique of order $5D/2$. Each pair of black vertices with no common white neighbor has a gray neighbor that is adjacent only to them (though only a few such gray vertices are shown).}\label{construction-fig}\label{fig1} \end{figure} \begin{example}[\cite{HKP}] \label{example1} For every positive even integer $D$, there exists a 2-degenerate graph $G_D$ with maximum degree $D$ such that $\omega(G_D^2)=\frac52D$; see Figure~\ref{fig1}. Fix a positive integer $D$ that is divisible by 4. 
To form $G_D$, (1) start with the complete graph $K_5$. (2) Replace each edge $vw$ with a copy of $K_{2,D/4}$, identifying the vertices in the part of size 2 with $v$ and $w$. (3) Now for each pair, $x$ and $y$, of vertices of degree 2 with no common neighbor, add a vertex $z_{xy}$ adjacent to both $x$ and $y$. Call the resulting graph $G_D$. Clearly, $G_D$ is 2-degenerate. It is easy to check that also $\omega(G_D^2)=\frac52D$; the set $S$ of all vertices adjacent to two vertices of the original $K_5$ is a clique in $G_D^2$. Furthermore, $G_D$ has maximum degree $4(D/4)=D$; this is the degree of each vertex of the original $K_5$ and every other vertex has lower degree. (If $D$ is even, but not divisible by 4, perform the construction above with $D':=D+2$, but for each edge on the outer 5-cycle of the original $K_5$, after (2) but before (3), delete one vertex in the part of size $D'/4$.) \exampleEnd \end{example} \begin{example} \label{example2} For each positive integer $D$, there exists a graph $H_D$ that is 2-degenerate and has maximum degree $D$, but such that $H_D^2$ is not $(3D-5)$-degenerate; see Figure~\ref{example2-fig}. Let $G_D$ be a $D$-regular graph with $4s$ vertices, for some $s\geqslant 3$ (for example, $G_D$ could be a circulant); let $T:=V(G_D)$. Subdivide each edge of $G_D$; call the resulting graph $G_D'$ and call these new vertices $S$. Build an auxiliary graph $J$ with vertex set $S$ and $vw\in E(J)$ if $v$ and $w$ are distinct and have no common neighbor in $G_D'$. Note that $J$ is regular of degree $|E(G_D)|-(2D-1)=2sD-(2D-1)>\frac12|E(G_D)|=\frac12|S|=\frac12|V(J)|$. Thus, by Dirac's Theorem, $J$ has a Hamiltonian cycle, $C$. By deleting the edges of $C$ and repeating the argument, we find $\lceil\frac12(D-2)\rceil$ edge-disjoint Hamiltonian cycles $C_i$ in $J$. If $D$ is even, let $J_1$ be the union of these cycles $C_i$; if $D$ is odd, let $J_1$ be the union of all but one of these cycles $C_i$ and a 1-factor from the final cycle. 
Note that $J_1$ is $(D-2)$-regular. Now, for each edge $vw\in E(J_1)$, add to $G'_D$ a new vertex $z_{vw}$ adjacent to $v$ and $w$; call the resulting graph $H_D$. Now we can check that $H_D$ is 2-degenerate with maximum degree $D$ and $H_D^2[S]$ is regular of degree $3D-4$. \exampleEnd \end{example} \begin{figure} \caption{The process of forming $H_D$ from $G_D$ in Example~\ref{example2}.}\label{example2-fig} \end{figure} We can generalize Example~\ref{example2} to something similar for $k$-degenerate graphs with arbitrary $k\geqslant 2$. But to keep our focus here on 2-degenerate graphs, we defer these details to Example~\ref{example3}. Example~\ref{example2} shows that even proving the bound $\chi(G^2)\leqslant 3D-4$ will require new ideas. The degeneracy of $G^2$ alone will not suffice. This observation motivates us to instead seek bounds on $\omega(G^2)$. Our two main results are as follows: \begin{thm} \label{main1} Fix a positive integer $D$. If a graph $G$ is 2-degenerate with $\Delta(G)\leqslant D$, then $\omega(G^2)\leqslant \frac52D+72$. Furthermore, if $D\geqslant 1729$, then $\omega(G^2)\leqslant \frac52D+60$. \end{thm} \begin{thm} \label{main2} Fix a positive integer $D$. If a graph $G$ has $\textrm{mad}(G)<4$ and $\Delta(G)\leqslant D$, then $\omega(G^2)\leqslant \frac52D+532$. \end{thm} The proofs of both results follow a similar outline, though the latter is more complicated. We have made some effort to minimize the constant in Theorem~\ref{main1}, but have not made much effort in this direction in Theorem~\ref{main2}, preferring to present a simpler proof. \begin{defn} \label{def1} A \EmphE{$k$-degeneracy order}{0mm} for a graph $G$ is an order of $V(G)$ such that each vertex has at most $k$ neighbors later in the order. A graph $G$ is \EmphE{nice}{4mm}, w.r.t. 
a clique $S$ in $G^2$, if (a) $S$ is a clique in $G^2$, (b) $S$ is an independent set in $G$, and (c) $G$ has a $2$-degeneracy order $\sigma$ such that all vertices of $S$ appear consecutively in $\sigma$. The notion is inspired by the construction of Hocquard, Kim, and Pierron, described in Example~\ref{example1}, which is indeed nice. \end{defn} In Section~\ref{nice-sec}, we consider only graphs $G$ that are nice w.r.t.~a maximum clique $S$ in $G^2$. \begin{thm} \label{main3} If $G$ is nice w.r.t.~a maximum clique $S$ in $G^2$ and $\Delta(G)\leqslant D$, then $\omega(G^2)\leqslant \frac52D$. \end{thm} Theorem~\ref{main3} is exactly sharp, as witnessed by the graphs in Example~\ref{example1}. (In fact, these graphs and a class of similar ones are the unique extremal examples, as can be seen by studying the proof more carefully. We give more details following the proof of Proposition~\ref{prop1}.) To prove Theorem~\ref{main1}, we consider a graph $G$ that is 2-degenerate and fix a vertex subset $S$ such that $S$ is a maximum clique in $G^2$. We show that there exists $S'\subseteq S$ such that $|S\setminus S'|\leqslant 72$ and there exists a graph $G'$ (a subgraph of $G$) such that $G'$ is nice w.r.t.~$S'$. Then the result follows by applying Theorem~\ref{main3} to $G'$. To prove Theorem~\ref{main2}, our approach is similar. We consider a graph $G$ with $\textrm{mad}(G)<4$ and fix a vertex subset $S$ such that $S$ is a maximum clique in $G^2$. We show that there exists $S'\subseteq S$ such that $|S\setminus S'|\leqslant 460$ and there exists a graph $G'$ (a subgraph of $G$) such that $G'$ is nice w.r.t.~$S'$. Then the result again follows by applying Theorem~\ref{main3} to $G'$. 
In the proof of Theorem~\ref{main1} (resp.~Theorem~\ref{main2}), we introduce a technique of ``token passing'', where vertices are deleted in a 2-degeneracy (resp.~3-degeneracy) order $\sigma$ and each vertex passes tokens, immediately before it is deleted, to its neighbors later in $\sigma$. These tokens facilitate a sort of amortized analysis that is common in certain areas of algorithm analysis. However, we have not previously seen it used for the types of problems we consider in the present paper. And we believe it is likely to be applicable to further similar problems. \section{Big Cliques in Squares of Nice 2-Degenerate Graphs} \label{nice-sec} The goal of this section is to prove the following result. \begin{thm} Let $H$ be a multigraph with $\Delta(H)\leqslant D$, for some positive integer $D$. If each edge of $H$ shares at least one endpoint with all but at most $D-2$ other edges of $H$, then $|E(H)|\leqslant \frac52 D$. \label{thm1} \end{thm} Intuitively, Theorem~\ref{thm1} essentially rephrases Theorem~\ref{main3} in terms of edges in multigraphs, for which we have standard terminology. More formally, we have the following proposition. \begin{prop} Theorem~\ref{thm1} implies Theorem~\ref{main3}. \label{prop1} \end{prop} \begin{proof} Let $G$\aside{$G$, $S$} be nice w.r.t.~a maximum clique $S$ in $G^2$ and $\Delta(G)\leqslant D$. We may assume that $V(G)\setminus S$ is an independent set; otherwise we delete all edges induced by $V(G)\setminus S$ and again call the resulting graph $G$ (note that it still satisfies all hypotheses of Theorem~\ref{main3}). Let \Emph{$\sigma$} be a 2-degeneracy order witnessing that $G$ is nice w.r.t.~$S$ and partition $V(G)$ into \Emph{$R,S,T$}, where $R$ are vertices before $S$ in $\sigma$, and $T$ are vertices after $S$. Form \Emph{$H$} from $G$ as follows: delete all vertices of $R$ and contract one edge incident to each vertex of $S$. Figure~\ref{H-from-G-fig} shows an example. 
\begin{figure} \caption{The graph $G_D$ (left) from Example~\ref{example1}.}\label{H-from-G-fig} \end{figure} It is easy to check that $\Delta(H)\leqslant \Delta(G)$ and $|E(H)|=|S|$. Consider $v\in S$. Since $\sigma$ is a 2-degeneracy order, each neighbor $w\in R$ of $v$ gives rise to at most one edge $vv'\in E(G^2)$. If $v$ has at most one neighbor in $T$, then $v$ has degree at most $2(D-1)$ in $G^2$, so $|S|\leqslant 2D$. So assume instead that $v$ has at least two neighbors in $T$ (exactly two, in fact, since $\sigma$ is a 2-degeneracy order). Since $\Delta(G)\leqslant D$, vertex $v$ has at most $D-2$ neighbors in $R$. So for all but at most $D-2$ vertices $v'$ in $S$, vertex $v$ has a neighbor $w$ in $T$ in common with $v'$. Thus, the edge in $H$ arising from $v$ has a common endpoint with all edges of $H$ except for at most $D-2$. Hence, $H$ satisfies the hypotheses of Theorem~\ref{thm1}. As a result, $\omega(G^2)=|E(H)|\leqslant \frac52D$, as desired. \end{proof} To see that Theorem~\ref{thm1} is best possible, start with $K_5$ and replace each edge with $D/4$ parallel edges. We can also say a bit about uniqueness and stability. The bulk of the work in proving Theorem~\ref{thm1} is considering the case $|V(H)|\geqslant 6$ and proving that $|E(H)|\leqslant \frac52D$; in fact, we can prove that this inequality is strict. Thus, every graph $H$ that satisfies Theorem~\ref{thm1} with equality has $|V(H)|=5$. However, we do not have much ``stability'', since (for infinitely many values of $D$) there is a graph $H$ with $|V(H)|=6$ and $|E(H)|=\frac52D-5$. Namely, start with $K_6$ and replace each edge with $\frac16(D-2)$ parallel edges. In fact, even the extremal examples on 5 vertices are not unique. Such examples require only that the graph is $D$-regular and that each edge shares an endpoint with all but at most $D-2$ other edges. For large values of $D$, there are many such graphs. 
For example, starting with the example in the first sentence of this paragraph, we can pick an arbitrary 5-cycle $\mathcal{C}$ and delete $2$ copies of each edge on $\mathcal{C}$. This gives an example with maximum degree $D-4$ that differs from that first example. \subsection{Proof of Theorem~\ref{thm1}} We assume Theorem~\ref{thm1} is false and let $H$ be a counterexample minimizing $|E(H)|+|V(H)|$. \begin{defn} For a multigraph $H$, we write $H_0$\aside{$H_0$, $\overline{H_0}$} for the underlying simple graph and $\overline{H_0}$ for the complement of this simple graph. For each $vw\in E(H)$, we write \Emph{$\mu(vw)$} to denote the multiplicity of edge $vw$. For each $v\in V(H)$, let \Emph{$E(v)$} denote the set of edges incident to $v$. \end{defn} If we can show that $H_0$ is a complete graph, say $K_h$, then we can conclude with a simple counting argument, taking advantage of the symmetry of $K_h$; see Claim~\ref{lem8}. So this is the main work of the present section. Equivalently, we show that $\overline{H_0}$ is edgeless. To this end, we show that $\overline{H_0}$ is, first, a vertex disjoint union of stars (Claim~\ref{lem5}); second, a matching (Claim~\ref{lem6}); and third, a graph with at most one edge (Claim~\ref{lem7}). \begin{clm} $H$ is connected and $|V(H)|\geqslant 6$. \label{lem0} \end{clm} \begin{clmproof} Since $|E(H)|+|V(H)|$ is minimum, $H$ has no isolated vertices. If $H$ is disconnected, then some edge $e$ of $H$ is in a component with at most half of the edges of $H$. But now $e$ has no endpoint in common with at least $\frac12|E(H)|>\frac54D>D-2$ edges, a contradiction. If $|V(H)|\leqslant 5$, then $|E(H)| = \frac12 \sum_{v\in V(H)}d(v)\leqslant \frac52D$, so $H$ is not a counterexample. \end{clmproof} \begin{clm} If $vw\in E(H)$, then $d(v)+d(w)-\mu(vw) \geqslant \frac32D+2$. \label{lem1} \end{clm} \begin{clmproof} By definition, $D-2\geqslant |E(H)\setminus (E(v)\cup E(w))|= |E(H)|-(d(v)+d(w)-\mu(vw))$. 
Thus, $d(v)+d(w)-\mu(vw)\geqslant |E(H)|-(D-2)\geqslant \frac32D+2$. \end{clmproof} \begin{clm} We have $\delta(H_0)\geqslant 3$. \label{lem2} \end{clm} \begin{clmproof} By Claim~\ref{lem0}, $d_{H_0}(v)\geqslant 1$ for each $v\in V(H_0)$. Suppose there exists $v\in V(H_0)$ with $d_{H_0}(v)=1$; denote its neighbor by $w$. Claim~\ref{lem1} gives $\frac32 D < d(w)+(d(v)-\mu(vw))=d(w)\leqslant D$, a contradiction. So suppose instead that $d_{H_0}(v)=2$; denote the neighbors of $v$ by $w$ and $x$. By symmetry, assume that $\mu(vw)\leqslant \mu(vx)$, so $\mu(vw)\leqslant \frac12D$. Similar to above, $\frac32 D <d(v)+d(x) - \mu(vx) = d(x)+ \mu(vw) \leqslant \frac32D$, a contradiction. Thus, $\delta(H_0)\geqslant 3$, as claimed. \end{clmproof} \begin{clm} \label{lem3} If $v,w,x,y\in V(H)$ are distinct vertices and $vw,xy\in E(H)$, then $\mu(vx)+\mu(vy)+\mu(wx)+\mu(wy) \geqslant \frac12D+2$. \end{clm} \begin{clmproof} If an edge $e$ shares an endpoint with both $vw$ and $xy$, then $e$ is counted by $\mu(vx)+\mu(vy)+\mu(wx)+\mu(wy)$. Each of $vw$ and $xy$ shares an endpoint with all but at most $D-2$ edges of $H$, so $\mu(vx)+\mu(vy)+\mu(wx)+\mu(wy)\geqslant |E(H)|-|\{vw,xy\}|-2(D-2)\geqslant \frac52 D-(2D-2)=\frac12D+2$. \end{clmproof} \begin{clm} For all distinct $v,w\in V(H)$, we have $\sum_{x\in V\setminus\{v,w\}}\mu(vx)>\frac12 D$; so $\mu(vw)<\frac12D$. \label{lem4} \end{clm} \begin{clmproof} First suppose that $vw\in E(H)$. By Claim~\ref{lem1}, we have $\frac32D<d(w)+d(v)-\mu(vw) = d(w)+(\sum_{x\in V\setminus\{v\}}\mu(vx))-\mu(vw) = d(w)+\sum_{x\in V\setminus\{v,w\}}\mu(vx) \leqslant D+\sum_{x\in V\setminus\{v,w\}}\mu(vx)$. Thus, $\frac12D < \sum_{x\in V\setminus\{v,w\}}\mu(vx)$, as claimed. So $\mu(vw)<D-\frac12D$. Suppose instead that $vw\notin E(H)$. By Claim~\ref{lem0}, $v$ has some incident edge $vz$. As above, $\sum_{x\in V\setminus\{v,z\}}\mu(vx)>\frac12D$. 
Now $\mu(vw)=0$, so $\sum_{x\in V\setminus\{v,w\}}\mu(vx)\geqslant\sum_{x\in V\setminus\{v,w,z\}}\mu(vx) = \sum_{x\in V\setminus\{v,z\}}\mu(vx) > \frac12D$, as claimed. \end{clmproof} \begin{clm} The graph $\overline{H_0}$ is a vertex disjoint union of stars. \label{lem5} \end{clm} \begin{clmproof} First suppose, to the contrary, that $\overline{H_0}$ contains a triangle $vwx$; see Figure~\ref{lem5-fig}(a). That is, $vw,vx,wx\notin E(H_0)$. Since $\delta(H_0)\geqslant 3$, there exists $y$ such that $vy\in E(H_0)$. By Claim~\ref{lem4}, we have $\sum_{z\in V\setminus\{w,y\}}\mu(wz)>\frac12 D$ and $\sum_{z\in V\setminus\{x,y\}}\mu(xz)>\frac12 D$. Since $\mu(wx)=0$, the sets of edges counted by these two inequalities are disjoint. Thus, there exist more than $D$ edges that share no endpoint with edge $vy$. This contradicts our definition of $H$ (in the hypothesis of the theorem). Thus, $\overline{H_0}$ is triangle-free. Now suppose instead that $\overline{H_0}$ contains edges $vw,wx,xy$; see Figure~\ref{lem5-fig}(b). By the previous paragraph, $vx,wy\notin E(\overline{H_0})$. Now Claims~\ref{lem3} and~\ref{lem4} give $\frac12D+2\leqslant \mu(vw)+\mu(wx)+\mu(xy)+\mu(vy)=\mu(vy)<\frac12D$, a contradiction. This finishes the proof of the claim. \end{clmproof} \begin{figure} \caption{The proof of Claim~\ref{lem5}.}\label{lem5-fig} \end{figure} \begin{clm} The graph $\overline{H_0}$ is a matching. \label{lem6} \end{clm} \begin{clmproof} Suppose, to the contrary, that there exist distinct $v,w,x\in V(H_0)$ with $vw,vx\in E(\overline{H_0})$; that is, $vw,vx\notin E(H_0)$. Since $\delta(H_0)\geqslant 3$, there exists $y$ such that $vy\in E(H_0)$. By Claim~\ref{lem5}, we know $wx, wy, xy\in E(H_0)$; see Figure~\ref{lem67-fig}(a). Thus, Claim~\ref{lem3} gives $\mu(vw)+\mu(vx)+\mu(yw)+\mu(yx)>\frac12D$. However, $\mu(vw)=\mu(vx)=0$, so $\mu(yw)+\mu(yx)>\frac12D$. Since $\delta(H_0)\geqslant 3$, there exist $y',z$ such that $y,y',z$ are distinct neighbors of $v$ in $H_0$. 
Repeating the arguments above for $y'$, we get that $\mu(y'w)+\mu(y'x)>\frac12D$. But now $vz$ has more than $D$ edges with which it shares no endpoint (those counted by $\mu(yw)+\mu(yx)+\mu(y'w)+\mu(y'x)$), a contradiction. \end{clmproof} \begin{figure} \caption{The proofs of (a) Claim~\ref{lem6} and (b) Claim~\ref{lem7}.}\label{lem67-fig} \end{figure} \begin{clm} The graph $\overline{H_0}$ contains at most one edge. \label{lem7} \end{clm} \begin{clmproof} First suppose instead that $\overline{H_0}$ has at least 3 edges; that is, by Claim~\ref{lem6}, there are $v_1v_2, v_3v_4, v_5v_6 \notin E(H_0)$. See Figure~\ref{lem67-fig}(b). By Claim~\ref{lem6}, we know $v_1v_3, v_2v_4\in E(H_0)$. So Claim~\ref{lem3} implies that $\mu(v_2v_3)+\mu(v_1v_4)>\frac12D$. Similarly, $\mu(v_1v_3)+\mu(v_2v_4)>\frac12D$. We can now repeat both of these arguments twice, first with $v_5v_6$ in place of $v_3v_4$, and second with $v_5v_6$ in place of $v_1v_2$. Thus, we have partitioned the 12 edges induced (in $H_0$) by $v_1,\ldots,v_6$ into 6 pairs, each of which has sum of multiplicities greater than $\frac12D$. Thus, $|E(H)|>6(\frac12D)=3D$. However, applying the hypothesis of the theorem to edge $v_1v_3$ gives $|E(H)|\leqslant d(v_1)+d(v_3)+(D-2)<3D$, a contradiction. Now suppose instead that $\overline{H_0}$ has only two edges. That is, $v_1v_2,v_3v_4\notin E(H_0)$. As above, we conclude that $\{v_1,v_2,v_3,v_4\}$ induces (in $H$) more than $D$ edges. Now there exists $v_5v_6\in E(H_0)$ (with $v_5,v_6\notin \{v_1,v_2,v_3,v_4\}$; recall that $|V(H)|\geqslant 6$, by Claim~\ref{lem0}). But now there exist more than $D$ edges with which $v_5v_6$ shares no endpoint, a contradiction. \end{clmproof} \begin{clm} The graph $\overline{H_0}$ contains exactly one edge. Further, $|V(H_0)|=6$. \label{lem8} \end{clm} \begin{clmproof} Suppose to the contrary, by Claim~\ref{lem7}, that $\overline{H_0}$ contains no edges; that is $H_0$ is a complete graph. Let \Emph{$h$}$:=|V(H_0)|$. 
By assumption, every edge $vw\in E(H)$ shares an endpoint with all but at most $D-2$ edges. Thus, for every pair $v,w\in V(H)$, we have $d_H(v)+d_H(w)-\mu(vw) \geqslant |E(H)|-(D-2)$. Summing over all $\binom{h}2$ edges of $H_0$ gives $$ \sum_{vw\in E(H_0)}\left(d_H(v)+d_H(w)-\mu(vw)\right)\geqslant \sum_{vw\in E(H_0)}\left(|E(H)|-(D-2)\right) $$ Each edge of $H$ is counted on the left exactly $2h-3$ times. Thus, we get $(2h-3)|E(H)| \geqslant \binom{h}2|E(H)|-\binom{h}2(D-2)$. This simplifies to $|E(H)|\leqslant (D-2)\frac{h(h-1)}{(h-2)(h-3)} = (D-2)(1+\frac{4h-6}{(h-2)(h-3)}) = (D-2)(1+\frac{6}{h-3}+\frac{-2}{h-2})$. This expression is decreasing with $h$, so is maximized when $h=6$, where we get $|E(H)|\leqslant (D-2)(1+\frac{6}3+\frac{-2}4) = (D-2)\frac52$. Thus, $\overline{H_0}$ contains exactly one edge, as claimed. Let $vw$ be the unique edge in $\overline{H_0}$. Now we repeat the argument above. The main difference is that the sum on the right now has only $\binom{h}2-1$ terms. There are also some edges that are counted only $2h-4$ times, but we can still upper bound the left side by $(2h-3)|E(H)|$. That is $(2h-3)|E(H)|\geqslant (\binom{h}2-1)|E(H)|-(\binom{h}2-1)(D-2)$. This simplifies to $|E(H)|\leqslant (D-2)\frac{h^2-h-2}{(h-1)(h-4)} = (D-2)(1+\frac{4h-6}{(h-1)(h-4)}) = (D-2)(1+\frac{2/3}{h-1}+\frac{10/3}{h-4})$. Again this expression is decreasing with $h$. When $h=6$, we get $|E(H)|\leqslant \frac{14}5(D-2)$, so we cannot rule out that case. But when $h=7$, we get $|E(H)|\leqslant \frac{20}9(D-2)$. Thus $h=6$, as claimed. \end{clmproof} \begin{figure} \caption{The graphs $H_0$ and $H_1$ in the proof of Claim~\ref{lem9}.}\label{lem9-fig} \end{figure} \begin{clm} The graph $H$ does not exist; that is, Theorem~\ref{thm1} is true. \label{lem9} \end{clm} \begin{clmproof} Suppose the claim is false. By Claim~\ref{lem8}, we must have $H_0 = K_6-e$. Denote by $x,y$ the nonadjacent pair in $H_0$; see Figure~\ref{lem9-fig}. 
We define a multigraph $H_1$ that has $H_0$ as its underlying simple graph, has $\mu(e)=2$ for each edge $e$ of $H_0$ incident to $x$ or $y$, and $\mu(e)=1$ for each other edge $e$ of $H_0$. Similar to above, for each edge $vw\in E(H_0)$ we have $d_H(v)+d_H(w)-\mu(vw) \geqslant |E(H)|-(D-2)$. Summing over all $14+8=22$ edges of $H_1$ gives\footnote{In other words, $H_1$ is simply a convenient way to encode the multiplicities with which we want to sum the 14 inequalities $d_H(v)+d_H(w)-\mu(vw)\geqslant |E(H)|-(D-2)$ arising from the 14 edges $vw$ of $H_0$.} $$ \sum_{vw\in E(H_1)}\left(d_H(v)+d_H(w)-\mu(vw)\right)\geqslant \sum_{vw\in E(H_1)}\left(|E(H)|-(D-2)\right) $$ It is straightforward to check that each edge of $H$ is counted exactly 13 times. Thus, the inequality above can be rewritten as $13|E(H)|\geqslant 22|E(H)|-22(D-2)$, which simplifies to $|E(H)|\leqslant \frac{22}9(D-2)$. This contradiction proves the claim, which in turn proves the theorem. \end{clmproof} \section{Big Cliques in Squares of 2-Degenerate Graphs} \label{sec3} Fix a positive integer $D$. Let \Emph{$f(D)$} be the maximum size of a clique $S$ in the square of a 2-degenerate graph $G$ with $\Delta(G)\leqslant D$. It is easy to verify that if $G$ is $k$-degenerate with $\Delta(G)\leqslant D$, then $G^2$ has degeneracy at most $k(D-1)+(D-k)(k-1)$; this is witnessed by any vertex order showing that $G$ is $k$-degenerate. So, when $k=2$, $G^2$ is $(3D-4)$-degenerate. Thus, $f(D)\leqslant 3D-3$. Example~\ref{example1} shows that $f(D)\geqslant \frac52D$, when $D$ is even. In fact, the graph $G$ in Example~\ref{example1} has a maximum clique $S$ in $G^2$ the vertices of which form an independent set in $G$. Furthermore, there exists a 2-degeneracy order $\sigma$ for $G$ in which the vertices of $S$ appear consecutively. The goal of this section is to show that if $f(D)>\frac52D$, then this is witnessed by a graph $G$ where $S$ behaves ``nearly'' as it does above. 
More specifically, we show the following (recall the meaning of ``nice'' from Definition~\ref{def1}). \begin{thm} There exists a constant $c$ such that for every positive integer $D$ some 2-degenerate graph $G$ with $\Delta(G)\leqslant D$ is nice w.r.t.~a clique $S$ in $G^2$ and $|S|\geqslant f(D)-c$. In fact, $c=72$ suffices and, when $D$ is sufficiently large, $c=60$ suffices. \end{thm} \begin{proof} We prove the theorem with $c=72$, but we have not made significant effort to minimize $c$. (A small trick at the end improves this to $c=60$ when $D\geqslant 1729$.) Fix a positive integer $D$\aside{$D$, $G$, $S$}, and let $G$ be a 2-degenerate graph with maximum degree at most $D$ such that $G^2$ has a clique $S$ of order $f(D)$. Subject to this, choose $G$ to minimize $|V(G)|+|E(G)|$. We assume that $f(D) \geqslant \frac52D+60$; otherwise, we are done by Example~\ref{example1}. Note that every vertex $v\in V(G)$ must have a neighbor in $S$; otherwise, deleting $v$ contradicts the minimality of $G$. Let \Emph{$\sigma$} be a vertex order witnessing that $G$ is 2-degenerate. Subject to this, choose $\sigma$ so the first vertex in $S$ appears as late as possible in $\sigma$. We delete the vertices of $G$ in the order $\sigma$. Each time we delete a vertex $v$ of $S$, vertex $v$ gives a ``primary'' token to each of its neighbors later in $\sigma$. Moreover, each time we delete a vertex $v$ that currently holds $s$ primary tokens (for some $s\geqslant 1$), $v$ gives to each of its neighbors later in $\sigma$ exactly $s$ ``secondary'' tokens; see Figure~\ref{fig-tokens}. \begin{figure} \caption{A portion of a 2-degenerate graph $G$ and the positions of its tokens as its vertices are deleted in a 2-degeneracy order; here $S=\{1,\ldots,7\}$.}\label{fig-tokens} \end{figure} We will analyze this process to show that all but a constant number of vertices in $S$ induce an independent set. 
That is, there exists $\widehat{S}\subseteq S$, of bounded size, such that $S\setminus \widehat{S}$ is an independent set. Furthermore, we can modify $G$, $S$, and $\sigma$ to get a graph $G'$ that is 2-degenerate, as witnessed by a vertex order $\sigma'$, and $G'$ is nice w.r.t.~a clique $S'$ in $(G')^2$. Finally, $|S|-|S'|\leqslant c$. Let $\textrm{tokens}(v)$\aside{$\textrm{tokens}(v)$} (resp.~$\textrm{primary}(v)$\aaside{$\textrm{primary}(v)$}{4mm}) denote the number of tokens (resp.~primary tokens) that each vertex $v$ holds immediately before it is deleted. \begin{clm} \label{clm0} If $v\in S$ and $w_1,w_2$ are the neighbors of $v$ later in $\sigma$, if they exist, then \begin{equation} 1+\textrm{tokens}(v)+D+\textrm{primary}(w_1)+\textrm{primary}(w_2)+6\geqslant |S|\geqslant \frac52D+60.\tag{$\star$} \label{key-ineq} \end{equation} \end{clm} \begin{clmproof} Fix $v\in S$. Recall that $v$ must be adjacent in $G^2$ to every vertex in $S$. Each neighbor $w$ of $v$ that precedes $v$ in $\sigma$ can be the source of at most one such adjacency in $G^2$ to a vertex $x$ from which $v$ receives no token (neither primary nor secondary), so in total $v$ can have at most $D$ such adjacencies. Each other adjacency in $G^2$ must be accounted for either (a) by a token received by $v$ or (b) by a primary token that has been or will be received by $w_1$ or $w_2$ or (c) by being an adjacency to $w_1$ or $w_2$ or to a neighbor of some $w_i$ that comes later than $w_i$ in $\sigma$. At most $\textrm{tokens}(v)$ vertices of $S$ are handled by (a), and at most 6 vertices of $S$ are handled by (c). The number handled by (b) is at most $\textrm{primary}(w_1)+\textrm{primary}(w_2)$. This implies~\eqref{key-ineq}. \end{clmproof} To analyze the movement of tokens, we use the following definitions (all w.r.t.~$G$, $S$, and $\sigma$). Let $\EmphE{\textsc{Big}}{-4mm}:=\{v\in V(G): \textrm{primary}(v) > \frac14D\}$. Let $\Emph{\textsc{Basic}}:=\{v\in S: \textrm{tokens}(v) < \frac14D\}$. 
Let $\EmphE{\textsc{NonBasic}}{0mm}:=S\setminus\textsc{Basic}$. So $\textsc{Big}\cap\textsc{Basic}=\emptyset$. And possibly both $\textsc{Big}\not\subseteq\textsc{NonBasic}$ and $\textsc{NonBasic}\not\subseteq\textsc{Big}$. Note that each vertex $v$ gives primary tokens to at most two vertices, and each of those vertices gives secondary tokens (due to $v$) to at most two more vertices. Thus, $v$ contributes at most 6 to the total number of tokens held by vertices at any point. So $\sum_{v\in V(G)}\textrm{tokens}(v)\leqslant 6|S|\leqslant 6f(D) \leqslant 18 D$. Thus, we get that $|\textsc{NonBasic}\cup \textsc{Big}|\leqslant 18D/(D/4)=72$. We will later refine this argument to get a stronger upper bound. \begin{clm} \label{clm1} Each vertex $v\in \textsc{Basic}$ has two neighbors later in $\sigma$ that are both in $\textsc{Big}$. \end{clm} \begin{clmproof} Let $w_1,w_2$ be the neighbors of $v$ later in $\sigma$, if they exist. Inequality~\eqref{key-ineq} simplifies to $\textrm{tokens}(v)+\textrm{primary}(w_1)+\textrm{primary}(w_2)\geqslant \frac32D+53$. Since $v$ is basic, we have $\textrm{tokens}(v)<\frac14D$. So $\textrm{primary}(w_1)+\textrm{primary}(w_2)\geqslant \frac54D+53$. Clearly, $\textrm{primary}(w_i)\leqslant D$ for each $i\in\{1,2\}$, which implies that also $\textrm{primary}(w_i)\geqslant \frac14D+53$, for each $i\in\{1,2\}$. So $w_1,w_2\in \textsc{Big}$. \end{clmproof} Let $\Emph{W}:=\{w: w\in N(v)$ for some $v\in \textsc{NonBasic}$ and $w$ appears later in $\sigma$ than $v$\}. \begin{clm} \label{clm1.5} If some vertex $w$ comes after the first vertex in $S$ and $w\notin \textsc{Big}$, then $w\in W$. \end{clm} \begin{clmproof} Since $G$ minimizes $|V(G)|+|E(G)|$, every edge has an endpoint in $S$. Since $\sigma$ puts the first vertex $v$ of $S$ as late as possible, every vertex $w$ after $v$ is a later neighbor of some vertex $x\in S$. 
Every vertex in $\textsc{Basic}$ has as its later neighbors in $\sigma$ exactly two vertices in $\textsc{Big}$, by Claim~\ref{clm1}. Thus, if $w\notin \textsc{Big}$, then $x\in\textsc{NonBasic}$; hence, by definition, $w\in W$. \end{clmproof} Now we modify $G$, $S$, $\sigma$ to construct \Emph{$G'$, $S'$, $\sigma'$}. \begin{enumerate} \item[(1)] Let $S':=\textsc{Basic}\setminus W$. \item[(2)] Move $\textsc{Big}$ to the end of $\sigma$ (after the final vertex of $S'$) and move $(\textsc{NonBasic}\cup W)\setminus \textsc{Big}$ to the start of $\sigma$ (before the first vertex of $S'$); call the resulting order $\sigma'$. \item[(3)] Delete every edge of $G$ with both endpoints outside $S'$; call the resulting graph $G'$. \end{enumerate} Now we check that $G'$, $S'$, $\sigma'$ satisfy the desired criteria. \begin{clm} \label{clm2} $S'$ appears consecutively in $\sigma'$, and $S'$ and $V(G')\setminus S'$ are each independent in $G'$. \end{clm} \begin{clmproof} By Claim~\ref{clm1}, every vertex of $S'$ has two later neighbors in $\textsc{Big}$. And every vertex $v$ of $\textsc{Big}$ has no later neighbors. (Such a neighbor $w$ would have $\textrm{tokens}(w)>\frac14D$, so $w$ would be excluded from $\textsc{Basic}$, and hence from $S'$. Thus, edge $vw$ would be removed in (3).) So $\textsc{Big}$ comes, in some order, at the end of $\sigma'$. And every vertex not in $S'\cup\textsc{Big}$ has no earlier neighbors in $S'$, so comes before the first vertex of $S'$ in $\sigma'$ (by Claim~\ref{clm1.5} and (2) above); thus, $S'$ is consecutive in $\sigma'$. To see that $S'$ is an independent set in $G'$, note that every vertex in $\textsc{Basic}$ has two later neighbors in $\textsc{Big}$. But $S'\subseteq \textsc{Basic}$, so $S'\cap\textsc{Big}\subseteq\textsc{Basic}\cap\textsc{Big}=\emptyset$. Thus, $S'$ is independent. Finally, $V(G')\setminus S'$ is independent in $G'$ by step (3) above constructing $G'$. 
\end{clmproof} \begin{clm} \label{clm3} $G'$ is $2$-degenerate, as witnessed by $\sigma'$. \end{clm} \begin{clmproof} Consider steps (2) and (3) above. Suppose we move some $v\in \textsc{Big}$ to the end of $\sigma$. If $v$ has a neighbor $w$ later (in $\sigma$), then $w \notin \textsc{Basic}$, so $w\notin S'$. Thus, in step (3) we delete edge $vw$. Suppose instead that we move a vertex $v\in (\textsc{NonBasic}\cup W)\setminus \textsc{Big}$ to the start of $\sigma$. If $v$ has a neighbor $w$ earlier (in $\sigma$), then $w\notin\textsc{Basic}$, by Claim~\ref{clm2}; so $w\notin S'$. Thus, again in step (3) we delete $vw$. So $\sigma'$ is a 2-degeneracy order for $G'$ because $\sigma$ is a 2-degeneracy order for $G$. \end{clmproof} \begin{clm} \label{clm4} $S'$ forms a clique in $G'^2$. \end{clm} \begin{clmproof} Every edge deleted when forming $G'$ has both endpoints outside of $S'$. But deleting such edges cannot impact adjacency in $G'^2$ between two vertices in $S'$. Thus, $G'^2[S'] = G^2[S']$. \end{clmproof} \begin{clm} \label{clm5} $|S\setminus S'|\leqslant 72$. More strongly (for $D\geqslant 144$), we have $|S\setminus S'|\leqslant 60+12\times(144/D)$. \end{clm} \begin{clmproof} Let \Emph{$X$}$:=\textsc{Big}\cup\textsc{NonBasic}\cup W$ and note from (1) that $|S\setminus S'|\leqslant |X|$. To bound the size of $X$, we use a discharging argument. Every vertex $x\in X$ begins with charge $\textrm{tokens}(x)$. We consider each $x\in \textsc{Big}\cup \textsc{NonBasic}$ (note that this union does not include $W$) and redistribute charge so that $x$, $w_1$, and $w_2$ all finish with charge at least $\frac14D$; again, $w_1$ and $w_2$ are the neighbors of $x$ that follow $x$ in $\sigma$ (if they exist). Recall that $\sum_{v\in V(G)}\textrm{tokens}(v)\leqslant 6|S|\leqslant 18D$. Thus, this proves that $|S\setminus S'|\leqslant |X|\leqslant 6|S|/(D/4) \leqslant 18D/(D/4)=72$. Fix $x\in \textsc{Big}$. By definition, $\textrm{primary}(x)>\frac14D$. 
Further, $\textrm{tokens}(w_i)\geqslant \textrm{primary}(x)>\frac14D$ for each $i\in\{1,2\}$. So $x$ does not need to give charge to any neighbor later in $\sigma$. Thus, $x$ (and each of its later neighbors) finishes with charge at least $\frac14D$. Fix $x\in (\textsc{NonBasic}\setminus\textsc{Big})$. If $\textrm{tokens}(x)\geqslant \frac34D$, then $x$ gives charge $\frac14D$ to each of its at most two neighbors that follows it in $\sigma$. Suppose instead that $\frac12D\leqslant \textrm{tokens}(x)<\frac34D$. By~\eqref{key-ineq}, we have $\textrm{primary}(w_1)+\textrm{primary}(w_2)>\frac34D$. So clearly $\textrm{primary}(w_i)>\frac14D$ for some $i\in\{1,2\}$. Thus, at least one later neighbor of $x$ is big. So $x$ gives charge $\frac14D$ to at most one neighbor later in $\sigma$. Thus, $x$ (and each of its later neighbors) finishes with charge at least $\frac14D$. Finally, suppose that $\frac14D\leqslant \textrm{tokens}(x)<\frac12D$. So \eqref{key-ineq} gives $\textrm{primary}(w_1)+\textrm{primary}(w_2)>D$. Thus, $w_i\in\textsc{Big}$ for at least one $i\in\{1,2\}$, so $w_i$ needs no charge from $x$. If $w_1,w_2\in \textsc{Big}$, then neither $w_i$ receives charge from $x$, so $x$ (and each $w_i$) finishes with charge at least $\frac14D$. Suppose instead, by symmetry, that $w_1\in \textsc{Big}$ and $w_2\notin \textsc{Big}$. Since $\textrm{primary}(w_1)\leqslant D$, we know that $\textrm{tokens}(x)+\textrm{primary}(w_2)\geqslant \frac12D$. Thus, $x$ gives to $w_2$ charge $\textrm{tokens}(x)-\frac14D$. Clearly $x$ finishes with charge at least $\frac14D$. Furthermore, $w_2$ ends with charge at least $\textrm{primary}(w_2)+(\textrm{tokens}(x)-\frac14D)\geqslant \frac12D-\frac14D=\frac14D$. Thus, each vertex of $X$ finishes with charge at least $\frac14D$. Hence, $|S\setminus S'|\leqslant |X|\leqslant 6|S|/(D/4)\leqslant 18D/(D/4)=72$, as claimed. Combining this with Theorem~\ref{main3} gives $|S|\leqslant \frac52D+72$. 
Substituting this bound into the previous inequality gives $|X|\leqslant 6|S|/(D/4)\leqslant (15D+6\times 72)/(D/4) = 60+(12\times 144)/D$. \end{clmproof} Claims~\ref{clm2}--\ref{clm5} show $c=72$ suffices. When $D>12\times 144$, Claim~\ref{clm5} shows $c=60$ suffices. \end{proof} \section{Big Cliques in Squares of Graphs with Mad \texorpdfstring{$\boldsymbol{<4}$}{<4}} \label{sec4} In this section, we extend the main result of Section~\ref{sec3} from the class of 2-degenerate graphs to the class of graphs with maximum average degree less than 4. To formalize this idea, we use the following definition. Fix a positive integer $D$. Let \Emph{$g(D)$} be the maximum size of a clique in the square of a graph $G$ with $\textrm{mad}(G)<4$ and $\Delta(G)\leqslant D$. Recall, from Section~\ref{sec3}, that $f(D)$ is defined as the analogous maximum over 2-degenerate graphs. Since every 2-degenerate graph $G$ has $\textrm{mad}(G)<4$, clearly $f(D)\leqslant g(D)$. In this section we show that also $g(D)-f(D)$ is bounded by a small constant. We formalize this result as follows. \begin{thm} \label{thm4} There exists a constant $c$ such that for every positive integer $D$ some 2-degenerate graph $G$ with $\Delta(G)\leqslant D$ has a clique $S'$ in $G^2$ with $|S'|\geqslant g(D)-c$. In fact, $c=460$ suffices. \end{thm} We have made very little effort to minimize $c$ (so it is likely far from sharp), preferring to present a simpler proof. Before proving Theorem~\ref{thm4}, we recall a result of Hocquard, Kim, and Pierron which slightly simplifies our proof. If we instead want to avoid using this result, we can just observe that $G$ is 3-degenerate, so $G^2$ is $5D$-degenerate, which results in a larger value of $c$. \begin{thmA}[\cite{HKP}] If $G$ is a graph with $\Delta(G)\leqslant D$ and $\textrm{mad}(G)<4$, then $G^2$ has degeneracy at most $3D$. In particular, $\omega(G^2)\leqslant 3D+1$. \end{thmA} \begin{proof}[Proof of Theorem~\ref{thm4}.] 
Fix $D$\aside{$D$, $G$} and let $G$ be a graph with $\Delta(G) \leqslant D$ and $\textrm{mad}(G) < 4$ such that $G^2$ contains a clique \Emph{$S$} of order $g(D)$. Subject to this, choose $G$ to minimize $|V(G)|+|E(G)|$. As a result, $V(G)\setminus S$ is an independent set. We assume that $g(D)\geqslant \frac52D+60$; otherwise, we are done (for this choice of $D$) with $c=60$ by Example~\ref{example1}. Since $\textrm{mad}(G) < 4$, the graph $G$ has a 3-degeneracy order \EmphE{$\sigma$}{-4mm}. Let \Emph{$R_3$} be the subset of $V(G)\setminus S$ each with at least 3 neighbors in $S$. \begin{clm} \label{clmA} $|R_3| < 2|S|$. \end{clm} \begin{clmproof} Let $G_1:=G[S\cup R_3]$. Clearly $\sum_{v\in V(G_1)}d_{G_1}(v)\geqslant 6|R_3|$. So $6|R_3|/(|S|+|R_3|)\leqslant \textrm{mad}(G_1)\leqslant \textrm{mad}(G)<4$. That is, $6|R_3|<4|S|+4|R_3|$, which proves the claim. \end{clmproof} We pass tokens similarly to the way we did in Section~\ref{sec3}, with an added wrinkle for vertices in $R_3$. Just before it is deleted, each $v\in S$ passes a ``primary'' token to each of its neighbors later in $\sigma$. Again, if a vertex $v$ is holding $s$ primary tokens just before it is deleted, then $v$ passes $s$ ``secondary'' tokens to each of its neighbors later in $\sigma$ (for every $s\geqslant 1$). Finally, if some $v\in R_3$ has 3 neighbors in $S$ later in $\sigma$, then $v$ passes a ``tertiary'' token to each of these neighbors. Again, let $\textrm{tokens}(v)$\aside{$\textrm{tokens}(v)$} (resp.~$\textrm{primary}(v)$\aaside{$\textrm{primary}(v)$}{4mm}) denote the number of tokens (resp.~primary tokens) that each vertex $v$ holds immediately before it is deleted (note that $\textrm{tokens}(v)$ now counts \emph{tertiary} tokens, as well as primary and secondary tokens). Our next claim is nearly identical to one in the previous section. 
\begin{clm} \label{clmB} If $v\in S$ and $w_1,w_2,w_3$ are the neighbors of $v$ later in $\sigma$, if they exist, then \begin{equation} 1+\textrm{tokens}(v)+D+\textrm{primary}(w_1)+\textrm{primary}(w_2)+\textrm{primary}(w_3)+12\geqslant |S|\geqslant 5D/2+60.\tag{$\star\star$} \label{key-ineq2} \end{equation} \end{clm} \begin{clmproof} Fix $v\in S$. Recall that $v$ must be adjacent in $G^2$ to every vertex in $S$. Each neighbor $w$ of $v$, with $w\notin R_3$, that precedes $v$ in $\sigma$ is the source of at most one such adjacency in $G^2$ to a vertex $x$ from which $v$ receives no token (neither primary, nor secondary, nor tertiary); if $w\in R_3$, then it can create two adjacencies for $v$ in $G^2$, but one of these is accounted for by a tertiary token sent to $v$. So in total $v$ has at most $D$ such adjacencies \mbox{(that send no tokens to $v$).} Each other adjacency in $G^2$ must be accounted for either (a) by a token received by $v$ or (b) by a primary token that has been or will be received by $w_1$ or $w_2$ or $w_3$ or (c) by being an adjacency to $w_1$ or $w_2$ or $w_3$, or to a neighbor of some $w_i$ that comes later than $w_i$ in $\sigma$. At most $\textrm{tokens}(v)$ vertices of $S$ are handled by (a), and at most $3+3^2=12$ vertices of $S$ are handled by (c). The number handled by (b) is at most $\textrm{primary}(w_1)+\textrm{primary}(w_2)+\textrm{primary}(w_3)$. This implies~\eqref{key-ineq2}. \end{clmproof} Similarly to what we did in Section~\ref{sec3}, we let $\Emph{\textsc{Big}} := \{v\in V(G): \textrm{primary}(v) > \frac18D\}$, we let $\Emph{\textsc{Basic}}:= \{v\in S: \textrm{tokens}(v) < \frac14D\}$ and we let $\EmphE{\textsc{NonBasic}}{4mm}:= S\setminus \textsc{Basic}$. If $D\leqslant 2$, then $g(D)\leqslant 5$, and we are done; so instead assume $D\geqslant 3$. Note that $|\textsc{Big}| \leqslant 3|S|/(D/8) \leqslant 3(3D+1)/(D/8)= 72+24/D\leqslant 80$; the first inequality here uses Theorem~A. 
The total number of primary and secondary tokens is at most $(3+3^2)|S|$, and the total number of tertiary tokens is at most $3|R_3|<6|S| \leqslant 18D+3$. So the total number of tokens is, by Claim~\ref{clmA}, at most $12|S|+3|R_3|<18|S|\leqslant 54D+3$. Thus, $|\textsc{NonBasic}| \leqslant (54D+3)/(D/4) = 216+12/D\leqslant 220$. \begin{clm} Every vertex $v\in \textsc{Basic}$ has at least two neighbors in $\textsc{Big}$ (that are later in $\sigma$). \label{clmC} \end{clm} \begin{clmproof} If $v\in\textsc{Basic}$, then $\textrm{tokens}(v)<\frac14D$, so~\eqref{key-ineq2} implies $\textrm{primary}(w_1)+\textrm{primary}(w_2)+\textrm{primary}(w_3) \geqslant \frac54D$. By symmetry, assume $\textrm{primary}(w_1)\geqslant \textrm{primary}(w_2)\geqslant \textrm{primary}(w_3)$. By Pigeonhole, $\textrm{primary}(w_1) > \frac18D$, so $w_1\in \textsc{Big}$. Clearly, $\textrm{primary}(w_1)\leqslant D$. Thus, $\textrm{primary}(w_2)+\textrm{primary}(w_3) \geqslant \frac14D$. Again, by Pigeonhole, $\textrm{primary}(w_2)\geqslant \frac18D$, so also $w_2\in\textsc{Big}$. \end{clmproof} Let $R:=\{v\notin \textsc{Big}:|N(v)\cap (\textsc{Big}\cup\textsc{Basic})|\geqslant 3\}$ and $S^+:=\textsc{Basic}\cup \textsc{Big}$.\aside{$R$, $S^+$} \begin{clm} We have $\sum_{v\in R}(d_{S^+}(v)-2)<2|\textsc{Big}|$. \label{clmD} \end{clm} \begin{clmproof} Let $S^-:=\textsc{Basic}\setminus(R\cup\textsc{Big})$ and $G_2:=G[\textsc{Basic}\cup R\cup\textsc{Big}]$. Note that $\sum_{v\in V(G_2)}d_{G_2}(v) \geqslant 4|S^-|+2\sum_{v\in R}d_{S^+}(v)$, by Claim~\ref{clmC}. Thus, $4>\textrm{mad}(G_2) \geqslant (4|S^-|+2\sum_{v\in R}d_{S^+}(v))/(|S^-|+|R|+|\textsc{Big}|)$. So $4|S^-|+2\sum_{v\in R}d_{S^+}(v)<4(|S^-|+|R|+|\textsc{Big}|)$, which proves the claim. \end{clmproof} Let \Emph{$S'$}$:=\textsc{Basic}\setminus\textsc{Big}$. (Unlike in Section~\ref{sec3}, here possibly $\textsc{Basic}\cap\textsc{Big}\ne\emptyset$.) We form $G'$, $S'$, $\sigma'$ from $G$, $S$, $\sigma$, as follows. 
\begin{enumerate} \item[(1)] Move $\textsc{Big}$ to the end of the order, and delete all edges with neither endpoint in $S'$. \item[(2)] For each vertex $v\notin S'\cup\textsc{Big}$, delete all but two edges incident to $v$, and move $v$ to the start of the order; remove all endpoints of those deleted edges from $S'$. \item[(3)] Now consider $v\in S'$; by (2), every neighbor of $v$ later in the order is in $S'\cup \textsc{Big}$. If $v$ has at most two neighbors later in the order, then do nothing. Otherwise, delete all but two edges from $v$ to neighbors later in the order; in particular, delete edges from $v$ to two big neighbors. Remove from $S'$ vertex $v$ and any endpoints of deleted edges that were in $S'$. \end{enumerate} Denote by $G'$ the graph resulting from the process above. The resulting order $\sigma'$ is a 2-degeneracy order for $G'$, and $S'$ induces a clique in $G'^2$. All that remains is to show that $|S'|\geqslant |S|-|\textsc{NonBasic}|-3|\textsc{Big}| \geqslant |S|-220-3\times 80 = |S|-460$. Before (1) above, we have $|S'|\geqslant |\textsc{Basic}|-|\textsc{Big}| = |S|-|\textsc{NonBasic}|-|\textsc{Big}|$. It is straightforward to check that for each vertex $v$ considered in (2) and (3), all its neighbors later in the order are in $S^+$. Further, the decrease in $|S'|$ when we process $v$ is at most $\max\{0,d_{S^+}(v)-2\}$. Thus, the total decrease in $|S'|$ from (2) and (3) is at most $\sum_{v\in V(G)\setminus\textsc{Big}} \max\{0,d_{S^+}(v)-2\} =\sum_{v\in R}(d_{S^+}(v)-2)<2|\textsc{Big}|$, by Claim~\ref{clmD}. So $|S'|\geqslant |S|-|\textsc{NonBasic}|-3|\textsc{Big}|\geqslant |S|-460$. \end{proof} \section{Open Questions} In this paper we have determined, among graphs with maximum degree at most $D$, the maximum order of a clique in $G^2$, up to an additive constant, (a) when $G$ is 2-degenerate and (b) when $\textrm{mad}(G)<2\times 2$. 
Recall that $f(D)$ and $g(D)$ are the largest possible sizes of a clique in $G^2$ when $G$ has maximum degree at most $D$ and satisfies (a) and (b), respectively. It is natural to ask when $g(D)=f(D)$. \begin{ques} For which positive integers $D$ does $g(D)=f(D)$? \end{ques} We can ask analogous questions for larger values of degeneracy or maximum average degree. \begin{ques} For each positive integer $k$, what is the minimum value $\alpha_k$\aside{$\alpha_k$, $c_k$} such that there exists a constant $c_k$ such that if $G$ is $k$-degenerate with maximum degree at most $D$, then $\omega(G^2)\leqslant \alpha_kD+c_k$? \end{ques} Recall that if $G$ is $k$-degenerate with $\Delta(G)\leqslant D$, then $G^2$ has degeneracy at most $(2k-1)D-k^2$; this is witnessed by any order witnessing that $G$ is $k$-degenerate. Thus, $\alpha_k\leqslant 2k-1$. By extending the ideas in Example~\ref{example2}, we can also construct such graphs $G$ for which the degeneracy of $G^2$ is equal to $(2k-1)D-k^2$; see Example~\ref{example3} below. However, we believe that $\alpha_k<2k-1$. In fact, it is interesting to ask about $\lim_{k\to\infty}(2k-1)-\alpha_k$ and $\lim_{k\to\infty}(2k-1)/\alpha_k$. We suspect the first limit is infinite, but do not have a conjectured value for the second limit. Now we consider the larger class of graphs with $\textrm{mad}(G)<2k$. \begin{ques} For each positive integer $k$, what is the minimum value $\beta_k$\aside{$\beta_k$, $d_k$} such that there exists a constant $d_k$ such that if $\textrm{mad}(G)<2k$ and $G$ has maximum degree at most $D$, then $\omega(G^2)\leqslant \beta_kD+d_k$? \end{ques} If $\textrm{mad}(G)<2k$, then $G$ has degeneracy at most $2k-1$. Thus, by the argument above, $\beta_k\leqslant 2(2k-1)-1=4k-3$. This was significantly improved by Kierstead et al.~\cite{KYY} who showed that if $\Delta(G)\leqslant D$ and $2(k-1)<\textrm{mad}(G)\leqslant 2k$, then $G^2$ has degeneracy at most $(2k-1)D+2k$. Thus, $\beta_k\leqslant 2k-1$. 
In this paper, we proved $\alpha_2=\beta_2=5/2$. We believe this equality persists for larger values of $k$, but in general we have no conjecture for the precise value of $\alpha_k$ (and $\beta_k$). \begin{conj} For all $k\geqslant 2$, we have $\beta_k=\alpha_k$. \end{conj} \noindent In any future efforts to improve the upper bounds on $\alpha_k$ and $\beta_k$, it would be natural to try a token passing scheme, similar to what we used in Sections~\ref{sec3} and~\ref{sec4}. This paper was motivated by a question of Hocquard, Kim, and Pierron: Given a positive integer $D$, what is the maximum value of $\chi(G^2)$ over all graphs $G$ with maximum degree $D$ such that (a) $G$ is 2-degenerate or (b) $\textrm{mad}(G)<4$? It is natural to generalize these questions. \begin{ques} \label{HKM-gen-ques} Fix positive integers $D$ and $k$. (a) What is the maximum value of $\chi(G^2)$ over all graphs $G$ with maximum degree $D$ that are $k$-degenerate? (b) What is the maximum value of $\chi(G^2)$ over all graphs $G$ with maximum degree $D$ such that $\textrm{mad}(G)<2k$? \end{ques} In an effort to attack Question~\ref{HKM-gen-ques}, it would be natural to try to improve the degeneracy bound for $G^2$, mentioned above, proved by Kierstead et al. We end this short section by observing that this bound is nearly the best possible. Essentially, we generalize Example~\ref{example2} to $k$-degenerate graphs for each larger $k$. (Recall that each $k$-degenerate graph $G$ has $\textrm{mad}(G)<2k$.) For the proof, we will use the following well-known result of Hajnal and Szemer\'{e}di~\cite{HS}. \begin{thmHS}[\cite{HS}] If $G$ is a graph with $|V(G)|=(k+1)r$ and $\Delta(G)\leqslant k$, for positive integers $r$ and $k$, then $G$ has a proper coloring with $k+1$ color classes each~of~size~$r$. 
\end{thmHS} \begin{example} \label{example3} For all integers $k$ and $D$ with $D\geqslant k\geqslant 2$, we can construct a graph $H_{D,k}$ that is $k$-degenerate with maximum degree $D$ but such that $H_{D,k}^2$ is not $((2k-1)D-k^2-1)$-degenerate. We essentially generalize Example~\ref{example2}. We want to partition some $S\subseteq V(G)$ into parts of size $D$ and repeat this step $k$ times, with no two vertices in the same part in more than one step. Afterward, we want to partition $S$ into parts of size $k$, repeated $D-k$ times, such that each pair of vertices appears in a common part (over all $k+(D-k)$ times) at most once. In fact, all of this can be done by repeated application of the Hajnal--Szemer\'{e}di Theorem. Fix a set $S$ s.t. $|S|=2kD^2$. We will build a graph $G$ with $S\subseteq V(G)$ and also build an auxiliary graph $J$ with $V(J)=S$, which will aid in the construction of $G$. (At the end, we will let $H_{D,k}:=G$.) Initially, let $V(G)=V(J)=S$ and $E(G)=E(J)=\emptyset$. Trivially, we can partition $V(J)$ into $|S|/D$ independent sets, each of size $D$. For each part $P_i$ in this partition, add to $G$ a vertex $w_i$ adjacent to precisely the vertices in $P_i$. Moreover, add to $E(J)$ the edge $vv'$ for each pair $v,v'$ in a common part. Now we again partition $V(J)$ into $|S|/D$ independent sets, each of size $D$; this time we use Hajnal--Szemer\'{e}di. We repeat this process, partitioning $V(J)$ a total of $k$ times, each time into $|S|/D$ parts, and each inducing no edges. After each partition, we add the prescribed edges to $E(J)$ and $E(G)$. This ensures that each pair of vertices in $S$ appear in a common part in at most one partition. In a second phase we partition $|S|$ into parts of size $k$, and we do this $D-k$ times. After the final partition, the degree of each vertex in $J$ is exactly $k(D-1)+(D-k)(k-1)\leqslant D(2k-1)$. Thus, $H^2_{D,k}[S]$ has minimum degree $k(D-1)+(D-k)(k-1)$. 
We do not really care about the number of color classes, just that the size of each is $D$ (in the first phase) or $k$ (in the second phase). Although we required above that $|S|=2kD^2$, we note that the same construction works whenever $|S|\geqslant 2kD^2$ and $kD\big| |S|$. Let $H_{D,k}:=G$. We verify, as follows, that $H_{D,k}$ is $k$-degenerate. Each vertex $w_i$ arising from a part of size $k$ has degree exactly $k$. So all of these $w_i$ can be deleted first. Next, we can delete all vertices of $S$, since each of them has exactly $k$ remaining neighbors. Finally, we can delete all vertices $w_i$ arising from parts of size $D$ (which form an independent set). \exampleEnd \end{example} \end{document}
\begin{document} \title{Robust Merging of Information\footnote{We are grateful to Nageeb Ali, Marc Henry, Jiangtao Li, Elliot Lipnowski, Pietro Ortoleva, and Shamim Sinnar for valuable comments.} } \author{Henrique de Oliveira\thanks{S\~{a}o Paulo School of Economics - FGV. Email: \href{mailto:[email protected]}{[email protected]} } \and Yuhta Ishii\thanks{ Pennsylvania State University. Email: \href{mailto:[email protected]}{[email protected]} } \and Xiao Lin\thanks{ Pennsylvania State University. Email: \href{mailto:[email protected]}{[email protected]} } } \date{\today} \maketitle \abstract{ When multiple sources of information are available, any decision must take into account their correlation. If information about this correlation is lacking, an agent may find it desirable to make a decision that is robust to possible correlations. Our main results characterize the strategies that are robust to possible hidden correlations. In particular, with two states and two actions, the robustly optimal strategy pays attention to a single information source, ignoring all others. More generally, the robustly optimal strategy may need to combine multiple information sources, but can be constructed quite simply by using a decomposition of the original problem into separate decision problems, each requiring attention to only one information source. An implication is that an information source generates value to the agent if and only if it is best for at least one of these decomposed problems. } \section{Introduction} During the COVID-19 pandemic, testing has been essential in effectively monitoring the transmission of the virus. 
Two prevalent diagnostic tests are the molecular and antigen tests, which differ in checking the virus's genetic materials or specific proteins.\footnote{For more information regarding these tests, see for example \url{https://www.fda.gov/health-professionals/closer-look-covid-19-diagnostic-testing}.} It might then be appealing to use both tests.\footnote{Taking both tests is indeed recommended by FDA: ``(for antigen test) positive results are usually highly accurate, \dots negative results may need to be confirmed with a molecular test.'' Some medical providers always require one to take both tests.} However, in order to correctly interpret the joint pair of results from the two tests, knowledge of their correlation is crucial. For example, conditional on the molecular test producing a false negative, what is the probability that the antigen test also yields a false negative? Although the likelihoods of false positives and false negatives for each test are well-understood, data regarding the correlations between these tests is scarce.\footnote{See for example \citet{dinnes2020rapid}.} With such limited information about these correlations, how is a health authority supposed to make use of the results of both tests? Such unclear correlation between information sources is a common difficulty in practical decision problems. For example, someone might have access to the opinions of multiple experts (such as doctors), but these experts might use similar specialized sources (such as a flawed study). In this paper, we assume that the agent fully understands each information source in isolation, but has no knowledge about the correlations between different information sources. We then look for strategies that are robust to such correlation, by considering the worst possible correlation that could occur. Our main results characterize robustly optimal strategies. The simplest characterization occurs when we have two states and two actions. 
In that case, to guard against hidden correlation, one must resort to a rather extreme measure: the optimal robust strategy involves paying attention to a single information source, ignoring all others. In the example of the health authority, since the relevant state is whether the patient is infected with COVID-19 or not, if the decision to be made is whether to put the patient in quarantine or not, our result implies that only one test should be considered. Even if both tests have already been administered or are completely costless, the health authority should still ignore one of them. In more general settings, this extreme measure is no longer necessary and it can be beneficial to use multiple information sources. However, we show a method of finding robust strategies that consists of decomposing a decision problem into subproblems, each requiring the use of a single information source. This shows the precise way in which information sources should be merged. In general, this decomposition can depend on the information sources, but we also show that, with two states, there is a canonical decomposition of a decision problem into binary action problems that is independent of information sources. Finally, these characterizations of the robustly optimal strategy provide normative guidelines for constructing strategies that are robust to potentially misspecified correlations and reduce the computational burden of finding such strategies. They also provide an alternative explanation for some behavioral patterns documented empirically, such as when decision makers ignore free information in making their decisions. \section{Related Literature} Our paper provides practical robust strategies to deal with possible hidden correlation. The practice of finding robust strategies dates back at least to \citet{wald1950statistical} and our modeling of information structures follows that of \citet{blackwell1953equivalent}. 
Our way of modeling robustness, by considering the worst case scenario, also is in line with the literature on ambiguity aversion, going back to \citet{gilboa1989maxmin}. More recently, \citet{epstein2019ambiguous} run an experiment that documents ambiguity aversion on correlation structures. More closely related, some papers consider strategies that are robust to unknown correlations in different contexts. In particular, \citet{carroll2017robustness} studies a multi-dimensional screening problem, where the principal knows only the marginals of the agent's type distribution, and designs a mechanism that is robust to all possible correlation structures. With similar robustness concerns regarding the correlations of values between different bidders, \citet{HeLi2020} study an auctioneer's robust design problem when selling a single indivisible good to a group of bidders. A recent thread of related literature similarly studies how a decision maker combines forecasts from multiple sources. \citet{levy2020combining} consider a model where the decision maker can consult multiple forecasts (posterior beliefs), but is uncertain about the information structures that generate these forecasts. \citet{razin2020drowning} study a maximum likelihood approach of combining forecasts, and derive a novel result that only extreme forecasts will be used. A key distinction is that the aforementioned papers consider robust optimality from an interim approach, while we study the decision maker's robustly optimal ex-ante decision plan. Finally, \citet*{ArieliE12135} also study features of the robustly optimal ex-ante decision plans. 
An important difference is that they study robust aggregation in a specific decision problem while we characterize the robustly optimal ex-ante decision plan in general decision problems.\footnote{More specifically, they study the robustly optimal decision plan in the sense of worst-case regret when the decision maker must make a forecast $x \in [0,1]$ in the unit interval, and incurs a quadratic loss of $- (\omega - x)^2$ conditional on the true state, $\omega \in \{0,1\}$.} Moreover, \citet*{ArieliE12135} study robust aggregation when the decision maker has limited knowledge of the distribution of posteriors/signals generated by each expert. In contrast, in order to focus our analysis on robustness concerns about correlations between information sources, we assume in our model that the decision maker possesses a perfect understanding of the marginal distributions of signals of each expert/information source in isolation. \section{Model}\label{section-model} An agent faces a decision problem $\Gamma\equiv(\Theta,\nu,A,\varrho)$ with \textbf{binary} state space $\Theta=\{1,2\}$, prior $\nu\in\Delta\Theta$, finite action space $A$, and utility function $\varrho:\Theta\times A\rightarrow \mathbb{R}$. To later simplify notation, define $u(\theta,a)=\nu(\theta)\varrho(\theta,a)$, which represents the prior-weighted utility function. A marginal experiment $P_j:\Theta\rightarrow\Delta Y_j$ maps each state to a distribution over some finite signal set $Y_j$. The agent can observe the realizations of multiple marginal experiments $\{P_j\}_{j=1}^m$, but does not have detailed knowledge of the joint. To simplify notation, let $\mathbf{Y}=Y_1\times \cdots \times Y_m$ denote the set of possible observations the agent can see. Thus, the agent conceives of the following set of joint experiments: \[ \mathcal{P}(P_1,...,P_m)=\left\{ P:\Theta\rightarrow \Delta(\mathbf{Y}): \sum_{y_{-j}}P(y_1,\ldots,y_m|\theta)=P_j(y_j|\theta)\text{ for all }\theta,j,y_j \right\}. 
\] A strategy for the agent is a mapping $\sigma:\mathbf{Y}\rightarrow \Delta (A)$, and the set of all strategies is denoted by $\Sigma$. The agent's problem is to maximize his/her expected utility robustly among the set of possible joint experiments (i.e. considering the worst possible joint experiment): \[ V(P_1, \ldots , P_m):= \max_{\sigma\in\Sigma} \min_{P \in \mathcal{P}(P_1, \ldots , P_m)} \sum_{\theta\in\Theta}\sum_{(y_1, \ldots , y_m) \in \mathbf{Y}} P(y_1, \ldots , y_m|\theta)u(\theta,\sigma(y_1, \ldots , y_m)). \] We call a solution to the problem a \textbf{robustly optimal} strategy. Clearly if only one experiment $P:\Theta\rightarrow \Delta (Y)$ is considered ($m=1$), $V(P)$ is the same as the classical value of a Blackwell experiment, and a robustly optimal strategy is just an optimal strategy for a Bayesian agent. \subsection{The Blackwell order } It will be useful to rank experiments according to how much information they convey. For that, we will use the Blackwell order, which we review in this subsection for completeness. The reader familiar with the Blackwell order may skip this part. \begin{defn} $P:\Theta\rightarrow\Delta(Y)$ is more informative than $Q:\Theta\rightarrow\Delta(Z)$ if, for every decision problem, we have the inequality $V(P)\geqslant V(Q)$. We also say that $P$ Blackwell dominates $Q$. \end{defn} There are two other natural ways of ranking experiments by informativeness. The first uses the notion of a \emph{garbling}. \begin{defn} $Q:\Theta\rightarrow\Delta(Z)$ is a garbling of $P:\Theta\rightarrow\Delta(Y)$ if there exists a function $g:Y\rightarrow \Delta(Z)$ such that \[ Q(z|\theta)=\sum_{y}g(z|y)P(y|\theta). \] The function $g$ is then called ``the garbling''. \end{defn} Thus $Q$ is a garbling of $P$ when one can replicate $Q$ by ``adding noise'' to the signal generated from $P$. Another notion of informativeness can be obtained by considering the strategies that are feasible given the experiment. 
\begin{defn} Given a set of actions $A$ and an experiment $P:\Theta\rightarrow\Delta(Y)$, the feasible set of actions given $P$ is \[ \Lambda_P=\left\{\lambda:\Theta\rightarrow \Delta A\:\Big|\:\lambda(a|\theta)=\sum_y \sigma(a|y)P(y|\theta) \text{ for some } \sigma:Y\rightarrow \Delta(A)\right\}. \] \end{defn} The feasible set of an experiment specifies what conditional action distributions can be obtained by some choice of strategy $\sigma$ (see \autoref{zonotope}). One might then try to rank the informativeness of experiments according to the size of the feasible set. Blackwell's Informativeness Theorem states that these three rankings of informativeness are equivalent (for a proof, see \citet{blackwell1953equivalent} or \citet{de2018blackwell}.) \begin{thmbw} The following statements are equivalent \begin{enumerate} \item $P$ is more informative than $Q$; \item $Q$ is a garbling of $P$; \item For all sets $A$, $\Lambda_Q\subseteq \Lambda_P$. \end{enumerate} \end{thmbw} It should be clear from the definitions that the Blackwell order is not complete---it is possible for two experiments to be unranked. It is also possible for two different experiments to be \emph{equivalent}, in the sense that each Blackwell dominates the other. For example, we can change the labels in the signal set while keeping the probabilities the same. This lack of uniqueness is easily remedied by considering equivalence classes of experiments when necessary. \subsection{The Blackwell supremum} In the next section, we will use some lattice properties of the Blackwell order. In particular, the concept of a Blackwell supremum will be useful. \begin{defn} Let $P$ and $Q$ be two arbitrary experiments. 
We say that $R$ is the Blackwell supremum of $P$ and $Q$ if \begin{enumerate} \item $R$ is more informative than $P$ and $Q$; \item If $S$ is more informative than $P$ and $Q$, then $S$ is also more informative than $R$. \end{enumerate} The definition generalizes immediately to any number of experiments. It is immediate from the definition that, if there are two Blackwell suprema, they must Blackwell dominate each other. Hence, by considering the equivalence class of equally informative experiments, we can say that the Blackwell supremum is unique. However, when $|\Theta|>2$ a Blackwell supremum may not exist (see \cite{bertschinger2014blackwell}, example 18). Fortunately for us, the Blackwell order does form a lattice when $|\Theta|=2$. In particular, the existence of a Blackwell supremum will be useful. \begin{lem}\label{lem:blackwellsup} When $|\Theta|=2$, the Blackwell supremum always exists. \end{lem} For a proof, see \cite{bertschinger2014blackwell}, proposition 16. The Blackwell supremum can also be characterized using the feasible set. If $R$ is the Blackwell supremum of $P$ and $Q$, we know from Blackwell's Theorem that $\Lambda_{R}$ must contain both $\Lambda_P$ and $\Lambda_Q$. Moreover, if $S$ is more informative than $P$ and $Q$, it must be more informative than $R$ as well, so $\Lambda_S$ must also contain $\Lambda_{R}$. Hence the feasible set of the Blackwell supremum should be the smallest feasible set containing $\Lambda_P\cup\Lambda_Q$. Since the feasible set is always convex, a candidate feasible set is $\mathrm{co}(\Lambda_P\cup\Lambda_Q)$. If such an $R$ exists satisfying $\Lambda_R=\mathrm{co}(\Lambda_P\cup\Lambda_Q)$, it must be the Blackwell supremum (see \autoref{zonotope}). The difficulty lies in showing that such an $R$ exists, and that's where the proof of existence fails when $|\Theta|>2$. 
Another useful property of the Blackwell order when $|\Theta|=2$ is that it is characterized by the feasible set with only two actions---$P$ is more informative than $Q$ if for any set $A$ with $|A|=2$, we have $\Lambda_Q\subseteq \Lambda_P$ (see \cite{blackwell1953equivalent}, Theorem 10). We can use this property to obtain a characterization of the Blackwell supremum. \begin{lem}\label{lem:blackwellsupfeasible} Suppose $|\Theta|=2$ and $|A|=2$. Then $R$ is the Blackwell supremum of $P$ and $Q$ if and only if $\Lambda_R=co(\Lambda_P\cup\Lambda_Q)$ \end{lem} In fact, this result can be used to show the existence of the Blackwell supremum when $|\Theta|=2$. \begin{figure}\end{figure} \section{Nature's Problem} Most of our focus will be on the robustly optimal strategies for the agent, but it will be helpful to first understand Nature's problem, of choosing the worst possible correlation structure. First note that since the objective function is linear in both $\sigma$ and $P$, and the choice sets of $\sigma$ and $P$ are both convex and compact, the minimax theorem implies that \[ V(P_1, \ldots , P_m)= \min_{P \in \mathcal{P}(P_1, \ldots , P_m)} \max_{\sigma\in\Sigma} \sum_{\theta\in\Theta}\sum_{(y_1, \ldots , y_m) \in \mathbf{Y}} P(y_1, \ldots , y_m|\theta) u(\theta,\sigma(y_1, \ldots , y_m)). \] That is, the value of the agent's maxmin problem equals the value of a minmax problem where Nature chooses an experiment in the set $\mathcal{P}(P_1, \ldots , P_m)$ to minimize a Bayesian agent's value in the decision problem. An immediate observation is that if there exists a Blackwell least informative element in the set $\mathcal{P}(P_1, \ldots , P_m)$, it would solve Nature's problem---any other information structure would yield a higher value for the agent. Notice that every experiment in $\mathcal{P}(P_1, \ldots , P_m)$ must be more informative than every $P_j$, since the projection into the $j$th coordinate defines a garbling. 
By Lemma \ref{lem:blackwellsup}, there is a \emph{Blackwell supremum}---the least informative experiment that Blackwell dominates every $P_j$. The only question that remains is whether this Blackwell supremum can be expressed as a joint distribution with marginals $P_1,\ldots, P_m$. This is proved in the following lemma. \begin{lem}\label{lem:supjoint} For any collection of experiments $\{P_j\}_{j=1}^m$, there exists a Blackwell supremum $\overline{P}(P_1,\ldots,P_m)\in \mathcal{P}(P_1, \ldots , P_m)$ so that for any $P\in \mathcal{P}(P_1, \ldots , P_m)$, $V(\overline{P}(P_1,\ldots,P_m))\leq V(P)$. \end{lem} \begin{proof} See Appendix~\ref{proof:blackwellsup}. \end{proof} Immediately from the lemma, we have the following proposition. \begin{prop}\label{value} $$V(P_1,...,P_m)=V(\overline{P}(P_1,...,P_m))$$ where $\overline{P}(P_1,...,P_m)$ is a Blackwell supremum of experiments $\{P_1,...,P_m\}$. \end{prop} Thus, the agent's value from using a robust strategy is the same as the value she would obtain if she faced a single experiment---the Blackwell supremum of all marginal experiments. Moreover, the Blackwell supremum depends only on the marginal experiments, and not on the particular decision problem. \section{Binary Action Decision Problems}\label{sec:binarybinary} While \autoref{value} provides a useful characterization of the agent's value, it still does not answer our main question: what are the robust strategies? This is because a strategy may be a best response to the Blackwell supremum $\bar{P}(P_1, \ldots , P_m)$, without being a robustly optimal strategy. In particular, the Blackwell supremum typically specifies a probability of zero for many signal realizations, so that any action is a best response to those signal realizations. But if we fix a strategy that chooses a particularly bad action after such a signal realization, it might be a best response for Nature to make it occur with positive probability. 
So we now turn to the question of finding the optimal robust strategies. For any decision problem, one simple strategy that can always be used is to choose exactly one experiment $Q \in \{P_1, \ldots ,P_m\}$ and play the optimal strategy that uses that information alone, ignoring the signal realizations of all other experiments. By choosing $Q$ optimally, the agent achieves an ex-ante expected payoff of $\max_{j = 1, \ldots , n} V(P_j)$, regardless of the particular actual joint experiment $P \in \mathcal{P}(P_1, \ldots , P_m)$. \autoref{binary} shows that if the decision problem has binary action, this is indeed a robustly optimal strategy. \begin{thm}\label{binary} If $|A| = 2$, then \[ V(P_1, \ldots , P_m) = V(\overline{P}(P_1, \ldots , P_m)) = \max_{j = 1, \ldots , m}V(P_j). \] \end{thm} \begin{proof} By \autoref{value}, it suffices to show that $V(\overline{P}(P_1,...,P_m))=\max_{j=1,...,m}V(P_j)$. By \autoref{lem:blackwellsupfeasible}, an experiment $\overline{P}$ is the Blackwell supremum of $P_1,\ldots,P_m$ if and only if \begin{equation}\label{convex_hull} \Lambda_{\overline{P}} =co\left(\Lambda_{P_1}\cup \cdots \cup \Lambda_{P_m}\right) \end{equation} Now, the maximum utility achievable given Blackwell experiment $\overline{P}(P_1, \ldots , P_m)$ is $V(\overline{P})=\max_{\lambda\in \Lambda_{\overline{P}}}\sum_{a,\theta} u(\theta,a)\lambda(a|\theta)$. Since the maximand is linear in $\lambda$, the maximum is achieved at an extreme point of $\Lambda_{\overline{P}}$. By (\ref{convex_hull}), an extreme point of $\Lambda_{\overline{P}}$ must belong to some $\Lambda_{P_j}$. Hence, we have $$V(\overline{P})=\max_{\lambda\in \Lambda_{P_j}}\sum_{a,\theta} u(\theta,a)\lambda(a|\theta)=\max_{j=1,...,m}V(P_j).$$ \end{proof} \begin{figure} \caption{The maximum is achieved at an extreme point} \label{extreme} \end{figure} The idea of \autoref{binary} can be visualized in \autoref{extreme} for two marginal experiments. 
Each marginal Blackwell experiment $P_1,P_2$ can be represented by $\Lambda_{P_1},\Lambda_{P_2}$, the set of feasible state-action distribution generated by the experiment. The corresponding $\Lambda_{\overline{P}}$ for Blackwell supremum $\overline{P}$ is the convex hull of $\Lambda_{P_1}\cup \Lambda_{P_2}$. Since the utility function is linear with respect to $\lambda \in \Lambda_{\overline{P}}$, the maximum is achieved at an extreme point, which belongs to either $\Lambda_{P_1}$ or $\Lambda_{P_2}$, and thus can be achieved by using a single marginal experiment. \section{General Decision Problems}\label{sec:binarygen} \autoref{binary} allows us to solve any binary action decision problem in a fairly simple way: finding the best marginal information source and best responding to it. For decision problem with more actions, a robustly optimal strategy may need to use multiple information sources. To understand how optimal robust strategies work in general, we start our discussion in \autoref{sec:example} and \autoref{sec:composition} with a simple class of decision problems: those that can be written as a composition of multiple binary action problems. For these problems, we show that the optimal strategy can be obtained by simply ``adding up'' the optimal strategies for the isolated binary action problems. Finally, in \autoref{sec:decompose} and \autoref{sec:canonical}, we show that this simple class of decision problems is exhaustive---\textbf{any} finite action decision problem can be decomposed into binary action decision problems. \subsection{An Example}\label{sec:example} We start with an example which showcases how an agent can benefit from using information from multiple sources when she faces a more complex problem. \begin{ex}\label{ex-portfolio} An investor can invest in two assets whose outputs depend on an unknown binary state $\theta\in\{1,2\}$. 
Outputs from each asset are given by: \hspace{0.5in}\begin{tabular}{ |c | c |c|} \multicolumn{3}{c}{Asset 1} \\\hline & Invest & Not Invest\\ \hline $\theta=1$ & $2$ & $0$\\ \hline $\theta=2$ & $-1$ & $0$\\ \hline \end{tabular} \begin{tabular}{ |c | c |c|} \multicolumn{3}{c}{Asset 2} \\\hline & Invest & Not Invest\\ \hline $\theta=1$ & $-1$ & $0$\\ \hline $\theta=2$ & $2$ & $0$\\ \hline \end{tabular} \hspace{0.5in} \noindent The investor's payoff is the sum of outputs from both assets. This can be written as a decision problem with $A=\{I,NI\}\times \{I,NI\}$ and $u(\theta,a)=u_1(\theta,a_1)+u_2(\theta,a_2)$ where $a_1,a_2\in\{I,NI\}$ and $u_1,u_2$ are the outputs function given in the table above.\footnote{Recall that $u(\theta,a)=\nu(\theta)\rho(\theta,a)$, so the payoffs here have been weighted by the prior.} Suppose the investor has access to two experiments $P_1$, $P_2$: \hspace{0.5in}\begin{tabular}{ |c | c |c|} \multicolumn{3}{c}{$P_1$} \\\hline & $y_1=1$ & $y_1=0$\\ \hline $\theta=1$ & $0.9$ & $0.1$\\ \hline $\theta=2$ & $0.5$ & $0.5$\\ \hline \end{tabular} \begin{tabular}{ |c | c |c|} \multicolumn{3}{c}{$P_2$} \\ \hline & $y_2=1$ & $y_2=0$\\ \hline $\theta=1$ & $0.5$ & $0.5$\\ \hline $\theta=2$ & $0.9$ & $0.1$\\ \hline \end{tabular} \hspace{0.5in} By paying attention to one experiment, for example $P_1$, the optimal strategy is to invest in both assets if $y_1=1$ and only asset $2$ if $y_1=0$. The expected payoff from this strategy is thus $0.9\cdot 1+0.1\cdot (-1)+0.5\cdot 1+0.5\cdot 2=2.3$. Now suppose the investor makes the investment decision of asset 1 based on experiment $P_1$, and asset 2 based on experiment $P_2$. Then for asset $i=1,2$, the optimal strategy is to invest iff $y_{i}=1$. 
``Adding up'' these two strategies yield: \begin{center} \begin{tabular}{ |c | c |c|} \hline & $y_2=1$ & $y_2=0$\\ \hline $y_1=1$ & Invest in both & Invest in asset 1\\ \hline $y_1=0$ & Invest in asset 2 & No investment\\ \hline \end{tabular} \end{center} This strategy guarantees an expected output of $0.9\cdot 2+0.1\cdot 0+0.5\cdot (-1)+0.5\cdot 0=1.3$ from each asset regardless of the correlations, which gives a total output of $2.6>2.3$. So the agent strictly benefits from utilizing information from both information sources. \end{ex} The strategy constructed in \autoref{ex-portfolio} is in fact a robustly optimal strategy. There are two special structures of this example: 1. The action space is a product space of binary action spaces; 2. The payoff function can be written in an additively separable form of binary action problems. These two features enable us to find a robustly optimal strategy in a fairly simple way: find the robustly optimal strategy for each binary action problem via \autoref{binary}, and then ``add them up''. We will generalize and formalize this idea in the next section. \subsection{Composition of Decision Problems}\label{sec:composition} Recall that we define a decision problem as $\Gamma\equiv (\Theta,\nu,A,\rho)$, which can simply be summarized by $(A,u)$ where $u(\theta,a)=\nu(\theta)\rho(\theta,a)$. Since in this section we are going to alter the decision problems along the analyses, we use $V(P_1,...,P_m;(A,u))$ to denote the agent's value in decision problem $(A,u)$. \begin{defn} Given a finite collection of decision problems $(A_1,u_1),...,(A_n,u_n)$, their composition, denoted by $\bigoplus_{\ell=1}^k (A_\ell,u_\ell)$, is a decision problem with action space $A=(A_1\times\ldots\times A_k)$ and $u(\theta,\mathbf{a})=\sum_{\ell=1}^k u_\ell(\theta,a_\ell)$. \end{defn} Thus, the composition of decision problems is a single decision problem that has a specific additively separable structure. 
Notice that the decision problem in \autoref{ex-portfolio} is a composition of two decision problems $A_1=\{I_1,N_1\}$, $A_2=\{I_2,N_2\}$ and $u_1(\cdot,I_1)=(2,-1)$, $u_1(\cdot,N_1)=(0,0)$, $u_2(\cdot,I_2)=(-1,2)$, $u_2(\cdot,N_2)=(0,0)$. Consider a finite collection of \textbf{binary} action problems, $(A_1, u_1), \ldots , (A_k, u_k)$, and consider the composition of these problems $(\bar{A}, \bar{U}) := \bigoplus_{\ell = 1}^k (A_\ell, u_\ell)$. In this decision problem, a simple, robust strategy that an agent can always use is to choose exactly one experiment $Q_\ell \in \{P_1, \ldots , P_m\}$ for every binary problem $\ell$ and play the optimal strategy that uses that information alone, ignoring the signal realizations of all other experiments. Furthermore, by choosing this $Q_\ell$ optimally for each $\ell$, regardless of the actual joint experiment $P \in \mathcal{P}(P_1, \ldots , P_m)$, the agent can achieve a total ex-ante utility of $\sum_{\ell = 1}^{k} \max_{j = 1, \ldots , m}V(P_j, (A_\ell, u_\ell))$, which is typically strictly greater than $\max_{j = 1, \ldots , m} V(P_j, (\bar{A}, \bar{U}))$. The following lemma shows that this is indeed the best that the agent can do in $(\bar{A}, \bar{U})$. \begin{lem}\label{lem:decomposed} Let $(A_1, u_1), \ldots , (A_k, u_k)$ be a finite collection of \textbf{binary} action problems. Then \[ V\left(P_1, \ldots , P_m; \bigoplus_{\ell = 1}^{k} (A_\ell, u_\ell)\right) = \sum_{\ell = 1}^{k} \max_{j = 1, \ldots, m} V(P_j; (A_\ell, u_\ell)). \] Moreover, let $\sigma_\ell:\mathbf{Y}\rightarrow \Delta A_\ell$ be a robustly optimal strategy for decision problem $(A_\ell,u_\ell)$. Then $\sigma:\mathbf{Y}\rightarrow \Delta (A_1\times ...\times A_k)$ defined by \begin{equation}\label{eq:assemble} \sigma(y_1,...,y_m)=\bigg(\sigma_\ell(y_1,...,y_m)\bigg)_{\ell=1}^k\quad\text{ for all }y_1,...,y_m \end{equation} is a robustly optimal strategy for decision problem $\bigoplus_{\ell = 1}^{k} (A_\ell, u_\ell)$. 
\end{lem} \begin{proof} Using \autoref{value}, $V\left(P_1, \ldots , P_m; \bigoplus_{\ell = 1}^{k} (A_\ell, u_\ell)\right) = V\left(\overline{P}(P_1, \ldots , P_m); \bigoplus_{\ell = 1}^{k} (A_\ell, u_\ell) \right)$. By \autoref{binary}, we then have: \[ V\left(\overline{P}(P_1, \ldots , P_m); \bigoplus_{\ell = 1}^{k} (A_\ell, u_\ell) \right) = \sum_{\ell = 1}^{k} V(\overline{P}(P_1, \ldots , P_m); (A_\ell, u_\ell)) = \sum_{\ell = 1}^{k} \max_{j = 1, \ldots , m}V(P_j, (A_\ell,u_\ell)). \] To see the second statement, for any $P\in\mathcal{P}(P_1,...,P_m)$, the agent's payoff from strategy $\sigma$ is \begin{align*} \sum_{\theta\in\Theta}\sum_{y_1,...,y_m} P(y_1,...,y_m|\theta) \sum_{\ell=1}^k u_\ell(\theta,\sigma_\ell(y_1,...,y_m))&= \sum_{\ell=1}^k\sum_{\theta\in\Theta}\sum_{y_1,...,y_m} P(y_1,...,y_m|\theta) u_\ell(\theta,\sigma_\ell(y_1,...,y_m))\\ &\geq \sum_{\ell=1}^k V(P_1,...,P_m;(A_\ell,u_\ell))\\ &= V\left(P_1, \ldots , P_m; \bigoplus_{\ell = 1}^{k} (A_\ell, u_\ell)\right) \end{align*} Since $\sigma$ guarantees the maxmin value regardless of $P$, it is a robustly optimal strategy. \end{proof} \autoref{lem:decomposed} provides a simple solution to any problem that can be expressed as a composition of binary action problem: For each binary action problem, one can derive a robustly optimal strategy by paying attention to the best marginal experiment and best responding to it. Then assembling these strategies as in \eqref{eq:assemble} yields a robustly optimal strategy for the composite problem. \subsection{Decomposition of decision problems}\label{sec:decompose} In the previous section, we saw how a problem that is a composition of binary action problems can be solved by combining the solutions of each binary action problem. It is natural to ask then what problems can be decomposed into binary action problems. As we will see in the next section, it turns out that such a decomposition is possible for any decision problem. 
Before we get to that, we must define precisely what it means to decompose a decision problem and for that we need a notion of equivalence between decision problems. For each decision problem $(A,u)$, define the associated polyhedron containing all payoff vectors that are either achievable or weakly dominated by some mixed action:\footnote{Here and in what follows, whenever $+$ and $-$ are used in the operations of sets, they denote the Minkowski sum and difference.} \[\mathcal{H}(A,u)=co\{u(\cdot,a):a\in A\}-\mathbb{R}_+^{2}.\] An example of $\mathcal{H}(A,u)$ is depicted in \autoref{figure-actionorder}. \begin{figure} \caption{The shaded area represents $\mathcal{H} \label{figure-actionorder} \end{figure} Whenever $\mathcal{H}(A',u')= \mathcal{H}(A,u)$, the two decision problems $\mathcal{H}(A',u')$ and $\mathcal{H}(A,u)$ give the same value under any information environment, so we call them \emph{equivalent}. \begin{defn} A decision problem $(A,u)$ is equivalent to another decision problem $(A',u')$ if \[\mathcal{H}(A',u')= \mathcal{H}(A,u).\] \end{defn} Now we can define decomposition, the inverse operation of composition: \begin{defn} A decision problem $(A,u)$ admits a decomposition $\{(A_\ell,u_\ell)\}_{\ell=1}^k$ if $(A,U)$ is equivalent to $\bigoplus_{\ell=1}^k (A_\ell,u_\ell)$. \end{defn} \begin{ex}\label{ex-decomposition} Consider two decision problems $A_1=\{I_1,N_1\}$, $u_1(I_1)=(2,-1)$, $u_1(N_1)=(0,0)$ and $A_2=\{I_2,N_2\}$, $u_2(I_2)=(-1,2)$, $u_2(N_2)=(0,0)$. The associated polyhedra are the blue/red shaded areas in \autoref{figure-ex-composition}(a). Their composition $(A_1,u_1)\bigoplus (A_2,u_2)$ consists of four actions, which are depicted in \autoref{figure-ex-composition}(b). Now we consider a three-action decision problem $A=\{a_1,a_2,a_3\}$ with $u(a_1)=(-1,2), u(a_2)=(1,1)$, and $u(a_3)=(2,-1)$. 
Notice that $\mathcal{H}(A,u)=\mathcal{H}((A_1,u_1)\bigoplus (A_2,u_2))$ as the shaded area in \autoref{figure-ex-composition}(b), so $(A,u)$ is equivalent to $(A_1,u_1)\bigoplus (A_2,u_2)$. Therefore, $(A_1,u_1),(A_2,u_2)$ is a decomposition of $(A,u)$. \end{ex} \begin{figure}\end{figure} The analyses in the previous section give some hint on how to find robustly optimal strategies for general decision problems. If a given decision problem $(A, u)$ admits a decomposition $(A_1, u_1), \ldots , (A_k, u_k)$ where each $(A_\ell,u_\ell)$ is a \textbf{binary} action problem, then it is immediately clear by \autoref{lem:decomposed} and $\mathcal{H}\left( A, u\right) = \mathcal{H}\left(\bigoplus_{\ell =1}^{k}(A_\ell, u_\ell) \right)$ that \begin{align} V(P_1, \ldots P_m; (A, u)) = V\left(P_1, \ldots , P_m; \bigoplus_{\ell = 1}^{k} (A_\ell, u_\ell)\right) = \sum_{\ell = 1}^{k} \max_{j = 1, \ldots , m}V(P_j; (A_\ell, u_\ell)). \label{eqn:canonical} \end{align} Moreover, the robustly optimal strategy for $\bigoplus_{\ell = 1}^{k} (A_\ell, u_\ell)$, defined in \eqref{eq:assemble}, allows us to characterize robustly optimal strategies for $(A,u)$ as by the following lemma. \begin{lem}\label{lem:dominate} Suppose $(A,u)$ is equivalent to $\bigoplus_{\ell = 1}^{k} (A_\ell, u_\ell)$, and $\sigma:\mathbf{Y}\rightarrow \Delta(A_1\times\ldots\times A_k)$ is a robustly optimal strategy for $\bigoplus_{\ell = 1}^{k} (A_\ell, u_\ell)$, then there exists $\sigma^*:\mathbf{Y}\rightarrow \Delta A$ such that \[u(\sigma^*(\mathbf{y}))\geq \sum_{\ell=1}^k u_\ell(\sigma_\ell(\mathbf{y})),\quad\text{ for all }\mathbf{y} \in \mathbf{Y}.\] Moreover, any such $\sigma^*$ is a robustly optimal strategy for $(A,u)$. \end{lem} \begin{proof} For each $\mathbf{y}$, $\sum_{\ell=1}^k u_\ell(\sigma_\ell(\mathbf{y}))\in \mathcal{H}\bigg(\bigoplus_{\ell = 1}^{k} (A_\ell, u_\ell)\bigg)=\mathcal{H}(A,u)$. 
So there exists $\sigma^*(\mathbf{y})$ such that $u(\sigma^*(\mathbf{y}))\geq \sum_{\ell=1}^k u_\ell(\sigma_\ell(\mathbf{y}))$. Moreover, since $\sigma^*$ guarantees a higher value in $(A,u)$ than $\sigma$ in $\bigoplus_{\ell = 1}^{k} (A_\ell, u_\ell)$, and $V(P_1, \ldots P_m; (A, u)) = V\left(P_1, \ldots , P_m; \bigoplus_{\ell = 1}^{k} (A_\ell, u_\ell)\right)$, $\sigma^*$ is a robustly optimal strategy for $(A,u)$. \end{proof} If a decision problem $(A, u)$ admits a decomposition into binary action problems, \autoref{lem:decomposed} and \autoref{lem:dominate} characterize a set of robustly optimal strategies. However, it is not immediately clear what kind of decision problem admits a decomposition into binary action problems. Interestingly, we show by direct construction that, \textbf{any} decision problem admits a decomposition into binary action problems. \subsection{Canonical Decomposition}\label{sec:canonical} We are now ready to show that any decision problem can be decomposed into binary-action problems. Given an arbitrary decision problem $(A,u)$, we start with some normalization to simplify exposition. First we remove all weakly*-dominated actions,\footnote{An action $a\in A$ is weakly*-dominated if there exists $\alpha\in \Delta A$ such that $u(a)\leq u(\alpha)$. If there are duplicated actions, we remove all but keep one copy.} so that actions can be ordered as \begin{align*} u(\theta_1,a_1) < u(\theta_1,a_2) < \cdots < u(\theta_1,a_n),\\ u(\theta_2,a_1) > u(\theta_2,a_2) > \cdots > u(\theta_2,a_n). \end{align*} Moreover, by adding a constant vector, we can normalize $u(\cdot,a_1)=(0,0)$. \begin{defn} Given a decision problem $(A, u)$, the \textbf{canonical decomposition} of $(A, u)$ is the following collection of $n - 1$ binary action problems $(A_1^*, u_1^*), \ldots , (A_{n - 1}^*, u_{n-1}^*)$: \[ A_{\ell}^* := \left\{0, 1 \right\}, u_\ell^*(\cdot,0) = (0,0), u_\ell^*(\cdot,1) = u(\cdot,a_{\ell + 1}) - u(\cdot,a_{\ell}). 
\] \end{defn} \begin{figure}\end{figure} The canonical decomposition can be visualized in \autoref{figure:canonical} for an example with four actions. To see that a canonical decomposition is a decomposition, first notice that for any $i=1,...,n$, $u(a_i)=\sum_{\ell=1}^{i-1}u^*_{\ell}(1)+\sum_{\ell=i}^{n-1}u^*_{\ell}(0)$, so $\mathcal{H}(A,u)\subset \mathcal{H}\big(\bigoplus_{\ell=1}^{n-1}(A^*_{\ell},u^*_\ell)\big)$. For the other direction, we need to show that for any ${\boldsymbol\delta}\in\{0,1\}^{n-1}$, $\sum_{\ell=1}^{n-1} {\boldsymbol\delta}_\ell u_\ell^*(1)\in \mathcal{H}(A,u)$. The idea is that any nonconsecutive sum of $u_\ell^*(1)$ always lies in the interior of $\mathcal{H}(A,u)$, as illustrated in the example in \autoref{figure:canonical}(b). \begin{lem}\label{lem:exact} The canonical decomposition is a decomposition. \end{lem} \begin{proof} See Appendix~\ref{proof:interior}. \end{proof} Finally \autoref{lem:decomposed}, \autoref{lem:dominate}, and \autoref{lem:exact} immediately imply \autoref{thm:binarymain}. \begin{thm}\label{thm:binarymain} Let $(A_1^*, u_1^*), \ldots , (A_{n-1}^*, u_{n-1}^*)$ be the canonical decomposition of $(A, u)$, and $\sigma^*_\ell$ be a robustly optimal strategy for $(A_{\ell}^*,u^*_\ell)$. Then \begin{enumerate} \item $V(P_1, \ldots , P_m; (A,u)) = \sum_{\ell = 1}^{n-1} \max_{j = 1, \ldots , m} V(P_j; (A_\ell^*, u_\ell^*)).$ \item There exists $\sigma^*:\mathbf{Y}\rightarrow \Delta A$ such that \[u(\sigma^*(\mathbf{y}))\geq \sum_{\ell=1}^{n-1} u^*_\ell(\sigma_\ell^*(\mathbf{y})),\quad\text{ for all }\mathbf{y}.\] Moreover, any such $\sigma^*$ is a robustly optimal strategy for $(A,u)$. \end{enumerate} \end{thm} \autoref{thm:binarymain} allows us to construct a robustly optimal strategy for any decision problem $(A,u)$ in two steps: 1. 
For each $(A^*_\ell,u^*_\ell)$, only one (the best) marginal experiment needs to be considered, and an robustly optimal strategy $\sigma^*_\ell$ only need to be measurable with respect to this experiment; 2. For each realization $\mathbf{y}$, pick a (mixed) action $\sigma(\mathbf{y})\in\Delta (A)$ such that $u(\sigma^*(\mathbf{y}))\geq \sum_{\ell=1}^{n-1} u^*_\ell(\sigma_\ell^*(\mathbf{y}))$. The theorem features two interesting corollaries. \begin{cor}\label{cor:benefit} For any decision problem $(A,u)$ with the canonical decomposition $(A_1^*,u_1^*)$, $\ldots$, $(A^*_{n-1},u^*_{n-1})$ and any collection of marginal experiments $\{P_j\}_{j=1}^m$, for any $j$, \[V(P_1,...,P_m;(A,u))=V(P_{-j};(A,u))\] if and only if $V(P_j;(A_\ell^*,u_\ell^*))\leq \max_{j'\neq j}V(P_{j'};(A_\ell^*,u_\ell^*))$ for all $\ell=1,...,n-1$. \end{cor} \autoref{cor:benefit} describes when an additional marginal experiment robustly improves the agent's value, which happens if and only if it outperforms all other marginal experiment in at least one of the canonically decomposed problem. \begin{cor}\label{cor:number} For any decision problem $(A,u)$ with $|A|=n$, and any collection of experiments $\{P_j\}_{j=1}^m$, there exists a subset of marginal experiments $\{P_j\}_{j\in S\subset \{1,...,m\}}$ with $|S|\leq n-1$, such that \[V(P_1,\cdots,P_m;(A,u))=V(\{P_j\}_{j\in S};(A,u)).\] \end{cor} \autoref{cor:number} implies that in any $n$-action decision problem, it is not beneficial to use more than $n-1$ experiments. \autoref{binary} can be viewed as a special case where $n=2$. \section{Extensions and Discussions} \subsection{General state space} Our previous analyses focus on binary state decision problems. A natural question is whether those results can be extended into environments with more states. Unfortunately, when $|\Theta|>3$, \autoref{binary} no longer holds as we will show in \autoref{ex:threestaets} below. 
Since \autoref{binary} is the building block of the other previous results, the same methodology does not work for a general state space. Nevertheless, using a duality approach, we show that the central idea of ``decomposition'' extends, in the sense that the original decision problem can still be decomposed into a collection of ``subproblems'', and in each of these ``subproblems'', only one information source needs to be used. We will explain at the end of the section why this decomposition result is weaker than what we have for the binary state environment.
or not and experiment $P_Y$ tells the agent whether the state is $\theta_1$ or not. Of course, upon observing both experiments, the agent obtains perfect information and so in any decision problem, the agent obtains the perfect information payoff. Let $A = \{1, 0\}$ and suppose that the utilities are as follows: \begin{align*} u(\theta,a = 1) &= \mathbf{1}\left( \theta \in \{\theta_1, \theta_3\} \right) - \mathbf{1}\left( \theta = \theta_2\right), \\ u(\theta,a = 0) &= 0. \end{align*} Then the agent's maxmin value from marginals $P_X,P_Y$ is her perfect information payoff: $ 0+ 1+1=2$. By using only one information source (either $P_X$ or $P_Y$), $a=0$ is always a best response to any signal realization, so the agent's expected payoff is $0$. The example illustrates that even in a binary action decision problem, the agent would like to use more than one information sources, which draws a contrast with \autoref{binary}. \end{ex} In what follows, we allow for a more general state space $\Theta$ with $|\Theta|<\infty$. The notations we defined for binary state environment naturally generalize to general finite state environment, so we use the same notations without redefining them. We first introduce the following definition of weak decomposition. \begin{defn} A decision problem $(A,u)$ admits a weak decomposition $\{(A_\ell,u_\ell)_{\ell=1}^k\}$ if \[\mathcal{H}(\bigoplus_{\ell=1}^k(A_\ell,u_\ell))\subseteq \mathcal{H}(A,u)\] \end{defn} That is, the polyhedron induced by a weak decomposition needs to be contained in the payoff polyhedron induced by $(A,u)$. The following theorem characterizes the idea of decomposition in general state environment. \begin{thm}\label{thm:weakde} Fix a decision problem $(A,u)$ and Blackwell experiments $P_1, \ldots , P_m$. 
There exists a \textbf{weak} decomposition $((A^*_1,u_1^*), \ldots , (A^*_k,u_k^*))$ of $(A,u)$ for which \begin{align} V(P_1, \ldots , P_m; (A,u)) = \sum_{\ell = 1}^{k} \max_{j = 1, \ldots, m} V(P_j; (A_\ell^*,u_\ell^*)). \label{eqn:optimaldecomp} \end{align} \end{thm} \begin{proof} See Appendix~\ref{proof:weakde}. \end{proof} \autoref{thm:weakde} can be seen as a generalization of \autoref{thm:binarymain} from the binary state environment. In particular, when the state space is binary, we showed in the previous section that by representing a decision problem equivalently as $\bigoplus_{\ell = 1}^{n - 1} (A_\ell^*, u_\ell^*)$ corresponding to the canonical decomposition, indeed the constructed robustly optimal strategy in the latter decision problem guarantees the payoff $\sum_{\ell = 1}^{n-1} \max_{j = 1, \ldots , n} V(P_j, (A_\ell^*, u_\ell^*))$. Moreover, similar to \autoref{thm:binarymain}, one can derive a robustly optimal strategy based on a weak decomposition in two steps: 1. For each $(A_\ell^*,u_\ell^*)$, the agent chooses a strategy that uses only the best marginal experiment and best responses to it, denoted by $\sigma^*_\ell(\mathbf{y})$; 2. For each realization $\mathbf{y}$, the agent picks a (mixed) action $\sigma(\mathbf{y})\in\Delta(A)$ such that $u(\sigma^*(\mathbf{y}))\geq \sum_{\ell=1}^k u_{\ell}^*(\sigma_\ell^*(\mathbf{y}))$. The decomposition in \autoref{thm:weakde} is weaker than \autoref{thm:binarymain} in the following aspects: 1. the decomposition here may depend on the marginal experiments, while the canonical decomposition in \autoref{thm:binarymain} only depends on the decision problem; 2. the decomposition is weak so the payoff polyhedrons of the original problem and decomposed problem might not exactly coincide, though they lead to the same value; 3. 
in each subproblem under the weak decomposition, it might contain more than two actions, while in the canonical decomposition in \autoref{thm:binarymain}, every subproblem is a binary action problem. \subsection{Large Datasets} Our main results in the binary state learning environment of \autoref{sec:binarybinary} and \autoref{sec:binarygen} show that the robustly optimal strategy only uses a select-few information sources ignoring the signal realizations of all other information sources. \autoref{binary} shows that when the decision problem involves just a binary choice, the robustly optimal strategy takes a particularly stark form where the decision maker pays attention to only a \emph{single} information source. On the other hand, in general decision problems involving more actions, \autoref{thm:binarymain} demonstrates that robustly optimal strategy makes use of multiple information sources. In \autoref{thm:largedata} we show that the robustly optimal strategy again takes the form of paying attention to only a single information source in \textbf{any} decision problem when the state space is binary and the information sources are each individually sufficiently informative. Let us again assume throughout this section that the state space is binary, i.e. $\theta \in \Theta := \{1,2\}$. Consider any Blackwell experiment, $P$, with a corresponding signal space $Y$. Let $P^t$ denote the Blackwell experiment with signal space $Y^t$, where $P^t(\cdot \mid \theta)$ consists of $t$ i.i.d draws from $P(\cdot \mid \theta)$: for all $(y_1, \ldots , y_t) \in Y^t$ and each $\theta \in \{1,2\}$, \[ P^t(y_1 , \ldots , y_t \mid \theta) = \prod_{\tau = 1}^{t} P(y_\tau \mid \theta). \] Because we want to study the setting in which each information source is individually, very informative, we study the robustly optimal strategy of a decision maker when the decision maker has access to information sources $P_1^t, \ldots , P_m^t$ when $t$ is large. 
To state our result, let us define the quantity $w(P)$ for a given Blackwell experiment $P$ with signal space $Y$: \[ w(P) = \max_{\theta \in \Theta} \min_{\nu \in \Delta(Y)} KL(\nu, P_\theta), \] where $KL(\nu, \nu') = \sum_{y \in Y} \nu(y) \log \frac{\nu(y)}{\nu'(y)}$ is the Kullback-Leibler divergence between probability measures $\nu$ and $\nu'$. Notice that $w(P)$ assigns a positive real number to each Blackwell experiment $P$ and also coincides with the well-known Chernoff distance between the probability measures $P_{1} \in \Delta(Y)$ and $P_2 \in \Delta(Y)$. \begin{thm}\label{thm:largedata} Suppose that $\Theta = \{1,2\}$ and let $P_1, \ldots, P_m$ be a finite collection of Blackwell experiments. Suppose that \[ w(P_1) >\max\{ w(P_i) : i \neq 1\}. \] Then given any decision problem $(A,u)$, there exists some $t^*$ such that for all $t \geq t^*$, \[ V(P_1^t, \ldots , P_m^t; (A,u)) = V(P_1^t; (A,u)). \] \end{thm} \begin{proof} Let $(A_1^*, u_1^*), \ldots , (A_{n - 1}^*, u_{n-1}^*)$ be the canonical decomposition of $(A, u)$. Then \cite{moscarini2002law} show that for each $(A_\ell^*, u_\ell^*)$ there exists some $t^*_\ell$ such that for all $t \geq t_\ell^*$, \[ V(P_1^t;(A_\ell^*,u_\ell^*))> \max_{j \neq 1} V(P_j^t;(A^*_\ell,u_\ell^*)). \] Now consider any $t \geq t^* := \max_{\ell} t_\ell^*$. By Theorem~\ref{thm:binarymain}, \[ V(P_1^t, \ldots , P_m^t; (A,u)) = \sum_{\ell = 1}^{n - 1} \max_{j= 1, \ldots , m} V(P_j^t;(A^*_\ell,u_\ell^*)) = \sum_{\ell = 1}^{n - 1} V(P_1^t;(A^*_\ell,u_\ell^*)) = V(P_1^t; (A,u)). \] \end{proof} \appendix \section{Appendix} \subsection{Proof of \autoref{lem:supjoint}}\label{proof:blackwellsup} \begin{proof} Consider a collection of experiments $\{P_j\}_{j=1}^m$ and their Blackwell supremum $\overline{P}:\Theta\rightarrow \Delta Z$.
Since $\overline{P}$ Blackwell dominates $P_j$ for all $j$, there exist garblings $g_j: Z\rightarrow\Delta (Y_j)$, $j=1,...,m$, such that for all $y_j\in Y_j$, $$P_j(y_j|\theta)=\sum_{z\in Z}g_j(y_j|z)\overline{P}(z|\theta).$$ Construct the following experiment $\tilde{P}:\Theta\rightarrow\Delta(Y_1\times\ldots\times Y_m)$: \begin{equation}\label{garbling} \tilde{P}(y_1,\ldots,y_m|\theta)=\sum_{z\in Z}\prod_{j=1}^mg_j(y_j|z)\overline{P}(z|\theta). \end{equation} Notice that $\sum_{-j}\tilde{P}(y_1,\ldots,y_m|\theta)=\sum_{z\in Z}g_j(y_j|z)\overline{P}(z|\theta)=P_j(y_j|\theta)$, so $\tilde{P}\in \mathcal{P}(P_1, \ldots , P_m)$. Moreover, \eqref{garbling} implies $\tilde{P}$ is a garbling of $\overline{P}$ so $\overline{P}$ Blackwell dominates $\tilde{P}$. From the definition of Blackwell supremum, $\tilde{P}$ Blackwell dominates $\overline{P}$, so $\overline{P}=\tilde{P}\in \mathcal{P}(P_1, \ldots , P_m)$. \end{proof} \subsection{Proof of \autoref{lem:exact}}\label{proof:interior} \begin{proof} We first show that $(A_1^*, u_1^*), \ldots , (A_{n- 1}^*, u_{n-1}^*)$ is a weak decomposition. Suppose otherwise so that there exists some $(a_1^*, \ldots , a_{n-1}^*)$ for which $u^* := u_1^*(a_1^*) + \cdots + u_{n-1}^*(a_{n-1}^*) \notin \mathcal{H}(A, u).$ By Corollary 11.4.2 of \citet{rockafellar1970convex}, there exists $\lambda \in \mathbb{R}^2 \setminus \{0\}$ such that \begin{align} \lambda \cdot u^* > \sup_{v \in \mathcal{H}(A, u)} \lambda \cdot v.\label{eqn:sep} \end{align} Note that $\lambda \geq 0$ since otherwise $\sup_{v \in \mathcal{H}(A, u)} \lambda \cdot v = + \infty$. \begin{figure}\label{} \end{figure} Given the canonical decomposition, for any $\ell' > \ell$, \[ \lambda \cdot u_\ell^*(1, \cdot) \leq \lambda \cdot u_\ell^*(0, \cdot) \Longrightarrow \lambda \cdot u_{\ell'}^*(1,\cdot) < \lambda \cdot u_{\ell'}^*(0, \cdot). \] Let $\ell^* = \min\left\{ \ell : \lambda \cdot u_\ell^*(1, \cdot) \leq 0 \right\},$ where we use the convention that $\min \emptyset = n$.
Then \begin{align*} \lambda \cdot u(a_{\ell^*}, \cdot) - \lambda \cdot u^* &= \sum_{\ell =1}^{\ell^* - 1} \lambda \cdot u_\ell^*(1) - \sum_{\ell = 1}^{n- 1} \lambda \cdot u_\ell^*(a_\ell^*) \\ &= \sum_{\ell =1}^{\ell^* - 1} \lambda \cdot (u_\ell^*(1) - u_\ell^*(a_\ell^*)) + \sum_{\ell = \ell^*}^{n- 1} \lambda \cdot (u_\ell^*(0) - u_\ell^*(a_\ell^*)) \geq 0. \end{align*} But $u(a_{\ell^*}, \cdot) \in \mathcal{H}(A, u)$, which contradicts Inequality~(\ref{eqn:sep}). It remains to show that $(A_1^*, u_1^*), \ldots , (A_{n- 1}^*, u_{n -1}^*)$ is an exact decomposition, but this is straightforward since it suffices to show that $\left\{u(a, \cdot): a \in A\right\} \subseteq \mathcal{H}\left(\bigoplus_{\ell = 1}^{n- 1} (A_{\ell}^*, u_\ell^*) \right)$. Clearly this is the case since for every action $a_k \in A$, $u(a_k) = \sum_{\ell = 1}^{k - 1} u_\ell^*(1).$ \end{proof} \subsection{Proof of \autoref{thm:weakde}}\label{proof:weakde} We start from proving the following lemma that will be useful in the proof. 
\begin{lem}\label{lem:weakde} If $(A_\ell,u_\ell)_{\ell=1}^k$ is a weak decomposition of $(A,u)$, \[V(P_1, \ldots , P_m; (A,u)) \geq \sum_{\ell = 1}^{k} V(P_1,...,P_m; (A_\ell,u_\ell)).\] \end{lem} \begin{proof} Suppose $P^*$ is a solution to \[\min_{P\in\mathcal{P}(P_1,...,P_m)}\max_{\sigma:\mathbf{Y}\rightarrow \Delta A}\sum_{\theta \in \Theta} \sum_{\mathbf{y} \in \mathbf{Y}} P(\mathbf{y} \mid \theta) u\left(\theta,\sigma(\mathbf{y})\right).\] By the minimax theorem, \[V(P_1, \ldots , P_m; (A,u))=\max_{\sigma:\mathbf{Y}\rightarrow \Delta A}\sum_{\theta \in \Theta} \sum_{\mathbf{y} \in \mathbf{Y}} P^*(\mathbf{y} \mid \theta) u\left(\theta,\sigma(\mathbf{y})\right).\] For $\ell=1,...,k$, define $\sigma_\ell^*$ to be a solution to \[\max_{\sigma_\ell:\mathbf{Y}\rightarrow \Delta A_\ell}\sum_{\theta \in \Theta} \sum_{\mathbf{y} \in \mathbf{Y}} P^*(\mathbf{y} \mid \theta) u_\ell(\theta,\sigma_\ell(\mathbf{y})).\] Now we have \begin{equation}\label{eq:weakdevalue} \begin{aligned} \sum_{\ell = 1}^{k} V(P_1,...,P_m; (A_\ell,u_\ell))&=\sum_{\ell = 1}^{k} \max_{\sigma_\ell:\mathbf{Y}\rightarrow \Delta A_\ell} \min_{P\in\mathcal{P}(P_1,...,P_m)}\sum_{\theta \in \Theta} \sum_{\mathbf{y} \in \mathbf{Y}} P(\mathbf{y} \mid \theta) u_\ell\left(\theta,\sigma_\ell(\mathbf{y})\right)\\ &\leq\sum_{\ell = 1}^{k} \max_{\sigma_\ell:\mathbf{Y}\rightarrow \Delta A_\ell} \sum_{\theta \in \Theta} \sum_{\mathbf{y} \in \mathbf{Y}} P^*(\mathbf{y} \mid \theta) u_\ell\left(\theta,\sigma_\ell(\mathbf{y})\right)\\ &= \sum_{\ell = 1}^{k} \sum_{\theta \in \Theta} \sum_{\mathbf{y} \in \mathbf{Y}} P^*(\mathbf{y} \mid \theta) u_\ell\left(\theta,\sigma_\ell^*(\mathbf{y})\right) \end{aligned} \end{equation} From the definition of weak decomposition, there exists $\hat{\sigma}:\mathbf{Y}\rightarrow A$ such that $u(\theta,\hat{\sigma}(\mathbf{y}))\geq \sum_{\ell=1}^k u_\ell(\theta,\sigma^*_\ell(\mathbf{y}))$ for all $\mathbf{y}$ and $\theta.$ Therefore, \begin{equation}\label{eq:weakdevalue2} \begin{aligned} \sum_{\ell
= 1}^{k} \sum_{\theta \in \Theta} \sum_{\mathbf{y} \in \mathbf{Y}} P^*(\mathbf{y} \mid \theta) u_\ell\left(\theta,\sigma_\ell^*(\mathbf{y})\right)&= \sum_{\theta \in \Theta} \sum_{\mathbf{y} \in \mathbf{Y}} P^*(\mathbf{y} \mid \theta) \sum_{\ell = 1}^{k} u_\ell\left(\theta,\sigma_\ell^*(\mathbf{y})\right)\\ &\leq \sum_{\theta \in \Theta} \sum_{\mathbf{y} \in \mathbf{Y}} P^*(\mathbf{y} \mid \theta)u(\theta,\hat{\sigma}(\mathbf{y}))\\ &\leq \max_{\sigma:\mathbf{Y}\rightarrow \Delta A}\sum_{\theta \in \Theta} \sum_{\mathbf{y} \in \mathbf{Y}} P^*(\mathbf{y} \mid \theta)u(\theta,\sigma(\mathbf{y}))\\ &=V(P_1,...,P_m;(A,u)) \end{aligned} \end{equation} The statement of the lemma follows immediately from \eqref{eq:weakdevalue} and \eqref{eq:weakdevalue2} \end{proof} \begin{proof}[Proof of \autoref{thm:weakde}] Consider a robustly optimal strategy $\sigma^*:\mathbf{Y}\rightarrow \Delta(A)$ so \[V(P_1,...,P_m;(A,u))=\min _{P \in \mathcal{P}\left(P_{1}, \ldots, P_{m}\right)} \sum_{\theta \in \Theta} \sum_{\mathbf{y} \in \mathbf{Y}} P(\mathbf{y} \mid \theta) u\left(\theta,\sigma^{*}(\mathbf{y})\right).\] By considering the dual of the above linear program (Kantorovich dual), we obtain: \begin{align*} V\left(P_{1}, \ldots, P_{m},(A, u)\right) & =\max _{\phi_{1}: \Theta\times Y_{1} \rightarrow \mathbb{R}, \ldots, \phi_{m}: \Theta\times Y_{m} \rightarrow \mathbb{R}} \sum_{j=1}^{m} \sum_{\theta \in \Theta} \sum_{y_{j} \in Y_{j}} P_{j}\left(y_{j} \mid \theta\right) \phi_{j}\left( \theta,y_{j}\right)\\ &\text{s.t. }\sum_{j=1}^{m} \phi_{j}\left(\theta,y_{j}\right) \leq u\left(\theta,\sigma^{*}(\mathbf{y})\right) \text { for all }(\theta, \mathbf{y}) \in \Theta \times \mathbf{Y} \addtocounter{equation}{1}\tag{\theequation} \label{eqn:constraint} \end{align*} Let $\phi_1^*,...,\phi_m^*$ be the solution to the dual program. Define the collection of decision problems $\{(A_j,u_j)\}_{j=1}^m$ such that $A_j=Y_j$ and $u_j=\phi_j^*$. 
From the constraint \eqref{eqn:constraint}, $((A_1,u_1),\ldots,(A_m,u_m))$ forms a weak decomposition of $(A,u)$. Moreover, in every ``sub-problem'' $(A_j,u_j)$, by playing the strategy \[\sigma^j(y_1,...,y_j,...,y_m)=y_j\quad \text{ for all } (y_1,...,y_m),\] the agent achieves exactly a payoff of \begin{align*} \sum_{\theta \in \Theta} \sum_{\left(y_{1}, \ldots, y_{m}\right) \in \mathbf{Y}} P(y_{1}, \ldots, y_{m} \mid \theta) u_j(\theta, \sigma^j(y_{1}, \ldots, y_{m})) =&\sum_{\theta \in \Theta} \sum_{y_j \in Y_j} P_j(y_j \mid \theta) u_j(\theta, \sigma^j(y_{1}, \ldots, y_{m}))\\ =&\sum_{\theta \in \Theta} \sum_{y_j \in Y_j} P_j(y_j \mid \theta) \phi_j^*(\theta, y_j), \end{align*} which implies $V(P_1,...,P_m;(A_j,u_j))\geq \max\limits_{\ell=1,...,m} V(P_\ell;(A_j,u_j))\geq \sum_{\theta \in \Theta} \sum_{y_j \in Y_j} P_j(y_j|\theta) \phi_j^*(\theta, y_j)$. Summing over all $j=1,...,m$ we have \begin{equation}\label{eq:weakde} \sum_{j=1}^m V(P_1,...,P_m;(A_j,u_j))\geq \sum_{j=1}^m\max_{\ell=1,...,m} V(P_\ell;(A_j,u_j)) \geq \sum_{j=1}^m \sum_{\theta \in \Theta} \sum_{y_j \in Y_j} P_j(y_j \mid \theta) \phi_j^*(\theta, y_j). \end{equation} Now from \autoref{lem:weakde}, equation \eqref{eq:weakde}, and $\{\phi_j^*\}_{j=1}^m$ being the solution to the dual program, we have \begin{align*} V(P_1, \ldots , P_m; (A,u))&\geq \sum_{j=1}^{m} V(P_1,...,P_m; (A_j,u_j))\\ &\geq \sum_{j=1}^m\max_{\ell=1,...,m} V(P_\ell;(A_j,u_j))\\ &\geq \sum_{j=1}^{m}\sum_{\theta\in\Theta}\sum_{y_j \in Y_j} P_j(y_j \mid \theta) \phi_j^*(\theta, y_j)\\ &= V(P_1, \ldots , P_m; (A,u)) \end{align*} which implies $V(P_1, \ldots , P_m; (A,u))=\sum_{j=1}^m\max_{\ell=1,...,m} V(P_\ell;(A_j,u_j))$. \end{proof} \end{document}
\begin{document} \title{Parabolic integrodifferential identification\\ problems related to radial memory kernels II\footnote{Work partially supported by the Italian Ministero dell'Universit\`a e della Ricerca Scientifica e Tecnologica (M.U.R.S.T.).}} \par \noindent {\bf Abstract.} We are concerned with the problem of recovering the radial kernel $k$, depending also on time, in the parabolic integro-differential equation $$D_{t}u(t,x)={\cal A}u(t,x)+\int_0^t\!\! k(t-s,|x|)\mathcal{B}u(s,x)ds +\int_0^t\!\! D_{|x|}k(t-s,|x|)\mathcal{C}u(s,x)ds+f(t,x),$$ ${\cal A}$ being a uniformly elliptic second-order linear operator in divergence form. We single out a special class of operators ${\cal A}$ and two pieces of suitable additional information for which the problem of identifying $k$ can be uniquely solved locally in time when the domain under consideration is a ball or a disk. \par \noindent {\it 2000 Mathematical Subject Classification.} Primary 45Q05. Secondary 45K05, 45N05, 35K20, 35K90. \par \noindent {\it Key words and phrases.} Identification problems. Parabolic integro-differential equations in two and three space dimensions. Recovering radial kernels depending also on time. Existence and uniqueness results. \section{Posing the identification problem} \setcounter{equation}{0} The present paper is strictly related to our previous one \cite{3}. Indeed, the problem we are going to investigate consists, as in \cite{3}, in identifying an unknown radial memory kernel $k$ also depending on time, which appears in the following integro-differential equation related to the ball $\Omega\!=\!\{x\!\!=\!\! (x_1,x_2,x_3)\in\mathbb{R}^3\!:\!|x|<R\}$, $R>0$ and $|x|={(x_1^2+ x_2^2+ x_3^2)}^{\!1/2}$: \begin{eqnarray}\label{problem} D_{t}u(t,x)=\mathcal{A}u(t,x)+\!\int_0^t\!\! k(t-s,|x|)\mathcal{B}u(s,x)ds+\! \int_0^t\!\! D_{|x|}k(t-s,|x|)\mathcal{C}u(s,x)ds\;+\!\!\!\!&f(t,x),& \nonumber\\[2mm]\hskip 8truecm \forall\, (t,x)\in [0,T] \times\Omega. & & \end{eqnarray} We emphasize that the aim of the present paper is to study the identification problem related to $(\ref{problem})$ when the domain $\Omega$ is a {\emph{full}} ball. This is exactly a singular domain for our problem as we noted in Remark 2.9 in \cite{3}, where we were able to recover the kernel $k$ only in the case of a spherical corona or an annulus $\Omega$. In this paper we show that our identification problem can actually be solved in suitable weighted spaces if we appropriately restrict the class of admissible differential operators $\cal{A}$ to a class whose coefficients have an appropriate structure in a neighbourhood of the centre $x=0$ of $\Omega$, which turns out to be a ``singular point'' for our problem.\\ In equation $(\ref{problem})\;\mathcal{A}$ and $\mathcal{B}$ are two second-order linear differential operators, while $\mathcal{C}$ is a first-order differential operator having the following forms, respectively: \begin{eqnarray} \label{A} \mathcal{A}=\sum_{j=1}^{3}D_{x_j}\big(\sum_{k=1}^{3}a_{j,k}(x)D_{x_k} \big),\ \ \label{B}\mathcal{B}=\sum_{j=1}^{3}D_{x_j}\big(\sum_{k=1}^{3}b_{j,k}(x) D_{x_k} \big),\ \ \label{C}\mathcal{C}=\sum_{j=1}^{3}c_{j}(x)D_{x_j}. \end{eqnarray} In addition, operator $\mathcal{A}$ has a very special structure, since its coefficients $a_{i,j}$, $i,j=1,2,3,$ have the following particular representation, (cf.
\cite{3}, formula $(2.4)$, where $(b,d)$ is changed in $(-b, -d)$): \begin{equation}\label{condsuaij} \left\{\!\!\!\begin{array}{lll} a_{1,1}(x)\!\!\!&=&\!\!\!a(|x|)+ \displaystyle\frac{(x_2^2+x_3^2)[c(x)+b(|x|)]}{|x|^2}- \frac{x_1^2d(|x|)}{|x|^2},\\[5mm] a_{2,2}(x)\!\!\!&=&\!\!\!a(|x|)+ \displaystyle\frac{(x_1^2+x_3^2)[c(x)+b(|x|)]}{|x|^2}- \frac{x_2^2d(|x|)}{|x|^2}, \\[5mm] a_{3,3}(x)\!\!\!&=&\!\!\!a(|x|)+ \displaystyle\frac{(x_1^2+x_2^2)[c(x)+b(|x|)]}{|x|^2} -\frac{x_3^2d(|x|)}{|x|^2}, \\[5mm] a_{j,k}(x)\!\!\!&= &\!\!\!a_{k,j}(x)=\displaystyle -\frac{x_jx_k[b(|x|)+c(x)+d(|x|)]}{|x|^2},\qquad 1\le j,k\le 3,\ j\neq k, \end{array}\right. \end{equation} \par \noindent where the functions $a$, $b$, $c$, $d$ are {\it non-negative} and enjoy the following properties: \begin{eqnarray}\label{abcd} \label{regular}&a, b, d \in C^{2}\big([0,R]\big)\,,\quad\; c\in C^2(\overline{\Omega}),\qquad&\\[1,7mm] \label{bcd} &a(r)> d(r)\,,\quad \forall r\in [0,R]\,, \quad b(0)+c(0)=0\,,\quad d(0)=0 .& \end{eqnarray} In particular, we note that each coefficient $a_{i,j}$ is Lipschitz-continuous in ${\overline {\Omega}}$. \par \noindent We now introduce the function $h$ defined by \begin{equation}\label{H} h(r)=a(r)-d(r),\qquad\forall\,r\in [0,R]\,, \end{equation} and which is non-negative by virtue of $(\ref{bcd})$. Then, as we noted in \cite{3}, for every $x\in\overline{\Omega}$ and $\xi\in\mathbb{R}^3$ we have \begin{eqnarray}\label{unel1} \sum_{j,k=1}^{3}a_{j,k}(x){\xi}_j{\xi}_k \!\!&\geqslant&\!\! a(|x|){|\xi|}^2+\frac{b(|x|)+c(x)}{|x|^2}\, {|x\wedge\xi |}^2-\frac{d(|x|)}{|x|^2}\,{[x\cdot\xi]}^2\nonumber\\[2mm] &\geqslant&\!\! a(|x|){|\xi|}^2+\frac{b(|x|)}{|x|^2}\, {|x\wedge\xi |}^2-\frac{d(|x|)}{|x|^2}\,{[x\cdot\xi]}^2\geqslant h(|x|)\,|\xi|^2\ge 0,\qquad\quad \end{eqnarray} where $ \wedge$ and $\cdot$ denote, respectively, the wedge and inner products in $\mathbb{R}^3$.\\ From $(\ref{unel1})$ it follows that the condition of uniform ellipticity of $\cal{A}$, i.e. \begin{equation}\label{unel} {\alpha}_1|\xi{|}^2\leqslant\sum_{j,k=1}^{3}a_{j,k}(x){\xi}_j{\xi}_k \leqslant{\alpha}_2|\xi{|}^2,\qquad\,\forall\, (x,\xi)\in\Omega\times \mathbb{R}^3\;, \end{equation} is trivially satisfied with $\alpha_1\!=\!\min_{r\in [0,R]}h(r)$ and $\alpha_2\!=\!\|h+b\|_{C([0,R])}+\|c\|_{C(\overline{\Omega})}$.\\ Then we prescribe the {\it{initial condition}}: \begin{equation} \label{u0} u(0,x)=u_0(x)\,,\;\;\;\;\qquad \forall\, x\in\Omega\,, \end{equation} $u_0:\overline{\Omega}\rightarrow\mathbb{R}$ being a given smooth function, as well as one of the following boundary value conditions, where $u_1\!:\![0,T]\!\times\!\overline{\Omega}\!\rightarrow\!\mathbb{R}$ is a given smooth function: \begin{alignat}{2} \label{D11} & (\textrm{D})\quad\qquad & u(t,x)=u_1(t,x),\qquad\qquad \quad & \forall\, (t,x)\in [0,T]\times\partial\Omega,\,\\[2mm] \label{N11} & (\textrm{N})\quad\qquad & \frac{\partial u}{\partial n}(t,x) = \frac{\partial u_1}{\partial n}(t,x),\qquad\quad \quad & \forall\, (t,x)\in [0,T]\times\partial\Omega. \end{alignat} Here D and N stand, respectively, for the Dirichlet and Neumann boundary conditions, whereas $n$ denotes the outwarding normal to $\partial\Omega$.
\begin{remark}\label{conormal} \emph{The conormal vector associated with the matrix $\{a_{j,k}(x)\}_ {j,k=1}^{3}$ defined by $(\ref{condsuaij})$ and the boundary $\partial\Omega$ coincides with $R^{-1}[a(R)-d(R)]x$, i.e. with the outwarding normal $n(x)$.} \end{remark} To determine the radial memory kernel $k$ we need also the two following pieces of information: \begin{eqnarray} \label{g11}\!\!\!&\Phi&\!\!\!\!\![u(t,\cdot)](r)\!:= g_1(t,r),\,\qquad \forall\,(t,r)\in[0,T]\times (0,R),\\[1,5mm] \label{g22}\;\!\!\!&\Psi&\!\!\!\!\![u(t,\cdot)]\!:= g_2(t),\;\,\quad\quad \quad\quad\forall\,t\in[0,T], \end{eqnarray} where, representing with $(r, \varphi, \theta)$ the usual spherical co-ordinates with pole at $x=0$, $\Phi$ and $\Psi$ are two linear operators acting, respectively, on the angular variables $\varphi,\,\theta$ only and all the space variables $r,\,\varphi,\,\theta$.\\ \vskip -0,3truecm \par \noindent{\it{Convention:}} from now on we will denote by $\textrm{P}(\textrm{K}),\,\textrm{K}\in\{\textrm{D,N}\}$, the identification problem consisting of $(\ref{problem}), (\ref{u0})$, the boundary condition $(\textrm{K})$ and $(\ref{g11}),(\ref{g22})$. \\ \vskip -0,3truecm \par \noindent An example of admissible linear operators $\Phi$ and $\Psi$ is the following: \begin{align}\label{Phi1} &\Phi [v](r):= \int_{\!0}^{\pi}\!\!\sin\!\theta\,d\theta\! \int_{\!0}^{2\pi}\!\!\!\!v(rx') d\varphi\;, &\\[3mm] \label{Psi1} &\Psi[v]:=\int_{\!0}^{R}\!\!r^2 dr\!\!\int_{\!0}^{\pi}\!\! \sin\!\theta\, d\theta\! \int_{\!0}^{2\pi}\!\!\!\!\psi(rx')v(rx') d\varphi \;\,,& \end{align} where $x'\!=\!(\cos\!\varphi\sin\!\theta,\,\sin\!\varphi\sin\!\theta,\, \cos\!\theta) $, while $\psi:\overline{\Omega}\rightarrow\mathbb{R}$ is a smooth assigned function.
\begin{remark} \emph{We note that $(\ref{Phi1})$ coincides with $(1.12)$ in \cite{3} with $\lambda=1$. We stress here that at present this case, along with the particular choice $(\ref{condsuaij})$ of the coefficients $a_{i,j}$, seems to be the only one allowing an analytical treatment in the usual $L^p$-spaces when dealing with a full ball.} \end{remark} From $(\ref{D11})-(\ref{g22})$ we (formally) deduce that our data must satisfy the following consistency conditions, respectively: \begin{align}\label{DD1} &(\textrm{C1,D})\quad\quad\qquad {u_0}(x)=u_1(0,x),\qquad\, &\forall& x\in \partial\Omega\,,\qquad\;\;\\[2mm] \label{NN1}&(\textrm{C1,N})\quad\quad\quad \frac{\partial u_0}{\partial n}(x)=\frac{\partial u_1}{\partial n} (0,x), &\forall& x\in \partial\Omega\,, \qquad\quad\qquad\quad\,\\[4mm] \label{1.18}&\hskip 2,5truecm\Phi[u_0](r)=g_1(0,r), &\forall& r\in (0,R)\,,\\[1,5mm] \label{1.19}&\hskip 2,5truecm\Psi[u_0]=g_2(0)\,.& &\; \end{align} \section{Main results} \setcounter{equation}{0} In this section we state our {\it{local in time}} existence and uniqueness result related to the identification problem $\textrm{P}(\textrm{K}), \textrm{K}\in\{\textrm{D,N}\}$.
For this purpose we assume that the coefficients of operator $\mathcal{A}$ satisfy $(\ref{condsuaij})-(\ref{bcd})$, whereas, as far as the coefficients $b_{i,j}$ and $c_i$ of operators $\cal{B},\,\cal{C}$ are concerned, we assume: \begin{eqnarray}\label{ipotesibijeci} b_{i,j}\in W^{1,\infty}(\Omega)\,, \qquad c_{i}\in L^{\infty}(\Omega)\,, \;\quad &i,j=1,2,3.& \end{eqnarray} In order to find out the right hypotheses on the linear operators $\Phi$ and $\Psi$, it will be convenient to rewrite the operator $\mathcal{A}$ in the spherical co-ordinates $(r,\,\varphi,\,\theta)$.\\ As a consequence, using representation $(\ref{condsuaij})$ for the $a_{i,j}$'s, through lengthy but easy computations, we obtain the following polar representation $\widetilde{\mathcal{A}}$ for the second-order differential operator $\mathcal{A}$: \begin{eqnarray}\label{tildeA} {\widetilde{\mathcal{A}}}\!\!\!\! & = &\!\!\!\! D_r\big[{h}(r)D_r\big]+ \frac{2{h}(r)D_r }{r}+ \frac{{a}(r)+{b}(r)}{r^2\sin\!\theta}\Big[\, {{(\sin\!\theta)}^{-1}D_{\varphi}^2}+D_{\theta}\big(\sin\!\theta D_{\theta}\big)\Big] \nonumber\\[1mm] & &\!\!\! +\,\frac{1}{r^2\sin\!\theta} \Big[\,{(\sin\!\theta)}^{-1}{D_{\varphi}\big[ \wtil{c}(r,\varphi,\theta)D_{\varphi}\big]} +D_{\theta}\big(\wtil{c}(r,\varphi,\theta)\sin\!\theta D_{\theta} \big)\Big]\,, \end{eqnarray} where we have set ${\wtil{c}}(r,\varphi,\theta)= c(r\cos\!\varphi\sin\!\theta, r\sin\!\varphi\sin\!\theta, r\cos\!\theta)\,$.\\ Before listing our requirements concerning operators $\Phi$ and $\Psi$ and the data, we recall (cf. \cite{4}) some definitions about weighted Sobolev spaces.
Given an $n$-dimensional domain ${\cal O}megaega$ the weighted Sobolev spaces $W_{\sigmaigma}^{k,p}({{\cal O}megaega})$, $k{\rm i}n\mathbb{N}$, $p{\rm i}n [1,+{\rm i}nfty]$, $\sigmaigma{\rm i}n\mathbb{R}$, are defined by \betaegin{equation}\lambdaabel{WSS} W_{\sigmaigma}^{k,p}({{\cal O}megaega})=\Big\{f{\rm i}n W_{loc}^{k,p}({\cal O}megaega \betaackslash \{0\})\,:\, {\|f\|}_{W_{\sigmaigma}^{k,p}({{\cal O}megaega})}= {\betaigg(\sigmaum_{0\lambdaeqslant |\alphalpha| \lambdaeqslant k}{\rm i}nt_{{{\cal O}megaega}}|x|^{\sigma}|D^{\alphalpha}f(x)|^p dx \betaigg)}^{\!1/p}\!<+{\rm i}nfty\Big\}, {\rm e}nd{equation} \vskip -0,4truecm \par \noindent where \vskip -0,5truecm \betaegin{equation} \alphalpha=({\alphalpha}_1,\lambdadots ,{\alphalpha}_n){\rm i}n \mathbb{N}^n\,,\quadquad |\alphalpha|=\sigmaum_{i=1}^{n}|{\alphalpha}_i| \,,\quadquad D^{\alphalpha}=\frac{{\partial}^{|\alphalpha|}}{{\partial}^{^{{\alphalpha}_1}}x_1 \lambdadots {\partial}^{^{{\alphalpha}_n}}x_n}\,. {\nonumber}number {\rm e}nd{equation} Of course, $W_{\sigmaigma}^{k,p}({{\cal O}megaega})$ turns out to be a Banach space when endowed with the norm \newline $\|\cdot\|_{W_{\sigmaigma}^{k,p}({{\cal O}megaega})}$. In particular, taking $\sigma=0$ in $(\ref{WSS})$ we obtain the usual Sobolev spaces $W^{k,p}({\cal O}mega)$ whereas taking $k=0$ we obtain the weighted $L^p$-spaces defined by \betaegin{equation}\lambdaabel{WLpS} L_{\sigma}^{p}({\cal O}mega)=\Big\{f{\rm i}n L_{loc}^p({\cal O}mega):\|f\|_{L_{\sigma}^{p}({\cal O}mega)}=\Big( {\rm i}nt_{{{\cal O}megaega}}|x|^{\sigma}|f(x)|^p dx \Big) ^{\!1/p}\!<+{\rm i}nfty\Big\}. {\rm e}nd{equation} \betaegin{lemma}\lambdaabel{suphi} Operator $\Phi$ defined by $(\ref{Phi1})$ maps $W^{2,p}({{\cal O}megaega})$ continuously into $W_{2}^{2,p}(0,R)$. {\rm e}nd{lemma} \betaegin{proof} Taking $u{\rm i}n W^{2,p}({{\cal O}megaega})$ from $(\ref{Phi1})$ it follows that \betaegin{equation}\lambdaabel{Drj} D_r^{(j)}\Phi[u](r)=\Phi[D_r^{(j)}u](r),\quadquad\forall j=0,1,2. 
{\rm e}nd{equation} Hence, denoting with $p'$ the conjugate exponent of $p$, from H\"older's inequality we obtain \betaegin{align} &\|\Phi[u]\|_{L_{2}^p(0,R)}^p={\rm i}nt_0^Rr^{2}\,| \Phi[u](r)|^p dr = {\rm i}nt_0^Rr^{2}\Big|{\rm i}nt_{\!0}^{\pi} \!\!\sigmain\!\theta\mbox{}d\theta\!{\rm i}nt_{\!0}^{2\pi}\!\!\!u(rx') d\varphi\Big|^p dr &{\nonumber}number\\[2mm] \lambdaabel{0.1}&\;\,\quad\quadquad\quadquad\lambdaeqslant {(4\pi)}^{p/{p'}}\! {\rm i}nt_0^R\!\!r^2dr\!\! {\rm i}nt_{\!0}^{\pi}\!\!\sigmain\!\theta d\theta\!\! {\rm i}nt_{\!0}^{2\pi}\!\!\!{|u(rx')|}^p\, d\varphi ={(4\pi)}^{p/{p'}}{\|u\|}_{L^p({\cal O}mega)}^p\,. {\rm e}nd{align} Repeating similar computations and using the well-known inequalities \betaegin{align} &|D_ru(rx')|\lambdaeqslant|\nabla u(rx')|\,,\quadquad |D_r^2u(rx')|\lambdaeqslant \sigmaum_{j,k=1}^{3}|D_{x_j}D_{x_{k}}u(rx')|^2\,,& {\rm e}nd{align} from $(\ref{Drj})$ we can easily find that the following inequalities hold: \betaegin{align} \lambdaabel{1.1}&\quadquad\|D_r\Phi[u]\|_{L_{2}^p(0,R)}^p\lambdaeqslant C_1{\|u\|}_{W^{1,p}({\cal O}mega)}^p\,,\quadquad\|D_r^2\Phi[u]\|_{L_{2}^p(0,R)}^p\lambdaeqslant C_2{\|u\|}_{W^{2,p}({\cal O}mega)}^p\,,& {\rm e}nd{align} where $C_1$ and $C_2$ are two non-negative constants depending on $p$ only.\\ Therefore, from $(\ref{0.1})$ and $(\ref{1.1})$ it follows that there exists a non-negative constant $C_3$, independent of $u$, such that \betaegin{equation} \|\Phi[u]\|_{W_{2}^{2,p}(0,R)}\lambdaeqslant C_3{\|u\|}_{W^{2,p}({\cal O}mega)}. 
{\rm e}nd{equation} {\rm e}nd{proof} In this paper we will use Sobolev spaces $W^{k,p}({\cal O}megaega)$ with \betaegin{equation}\lambdaabel{p} p{\rm i}n(3,+{\rm i}nfty) {\rm e}nd{equation} and we will assume that the functionals $\Phi$ and $\Psi$ satisfy the following requirements: \betaegin{alignat}{5} \lambdaabel{primasuPhiePsi} & \Phi{\rm i}n\mathcal{L}\betaig(L^p({\cal O}megaega);\,L_{2}^p(0,R)\betaig), \;\,\quadquad\quadquad \Psi{\rm i}n L^p({\cal O}megaega)^*,&\\[1,9mm] \lambdaabel{secondasuPhi} & \Phi[wu]=w\,\Phi[u],\quadquad\quadquad\quadquad\quadquad\; \forall\,(w,u){\rm i}n L_{2}^p(0,R)\times L^p({\cal O}megaega),&\\[1,9mm] \lambdaabel{terzasuPhi} & D_r\Phi[u](r)=\Phi[D_ru](r), \quadquad\quadquad\quad \forall\,u{\rm i}n W^{1,p}({\cal O}megaega)\;\;\textrm{and}\, \,r{\rm i}n(0,\,R),& \\[1,7mm] \lambdaabel{quartasuPhi} & \Phi\mathcal{\widetilde{A}}=\mathcal{\widetilde{A}}_1\Phi \,\,\quaduad\quaduad\textrm{on}\; W^{2,p}({\cal O}megaega),&\\[1,9mm] \lambdaabel{primasuPsi} & \Psi\mathcal{\widetilde{A}}={\Psi}_1\quad\quaduad\quaduad\textrm{on}\; W^{2,p}({\cal O}megaega),\quad\quad\quad {\Psi}_1{\rm i}n W^{1,p}({{\cal O}megaega})^*,& {\rm e}nd{alignat} where \betaegin{equation}\lambdaabel{tildeA1} \mathcal{\widetilde{A}}_1= D_r\betaig[{h}(r)D_r]+2\frac{h(r)}{r}D_r\;. 
{\rm e}nd{equation} To state our result concerning the identification problem $\textrm{P}(\textrm{K}), \textrm{K}{\rm i}n\{\textrm{D,N}\}$, we need to make also the following assumptions on the data $f,\,u_0,\,u_1,\,g_1,\,g_2$: \betaegin{alignat}{8} \lambdaabel{richiestasuf} &f{\rm i}n C^{1+\betaeta}\betaig([0,T];L^{p}({\cal O}megaega)\betaig)\,,\quad f(0,\cdot){\rm i}n W^{2,p}({\cal O}mega)\,, &\\[2,3mm] \lambdaabel{richiesteperu0eu1} &u_0{\rm i}n W^{4,p}({\cal O}megaega)\;,\quad {\cal B}u_0{\rm i}n W_{\,\textrm{K}}^{2\,{\rm d}lta,p}({\cal O}megaega)\,,\\[2,3mm] &u_1{\rm i}n C^{2+\betaeta}\betaig([0,T];L^{p}({\cal O}megaega)\betaig)\cap C^{1+\betaeta}\betaig([0,T];W^{2,p}({\cal O}megaega)\betaig)\,,\,&\\[2,3mm] \lambdaabel{richiestaperAu0} &\mathcal{A}u_0+f(0,\cdot)-D_tu_1(0,\cdot) {\rm i}n W_{\textrm{K}}^{2,p}({\cal O}megaega)\,,&\\[2,3mm] \lambdaabel{richiestaperA2u0} &F:=k_0'{\cal C}u_0+k_0{\cal B}u_0+{\mathcal{A}}^2u_0+\mathcal{A}f(0,\cdot)-D_t^2u_1(0,\cdot)+D_tf(0,\cdot){\rm i}n W_{\,\textrm{K}}^{2\betaeta,p}({\cal O}megaega)\,, & \\[2,3mm] &g_1{\rm i}n C^{2+\betaeta}\betaig([0,T];L_{2}^{p}(0,R)\betaig)\cap C^{1+\betaeta}\betaig([0,T];W_{2}^{2,p}(0,R)\betaig),\quad\; \frac{1}{r}D_tD_rg_1 C^{\betaeta}\betaig([0,T];L_{2}^{p}(0,R)\betaig),&{\nonumber}number\\ \lambdaabel{richiesteperg1}& &\\[2,3mm] \lambdaabel{richiesteperg2} & g_2 {\rm i}n C^{2+\betaeta}\betaig([0,T];\mathbb{R}\betaig)\,,& {\rm e}nd{alignat} where $\betaeta\!{\rm i}n\! (0,1/2)\betaackslash \{1/(2p)\}$, $\,{\rm d}lta\!{\rm i}n \!(\beta,1/2)\betaackslash \{1/(2p)\}$ and function $k_0$ in $(\ref{richiestaperA2u0})$ is defined by formula $(\ref{k01})$. 
Moreover, the spaces $W_{\textrm{K}}^{2,p}(\Omega)$ are defined by
\begin{equation}\label{WIK}
W_{\textrm{K}}^{2,p}(\Omega)=\big\{ w\in W^{2,p}(\Omega)\colon w\; \textrm{satisfies the homogeneous condition (K)}\big\}\,,
\end{equation}
whereas the spaces $W_{\,\textrm{K}}^{2\gamma,p}(\Omega)\equiv{\big(L^p(\Omega), W_{\textrm{K}}^{2,p}(\Omega)\big)}_{\gamma,p}$, $\gamma\in (0,1/2]\backslash \{1/(2p)\}$, are interpolation spaces between $W_{\textrm{K}}^{2,p}(\Omega)$ and $L^p(\Omega)$ and they are defined \cite[Section 4.3.3]{5}, respectively, by:
\begin{align}
\label{WDD} & W_{\textrm{D}}^{2\gamma,p}(\Omega)=
\left\{\!\!\begin{array}{ll}
W^{2\gamma,p}(\Omega)\,, & \textrm{if}\;\; 0<\gamma<1/(2p)\,,\\[2mm]
\{u\in W^{2\gamma,p}(\Omega)\colon u=0 \;\;\textrm{on}\;\partial\Omega\}\,, & \textrm{if}\;\; 1/(2p)<\gamma\le 1/2\,,
\end{array}\right. &\\[3mm]
\label{WNN} & W_{\textrm{N}}^{2\gamma,p}(\Omega)= W^{2\gamma,p}(\Omega)\,,\qquad\qquad\qquad\qquad\;\;\,\textrm{if}\;\; 0<\gamma\le 1/2\,.
\end{align}
\begin{remark}\label{data}
\emph{Assumption $(\ref{richiesteperg1})$ ensures that $D_t\widetilde{\mathcal{A}}_1g_1\in C^{2+\beta}\big([0,T], L_2^p(0,R)\big)$ (see formula $(\ref{N10})$).}
\end{remark}
\begin{remark}
\emph{Observe that our choice $p\in(3,+\infty)$ implies the embeddings}
\begin{align}
\label{EMB1}& W^{1,p}(\Omega)\hookrightarrow C^{(p-3)/p}(\overline{\Omega}),&\\
\label{EMB2}& W_2^{1,p}(0,R)\hookrightarrow C^{(p-3)/p}([0,R]).&
\end{align}
\emph{In fact, while $(\ref{EMB1})$ is a classical consequence of the Sobolev embedding theorems (\cite{1}, Theorem 5.4), $(\ref{EMB2})$ follows immediately from the inequalities}
\begin{eqnarray}
|u(t)-u(s)|\!\!\!&\leqslant&\!\!\! \int_s^t\!\xi^{-2/p}\xi^{2/p}|u'(\xi)|\, d\xi \leqslant \bigg[\int_s^t\!\xi^{-2/(p-1)}d\xi\bigg]^{1/p'}\!\|u'\|_{L_2^p(0,R)}\nonumber\\[2mm]
\label{hold}&\le&\!\!\!\Big(\frac{p-1}{p-3}\Big)^{\!1/p'}\!|t-s|^{(p-3)/p}\|u\|_{W_2^{1,p}(0,R)},\qquad\forall\,s,t\in[0,R]
\end{eqnarray}
\end{remark}
Assume also that $u_0$ satisfies the following conditions for some positive constant $m$:
\begin{eqnarray}
\label{J0} &J_0&\!\!\!\!\!(u_0)(r):=\big|\Phi[\mathcal{C}u_0](r)\big|\geqslant m\,,\qquad\;\forall\,r\in (0,R),\\[1.7mm]
\label{J1} &J_1&\!\!\!\!\!(u_0):=\Psi[J(u_0)]\neq 0\,,
\end{eqnarray}
where we have set:
\begin{equation}\label{J}
J(u_0)(x):=\bigg(\!\mathcal{B}u_0(x)-\frac{\Phi[\mathcal{B}u_0](|x|)}{\Phi[\mathcal{C}u_0](|x|)}\mathcal{C}u_0(x)\!\bigg)\exp\!\bigg[\int_{|x|}^{R}\!\frac{\Phi[\mathcal{B}u_0](\xi)}{\Phi[\mathcal{C}u_0](\xi)}d\xi\bigg]\,,\quad\forall\,x\in\Omega\,.
\end{equation}
\begin{remark}
\emph{According to $(\ref{primasuPhiePsi})$ and $(\ref{secondasuPhi})$ it follows that:
\begin{equation}
\Phi\big[J(u_0)\big](r)=\exp\!\bigg[\int_{r}^{R}\!\frac{\Phi[\mathcal{B}u_0](\xi)}{\Phi[\mathcal{C}u_0](\xi)}d\xi\bigg]\Phi\bigg(\!\mathcal{B}u_0-\frac{\Phi[\mathcal{B}u_0]}{\Phi[\mathcal{C}u_0]}\mathcal{C}u_0\!\bigg)(r)=0\,,\quad\;\forall\,r\in (0,R)\,.
\end{equation}
This means that operator $\Psi$} cannot be chosen of the form $\Psi\!=\!\Lambda\Phi$, where $\Lambda$ is in $L_{2}^p(0,R)^*$, \emph{i.e.\ $\Lambda[v]\!=\!\int_0^R r^2\rho(r)v(r)dr$ for any $v\in L_{2}^p(0,R)$ and some $\rho\in L_{2}^{p'}(0,R)$, otherwise condition $(\ref{J1})$ would not be satisfied. In the explicit case, when $\Phi$ and $\Psi$ have the integral representations $(\ref{Phi1})$ and $(\ref{Psi1})$, this means that no function $\psi$ of the form $\psi(x)=|x|^2\rho(|x|)$ is allowed.}
\end{remark}
\begin{remark}
\emph{When operators $\Phi$ and $\Psi$ are defined by $(\ref{Phi1})$, $(\ref{Psi1})$, conditions $(\ref{J0})$, $(\ref{J1})$ can be rewritten as}:
\begin{eqnarray}
\Big|\int_0^{\pi}\!\!\sin\!\theta\, d\theta\!\int_0^{2\pi}\!\!\mathcal{C}u_0(rx')\, d\varphi\,\Big|\geqslant m_1\,,\qquad\forall\,r\in (0,R)\,,\qquad\qquad\\[3mm]
\qquad\bigg|\int_{0}^{R}\!\!r^2\, dr\!\int_0^{\pi}\!\!\sin\!\theta\, d\theta\!
\int_0^{2\pi}\!\!\psi(rx')\bigg(\!\mathcal{B}u_0(rx')-\frac{\int_0^{\pi}\sin\!\theta\, d\theta\int_0^{2\pi}\mathcal{B}u_0(rx')\, d\varphi}{\int_0^{\pi}\sin\!\theta\, d\theta\int_0^{2\pi}\mathcal{C}u_0(rx')\, d\varphi}\mathcal{C}u_0(rx')\!\bigg)\;\nonumber\\[2.3mm]
\label{bo}\times\exp\!\Bigg[\!\int_{r}^{R}\!\!\!\frac{\;\int_0^{\pi}\sin\!\theta\, d\theta\int_0^{2\pi}\mathcal{B}u_0(\xi x')\, d\varphi}{\int_0^{\pi}\sin\!\theta\, d\theta\int_0^{2\pi}\mathcal{C}u_0(\xi x')\, d\varphi}d\xi\Bigg] d\varphi\,\bigg|\geqslant m_2\qquad\quad
\end{eqnarray}
\emph{for some positive constants $m_1$ and $m_2$.}
\end{remark}
Finally, we introduce the Banach spaces ${\mathcal{U}}^{\,s,p}(T)$, ${\mathcal{U}}_{\textrm{K}}^{\,s,p}(T)$ $(\textrm{K}\in\{\textrm{D,N}\})$, which are defined for any $s\in \mathbb{N}\backslash\{0\}$ by:
\begin{equation}\label{Us}
\left\{\!\!\begin{array}{l}
{\mathcal{U}}^{\,s,p}(T)=C^s\big([0,T];L^p(\Omega)\big)\cap C^{s-1}\big([0,T];W^{2,p}(\Omega)\big)\,,\\[2mm]
{\mathcal{U}}_{\textrm{K}}^{\,s,p}(T)=C^s\big([0,T];L^p(\Omega)\big)\cap C^{s-1}\big([0,T];W_{\textrm{K}}^{2,p}(\Omega)\big)\,.
\end{array}\right.
\end{equation}
Moreover, we list some further consistency conditions:
\begin{align}
\label{DDV} &\qquad(\textrm{C2,D})\quad\qquad\quad\, {v_0}(x)=0,\hskip 2.55truecm \forall\, x\in \partial\Omega\,,&\\[2mm]
\label{NNV} &\qquad(\textrm{C2,N})\quad\qquad\;\;\,\;\frac{\partial v_0}{\partial\nu}(x)=0,\hskip 2.37truecm\forall\, x\in \partial\Omega\,,&
\end{align}
\begin{eqnarray}
\label{PHIV1}&& \Phi[v_0](r)=D_tg_1(0,r)-\Phi[D_tu_1(0,\cdot)](r),\qquad\forall\, r\in (R_1,R_2),\\[1.5mm]
\label{PSIV1}&& \Psi[v_0]=D_tg_2(0)-\Psi[D_tu_1(0,\cdot)]\,,
\end{eqnarray}
where
\begin{equation}\label{v0}
v_0(x):=\mathcal{A}u_0(x)+f(0,x)-D_tu_1(0,x)\,,\qquad\forall\,x\in\Omega\,.
\end{equation}
\begin{theorem}\label{sfera}
Let the coefficients $a_{i,j}$ $(i,j=1,2,3)$ be represented by $(\ref{condsuaij})$, where the functions $a, b, c, d$ satisfy $(\ref{regular})$, $(\ref{bcd})$. Moreover, let assumptions $(\ref{ipotesibijeci})$, $(\ref{p})-(\ref{primasuPsi})$ be fulfilled and assume that the data enjoy properties $(\ref{richiestasuf})-(\ref{richiesteperg2})$ and satisfy $(\ref{J0})$, $(\ref{J1})$ and the consistency conditions $(\emph{C}1,\emph{K})$ $($cf.\ $(\ref{DD1})$, $(\ref{NN1}))$, $(\emph{C}2,\emph{K})$ as well as $(\ref{1.18})$, $(\ref{1.19})$, $(\ref{PHIV1})$, $(\ref{PSIV1})$.\\
Then there exists $T^{\ast}\in (0,T]$ such that the identification problem $\textrm{P}(\textrm{K})\textrm{I}$ admits a unique solution $(u,k)\in{\mathcal{U}}^{\,2,p}(T^{\ast})\times C^{\beta}\big([0,T^{\ast}], W_{2}^{1,p}(0,R)\big)$ depending continuously on the data with respect to the norms pointed out in $(\ref{richiestasuf})\!-\!(\ref{richiesteperg2})$.\\
In the case of the specific operators $\Phi$, $\Psi$ defined by $(\ref{Phi1})$, $(\ref{Psi1})$ the previous results are still true if $\psi\in C^1(\overline{\Omega})$, with $\psi_{|_{\partial\Omega}}\!=\!0$ when $\emph{K}\!=\!\emph{D}$.
\end{theorem}
\begin{corollary}\label{PHIPSIBAll}
When $\Phi$ and $\Psi$ are defined by $(\ref{Phi1})$ and $(\ref{Psi1})$, respectively, and the coefficients $a_{i,j}\;(i,j=1,2,3)$ are represented by $(\ref{condsuaij})$, conditions $(\ref{primasuPhiePsi})-(\ref{primasuPsi})$ are satisfied under assumptions $(\ref{regular})$, $(\ref{p})$ and the hypothesis $\psi\in C^1(\overline{\Omega})$, with ${\psi}_{|_{\partial\Omega}}\!=\!0$ when $\emph{K}\!=\!\emph{D}$.
\end{corollary}
\begin{proof}
From definition $(\ref{Psi1})$ and H\"older's inequality it immediately follows
\begin{equation}\label{psinorm}
\big|\Psi[v]\big|\leqslant {\|\psi\|}_{C(\overline{\Omega})}{\|v\|}_{L^1(\Omega)}\leqslant {\bigg[\frac{4}{3}\pi R^3\bigg]}^{\!1/{p'}}\!{\|\psi\|}_{C(\overline{\Omega})}{\|v\|}_{L^p(\Omega)}\,.
\end{equation}
Hence, from $(\ref{0.1})$ and $(\ref{psinorm})$ we have that $(\ref{primasuPhiePsi})$ is satisfied. Definition $(\ref{Phi1})$ easily implies $(\ref{secondasuPhi})$ and $(\ref{terzasuPhi})$, as we have already noted in $(\ref{Drj})$. So, it remains only to prove that decompositions $(\ref{quartasuPhi})$ and $(\ref{primasuPsi})$ hold.\\
When the coefficients $a_{i,j}$ are represented by $(\ref{condsuaij})$ the second-order differential operator $\mathcal{A}$ can be represented, in spherical co-ordinates, by the operator $\widetilde{\mathcal{A}}$ defined by $(\ref{tildeA})$.
Our next task consists in computing $\Phi\big[\widetilde{\mathcal{A}}w\big]$ for any $w\in W_{\textrm{K}}^{2,p}(\Omega)$, $p\in(3,+\infty)$. Observe first that from $(\ref{0.1})$ and $(\ref{tildeA1})$ it follows
\begin{equation}\label{primo}
\Phi[\widetilde{\mathcal{A}}_1w](r) = \int_0^{\pi}\!\!\sin\!\theta\, d\theta\!\int_0^{2\pi}\!\!\lambda({Rx}')\Big\{\!D_r[h(r)D_r w(rx')]+2\frac{h(r)}{r}D_rw(rx')\!\Big\}\, d\varphi=\widetilde{\mathcal{A}}_1\Phi[w](r)
\end{equation}
\vskip -0.3truecm \par \noindent
Since $p\in (3,+\infty)$, using the Sobolev embedding theorem of $W^{1,p}(\Omega)$ into $C(\overline{\Omega})$ and the well-known formulae
\begin{equation}\label{Dr}
\left\{\!\!\begin{array}{lll}
D_{r}\!\!\! & = &\!\!\!\cos\!\varphi\sin\!\theta\, D_{x_1}+\sin\!\varphi\sin\!\theta\, D_{x_2}+\cos\!\theta\, D_{x_3}\,,\\[1.7mm]
D_{\varphi}\!\!\! & = &\!\!\!-r\sin\!\varphi\sin\!\theta\, D_{x_1}+r\cos\!\varphi\sin\!\theta\, D_{x_2}\,,\\[1.7mm]
D_{\theta}\!\!\! & = &\!\!\!r\cos\!\varphi\cos\!\theta\, D_{x_1}+r\sin\!\varphi\sin\!\theta\, D_{x_2}-r\sin\!\theta\, D_{x_3}\,,
\end{array}\right.
\end{equation}
it can be easily shown that $(D_{\varphi}w)/(r\sin\!\theta)$ and $(D_{\theta}w)/r$ are bounded, while the functions $(D_{\varphi}^2w)/\sin\!\theta$ and $D_{\theta}(\sin\!\theta\, D_{\theta}w)$ belong to $L^1(\partial B(0,r))$ for every $r\in (0,R)$.
Therefore, integrating by parts, we obtain
\begin{align}
&\Phi\Big[\frac{{a}(r)+{b}(r)}{r^2\sin\!\theta}\Big({(\sin\!\theta)}^{-1}{D_{\varphi}^2w}+D_{\theta}(\sin\!\theta\, D_{\theta}w)\!\Big)\!\Big]\!(r)\nonumber\\[3mm]
\label{terzo}&\quad =\frac{{a}(r)+{b}(r)}{r^2}\bigg\{\!\int_0^{\pi}\!\bigg[\frac{D_{\varphi}w(rx')}{\sin\!\theta}\bigg|_{\varphi=0}^{\varphi=2\pi}\,\bigg] d\theta+\int_{0}^{2\pi}\!\Big[{D_{\theta}w(rx')\sin\!\theta}\,\Big|_{\theta=0}^{\theta=\pi}\,\Big] d\varphi\bigg\}=0\,,\\[3mm]
&\Phi\Big[\,\frac{1}{r^2\sin\!\theta}\Big({(\sin\!\theta)}^{-1}{D_{\varphi}\big[\widetilde{c}(r,\varphi,\theta)D_{\varphi}w\big]}+D_{\theta}\big[\widetilde{c}(r,\varphi,\theta)\sin\!\theta\, D_{\theta}w\big]\Big)\!\Big]\!(r)\nonumber\\[3mm]
\label{quarto}&\quad =\frac{1}{r^2}\bigg\{\!\int_0^{\pi}\!\bigg[\frac{\widetilde{c}(r,\varphi,\theta)D_{\varphi}w(rx')}{\sin\!\theta}\bigg|_{\varphi=0}^{\varphi=2\pi}\,\bigg] d\theta+\int_{0}^{2\pi}\!\Big[{\widetilde{c}(r,\varphi,\theta)D_{\theta}w(rx')\sin\!\theta}\,\Big|_{{\theta=0}}^{{\theta=\pi}}\,\Big] d\varphi\bigg\}=0.
\end{align}
Hence, from $(\ref{primo})$, $(\ref{terzo})$, $(\ref{quarto})$ we find that $(\ref{quartasuPhi})$ holds for every $w\in W_{\textrm{K}}^{2,p}(\Omega)$ with $p\in (3,+\infty)$.\\
Let now $\Psi$ be the functional defined in $(\ref{Psi1})$. Analogously to what we have done for $\Phi$, we apply $\Psi$ to both sides in $(\ref{tildeA})$.
Performing computations similar to those made above and using the assumption $\psi_{|_{\partial\Omega}}\!=\!0$ when $\textrm{K}=\textrm{D}$, which ensures that the surface integral vanishes, we obtain the equation
$$\Psi[\widetilde{\mathcal{A}}w]={\Psi}_1[w]\,,\qquad w\in W_{\textrm{K}}^{2,p}(\Omega)\,,$$
\vskip -0.3truecm \par \noindent
where
\vskip -0.3truecm
\begin{align}
& {\Psi}_1[w]= -\!\int_{0}^{R}\!\!r^2\,{h}(r)dr\!\int_0^{\pi}\!\!\sin\!\theta\, d\theta\!\int_0^{2\pi}\!\!D_rw(rx')D_r\psi(rx')\,d\varphi&\nonumber\\[1.7mm]
&\qquad\quad-\!\int_{0}^{R}\!\!r^2dr\!\int_0^{\pi}\!\!\sin\!\theta\, d\theta\!\int_0^{2\pi}\!\big[{a}(r)+{b}(r)+\widetilde{c}(r,\varphi,\theta)\big]\frac{D_{\varphi}w(rx')}{r\sin\!\theta}\,\frac{D_{\varphi}\psi(rx')}{r\sin\!\theta}\,d\varphi&\nonumber\\[1.7mm]
\label{psi11ball}&\qquad\quad-\int_{0}^{R}\!\!r^2dr\!\int_0^{\pi}\!\!\sin\!\theta\, d\theta\!\int_0^{2\pi}\!\big[{a}(r)+{b}(r)+\widetilde{c}(r,\varphi,\theta)\big]\frac{{D_{\theta}w(rx')}}{r}\,\frac{D_{\theta}\psi(rx')}{r}\,d\varphi\,.&
\end{align}
Now it is an easy task to show that $\Psi_1$ defined in $(\ref{psi11ball})$ belongs to ${W^{1,p}(\Omega)}^{\ast}$.
Indeed, using formulae $(\ref{Dr})$ and H\"older's inequality, we can easily find
\begin{equation}\label{C1}
|\Psi_1[w]|\leqslant C_1\|\nabla w\|_{L^{p}(\Omega)}\leqslant C_1\|w\|_{W^{1,p}(\Omega)}\,,
\end{equation}
where $C_1>0$ depends only on ${\|\psi\|}_{C^{1}(\overline{\Omega})}$ and $\max\!\big[\|h\|_{L^{\infty}(0,R)},{\|a+b+c\|}_{L^{\infty}(\Omega)}\big]$.\\
Hence also decomposition $(\ref{primasuPsi})$ holds and this completes the proof.
\end{proof}
\section{An equivalence result in the concrete case}
\setcounter{equation}{0}
Taking advantage of the results proved in \cite{2}, we limit ourselves to sketching the procedure leading to the necessary equivalence result.\\
We introduce the new triplet of unknown functions $(v, l, q)$ defined by
\begin{eqnarray}\label{v,h,q}
v(t,x)= D_tu(t,x)-D_t{u}_1(t,x)\,,\quad\; l(t)=k(t,R_2)\,,\quad\; q(t,r)=D_rk(t,r)\,,\quad
\end{eqnarray}
so that $u$ and $k$ are given, respectively, by the following formulae
\begin{eqnarray}
u(t,x)\!\!\!&=&\!\!\!u_1(t,x)-u_1(0,x)+u_0(x)+\int_0^t\!v(s,x)ds,\quad\;\forall\,(t,x)\in[0,T]\times\Omega,\quad\\[1mm]
k(t,r)\!\!\!&=&\!\!\!l(t)-\int_{r}^{R}\!\!q(t,\xi)d\xi:=l(t)-Eq(t,r),\quad\,\forall\,(t,r)\in[0,T]\times(0,R).
\end{eqnarray}
Then problem $(\ref{problem})$, $(\ref{u0})-(\ref{g22})$ can be shown to be equivalent to the following identification problem:
\begin{eqnarray}\label{problem1}
D_tv(t,x)\!\!\!&=&\!\!\!\mathcal{A}v(t,x)+\int_0^t\!k(t-s,|x|)\big[\mathcal{B}v(s,x)+\mathcal{B}D_{t}u_1(s,x)\big]ds+k(t,|x|)\mathcal{B}u_0(x)\nonumber\\[1.2mm]
\!\!\!& &\!\!\!+\!\int_0^t\!D_{|x|}k(t-s,|x|)\big[\mathcal{C}v(s,x)+\mathcal{C}D_t{u}_1(s,x)\big]ds+D_{|x|}k(t,|x|)\mathcal{C}u_0(x)\nonumber\\[1.7mm]
&&\!\!\!+\mathcal{A}D_t{u}_1(t,x)-D_t^{2}{u}_1(t,x)+D_tf(t,x),\qquad\forall\,(t,x)\in[0,T]\times\Omega,\quad
\end{eqnarray}
\vskip -0.85truecm
\begin{align}
\label{v01}&\quad v(0,x)=\mathcal{A}u_0(x)+f(0,x)-D_tu_1(0,x):=v_0(x),\qquad\forall\,x\in\Omega,&\\[1.5mm]
&\quad v\; \textrm{satisfies the homogeneous boundary condition (K)},\quad\textrm{K}\in\{\textrm{D,N}\}\,,&\\[3mm]
\label{hhh}&\quad l(t)= l_0(t)+N_3(v,l,q)(t),\qquad\forall\;t\in[0,T],\\[3mm]
\label{q3}&\quad q(t,r)=q_0(t,r)+J_2(u_0)(r)N_3(v,l,q)(t)+N_2(v,l,q)(t,r),\quad\forall\,(t,r)\in [0,T]\times (0,R),
\end{align}
\vskip -0.3truecm \par \noindent
where we have set
\begin{align}
\label{h0}& l_0(t):={[J_1(u_0)]}^{-1}N_0(u_0,u_1,g_1,g_2,f)(t)\,,\qquad\forall\;t\in[0,T],\\[2.3mm]
\label{q0}& q_0(t,r):=J_2(u_0)(r)l_0(t)+N_3^0(u_0,u_1,g_1,f)(t,r),\;\quad\forall\,(t,r)\in [0,T]\times (0,R).
\end{align}
We recall that operators $J_0$, $J_1$ and $J_2$ are defined, respectively, by $(\ref{J0})$, $(\ref{J1})$ and
\begin{equation}\label{J2}
J_2(u_0)(r)=-\frac{\Phi[\mathcal{B}u_0](r)}{\Phi[\mathcal{C}u_0](r)}\exp\!\bigg[\!\int_{r}^{R_2}\frac{\Phi[\mathcal{B}u_0](\xi)}{\Phi[\mathcal{C}u_0](\xi)}d\xi\bigg],\qquad\forall\,r\in (0,R).
\end{equation}
To define operators $N_2$ and $N_3$ appearing in $(\ref{hhh})$, $(\ref{q3})$ we need to introduce the operators $N_1$ and $L$:
\begin{align}
&\;{N}_1(v,l,q)(t,|x|):=-\!\int_0^t\!\big[l(t-s)-Eq(t-s,|x|)\big]\big[\mathcal{B}v(s,x)+\mathcal{B}D_{t}u_1(s,x)\big]ds&\nonumber\\
\label{N1}&\qquad\qquad-\int_0^t\!\!q(t-s,|x|)\big[\mathcal{C}v(s,x)+\mathcal{C}D_t{u}_1(s,x)\big]ds\,,\;\quad\forall\;(t,x)\in[0,T]\!\times\!\Omega\,,&\\[3mm]
\label{L}&\,Lg(t,r):=\int_{r}^{R_2}\!\!\exp\!\bigg[\int_{r}^{\eta}\frac{\Phi[\mathcal{B}u_0](\xi)}{\Phi[\mathcal{C}u_0](\xi)}d\xi\bigg]\frac{g(t,\eta)}{\Phi[\mathcal{C}u_0](\eta)}d\eta\,,\quad\;\forall\, g\in L^1((0,T)\times (0,R)).&
\end{align}
Now, denoting by $I$ the identity operator, define $N_2$ and $N_3$ via the formulae
\begin{eqnarray}
N_2(v,l,q)(t,r)\!\!\!&:=&\!\!\!\frac{1}{\Phi[\mathcal{C}u_0](r)}\big[I+\Phi[\mathcal{B}u_0](r)L\big]\,\Phi[{N_{1}}(v,l,q)(t,\cdot)](r)\nonumber\\[2mm]
\label{N2}&:=&\!\!\!J_3(u_0)(r)\,\Phi[{N_{1}}(v,l,q)(t,\cdot)](r),\qquad\qquad\\[3mm]
N_3(v,l,q)(t)\!\!\!&:=&\!\!\!{[J_1(u_0)]}^{-1}\Big\{\Psi[{N_{1}}(v,l,q)(t,\cdot)]\!-\!\Psi[N_2(v,l,q)(t,\cdot)\mathcal{C}u_0]\nonumber\\[2mm]
\label{N3}&&\!\!\!+\,\Psi\big[E\big(N_2(v,l,q)(t,\cdot)\big)\mathcal{B}u_0\big]\!-\!{\Psi}_1[v(t,\cdot)]\Big\}\,,
\end{eqnarray}
where $\Psi_1$ is defined by $(\ref{psi11ball})$.\\
Finally, to define operators $N_0$ and $N_3^0$ appearing in $(\ref{h0})$, $(\ref{q0})$ we need to introduce first the operators $N_1^0$ and $N_2^0$, where operators $\widetilde{\mathcal{A}}$ and $\widetilde{\mathcal{A}}_1$ are defined, respectively, by $(\ref{tildeA})$ and $(\ref{tildeA1})$:
\begin{eqnarray}
N_1^{0}(u_1,g_1,f)(t,r)\!\!\!&=&\!\!\!D_t^2g_1(t,r)-D_t{\widetilde{\mathcal{A}}}_1g_1(t,r)\nonumber\\[2mm]
&&\!\!\!-\,\Phi[D_tf(t,\cdot)](r)\,,\qquad\forall\,(t,r)\in[0,T]\!\times\!(0,R),\label{N10}\\[3.5mm]
\label{N20}N_2^{0}(u_1,g_2,f)(t)\!\!\!&=&\!\!\!D_t^2g_2(t)-{\Psi}_1[D_tu_1(t,\cdot)]-{\Psi}[D_tf(t,\cdot)]\,,\qquad\forall\,t\in[0,T]\,.
\end{eqnarray}
Then we define
\begin{eqnarray}
N_3^0(u_0,u_1,g_1,f)(t,r)\!\!\!&:=&\!\!\!\frac{1}{\Phi[\mathcal{C}u_0](r)}\big[I+\Phi[\mathcal{B}u_0](r)L\big]N_1^{0}(u_1,g_1,f)(t,r)\nonumber\\[2mm]
\label{N30}&:=&\!\!\!J_3(u_0)(r)N_1^{0}(u_1,g_1,f)(t,r),\\[3mm]
N_0(u_0,u_1,g_1,g_2,f)(t)\!\!\!&:=&\!\!\!N_2^{0}(u_1,g_2,f)(t)-\Psi[N_3^0(u_0,u_1,g_1,f)(t,\cdot)\mathcal{C}u_0]\nonumber\\[1.8mm]
\label{N0}&&\!\!\!-\,\Psi\big[E\big(N_3^0(u_0,u_1,g_1,f)(t,\cdot)\big)\mathcal{B}u_0\big]\,.
\end{eqnarray}
Finally, we introduce the function $k_0$ appearing in $(\ref{richiestaperA2u0})$:
\begin{eqnarray}\label{k01}
k_0(r)\!\!\!&=&\!\!\![J_1(u_0)]^{-1}\Big\{\Psi[\widetilde{l}_2]+N_2^{0}(u_1,g_2,f)(0)-{\Psi}_1[v_0]\Big\}\exp\!\bigg[\int_{r}^{R_2}\frac{\Phi[\mathcal{B}u_0](\xi)}{\Phi[\mathcal{C}u_0](\xi)}d\xi\bigg]\nonumber\\[1.2mm]
&&\!\!\!+\int_{R_2}^{r}\!\!\exp\!\bigg[\int_{r}^{\eta}\!\frac{\Phi[\mathcal{B}u_0](\xi)}{\Phi[\mathcal{C}u_0](\xi)}d\xi\bigg]\frac{N_1^0(u_1,g_1,f)(\eta)}{\Phi[\mathcal{C}u_0](\eta)}d\eta\,,\quad\;\forall\;r\in (R_1,R_2)\,,
\end{eqnarray}
where for any $x\in\Omega$ we set
\begin{eqnarray}
\widetilde{l}_2(x)\!\!\!&:=&\!\!\!\mathcal{C}u_0(x)\bigg\{\frac{N_1^0(u_1,g_1,f)(|x|)}{\Phi[\mathcal{C}u_0](|x|)}-\frac{\Phi[\mathcal{B}u_0](|x|)}{\Phi[\mathcal{C}u_0](|x|)}\int_{R_2}^{|x|}\!\!\exp\!\bigg[\int_{|x|}^{\eta}\frac{\Phi[\mathcal{B}u_0](\xi)}{\Phi[\mathcal{C}u_0](\xi)}d\xi\bigg]\nonumber\\[2mm]
\label{l2}&&\!\!\!\times\frac{N_1^0(u_1,g_1,f)(\eta)}{\Phi[\mathcal{C}u_0](\eta)}d\eta\bigg\}+\mathcal{B}u_0(x)\int_{R_2}^{|x|}\!\!\exp\!\bigg[\int_{|x|}^{\eta}\frac{\Phi[\mathcal{B}u_0](\xi)}{\Phi[\mathcal{C}u_0](\xi)}d\xi\bigg]\frac{N_1^0(u_1,g_1,f)(\eta)}{\Phi[\mathcal{C}u_0](\eta)}d\eta\,.\nonumber
\end{eqnarray}
We can summarize the result sketched in this section in the following equivalence theorem.
\begin{theorem}\label{3.1}
The pair $(u,k)\in{\mathcal{U}}^{\,2,p}(T)\times C^{\beta}\big([0,T];W_{2}^{1,p}(0,R)\big)$ is a solution to the identification problem $\emph{P(K)I}$, $\emph{K}\in\{\emph{D,N}\}$, if and only if the triplet $(v,l,q)$ defined by $(\ref{v,h,q})$ belongs to ${\mathcal{U}}_{\emph{K}}^{\,1,p}(T)\times C^{\beta}\big([0,T];\mathbb{R}\big)\times C^{\beta}\big([0,T];L_{2}^{p}(0,R)\big)$ and solves problem $(\ref{problem1})\!-\!(\ref{q3})$.
\end{theorem}
\section{An abstract formulation of problem (\ref{problem1})--(\ref{q3})}
\setcounter{equation}{0}
Starting from the result of the previous section, we can reformulate our identification problem in a Banach space framework.\\
Let $A:\mathcal{D}(A)\subset X \to X$ be a linear closed operator satisfying the following assumptions:
\begin{itemize}
\item[(H1)]\emph{there exists $\zeta\in (\pi/2,\pi)$ such that the resolvent set of $A$ contains $0$ and the open sector ${\Sigma}_{\zeta}=\{\mu\in \mathbb{C}:|\arg\mu|<\zeta\}$;}
\item[(H2)]\emph{there exists $M>0$ such that ${\|{(\mu I-A)}^{-1}\|}_{\mathcal{L}(X)}\leqslant M|\mu|^{-1}$ for every $\mu\in {\Sigma}_{\zeta}$;}
\item[(H3)]\emph{$X_1$ and $X_2$ are Banach spaces such that $\mathcal{D}(A)=X_2\hookrightarrow X_1\hookrightarrow X$. Moreover, $\mu\to {(\mu I-A)}^{-1}$ belongs to $\mathcal{L}(X;X_1)$ and satisfies the estimate ${\|{(\mu I-A)}^{-1}\|}_{\mathcal{L}(X;X_1)}\leqslant M|\mu|^{-1/2}$ for every $\mu\in {\Sigma}_{\zeta}$.}
\end{itemize}
Here $\mathcal{L}(Z_1;Z_2)$ denotes, for any pair of Banach spaces $Z_1$ and $Z_2$, the Banach space of all bounded linear operators from $Z_1$ into $Z_2$ equipped with the uniform norm. In particular we set $\mathcal{L}(X)=\mathcal{L}(X;X)$.\\
By virtue of assumptions (H1), (H2) we can define the analytic semigroup $\{\mathrm{e}^{tA}\}_{t\geqslant 0}$ of bounded linear operators in $\mathcal{L}(X)$ generated by $A$. As is well-known, there exist positive constants $\widetilde{c_{k}}(\zeta)\;(k\in\mathbb{N})$ such that
$$
\|A^k\mathrm{e}^{tA}\|_{\mathcal{L}(X)}\leqslant \widetilde{c_{k}}(\zeta)Mt^{-k},\qquad\forall\, t\in {\mathbb{R}}_{+},\;\forall\, k\in\mathbb{N}.
$$
After endowing $\mathcal{D}(A)$ with the graph norm, we can define the following family of interpolation spaces ${\mathcal{D}}_{A}(\beta,p)$, $\beta\in (0,1)$, $p\in [1,+\infty]$, which are intermediate between $\mathcal{D}(A)$ and $X$:
\begin{eqnarray}\label{interpol1}
{\mathcal{D}}_{A}(\beta,p)=\Big\{x\in X: |x|_{{\mathcal{D}}_{A}(\beta,p)}<+\infty\Big\},\qquad\mbox{if } p\in [1,+\infty],
\end{eqnarray}
where
\begin{equation}
{|x|}_{{\mathcal{D}}_{A}(\beta,p)} = \left\{\begin{array}{l}
\,\displaystyle\Big(\int_0^{+\infty}\!t^{(1-\beta)p-1}\|A\mathrm{e}^{tA}x\|_X^p\,dt\Big)^{\!1/p},\quad\mbox{if } p\in [1,+\infty),\\[5mm]
\sup_{0<t\le 1}\big(t^{1-\beta}\|A\mathrm{e}^{tA}x\|_X\big),\quad\hskip 1.2truecm\mbox{if } p=\infty.
\end{array}\right.
\end{equation}
\par \noindent
They are well defined by virtue of assumption (H1). Moreover, we set
\begin{equation}\label{interpol2}
{\mathcal{D}}_{A}(1+\beta,p)=\{x\in\mathcal{D}(A):Ax\in {\mathcal{D}}_{A}(\beta,p)\}\,.
\end{equation}
Consequently, ${\mathcal{D}}_{A}(n+\beta,p)$, $n\in\mathbb{N}$, $\beta\in (0,1)$, $p\in [1,+\infty]$, turns out to be a Banach space when equipped with the norm
\begin{equation}
{\|x\|}_{{\mathcal{D}}_{A}(n+\beta,p)}=\sum_{j=0}^{n}{\|A^{j}x\|}_{X}+{|A^{n}x|}_{{\mathcal{D}}_{A}(\beta,p)}\,.
\end{equation}
In order to reformulate in an abstract form our identification problem $(\ref{problem1})\!-\!(\ref{q3})$ we need the following assumptions involving spaces, operators and data:
\begin{alignat}{9}
&(\textrm{H}4)\;\emph{$Y$ and $Y_1$ are Banach spaces such that $Y_1\hookrightarrow Y$;}&\nonumber\\[2mm]
&(\textrm{H}5)\;\emph{$B:\mathcal{D}(B)\subset X\rightarrow X$ is a linear closed operator such that $X_2\subset \mathcal{D}(B)$;}&\nonumber\\[2mm]
&(\textrm{H}6)\;\emph{$C:\mathcal{D}(C):=X_1\subset X\rightarrow X$ is a linear closed operator;}&\nonumber\\[2mm]
&(\textrm{H}7)\;\emph{$E\in\mathcal{L}(Y;Y_1)$, $\Phi\in\mathcal{L}(X;Y)$, $\Psi\in {X}^{\ast}$, ${\Psi}_{1}\in {X_1}^{\ast}$;}&\nonumber\\[2mm]
&(\textrm{H}8)\;\emph{$\mathcal{M}$ is a continuous bilinear operator from $Y\times {\widetilde X}_1$ to $X$ and from $Y_1\times X$ to $X$,}&\nonumber\\
&\qquad\;\emph{where $X_1\hookrightarrow {\widetilde X}_1$;}\nonumber\\[2mm]
&(\textrm{H}9)\;\emph{$J_1:X_2\rightarrow\mathbb{R}$, $J_2:X_2\rightarrow Y$, $J_3:X_2\rightarrow\mathcal{L}(Y)$ are three prescribed (non-linear)}&\nonumber\\
&\qquad\;\emph{operators};&\nonumber\\[1mm]
&(\textrm{H}10)\;\emph{$u_0, v_0\in X_2$, $Cu_0\in X_1$, $J_1(u_0)\neq 0$, $Bu_0\in \mathcal{D}_A(\delta,+\infty)$, $\delta\in (\beta,1/2)$;}&\nonumber\\[2mm]
&(\textrm{H}11)\;\emph{$q_0\in C^{\beta}([0,T];Y)$, $l_0\in C^{\beta}([0,T];\mathbb{R})$;}&\nonumber\\[2mm]
&(\textrm{H}12)\;\emph{$z_0\in C^{\beta}([0,T];X)$, $z_1\in C^{\beta}([0,T];{\widetilde X}_1)$, $z_2\in C^{\beta}([0,T];X)$;}&\nonumber\\[2mm]
&(\textrm{H}13)\;\emph{$Av_0+\mathcal{M}({\widetilde q}_0,{C}u_0)+{\widetilde l}_0{B}u_0-\mathcal{M}(E{\widetilde q}_0,Bu_0)+z_2(0,\cdot)\in\mathcal{D}_A(\beta,+\infty)$.}&\nonumber
\end{alignat}
The elements ${\widetilde q}_0$ and ${\widetilde l}_0$ appearing in $(\textrm{H}13)$ are defined by:
\begin{equation}\label{rem4.2.1}
\left\{\!\begin{array}{l}
\widetilde{l}_0=l_0(0)-\big[J_1(u_0)\big]^{-1}\Psi_1[v_0]\,,\\[3mm]
\widetilde{q}_0=q_0(0)+J_2(u_0)\big[J_1(u_0)\big]^{-1}\Psi_1[v_0]\,,
\end{array}\right.
\end{equation}
where $l_0$ and $q_0$ are the elements appearing in $(\textrm{H}11)$.
\begin{remark}\label{rem4.3}
\emph{In the explicit case we get the equations
\begin{equation}
\widetilde{l}_0=k_0(R_2)\,,\quad\widetilde{q}_0(r)=k_0'(r)\,,
\end{equation}
where $k_0$ is defined in $(\ref{k01})$.}
\end{remark}
We can now reformulate our direct problem: \emph{determine a function $v\in C^1([0,T];X)\cap C([0,T];X_2)$ such that}
\begin{eqnarray}\label{problem2}
v'(t)\!\!\!&=&\!\!\![\lambda_0I+A]v(t)+\!\int_0^t\!\!l(t-s)[{B}v(s)+z_0(s)]ds-\!\int_0^t\!\!\mathcal{M}\big(Eq(t-s),{B}v(s)+z_0(s)\big)ds\nonumber\\[2mm]
&&+\int_0^t \mathcal{M}\big(q(t-s),{C}v(s)+z_1(s)\big)ds+\mathcal{M}\big(q(t),{C}u_0\big)+l(t)Bu_0\nonumber\\[2mm]
&&-\,\mathcal{M}\big(Eq(t),Bu_0\big)+z_2(t),\hskip 3truecm\forall\;t\in[0,T],\\[2mm]
\label{v02} v(0)\!\!\!&=&\!\!\!v_0.
\end{eqnarray}
\begin{remark}\label{z0z1z2}
\emph{In the explicit case $(\ref{problem1})-(\ref{q3})$ we have $A=\mathcal{A}-\lambda_0I$, with a large enough positive $\lambda_0$, and the functions $z_0, z_1, z_2$ defined by}
\begin{eqnarray}\label{z1z2z3}
&z_0=D_t\mathcal{B}u_1\,,\qquad z_1=D_t\mathcal{C}u_1\,,\qquad z_2=D_t\mathcal{A}u_1-D_t^2u_1+D_tf,&
\end{eqnarray}
\emph{whereas $v_0, l_0, q_0$ are defined, respectively, via the formulae $(\ref{v0})$, $(\ref{h0})$, $(\ref{q0})$.}
\end{remark}
Introducing the operators
\begin{eqnarray}
\widetilde{R}_2(v,l,q)\!\!\!&:=&\!\!\!-{[J_1(u_0)]}^{-1}\Big\{\Psi\big[\mathcal{M}\big(J_3(u_0)\Phi[{N_{1}}(v,l,q)],Cu_0\big)\big]\nonumber\\[1.8mm]
\label{tildeR2}&&\qquad\quad-\,\Psi\big[\mathcal{M}\big(E\big(J_3(u_0)\Phi[{N_{1}}(v,l,q)]\big),{B}u_0\big)\big]-\Psi[{N_{1}}(v,l,q)]\Big\}\,,\\[1.8mm]
\label{tildeR3}\widetilde{R}_3(v,l,q)\!\!\!&:=&\!\!\!J_2(u_0)\widetilde{R}_2(v,l,q)+J_3(u_0)\Phi[{N_{1}}(v,l,q)]\,,\\[1.8mm]
\widetilde{S}_2(v)\!\!\!&:=&\!\!\!{[J_1(u_0)]}^{-1}\Big\{\Psi\big[\mathcal{M}\big(J_3(u_0){\Phi}_1[v],Cu_0\big)\big]\!+\!\Psi\big[\mathcal{M}\big(E\big(J_3(u_0){\Phi}_1[v]\big),Cu_0\big)\big]\!-\!{\Psi}_1[v]\Big\}\,,\nonumber\\
\label{tildeS2}\\
\label{tildeS3}\widetilde{S}_3(v)\!\!\!&:=&\!\!\!J_2(u_0)\widetilde{S}_2(v)\,,
\end{eqnarray}
the fixed-point system $(\ref{hhh})$, $(\ref{q3})$ for $l$ and $q$ becomes
\begin{eqnarray}
\label{ha1}l&=&l_0+\widetilde{R}_2(v,l,q)+\widetilde{S}_2(v)\,,\\[1.6mm]
\label{qa1}q&=&q_0+\widetilde{R}_3(v,l,q)+\widetilde{S}_3(v)\,.
\end{eqnarray}
The present situation is analogous to the one in \cite{3} (cf.\ Section 4). Consequently, also in this case we can apply the abstract results proved in \cite{2} (cf.\ Sections 5 and 6) to get the following local in time existence and uniqueness theorem.
\begin{theorem}\label{4.2}
Under assumptions $(\emph{H}1)-(\emph{H}13)$ there exists $T^{\ast}\in (0,T)$ such that for any $\tau\in (0,T^{\ast}]$ problem $(\ref{problem2})$, $(\ref{v02})$, $(\ref{ha1})$, $(\ref{qa1})$ admits a unique solution $(v,l,q)\in [C^{1+\beta}([0,\tau];X)\cap C^{\beta}([0,\tau];X_2)]\times C^{\beta}([0,\tau];\mathbb{R})\times C^{\beta}([0,\tau];Y)$.
\end{theorem}
\section{Solving the identification problem (\ref{problem1})--(\ref{q3})\newline and proving Theorem \ref{sfera}}
\setcounter{equation}{0}
The main difficulties we meet when we try to solve our identification problem $\textrm{P}(\textrm{K})$, $\textrm{K}\in\{\textrm{D,N}\}$, in the open ball $\Omega$ can be overcome by introducing the representation $(\ref{condsuaij})$ and the additional assumptions $(\ref{regular})\!-\!(\ref{bcd})$ for the coefficients $a_{i,j}\;(i,j=1,2,3)$ of $\mathcal{A}$. The basic result of this section is the following theorem.
\begin{theorem}\label{sfera1}
Let the coefficients $a_{i,j}$ $(i,j=1,2,3)$ be represented by $(\ref{condsuaij})$, where the functions $a, b, c, d$ satisfy $(\ref{regular})\!-\!(\ref{bcd})$. Moreover, let assumptions $(\ref{ipotesibijeci})$, $(\ref{p})\!-\!(\ref{richiesteperg2})$, $(\ref{J0})$, $(\ref{J1})$ be fulfilled along with the consistency conditions $(\ref{DDV})-(\ref{PSIV1})$.\\
Then there exists $T^{\ast}\in (0,T]$ such that the identification problem $(\ref{problem1})-(\ref{q3})$ admits a unique solution $(v,l,q)\in{\mathcal{U}}_{\emph{K}}^{\,1,p}(T^{\ast})\times C^{\beta}\big([0,T^{\ast}];\mathbb{R}\big)\times C^{\beta}\big([0,T^{\ast}];L_{2}^{p}(0,R)\big)$ depending continuously on the data with respect to the norms pointed out in $(\ref{richiestasuf})\!-\!(\ref{richiesteperg2})$.\\
In the case of the specific operators $\Phi$, $\Psi$ defined by $(\ref{Phi1})$, $(\ref{Psi1})$ the previous results are still true if $\psi\in C^1(\overline{\Omega})$, with $\psi_{|_{\partial\Omega}}\!=\!0$ when $\emph{K}\!=\!\emph{D}$.
\end{theorem}
\begin{proof}
We will show that under our assumptions $(\ref{condsuaij})-(\ref{bcd})$, $(\ref{ipotesibijeci})$ on the coefficients $a_{i,j}$, $b_{i,j}$, $c_j$ $(i,j=1,2,3)$ of the linear differential operators $\mathcal{A}$, $\mathcal{B}$, $\mathcal{C}$ defined in $(\ref{A})$ we can apply the abstract results of Section 4 to prove locally in time existence and uniqueness of the solution $(u,k)$ to the identification problem $\textrm{P}(\textrm{K})$, $\textrm{K}\in\{\textrm{D,N}\}$.\\
For this purpose let $p\in (3,+\infty)$ and let us choose the Banach spaces $X$, ${\widetilde X}_1$, $X_1$, $X_2$, $Y$, $Y_1$ appearing in assumptions $(\textrm{H}1)-(\textrm{H}12)$ according to the rule
\begin{equation}
X=L^p(\Omega)\,,\quad {\widetilde X}_1=W^{1,p}(\Omega)\,,\quad X_1=W^{1,p}_{\textrm{K}}(\Omega)\,,\quad X_2=W_{\textrm{K}}^{2,p}(\Omega)\,,
\end{equation}
\begin{equation}
Y=L_{2}^p(0,R)\,,\quad Y_1=W_{2}^{1,p}(0,R)\,.
\end{equation}
Since $p\in (3,+\infty)$, reasoning as in the first part of Section 5 in \cite{3}, we conclude that $A=\mathcal{A}-\lambda_0I$ satisfies (H1)--(H3) in the sector $\Sigma_{\zeta}$ for some $\lambda_0\in \mathbb{R}_{+}$.
\par \noindent
Since assumptions $(\textrm{H}4)-(\textrm{H}6)$ are obviously fulfilled, we have that $(\textrm{H}1)-(\textrm{H}6)$ hold.
Define now operators $\Phi$, $\Psi$, $\Psi_1$, respectively, by $(\ref{Phi1})$, $(\ref{Psi1})$, $(\ref{psi11ball})$ and operators $E$ and $\mathcal{M}$ by
\begin{eqnarray}
\label{EE}Eq(r)=\int_r^{R_2}\!\!q(\xi)d\xi,\qquad\forall\,r\in [0,R],\\[1.8mm]
\label{MM}\qquad\mathcal{M}(q,w)(x)=q(|x|)w(x),\qquad\forall\,x\in\Omega.\quad
\end{eqnarray}
Then from H\"older's inequality and the fact that $p\in (3,+\infty)$ we get
\begin{eqnarray}
{\|Eq\|}_{L_{2}^p(0,R)}^p\!\!\!&=&\!\!\!\int_0^R\!r^2{\bigg|\int_r^Rq(\xi)d\xi\bigg|}^p dr\leqslant \int_0^R\!r^2{\bigg[\int_0^R\xi^{-2/p}\xi^{2/p}|q(\xi)|\, d\xi\bigg]}^p dr\nonumber\\[1.8mm]
\label{EP}&\leqslant&\!\!\!{\|q\|}_{L_{2}^p(0,R)}^p\,\int_0^R\!\!r^2{\bigg[\int_0^R\!\xi^{-2/(p-1)}d\xi\bigg]}^{p-1}\!\!\!dr=\frac{R^{p}}{3}{\Big(\frac{p-1}{p-3}\Big)}^{p-1}{\|q\|}_{L_{2}^p(0,R)}^p\,.\qquad
\end{eqnarray}
Since $D_rEq(r)=-q(r)$, from $(\ref{EP})$ it follows:
\begin{eqnarray}
{\|Eq\|}_{W_{2}^{1,p}(0,R)}\!\!\!&=&\!\!\!{\Big[{\|Eq\|}_{L_{2}^p(0,R)}^p+{\|D_rEq\|}_{L_{2}^p(0,R)}^p\Big]}^{1/p}\nonumber\\[1.8mm]
&\leqslant&\!\!\!{\Big[\frac{R^{p}}{3}{\Big(\frac{p-1}{p-3}\Big)}^{\!p-1}\!+1\Big]}^{1/p}{\|q\|}_{L_{2}^p(0,R)}\,.
\end{eqnarray}
Hence $E\!\in\!\mathcal{L}\big(L_{2}^p(0,R);W_{2}^{1,p}(0,R)\big)$. Therefore, by virtue of $(\ref{0.1})$, $(\ref{psinorm})$, $(\ref{C1})$ assumption (H7) is satisfied.\\
Since $p\in (3,+\infty)$ we have the embedding $(\ref{EMB1})$. Then from the following inequalities,
\begin{eqnarray}
\label{M1}\|\mathcal{M}(q,w)\|_{L^p(\Omega)}^p\!\!\!&=&\!\!\!\int_{\Omega}|q(|x|)|^p |w(x)|^p\,dx\,\le\,\|w\|_{C(\overline{\Omega})}^p\int_{\Omega}|q(|x|)|^p\,dx\nonumber\\[2mm]
&\le&\!\!\!4\pi\|w\|_{C(\overline{\Omega})}^p\int_0^R r^{2}|q(r)|^p\,dr\,\le\, C\|w\|_{W^{1,p}(\Omega)}^p\|q\|_{L_{2}^p(0,R)}^p,\quad\;
\end{eqnarray}
we conclude that $\mathcal{M}$ is a bilinear continuous operator from $L_{2}^p(0,R)\times W^{1,p}(\Omega)$ to $L^p(\Omega)$. Moreover, using the embedding $(\ref{EMB2})$ it is an easy task to prove that $\mathcal{M}$ is also continuous from $W_{2}^{1,p}(0,R)\times L^p(\Omega)$ to $L^p(\Omega)$ and so (H8) is satisfied.\\
Then we define $J_1(u_0)$, $J_2(u_0)$, $J_3(u_0)$ according to formulae $(\ref{J1})$, $(\ref{J2})$, $(\ref{N2})$ and it immediately follows that assumption (H9) is satisfied, too.\\
Finally we estimate the vector $(v_0,z_0,z_1,z_2,l_0,q_0)$ in terms of the data $(f,u_0,u_1,g_1,g_2)$. Definitions $(\ref{N10})\!-\!(\ref{N0})$ imply that
\begin{align}
&\hskip 0.5truecm N_1^0(u_1,g_1,f),\;N_3^0(u_0,u_1,g_1,f)\in C^{\beta}([0,T];L_{2}^{p}(0,R)),&\nonumber\\[1.7mm]
&\hskip 1truecm N_2^0(u_1,g_2,f),\;N_0(u_0,u_1,g_1,g_2,f)\in C^{\beta}([0,T]).&\nonumber
\end{align}
Therefore from $(\ref{h0})$ and $(\ref{q0})$ we deduce
\begin{equation}
(l_0,q_0)\in C^{\beta}([0,T])\times C^{\beta}([0,T];L_{2}^{p}(0,R)),
\end{equation}
whereas from $(\ref{z1z2z3})$, $(\ref{v01})$ and hypotheses $(\ref{richiestasuf})\!-\!(\ref{richiestaperA2u0})$ it follows
\begin{align}
&(z_0,z_1,z_2)\in C^{\beta}([0,T];L^{p}(\Omega))\times C^{\beta}([0,T];W^{1,p}(\Omega))\times C^{\beta}([0,T];L^{p}(\Omega)),&\\[2.5mm]
&\qquad\qquad\quad v_0\in W_{\textrm{K}}^{2,p}(\Omega),\quad \mathcal{A}v_0+z_2(0,\cdot)\in W_{\textrm{K}}^{2\beta,p}(\Omega)\,.&
\end{align}
Hence assumptions $(\textrm{H}10)\!-\!(\textrm{H}12)$ are also satisfied.
To check condition $(\textrm{H}13)$ first we recall that in this case the interpolation space $\mathcal{D}_A(\beta,+\infty)$ coincides with the Besov space $B_{\textrm{H,K}}^{2\beta,p,\infty}(\Omega)\!\equiv\!{\big(L^p(\Omega), W_{\!\textrm{H,K}}^{2,p}(\Omega)\big)}_{\beta,\infty}$ (cf. \cite[section 4.3.3]{5}). Moreover, we recall that $B_{\textrm{H,K}}^{2\beta,p,p}(\Omega)=W_{\textrm{H,K}}^{2\beta,p}(\Omega)$. Finally, we recall the basic inclusion (cf. \cite[section 4.6.1]{5}) \begin{equation}\label{inclusion} W^{s,p}(\Omega)\hookrightarrow B^{s,p,\infty}(\Omega)\,,\quad\;\textrm{if}\;\,s\notin \mathbb{N}\,. \end{equation} Since our function $F$ defined in $(\ref{richiestaperA2u0})$ belongs to $W_{\textrm{H,K}}^{2\beta,p}(\Omega)$, it is necessarily an element of $B_{\textrm{H,K}}^{2\beta,p,\infty}(\Omega)$. Therefore $(\textrm{H}13)$ is satisfied, too. \end{proof} \textbf{Proof of Theorem \ref{sfera}.} It easily follows from Theorems \ref{3.1} and \ref{sfera1}. $\square$ \begin{remark}\label{su2.36} \emph{We want here to give some insight into the somewhat involved condition $(\ref{richiestaperA2u0})$. For this purpose we need to assume that the functions $a,b,d\in W^{3,\infty}((0,R))$, $c\in W^{3,\infty}(\Omega)$ satisfy the following conditions \[ b(0)=b'(0)=b''(0)=0,\quad d(0)=d'(0)=d''(0)=0, \] \[ a'(0)=a''(0)=0,\quad D_{x_i}c(0)=D_{x_i}D_{x_j}c(0)=0,\quad i,j=1,\ldots,n. \] This implies that the coefficients $a_{i,j}$ belong to $W^{3,\infty}(\Omega)$, $i,j=1,2,3$. Then we observe that function $k_0$ defined in (3.20) actually belongs to $C^{1+\alpha}([R_1,R_2])$, $\alpha\in (2\beta,1)$.
It is then an easy task to show the membership of function $F$ in $W_{\textrm{H,K}}^{2\beta,p}(\Omega)$, $\beta\in (0,1/2)$, under the following regularity assumptions: \begin{itemize} \item[{\it i)}] for any $\rho\in C^{\alpha}(\overline{\Omega})$, $\alpha\in (2\beta,1)$, $w\in W^{2\beta,p}(\Omega)$, we have $\rho w \in W^{2\beta,p}(\Omega)$ and the estimate $\|\rho w\|_{W^{2\beta,p}(\Omega)}\le C\|\rho\|_{C^{\alpha}(\overline{\Omega})} \|w\|_{W^{2\beta,p}(\Omega)}$\,; \item[{\it ii)}] operator $\Phi$ maps $C^{\alpha}(\overline{\Omega})$ into $C^{\alpha}([R_1,R_2])$\,. \end{itemize} As far as the boundary conditions involved in assumption $(\textrm{H}13)$ are concerned, we observe that they are missing when $(\textrm{K})=(\textrm{N})$, while in the remaining case they are so complicated that we prefer not to write them explicitly and we limit ourselves to listing them as \[ F\;\textrm{satisfies boundary conditions (K)}. \] Of course, when needed, such conditions can be explicitly computed in terms of the data and function $k_0$ defined in (3.20).} \end{remark} \section{The two-dimensional case} \setcounter{equation}{0} In this section we deal with the planar identification problem $\textrm{P}(\textrm{K})$, $\textrm{K}\in\{\textrm{D,N}\}$, related to the disk $\Omega=\{x\in\mathbb{R}^2\!:|x|<R\}$ where $R>0$.\\ Operators $\mathcal{A}$, $\mathcal{B}$, $\mathcal{C}$ are defined by $(\ref{A})$ simply replacing the subscript $3$ with $2$: \begin{eqnarray} \label{A1}\mathcal{A}\!=\!\!\sum_{j=1}^{2}D_{x_j} \big(\sum_{k=1}^{2}a_{j,k}(x)D_{x_k} \big)\,,\quad\,\mathcal{B}\!=\!\!\sum_{j=1}^{2}D_{x_j}\big(\sum_{k=1}^{2} b_{j,k}(x)D_{x_k}\big)\,,\quad\, \mathcal{C}\!=\!\!\sum_{j=1}^{2}c_{j}(x)D_{x_j}\,.
{\rm e}nd{eqnarray} According to $(\ref{condsuaij})$ for the two-dimensional case, we assume that the coefficients $a_{i,j}$ of $\cal{A}$ have the following representation \betaegin{equation}\lambdaabel{condsuaij1} \lambdaeft\{\betaegin{array}{lll} a_{1,1}(x)\!\!\!&=&\!\!\!a(|x|)+\,{\rm d}isplaystyle\frac{x_2^2[c(x)+b(|x|)]}{|x|^2} -\,{\rm d}isplaystyle\frac{x_1^2d(|x|)}{|x|^2},\\[5,0mm] a_{2,2}(x)\!\!\!&=&\!\!\!a(|x|)+\,{\rm d}isplaystyle\frac{x_1^2[c(x)+b(|x|)]}{|x|^2} -\,{\rm d}isplaystyle\frac{x_2^2d(|x|)}{|x|^2},\\[5,0mm] a_{1,2}(x)\!\!\!&=&\!\!\! a_{2,1}(x)=-\,{\rm d}isplaystyle\frac{\,x_1x_2[ b(|x|)+c(x)+d(|x|)]}{|x|^2}, {\rm e}nd{array}\right. {\rm e}nd{equation} \par \noindent where the function $a$, $b$, $c$ and $d$ satisfy properties $(\ref{regular})$, $(\ref{bcd})$. \\ Furthermore we assume that the coefficients of operators $\mathcal{B},\,\mathcal{C}\,$ satisfy $(\ref{ipotesibijeci})$.\\ In the two-dimensional case, setting $x'=(\cos{\!\varphi},\sigmain\varphi)$ an example of admissible linear operators $\Phi$ and $\Psi$ is now the following: \betaegin{eqnarray} \lambdaabel{Phi12} \hskip 0,74truecm\Phi [\!\!\!\!\! &v&\!\!\!\!\!](r)\!:= {\rm i}nt_{\!0}^{2\pi}\!\!\!\!v(rx')d\varphi\,,\quadquad\\[1,7mm] \lambdaabel{Psi12} \hskip 0,74truecm\Psi[\!\!\!\!\! &v&\!\!\!\!\!]\!:= {\rm i}nt_{\!0}^{R}\!\! r dr{\rm i}nt_{\!0}^{2\pi}\!\!\!\!\psi(rx')v(rx')\,d\varphi, {\rm e}nd{eqnarray} Similarly to $(\ref{tildeA})$, using $(\ref{condsuaij1})$, we obtain the following polar representation for the second order differential operator $\mathcal{A}$: \betaegin{eqnarray} \lambdaabel{tildeA2} \widetilde{\mathcal{A}}\!\!\! & = &\!\!\! 
D_r\betaig[{h}(r)D_r\betaig] \,+\,\frac{{h}(r)D_r}{r}\,+\, \frac{{a}(r)+ {b}(r)}{r^2}D_{\varphi}^2\,+\,\frac{1}{r^2} D_{\varphi}\betaig[\,\wtil{c}(r,\varphi)D_{\varphi}\betaig], {\rm e}nd{eqnarray} where $\wtil{c}(r,\varphi)=c(r\cos{\!\varphi},r\sigmain{\!\varphi})$ and function $h$ is defined in $(\ref{H})$.\\ Working in the Sobolev spaces $W^{k,p}({\cal O}mega)$, we will assume \betaegin{equation}\lambdaabel{P2} p{\rm i}n (2,+{\rm i}nfty). {\rm e}nd{equation} Moreover, our assumptions on operators $\Phi$ and $\Psi$ and the data will be the same as in $(\ref{primasuPhiePsi})\!-\! (\ref{richiesteperg2})$ with the spaces $L_2^p(0,R)$ and $W_2^{2,p}(0,R)$ replaced, respectively, by $L_1^p(0,R)$ and $W_1^{2,p}(0,R)$. The Banach spaces ${\mathcal{U}}^{\,s,p}(T)$, ${\mathcal{U}}_{\textrm{K}}^{s,p}(T)$ are still defined by $(\ref{Us})$. \betaegin{theorem}\lambdaabel{sfera2} Let us suppose that the coefficients $a_{i,j}$ $(i,j=1,2)$ are represented by $(\ref{condsuaij1})$ and that $(\ref{regular})$, $(\ref{bcd})$, $(\ref{ipotesibijeci})$, $(\ref{primasuPhiePsi})\!-\!(\ref{primasuPsi})$, $(\ref{P2})$ are fulfilled. Moreover, assume that the data enjoy the properties $(\ref{richiestasuf})\! 
-\!(\ref{richiesteperg2})$ and satisfy inequalities $(\ref{J0}), (\ref{J1})$ as well as consistency conditions $(\ref{DD1})-(\ref{1.19})$, $(\ref{DDV})-(\ref{PSIV1})$.\\ Then there exists $T^{\ast}\in (0,T]$ such that the identification problem $\textrm{P}(\textrm{K})$, $\textrm{K}\in\{\textrm{D,N}\}$, admits a unique solution $(u,k)\in{\mathcal{U}}^{\,2,p}( T^{\ast})\times C^{\beta}\big([0,T^{\ast}];W_1^{1,p}(0,R)\big)$ depending continuously on the data with respect to the norms pointed out in $(\ref{richiestasuf})\!-\!(\ref{richiesteperg2})$.\\ In the case of the specific operators $\Phi$, $\Psi$ defined as in $(\ref{Phi12}),\,(\ref{Psi12})$ the previous results are still true if we assume $\psi\in C^1(\overline{\Omega})$ with ${\psi}_{|_{\partial\Omega}}\!=\!0$ when $\emph{K}\!=\!\emph{D}$. \end{theorem} \begin{lemma}\label{PHIPSI1} When $\Phi$ and $\Psi$ are defined by $(\ref{Phi12})$ and $(\ref{Psi12})$, respectively, and the coefficients $a_{i,j}$ $(i,j=1,2)$ are represented by $(\ref{condsuaij1})$, conditions $(\ref{primasuPhiePsi})\!-\!(\ref{primasuPsi})$ are satisfied under assumptions $(\ref{regular})$, $(\ref{P2})$ and the hypothesis $\psi\in C^1(\overline{\Omega})$ with ${\psi}_{|_{\partial\Omega}}\!=\!0$ when $\emph{K}\!=\!\emph{D}$. \end{lemma} \begin{proof} It is essentially the same as that of Lemma \ref{PHIPSIBAll}. Therefore we leave it to the reader. \end{proof} For the two-dimensional case the results of Section 5 are still true. Therefore the proof of Theorem \ref{sfera1} is analogous to the one of Theorem \ref{sfera}. \begin{thebibliography}{9999} \bibitem[1]{1} Adams R. A.: Sobolev Spaces, Academic Press, New York-San Francisco-London 1975.
\bibitem[2]{2} Colombo F., Lorenzi A.: {\it An identification problem related to parabolic integro-differential equations with non-commuting spatial operators}, J. Inverse Ill-Posed Problems, 8 (2000), 505--540. \bibitem[3]{3} Favaron A., Lorenzi A.: {\it Parabolic integro-differential identification problems related to radial memory kernels I}, preprint **/2001, Department of Mathematics ``F. Enriques'' of the Universit\`a degli Studi di Milano. \bibitem[4]{4} Kufner A.: Weighted Sobolev Spaces, John Wiley \& Sons Limited, 1985. \bibitem[5]{5} Triebel H.: Interpolation Theory, Function Spaces, Differential Operators, North-Holland Publ. Co., Amsterdam--New York--Oxford 1978. \end{thebibliography} \end{document}
\begin{document} \def\sigma^x_a{\sigma^x_a} \def\sum_a{\sum_a} \def\sum_b{\sum_b} \def\tilde{E}^x_a{\tilde{E}^x_a} \def\tilde{F}^y_b{\tilde{F}^y_b} \def{E}^x_a{{E}^x_a} \def{F}^y_b{{F}^y_b} \def{E}^x_ah{({E}^x_a)^{1/2}} \def\Phi{\Phi} \def\Phi^*{\Phi^*} \defB(H){B(H)} \defC_1(H){C_1(H)} \def{E}^x_ap{\{E^x_a\}\sb{a,x}} \def{F}^y_bp{\{F^y_b\}\sb{b,y}} \def\tilde{E}^x_ap{\{\tilde{E}^x_a\}\sb{a,x}} \def\tilde{F}^y_bp{\{\tilde{F}^y_b\}\sb{b,y}} \defp(a,b|x,y){p(a,b|x,y)} \title[Tsirelson's problem]{Tsirelson's problem\\ and purely atomic\\ von Neumann algebras} \author{Bebe Prunaru} \address{Institute of Mathematics ``Simion Stoilow'' of the Romanian Academy\\ P.O. Box 1-764 RO-014700 Bucharest Romania} \email{[email protected]} \begin{abstract} It is shown that if a bipartite behavior admits a field representation in which Alice (or Bob's) observable algebra generates a purely atomic von Neumann algebra then it is non-relativistic. \end{abstract} \maketitle Let $H$ be a separable complex Hilbert space, and let $B(H)$ be the algebra of all bounded linear operators on $H$. If $S\subsetB(H)$ then $span(S)$ denotes its linear span and $comm(S)$ its commutant. Let $C_1(H)$ be the space of all trace-class operators on $H$. We denote $$<\mu,T>=tr(\mu T) \quad \mu\inC_1(H), T\inB(H).$$ If $\Psi:B(H)\toB(H)$ is a weak star continuous map, then we shall denote by $\Psi^*:C_1(H)\toC_1(H)$ its predual map, hence $$<\Psi^*(\mu),T>=<\mu,\Psi(T)> \quad \mu\inC_1(H), T\inB(H).$$ In what follows $A$ and $B$ are finite sets. Moreover $\{A_x\}\sb{x\in A}$ and $\{B_y\}\sb{y\in B}$ are families of finite sets. Elements of $A\sb x$ are identified by pairs of the form $(a,x)$ and similarly for $B\sb y$. The following result has been recently proved in \cite{NCPV}. 
\begin{theorem} \label{quansal} Let ${E}^x_ap$ and ${F}^y_bp$ be two families of positive operators in $B(H)$ such that \begin{itemize} \item[(i)] $\sum_a{E}^x_a=1 \quad (\forall) x\in A$ \item[(ii)] $\sum_b{F}^y_b=1 \quad (\forall) y\in B$ \item[(iii)] ${E}^x_a{F}^y_b={F}^y_b{E}^x_a \quad (\forall) a,b,x,y.$ \end{itemize} Let $\rho\inC_1(H)$ be positive with $tr(\rho)=1$ and let $$p(a,b|x,y)=<\rho,{E}^x_a{F}^y_b>.$$ Suppose there exists a family $\{\sigma^x_a\}\sb{a,x}$ of positive trace-class operators on $H$ such that \begin{itemize} \item[(iv)] $\sigma=\sum_a\sigma^x_a$ does not depend on $x\in A$ and \item[(v)] $<\sigma^x_a,{F}^y_b>=p(a,b|x,y)$ for all $a,b,x,y$. \end{itemize} Then there exist families $\tilde{E}^x_ap$ and $\tilde{F}^y_bp$ of positive operators on $H$ and a normal state $\tilde{\rho}$ on $B(H\otimes H)$ such that $$\sum_a\tilde{E}^x_a=1 \quad (\forall) x\in A$$ and $$\sum_b\tilde{F}^y_b=1 \quad (\forall)y\in B$$ and such that $$p(a,b|x,y)=<\tilde{\rho},\tilde{E}^x_a\otimes\tilde{F}^y_b>\quad (\forall) a,b,x,y.$$ \end{theorem} This result is related to a certain problem in the theory of quantum correlations formulated in \cite{T1} and \cite{T2}. For recent work in this area we refer to \cite{F}, \cite{J},\cite{NCPV},\cite{SW} and the references therein. In this paper we shall provide a class of examples where this theorem applies. \begin{proposition} \label{main} Suppose ${E}^x_ap$ and ${F}^y_bp$ are families of positive operators on $H$ satisfying (i)--(iii) in Theorem \ref{quansal} and let $\rho\inC_1(H)$ be positive with $tr(\rho)=1$. Assume there exists a positive linear weak star continuous idempotent map $$\Phi:B(H)\toB(H)$$ such that $$span({F}^y_bp)\subset range(\Phi)\subset comm({E}^x_ap).$$ Then there exists a family $\{\sigma^x_a\}\sb{a,x}$ of positive trace-class operators on $H$ such that (iv) and (v) in Theorem \ref{quansal} hold true.
\end{proposition} \begin{proof} Let us define $$\sigma^x_a=\Phi^*({E}^x_ah\rho{E}^x_ah)$$ for all $a,x$. Then for every $T\inB(H)$ we have $$<\sum_a\sigma^x_a,T>=<\rho,\sum_a{E}^x_ah\Phi(T){E}^x_ah> =<\rho,\Phi(T)>=<\Phi^*(\rho),T>$$ therefore $\sigma=\sum_a\sigma^x_a$ does not depend on $x$. Moreover for every $a,x,b,y$ we have $$<\sigma^x_a, {F}^y_b>=<\rho, {E}^x_ah\Phi({F}^y_b){E}^x_ah>=<\rho,{E}^x_a{F}^y_b>=p(a,b|x,y).$$ \end{proof} This proof is in part inspired by the proof of Thm 5 in \cite{NCPV}. Recall that a purely atomic von Neumann algebra is one in which the identity is a sum of minimal projections. Obviously, every finite dimensional von Neumann algebra is purely atomic as well as $B(H)$ itself. It is known that any such algebra is the range of a weak star continuous completely positive idempotent. It follows that the above result applies for instance when either ${E}^x_ap$ or ${F}^y_bp$ generate purely atomic von Neumann algebras. For terminology and results on this class of algebras we refer to \cite{B}. \end{document}
\begin{document} \def\sigma^x_a{\sigma^x_a} \def\sum_a{\sum_a} \def\sum_b{\sum_b} \def\tilde{E}^x_a{\tilde{E}^x_a} \def\tilde{F}^y_b{\tilde{F}^y_b} \def{E}^x_a{{E}^x_a} \def{F}^y_b{{F}^y_b} \def{E}^x_ah{({E}^x_a)^{1/2}} \def\Phi{\Phi} \def\Phi^*{\Phi^*} \defB(H){B(H)} \defC_1(H){C_1(H)} \def{E}^x_ap{\{E^x_a\}\sb{a,x}} \def{F}^y_bp{\{F^y_b\}\sb{b,y}} \def\tilde{E}^x_ap{\{\tilde{E}^x_a\}\sb{a,x}} \def\tilde{F}^y_bp{\{\tilde{F}^y_b\}\sb{b,y}} \defp(a,b|x,y){p(a,b|x,y)} \title{Message Passing Neural Networks for Hypergraphs\thanks{Supported by The Canada Research Chairs program.}} \author{Sajjad Heydari\and Lorenzo Livi\orcidID{0000-0001-6384-4743}} \authorrunning{S. Heydari and L. Livi} \institute{Computer Science, University of Manitoba, Winnipeg, Canada \email{[email protected]}} \maketitle \begin{abstract} Hypergraph representations are both more efficient and better suited to describe data characterized by relations between two or more objects. In this work, we present a new graph neural network based on message passing capable of processing hypergraph-structured data. We show that the proposed model defines a design space for neural network models for hypergraphs, thus generalizing existing models for hypergraphs. We report experiments on a benchmark dataset for node classification, highlighting the effectiveness of the proposed model with respect to other state-of-the-art methods for graphs and hypergraphs. We also discuss the benefits of using hypergraph representations and, at the same time, highlight the limitation of using equivalent graph representations when the underlying problem has relations among more than two objects. \keywords{Graph Neural Network\and Hypergraph \and Message passing.} \end{abstract} \section{Introduction} Graphs perfectly capture structured data, not only representing data points, but also relations amongst them. Recently, Graph Neural Networks have been employed as a tool to directly process graph data with huge success in various tasks, such as node classification in citation network labeling~\cite{kipf2016semi, velivckovic2017graph, atwood2016diffusion}, link predictions in recommender networks~\cite{zhang2018link}, and state prediction in traffic prediction or weather forecasting via sensory networks~\cite{zhang2018gaan, yan2018spatial, yu2017spatio, cui2019traffic}.
Graph Neural Networks have provided a significant reduction in numbers of trainable parameters in a machine learning model with input that could be represented as graphs, much like Convolutional Neural Networks have done for tensors. This reduction in parameters allows complex problems to be addressed with much smaller datasets. However there still remains a set of problems that could benefit from a change in representation, namely the hypergraph representation. Hypergraphs constitute a natural generalization of graphs, where relations are not restricted to represent the interaction of two objects: hypergraphs can encode higher-order relations, i.e. relations between two or more objects. Formally, a hypergraph $H=(V, \mathcal{E})$ is composed by a set of vertices, $V$, and a set of hyperedges, $\mathcal{E}\subset\mathcal{P}(V)$, where $\mathcal{P}(V)$ is the power set of $V$. Accordingly we may refer to the vertices composing a hyperedge as $v\in e, e\in \mathcal{E}$. In a citation network, for example, there are pair-wise relations between cited work and citing work. However, there are also bigger relations among multiple works that have been cited together in a single publication, which require hyperedges to be properly modeled. Higher-order relations could still be encoded in a graph by performing suitable transformations (e.g. using line graphs). However, we find that none of the currently used encodings are as effective as directly processing hypergraphs. We note that well-established graph neural networks \cite{gilmer2017neural} fail to apply to hypergraphs due to their more complex structure. Therefore, newer methods for processing hypergraphs with neural networks have been introduced in the literature \cite{feng2019hypergraph}. Here we introduce the Hypergraph Message Passing Neural Network (HMPNN), a Message Passing Neural Network model \cite{gilmer2017neural} that can process hypergraph-structured data of variable size. 
The main contributions of this paper can be summarized as follows: \begin{itemize} \item We develop a new message passing neural network model for hypergraph-structured data; \item We show that hypergraph convolution methods from the literature can be seen as a special case of HMPNN; \item We show that the proposed model significantly outperforms other existing methods from the literature on a common node classification benchmark. \end{itemize} The remainder of this paper is structured as follows. Section \ref{sec:related_work} contextualizes the paper. In Section \ref{sec:graph_expansions}, we show that common approaches for transforming a hypergraph into a graph inevitably lead to loss of structural information, hence preventing the correct functioning of any machine learning method operating on them. Section \ref{sec:hmpnn} presents the main contribution of this paper, namely a novel hypergraph neural network model. In Section \ref{sec:experiments} we show experimental results on a common hypergraph benchmark used in the literature. Finally, Section \ref{sec:conclusions} concludes the paper. \section{Related work} \label{sec:related_work} \citet{gilmer2017neural} reformulated existing graph neural networks in a framework called Message Passing Neural Networks. There are two phases in MPNN, the message passing and readout, which correspond to convolutions and pooling operations, respectively. The convolutional layer consists of updating the hidden state of node $v$ at layer $t$ ($h^t_v$), based on the previous state of $v$ and its neighbors. The model reads: \begin{equation} \begin{aligned} m_v^{t+1} &= \sum_{w\in N(v)} M_t(h^t_v, h^t_w, e_{vw}) \\ h^{t+1}_v &= U_t(h^t_v, m^{t+1}_v) \end{aligned} \end{equation} The function $M$ prepares messages from each node to each of its neighbours, which are then aggregated together to form the incoming message for each node.
$M$ can utilize the sender's features, the receiver features, or the weight of the edge between them. The incoming message is then processed with function $U$ along with the internal feature, to form the next feature of each node. \subsection{Graph design space} \label{sec:graph_design_space} \citet{you2020design} introduced a design space for graph neural networks. Their contributions involves a similarity metric and an evaluation method for GNNs. Their design space consists of three parts: intra-layer, inter-layer, and training configuration. The intra-layer is responsible for creation of various GNN convolutions, while also considering dropout and batch normalization. It is described as follows: \begin{equation*} h_v^{(k+1)} = AGG({ACT(DROPOUT(BN(W^{(k)}h_u^{(k)} + b^{(k)}))), u \in \mathcal{N}(v)}) \end{equation*} The inter-layer is responsible for the interconnection of the various layers. These includes choices for \textit{layer connectivity}, \textit{pre-process layers}, \textit{mp-layers} and \textit{post-process layers}; with layer connectivity having options such as \textit{stack}, \textit{skip-sum} and \textit{skip-cat}. Finally, training configuration includes choices for batch size, learning rate, optimizer and number of epochs. \subsection{Hypergraph neural networks and hypergraph representation} The literature on graph neural networks for hypergraph-structured data is sparse. \citet{jiang2019dynamic} introduce dynamic hypergraph neural networks, a neural network that both updates the structure of the underlying hypergraph as well as performing a two phase convolution, a vertex convolution and a hyperedge convolution. \citet{feng2019hypergraph} introduce hypergraph neural networks, a spectral convolution operation on hypergraphs, that considers weights of the hyperedges as well as the incident matrix. \citet{yi2020hypergraph} introduce a method to perform hypergraph convolutions with recurrent neural networks. 
\citet{chien2021you} introduce an extension to message passing operation for hypergraphs based on composition of two multiset functions. \section{Graph expansions and loss of structural information} \label{sec:graph_expansions} Here, we consider the process of mapping hypergraph representations into graph representations. There are various ways of encoding hyperedges involving three or more vertices in graphs. We consider some popular approaches and show that they all have drawbacks that lead to loss of relevant structural information, thus highlighting the importance of processing hypergraph-structured data. \paragraph{Clique expansion} is an approach in which each hyperedge of size $k$ is substituted with $k*(k-1)$ edges amongst pairs of its members~\cite{sun2008hypergraph}. This approach cannot distinguish between pairwise relations amongst $k$ vertices and a hyperedge of size $k$, as shown in Figure~\ref{fig:clique_expansion}. In fact, they are both mapped to the same structure, and therefore lose structural information. \begin{figure} \caption{A hyperedge of size 3 (left) and a clique with 3 vertices (right).} \label{fig:clique_expansion} \end{figure} \paragraph{Star expansion} is another approach of encoding a hypergraph in a simple graph, in which the hyperedge $h$ is replaced with a new vertex $v_h$, and edges between $h_v$ and original vertices of $h$ are added~\cite{zien1999multilevel}. An example could be seen in Figure~\ref{fig:star_expansion}. The resulting graph is bipartite, with one partition representing the original hyperedges and the other partition representing the original vertices. This conversion is not one to one because without further encoded information it is not possible to figure out which partition is representing the hyperedges. Another downside is that due to change in neighborhoods, in order to pass messages from previously neighbouring vertices, we now have to pass messages twice to reach the same destination. 
\begin{figure} \caption{A hyperedge of size 3 (left) and its star expansion (right).} \label{fig:star_expansion} \end{figure} \paragraph{Line conversions} (also known as edge conversion and edge expansion) creates a simple graph, and for any hyperedge $h$ creates a vertex $v_h$ in it. Two vertices $v_h$ and $v_{h'}$ are connected if and only if the intersection between $h$ and $h'$ is not empty~\cite{pu2012hypergraph}. An example is shown in Figure~\ref{fig:line_conversion}. Plain line conversions are not one-to-one mappings, and hence we lose various kinds of information, from data stored on the vertices, to the size of the higher order relations, neighbourhoods and many other types of structural information. \begin{figure} \caption{A hypergraph (left) and its line conversion (right).} \label{fig:line_conversion} \end{figure} \section{The Proposed Hypergraph Message Passing Neural Networks} \label{sec:hmpnn} The computation in the proposed Hypergraph Message Passing Neural Network (HMPNN) consists of two main phases: (1) sending messages from vertices to hyperedges and (2) sending messages from hyperedges to vertices. The operations performed by the proposed HMPNN model can be formalized as follows: \begin{align} \label{hmpnn1}M_v &= f_v(X^{(t)}_v) \\ \label{hmpnn2}W^{(t+1)}_e &= g_w(W^{(t)}_e, \square_{v \in e}M_v)\\ \label{hmpnn3}M_e &= f_w(W^{(t)}_e, \square'_{v \in e}M_v)\\ \label{hmpnn4}X^{(t+1)}_v &= g_v(X^{(t)}_v, \diamond_{e, v \in e}M_e) \end{align} $X^{(t)}_v$ is the representation of vertex $v$ at layer $t$, $W^{(t)}_e$ is the representation of hyperedge $e$ at layer $t$, $f_v$ and $f_w$ are vertex and hyperedge messaging functions, respectively, $g_v$ and $g_w$ are vertex and hyperedge updating functions, respectively, and finally $\square, \square'$, and $\diamond$ are aggregating functions. The choice for messaging and updating functions includes identity function, linear and non-linear functions, or MLP.
The choice for aggregation functions includes mean aggregation, sum aggregation and concatenation. We can therefore define an instance of hypergraph message passing operation by specifying what follows: \begin{itemize} \item Choice of messaging functions over vertices $f_v$ and hyperedges $f_w$ \item Choice of updating function over vertices $g_v$ and hyperedges $g_w$ \item Choice of aggregation operation over incoming messages to hyperedges $\square$ \item Choice of aggregation operation over outgoing messages to vertices $\square'$ \item Choice of aggregation operation over incoming messages to vertices $\diamond$ \end{itemize} These choices allow us to describe a wide range of hypergraph convolutions, which can be used as an intra-layer design space for hypergraphs, in a similar fashion to \citet{you2020design} for graphs. It is possible to include various forms of regularization to prevent overfitting. Dropout could be added to any of messaging or updating functions $f$ and $g$. It is also possible to introduce adjacency connection dropout to aggregation functions $\square \square'\diamond$, allowing for a reduction of bias during training. \subsection{Batch Normalization and Dropout} Batch normalization is a useful operation to speed-up training and reduce bias. Similarly, dropout is a mechanism used to decrease the learned bias of the system. We introduce two types of dropout for HMPNN: regular dropout and adjacency dropout. During regular dropout, random cells in the feature vector of nodes or hyperedges are set to zero; the other cells are scaled by $\frac{n+k}{n}$, where $n$ is the total number of cells in that vector and $k$ is the number of cells that were zeroed. Adjacency dropout randomly removes hyperedges in a convolution step. Adjacency dropout must be applied in neighborhood creation steps of Equations~\ref{hmpnn2} through \ref{hmpnn4}. 
Regular dropout can follow a batch normalization right before updating functions in Equations~\ref{hmpnn2} and \ref{hmpnn4}, as part of the corresponding $g$ functions. \subsection{Equivalency between hypergraph convolutions and HMPNN} \label{sec:} Following the spirit of the graph design space mentioned in Section \ref{sec:graph_design_space}, here we show that, by including appropriate search parameters, HMPNN can mimic the behaviour of existing hypergraph neural network convolutions, which are thus special cases of HMPNN. In order to compare different models based on hypergraph convolutions, we need to define a notion of equivalency among models. We define two models $m_1, m_2$ to be equivalent if and only if for any weights $w_1$ there exists $w_2$ such that for any input hypergraph $x$ the model outputs correspond, i.e. $m_1(w_1, x) = m_2(w_2, x)$; in other words we can translate the weights of one network to the other one. \paragraph{Dynamic hypergraph neural network's convolution} (DHNN)~\cite{jiang2019dynamic} operates according to algorithm~\ref{alg:DHNN}. In other words, for each hyperedge containing $u$, they sample all members of $u$ and capture their feature set (1st outgoing message), stacking them in $X_u$ (hyperedge aggregation), processing them via the vertex convolution(hyperedge updating), aggregating all such hyperedge messages via stacking (incoming messages to vertices), performing edge convolution on the stacked aggregation and applying the activation function on it (vertex update function). 
\begin{algorithm} \caption{Dynamic Hypergraph Neural Network Convolution} \label{alg:DHNN} \begin{algorithmic} \State \textbf{input}: Sample $x_u$, hypergraph structure $\mathcal{G}$ \State \textbf{output}: Sample $y_u$ \State xlist = $\Phi$ \For{$e \in Adj(u)$} \State $X_u = \text{VertexSample}(X, \mathcal{G})$ \State $X_e = \text{1-dconv}(MLP(X_v), MLP(X_v))$ or VertexConvolution($X_v$) \State xlist.insert($X_e$) \EndFor \State $X_e$ = stack(xlist) \State $X_u = \sum_{i=0}^{|Adj(u)|} \text{softmax}(x_eW+b)^i x^i_e$ or edgeConv($X_e$) \State $y_u = \sigma(x_uW+b)$ \end{algorithmic} \end{algorithm} These steps can be directly described in terms of message passing, with specific functions for each phase of the message passing operation. HMPNN can thus directly emulate this model. \paragraph{Hypergraph neural networks} (HNN)~\cite{feng2019hypergraph} convolution performs the following operations: \begin{equation} X^{(l+1)} = \sigma(D_v^{-1/2}HWD_e^{-1}H^TD_v^{-1/2}X^{(l)}\Theta^{(l)}) \end{equation} where $X^{(l)}$ describes the node feature on layer $l$, $D_v$ and $D_e$ describe the diagonal matrices of node and edge degrees, respectively, $W$ is the weights of each hyperedge, $H$ is the incidence matrix, $\Theta$ are the learnable parameters of the layer and finally $\sigma$ is the non-linear activation function. Note that this convolution assumes that hyperedge features are numbers (weights). Given any $\Theta$, we construct the following equivalent HMPNN where the outgoing node message is node features multiplied by inverse square root of their degree, i.e. $D_v^{-1/2}X^{(l)}$; the hyperedge aggregation is the average, i.e. $D_e^{-1}H^T$; there is no hyperedge updating; the hyperedge outgoing message is multiplied by their weight (or feature); the node aggregation function is sum of input multiplied by the inverse square root of their degree, i.e. $D_v^{-1/2}H$; and finally the node updating function is $\sigma(X\Theta^{(l)})$.
\paragraph{Multiset Learning for hypergraphs} (AllSet)~\cite{chien2021you} constructs its message passing according to the following equations: \begin{align} &f_{\mathcal{V}\rightarrow\mathcal{E}}(S) = f_{\mathcal{E}\rightarrow\mathcal{V}}(S) = LN(Y+MLP(Y)),\\ &Y= LN(\theta+MH_{h,\omega}(\theta,S,S)), \qquad MH_{h,\omega}(\theta,S,S)=\big\Vert_{i=1}^{h} O^{(i)},\\ &O^{(i)}= \omega(\theta^{(i)}(K^{(i)})^T)V^{(i)}, \quad K^{(i)}=MLP^{(K,i)}(S), \quad V^{(i)}=MLP^{(V,i)}(S). \end{align} $LN$ is the normalization layer, $\Vert$ is concatenation and $\theta$ are the learnable weights; $MH_{h,\omega}$ denotes an $h$-head multihead attention with activation function $\omega$. It is clear that AllSet is a special case of the proposed HMPNN with the above mentioned functions used as the updating mechanism and the identity function used for messaging. Moreover, we note that there is a direct translation of weights from a trained AllSet model to the equivalent HMPNN. \section{Experiments} \label{sec:experiments} In this section, we look at the semi-supervised task of node classification in citation networks, which offer a suitable playground to benchmark graph and hypergraph representations and related processing methods. \subsection{Experiment Setup} \subsubsection{Dataset} To provide direct comparison with state-of-the-art methods \cite{kipf2016semi,bai2019hypergraph,velivckovic2017graph,feng2019hypergraph,jiang2019dynamic}, we use the Cora dataset~\cite{yang2016revisiting} which includes 2708 academic papers and 5429 citation relations amongst them. Each publication contains a bag-of-words feature vector, where a binary value indicates the presence or absence of that word in the publication. Finally, each publication belongs to one of 7 categories. For the train-test split, we follow the protocol described in \cite{bai2019hypergraph,yang2016revisiting}. 
We randomly select: 20 items from each category, totaling 140 for training; 70 items from each category, totaling 490 for validation; the remaining items are used for testing. \subsubsection{Hypergraph Construction} Citation networks are often represented as simple graphs with publications that cite each other connected via simple edges. This representation fails to appreciate documents that are cited together, and only captures them in terms of a second neighborhood. \citet{bai2019hypergraph} provide an alternate representation in which documents are treated as vertices and hyperedges at the same time, with the hyperedge of a publication grouping all of its related work together. Each vertex is then characterized by a boolean vector of its representative words acting as its feature set. \subsubsection{Implementation Details} Our model uses two layers of HMPNN with sigmoid activation and a hidden representation of size 2. We use sum as the message aggregation function, with adjacency matrix dropout with rate 0.7, as well as dropout with rate 0.5 for the vertex and hyperedge representations. \subsection{Results and Analysis} Table~\ref{tab:cora} shows a comparison with respect to different models taken from the state-of-the-art. As shown in the table, the proposed method (HMPNN) significantly outperforms all other methods. \begin{table}[htp!] 
\caption{Classification accuracy on the Cora dataset.} \label{tab:cora} \centering \begin{tabular}{|c|c|}\hline \textbf{Method} & \textbf{Accuracy} \\\hline \hline GCN \cite{kipf2016semi} & 81.50\\\hline HGNN \cite{feng2019hypergraph} & 81.60\\\hline GAT \cite{velivckovic2017graph} & 82.43\\\hline DHGNN \cite{jiang2019dynamic} & 82.50\\\hline HGC+Atten \cite{bai2019hypergraph} & 82.61 \\\hline \textbf{HMPNN} & \textbf{92.16} \\\hline \end{tabular} \end{table} \subsubsection{Analysis of adjacency dropout} In order to verify the benefits of adjacency dropout, we perform a test with various values of the adjacency dropout rate used during training and report the obtained accuracy on the test set. The results are shown in Table~\ref{tab:adj_dropout}. Results show that, regardless of the use of activation dropout, the accuracy increases when we introduce adjacency dropout. We conclude that adjacency dropout is a useful mechanism to decrease the bias of the proposed neural network model. \begin{table}[htp!] \caption{Accuracy test for various values of adjacency dropout. Test 1 used an activation dropout rate of 0.5, whereas Test 2 did not use any activation dropout.} \label{tab:adj_dropout} \centering \begin{tabular}{|c|c|c|}\hline Adjacency Dropout Rate & Accuracy Test 1 & Accuracy Test 2 \\\hline\hline 90\% & 79.35\% & 65.51\%\\\hline 70\% & 92.16\% & 89.90\%\\\hline 50\% & 91.14\% & 83.26\%\\\hline 30\% & 89.94\% & 76.18\%\\\hline 0\% & 81.76\% & 54.11\%\\\hline \end{tabular} \end{table} \section{Conclusion and Future Work} \label{sec:conclusions} We investigated the use of hypergraph representations in machine learning and showed their superiority over graph representations with respect to capturing structural information for data characterized by $n$-ary relations (i.e.\ relations involving two or more vertices). Moreover, we introduced the Hypergraph Message Passing Neural Network as a novel neural network layer to process hypergraph-structured data. 
We showed that HMPNN can emulate existing convolution methods for hypergraphs, and therefore could be used to search over the space of architectures for hypergraph-structured data. Finally, we investigated using HMPNN on the task of node classification over the Cora citation network, in which we employed the adjacency dropout as well as activation dropout as mechanisms of controlling the bias. Our model outperformed various state-of-the-art methods for this task. \end{document}
\begin{document} \begin{abstract} In the 1995 paper entitled ``Noncommutative symmetric functions,'' Gelfand et al.\ defined two noncommutative symmetric function analogues for the power sum basis of the symmetric functions, along with analogues for the elementary and the homogeneous bases. They did not consider the noncommutative symmetric power sum duals in the quasisymmetric functions, which have since been explored only in passing by Derksen and Malvenuto-Reutenauer. These two distinct quasisymmetric power sum bases are the topic of this paper. In contrast to the simplicity of the symmetric power sums, or the other well known bases of the quasisymmetric functions, the quasisymmetric power sums have a more complex combinatorial description. As a result, although symmetric function proofs often translate directly to quasisymmetric analogues, this is not the case for quasisymmetric power sums. Neither is there a model for working with the quasisymmetric power sums in the work of Gelfand et al., which relies heavily on quasi-determinants (which can only be exploited by duality for our purposes) and is not particularly combinatorial in nature. This paper therefore offers a first glimpse at working with these two relatively unstudied quasisymmetric bases, avoiding duality where possible to encourage a previously unexplored combinatorial understanding. \end{abstract} \title{Quasisymmetric Power Sums} \setcounter{tocdepth}{1} \tableofcontents \section{Introduction} The ring of symmetric functions $\Sym$ has several well-studied bases indexed by integer partitions $\lambda$, such as the monomial basis $m_\lambda$, the elementary basis $e_\lambda$, the complete homogeneous basis $h_\lambda$, the Schur functions $s_\lambda$, and, most relevant here, the power sum basis $p_\lambda$. Two important generalizations of $\Sym$ are $\QS$ (the ring of quasisymmetric functions) and $\NS$ (the ring of noncommutative symmetric functions). 
These rings share dual Hopf algebra structures, giving a rich interconnected theory with many beautiful algebraic and combinatorial results. In particular, many quasisymmetric and noncommutative symmetric analogues to the familiar symmetric bases have been defined and studied, such as the quasisymmetric monomial basis $M_\alpha$, and the noncommutative elementary and homogeneous bases $\boldsymbol{e}_\alpha$ and $\boldsymbol{h}_\alpha$~\cite{GKLLRT94} (where the indexing set is compositions $\alpha$). Several different analogues of the Schur functions have also been defined, including the quasisymmetric fundamental basis $F_\alpha$ \cite{gessel1984multipartite}, dual to the noncommutative ribbon basis $\boldsymbol{r}_\alpha$; the quasi-Schur basis and its dual in \cite{haglund2011quasisymmetric}; and the immaculate basis and its quasisymmetric dual~\cite{BBSSZ14lift}. Quasisymmetric analogues of symmetric function bases are useful for a number of reasons. Quasisymmetric functions form a combinatorial Hopf algebra~\cite{Ehr96,gessel1984multipartite,MalReu95} and in fact are the terminal object in the category of combinatorial Hopf algebras~\cite{ABS06}, which explains why they consistently appear throughout algebraic combinatorics. Complicated combinatorial objects often have simpler formulas when expanded into quasisymmetric functions, and translating from symmetric to quasisymmetric functions can provide new avenues for proofs. Here, we explore the analogues to the power sum bases. In $\Sym$, there is an important bilinear pairing, the Hall inner product, defined by $\langle m_\lambda, h_\mu\rangle = \delta_{\lambda, \mu}$. Moreover, the duality between $\QS$ and $\NS$ precisely generalizes the inner product on $\Sym$ so that, for example, $\langle M_\lambda, \boldsymbol{h}_\mu\rangle = \delta_{\lambda, \mu}$. 
With respect to the pairing on $\Sym$, the power sum basis is (up to a constant) self-dual, so analogues to the power sum basis in $\QS$ and $\NS$ should share a similar relationship. Two types of noncommutative power sum bases, $\boldsymbol{\Psi}_\alpha$ and $\boldsymbol{\Phi}_\alpha$, were defined by Gelfand et al.\ \cite{GKLLRT94}. Briefly, the quasisymmetric duals to one type or the other were also discussed in \cite{der09} and in \cite{MalReu95}; but in contrast to the other bases listed above, very little has been said about their structure or their relationship to other bases. The main objective of this paper is to fill this gap in the literature. Namely, we define two types of quasisymmetric power sum bases, which are scaled duals to $\boldsymbol{\Psi}_\alpha$ and $\boldsymbol{\Phi}_\alpha$. The scalars are chosen analogous to the scaled self-duality of the symmetric power sums; moreover, we show that these are exactly the right coefficients to force our bases to refine the symmetric power sums (Theorems~\ref{thm:refine} and~\ref{thm:2refine}). Section~\ref{sec:qsps} develops combinatorial proofs of these refinements. In Section~\ref{sec:btw}, we give transition matrices to other well-understood bases. Section~\ref{sec:products} explores algebraic properties, giving explicit formulas for products of quasisymmetric power sums. Section~\ref{sec:plethysm} gives formulas for plethysm in the quasisymmetric case. \section{Preliminaries}\label{sec:prelim} In this section, we define the rings $\QS$ of quasisymmetric functions and $\NS$ of noncommutative symmetric functions, and briefly discuss their dual Hopf algebra structures. We begin with a brief discussion of notation. Due to the nature of this paper, we note that there is a lot of notation to keep track of throughout, and therefore we set aside numbered definitions and notations to help the reader. 
In general, we use lower case letters (e.g.\ $e, m, h, s$, and $p$) to indicate \emph{symmetric functions}, bold lowercase letters (e.g.\ $\boldsymbol{e}$, $\boldsymbol{h}$, and $\boldsymbol{r}$) to indicate \emph{noncommutative symmetric functions}, and capital letters (e.g.\ $M$ and $F$) to indicate \emph{quasisymmetric functions}. When there is a single clear analogue of a symmetric function basis, we use the same letter for the symmetric functions and their analogue (following \cite{LMvW} rather than \cite{GKLLRT94}). For the two different analogues to the power sums, we echo \cite{GKLLRT94} in using $\boldsymbol{\Psi}$ and $\boldsymbol{\Phi}$ for the noncommutative symmetric power sums, and then $\Psi$ and $\Phi$ as quasisymmetric analogues. We generally follow \cite{LMvW} for the names of the automorphisms on the quasisymmetric and noncommutative symmetric functions. For example, we use $S$ for the antipode map (in particular, see \cite[\S 3.6]{LMvW} for a complete list and a translation to other authors). \subsection{Quasisymmetric functions}\label{sec:qsym} A formal power series $f \in \mathbb{C}\llbracket x_1,x_2,\ldots \rrbracket$ is a \emph{quasisymmetric function} if the coefficient of $x_1^{a_1}x_2^{a_2}\cdots x_k^{a_k}$ in $f$ is the same as the coefficient for $x_{i_1}^{a_1}x_{i_2}^{a_2}\cdots x_{i_k}^{a_k}$ for any $i_1<i_2<\cdots <i_k$. The set of quasisymmetric functions $\QS$ forms a ring. Moreover, this ring has a $\ZZ_{\geq 0}$-grading by degree, so that $\QS=\bigoplus_n\QS_n$, where $\QS_n$ is the set of $f \in \QS$ that are homogeneous of degree $n$. For a comprehensive discussion of $\QS$ see \cite{LMvW,MalReu95,Sta99v2}. There are a number of common bases for $\QS_n$ as a vector space over $\mathbb{C}$. These bases are indexed by (strong) integer compositions. 
\begin{defn}[composition, $\alpha\vDash n$] A sequence $\alpha=(\alpha_1,\alpha_2,\ldots,\alpha_k)$ is a \emph{composition} of $n$, denoted $\alpha\vDash n$, if $\alpha_i>0$ for each $i$ and $\sum_i \alpha_i=n$. \end{defn} \begin{notn}[$|\alpha|$, $\ell(\alpha)$, $\widetilde{\alpha}$]The \emph{size} of a composition $\alpha=(\alpha_1,\alpha_2,\ldots,\alpha_k)$ is $|\alpha|=\sum \alpha_i$ and the \emph{length} of $\alpha$ is $\ell(\alpha)=k$. Given a composition $\alpha$, we denote by $\widetilde{\alpha}$ the partition obtained by placing the parts of $\alpha$ in weakly decreasing order. \end{notn} \begin{defn}[refinement, $\beta\preccurlyeq\alpha$, $\beta^{(j)}$]\label{defn:refinement} If $\alpha$ and $\beta$ are both compositions of $n$, we say that $\beta$ \emph{refines} $\alpha$ (equivalently, $\alpha$ is a \emph{coarsening} of $\beta$), denoted $\beta\preccurlyeq \alpha$, if \[\alpha=(\beta_1+\cdots+\beta_{i_1}, \beta_{i_1+1}+\cdots +\beta_{i_1+i_2}, \ldots, \beta_{i_1+\cdots +i_{k-1}+1}+\cdots + \beta_{i_1+\cdots+i_k}).\] We will denote by $\beta^{(j)}$ the composition made up of the parts of $\beta$ (in order) that sum to $\alpha_{j}$; namely, if $j=i_s$, then $\beta^{(j)}=(\beta_{i_1+\cdots +i_{s-1}+1},\cdots , \beta_{i_1+\cdots+i_s})$. \end{defn} It is worth noting that some authors reverse the inequality, using $\preccurlyeq$ for coarsening as opposed to refinement as we do here. Repeatedly we will need the particular parts of $\beta$ that sum to a particular part of $\alpha$. \begin{notn}[$\set(\alpha)$, $\comp(A)$] There is a natural bijection between compositions of $n$ and subsets of $[n-1]$ given by partial sums. (Here $[n]$ is the set $\{1,2, \ldots , n\}$.) 
Namely, if $\alpha=(\alpha_1,\ldots,\alpha_k)\vDash n$, then $\set(\alpha) = \{\alpha_1, \alpha_1+\alpha_2, \ldots, \alpha_1+\cdots+\alpha_{k-1}\}.$ Similarly, if $A=\{a_1,\ldots,a_j\}\subseteq[n-1]$ with $a_1<a_2<\cdots<a_j$ then $\comp(A)=(a_1,a_2-a_1,\ldots, a_j-a_{j-1},n-a_j)$. \end{notn} We remark that $\alpha \preccurlyeq \beta$ if and only if $\set(\beta)\subseteq \set(\alpha)$. Let $\alpha=(\alpha_1,\ldots,\alpha_k)$ be a composition. The \emph{quasisymmetric monomial function} indexed by $\alpha$ is \begin{equation}\label{eq:Ms}M_\alpha=\sum_{i_1<i_2<\cdots<i_k} x_{i_1}^{\alpha_1}x_{i_2}^{\alpha_2}\cdots x_{i_k}^{\alpha_k};\end{equation} and the \emph{fundamental quasisymmetric function} indexed by $\alpha$ is \begin{equation} \label{M-F} F_\alpha = \sum_{\beta\preccurlyeq \alpha}M_\beta, \qquad \text{so that} \qquad M_\alpha = \sum_{\beta\preccurlyeq\alpha}(-1)^{\ell(\beta)-\ell(\alpha)}F_\beta.\end{equation} Equivalently, $F_{\alpha}$ is defined directly by \begin{equation}\label{eq:Fs} F_{\alpha} = \sum_{\substack{i_1\leq i_2\leq \cdots \leq i_n\\ i_j<i_{j+1} \text{ if } j \in \set(\alpha)}} x_{i_1}x_{i_2}\cdots x_{i_n}.\end{equation} In addition to being a graded ring, $\QS$ can be endowed with the structure of a \emph{combinatorial Hopf algebra}. For our purposes, this means that $\QS$ has a product (ordinary polynomial multiplication), a coproduct $\Delta$, a unit and counit, and an antipode map. The ring $\NS$ of noncommutative symmetric functions is dual to $\QS$ with respect to a certain inner product (defined later), and thus also is a combinatorial Hopf algebra. 
For further details on the Hopf algebra structure of $\QS$ and $\NS$, see~\cite{ABS06,grinberg2014hopf,LMvW}. \subsection{Noncommutative symmetric functions}\label{sec:nsym} The ring of \emph{noncommutative symmetric functions}, denoted $\NS$, is formally defined as a free associative algebra $\mathbb{C} \langle \boldsymbol{e}_1, \boldsymbol{e}_2, \ldots \rangle$, where the $\boldsymbol{e}_i$ are regarded as \emph{noncommutative elementary functions} and \[ \boldsymbol{e}_\alpha = \boldsymbol{e}_{\alpha_1}\boldsymbol{e}_{\alpha_2}\cdots \boldsymbol{e}_{\alpha_k}, \qquad \text{for a composition } \alpha.\] Define the \emph{noncommutative complete homogeneous symmetric functions} as in~\cite[\S 4.1]{GKLLRT94} by \begin{equation}\label{eq:hine} \boldsymbol{h}_n = \sum_{\alpha \vDash n} (-1)^{n-\ell(\alpha)}\boldsymbol{e}_\alpha, \quad \text{ and } \quad \boldsymbol{h}_\alpha = \boldsymbol{h}_{\alpha_1}\cdots \boldsymbol{h}_{\alpha_k} = \sum_{\beta\preccurlyeq \alpha}(-1)^{|\alpha|-\ell(\beta)}\boldsymbol{e}_\beta.\end{equation} The noncommutative symmetric analogue (dual) to the fundamental quasisymmetric functions is given by the \emph{ribbon Schur functions} \begin{equation}\label{eq:rinh} \boldsymbol{r}_\alpha = \sum_{\beta\succcurlyeq \alpha} (-1)^{\ell(\alpha)-\ell(\beta)}\boldsymbol{h}_\beta.\end{equation} \subsubsection{Noncommutative power sums} To define the noncommutative power sums, we begin by recalling the useful exposition in~\cite[\S 2]{GKLLRT94} on the (commuting) symmetric power sums. 
Namely, the power sums $p_n$ can be defined by the generating function: \[P(X;t)=\sum_{k\geq 1}t^{k-1}p_k[X]=\sum_{i\geq 1}x_i(1-x_it)^{-1}.\] This generating function can equivalently be defined by any of the following generating functions, where $H(X;t)$ is the standard generating function for the complete homogeneous functions and $E(X;t)$ is the standard generating function for the elementary symmetric functions: \begin{equation} P(X;t)=\frac{d}{dt}\log H(X;t) = -\frac{d}{dt}\log E(-X;t).\label{eq:pfrome} \end{equation} Unfortunately, there is not a unique sense of logarithmic differentiation for power series (in $t$) with noncommuting coefficients (in $\NS$). Two natural well-defined reformulations of these are \begin{equation}\label{eq:type1gen} \frac{d}{dt}H(X;t)=H(X;t)P(X;t) \quad \text{ or } \quad -\frac{d}{dt}E(X;-t)=P(X;t)E(X;-t), \end{equation} and \begin{equation} H(X;t)=-E(X;-t) = \exp\left(\int P(X;t)\, dt\right). \label{eq:type2gen} \end{equation} In $\NS$, these do indeed give rise to \emph{two different analogues} to the power sum basis, introduced in~\cite[\S 3]{GKLLRT94}: the \emph{noncommutative power sums of the first kind} (or \emph{type}) $\boldsymbol{\Psi}_\alpha$ and of the \emph{second kind} (or \emph{type}) $\boldsymbol{\Phi}_\alpha$, with explicit formulas (due to \cite[\S 4]{GKLLRT94}) as follows. 
The noncommutative power sums of the first kind are those satisfying essentially the same generating function relation as \eqref{eq:type1gen}, where this time $H(X;t)$, $E(X;t)$, and $P(X;t)$ are taken to be the generating functions for the noncommutative homogeneous, elementary, and type one power sums respectively, and expand as \begin{equation}\label{eq:powerinh} \boldsymbol{\Psi}_n = \sum_{\beta \vDash n} (-1)^{\ell(\beta)-1} \beta_k \boldsymbol{h}_\beta \end{equation} where $\beta=(\beta_1,\ldots,\beta_k)$. \begin{notn}[$\lp(\beta,\alpha)$]Given a composition $\alpha =(\alpha_1, \ldots, \alpha_m)$ and a composition $\beta = (\beta_1,\ldots, \beta_k)$ which refines $\alpha$, we let $\lp(\beta) = \beta_k$ (last part) and $$ \lp(\beta,\alpha) = \prod_{i=1}^{\ell(\alpha)} \lp(\beta^{(i)}).$$ \end{notn} Then \begin{equation}\label{eq:htopsi}\boldsymbol{\Psi}_\alpha =\boldsymbol{\Psi}_{\alpha_1}\cdots\boldsymbol{\Psi}_{\alpha_m}=\sum_{\beta \preccurlyeq \alpha} (-1)^{\ell(\beta)-\ell(\alpha)}\lp(\beta,\alpha)\boldsymbol{h}_\beta. \end{equation} Similarly, the noncommutative power sums of the second kind are those satisfying the analogous generating function relation to \eqref{eq:type2gen}, and expand as \begin{equation}\label{eq:htophi} \boldsymbol{\Phi}_n = \sum_{\alpha\vDash n}(-1)^{\ell(\alpha)-1}\frac{n}{\ell(\alpha)}\boldsymbol{h}_\alpha, \quad \text{and} \quad \boldsymbol{\Phi}_\alpha = \sum_{\beta\preccurlyeq\alpha}(-1)^{\ell(\beta)-\ell(\alpha)}\frac{\prod_i \alpha_i}{\ell(\beta,\alpha)}\boldsymbol{h}_\beta, \end{equation} where $\ell(\beta,\alpha)=\prod_{j=1}^{\ell(\alpha)} \ell(\beta^{(j)})$. 
\subsection{Dual bases}\label{sec:dual}Let $V$ be a vector space over $\mathbb{C}$, and let $V^* = \{ \text{linear } \varphi\colon V \to \mathbb{C}\}$ be its dual. Let $\langle,\rangle: V\otimes V^* \rightarrow \mathbb{C}$ be the natural bilinear pairing. Bases of these vector spaces are indexed by a common set, say $I$; and we say bases $\{b_\alpha\}_{\alpha \in I}$ of $V$ and $\{b_\alpha^*\}_{\alpha \in I}$ of $V^*$ are \emph{dual} if $\langle b_\alpha,b^*_\beta\rangle=\delta_{\alpha,\beta}$ for all $\alpha, \beta \in I$. Due to the duality between $\QS$ and $\NS$, we make extensive use of the well-known relationships between change of bases in a vector space and its dual. Namely, if $(A,A^*)$ and $(B,B^*)$ are two pairs of dual bases of $V$ and $V^*$, then for $a_{\alpha}\in A$ and $b_{\beta}^* \in B^*$, we have \[a_\alpha = \sum_{b_\beta \in B}c_\beta^\alpha b_\beta\qquad \text{ if and only if }\qquad b^*_\beta = \sum_{a^*_\alpha \in A^*}c_\beta^\alpha a^*_\alpha.\] \noindent In particular, the bases $\{M_\alpha\}$ of $\QS$ and $\{\boldsymbol{h}_\alpha\}$ of $\NS$ are dual; as are $\{F_\alpha\}$ and $\{\boldsymbol{r}_\alpha\}$ (see \cite[\S 6]{GKLLRT94}). The primary object of this paper is to explore properties of two $\QS$ bases dual to $\{\boldsymbol{\Psi}_\alpha\}$ or $\{ \boldsymbol{\Phi}_\alpha \}$ (up to scalars) that also refine $p_\lambda$. Malvenuto and Reutenauer~\cite{MalReu95} mention (a rescaled version of) the type 1 version but do not explore its properties; Derksen \cite{der09} describes such a basis for the type 2 version, but a computational error leads to an incorrect formula in terms of the monomial quasisymmetric function expansion. \section{Quasisymmetric power sum bases}\label{sec:qsps} The symmetric power sums have the property that $\langle p_\lambda,p_\mu\rangle = z_\lambda \delta_{\lambda, \mu}$ where $z_\lambda$ is as follows. 
\begin{notn}[$z_\alpha$]\label{notn:z} For a partition $\lambda \vdash n$, let $m_i$ be the number of parts of length $i$. Then $$z_\lambda = 1^{m_1}m_1!2^{m_2}m_2!\cdots k^{m_k}m_k!.$$ Namely, $z_\lambda $ is the size of the stabilizer of a permutation of cycle type $\lambda$ under the action of $S_n$ on itself by conjugation. For a composition $\alpha$, we use $z_{\alpha}=z_{\widetilde{\alpha}}$, where $\widetilde{\alpha}$ is the partition rearrangement of $\alpha$ as above. \end{notn} We describe two quasisymmetric analogues of the power sums, each of which satisfies a variant of this duality property. \subsection{Type 1 quasisymmetric power sums} We define the type 1 quasisymmetric power sums to be the basis $\Psi_\alpha$ of $\QS$ such that $$\langle\Psi_\alpha,\boldsymbol{\Psi}_\beta\rangle = z_\alpha \delta_{\alpha,\beta}.$$ While duality makes most of this definition obvious, the scaling is in some sense a free choice to be made. However, as we show in Theorem~\ref{thm:refine} and Corollary~\ref{cor:refine}, our choice of scalar not only generalizes the self-dual relationship of the symmetric power sums, but serves to provide a refinement of those power sums. Moreover, the proof leads to a (best possible) combinatorial interpretation of $\Psi_\alpha$. In \cite[\S 4.5]{GKLLRT94}, the authors give both the transition matrix from the $\boldsymbol{h}$ basis to the $\boldsymbol{\Psi}$ basis (above in \eqref{eq:powerinh})), and its inverse. Using the latter and duality, we compute a monomial expansion of $\Psi_{\alpha}$. \begin{notn}[$\pi(\alpha,\beta)$] First, given $\alpha$ a refinement of $\beta$, recall from Definition~\ref{defn:refinement} that $\alpha^{(i)}$ is the composition consisting of the parts of $\alpha$ that combine (in order) to $\beta_i$. 
Define $$\pi(\alpha)=\prod_{i=1}^{\ell(\alpha)} \sum_{j=1}^i \alpha_j \qquad\text{and} \qquad \pi(\alpha,\beta)=\prod_{i=1}^{\ell(\beta)} \pi(\alpha^{(i)}).$$ \end{notn} Then \[\boldsymbol{h}_\alpha = \sum_{\beta \preccurlyeq \alpha} \frac{1}{\pi(\beta,\alpha)}\boldsymbol{\Psi}_\beta.\] By duality, the polynomial \[\psi_\alpha = \sum_{\beta\succcurlyeq \alpha}\frac{1}{\pi(\alpha,\beta)}M_\beta\] has the property that $\langle\psi_\alpha,\boldsymbol{\Psi}_\beta\rangle=\delta_{\alpha,\beta}$. Then the type 1 quasisymmetric power sums have the following monomial expansion: \begin{equation}\label{eq:PsiM}\Psi_\alpha = z_\alpha \psi_\alpha=z_\alpha\sum_{\beta\succcurlyeq\alpha}\frac{1}{\pi(\alpha,\beta)}M_\beta.\end{equation} For example \begin{align*} \Psi_{232} &= (2^2 \cdot 2! \cdot 3)\left(\frac{1}{2 \cdot 3 \cdot 2} M_{232}+\frac{1}{2 \cdot 5 \cdot 2} M_{52} + \frac{1}{2 \cdot 3 \cdot 5} M_{25} + \frac{1}{2 \cdot 5 \cdot 7} M_7\right) \\ &= 2 M_{232} +\frac{6}{5} M_{52} + \frac{4}{5} M_{25}+\frac{12}{35} M_7. \end{align*} The remainder of this section is devoted to constructing the ``best possible'' combinatorial formulation of the $\Psi_\alpha$, given in Theorem~\ref{thm:combPsi}, followed by the proof of the refinement of the symmetric power sums, given in Theorem~\ref{thm:refine} and Corollary~\ref{cor:refine}. \subsubsection{A combinatorial interpretation of $\Psi_\alpha$} We consider the set $S_n$ of permutations of $[n]=\{1,2,\dots,n\}$ both in one-line notation and in cycle notation. 
For a partition $\lambda=(\lambda_1, \lambda_2, \ldots, \lambda_{\ell})$ of $n$, a permutation $\sigma$ has \emph{cycle type} $\lambda$ if its cycles are of lengths $\lambda_1$, $\lambda_2$, \dots, $\lambda_\ell$. We consider two canonical forms for writing a permutation according to its cycle type. \begin{defn}[standard and partition forms]\label{def:standard-partition} A permutation in cycle notation is said to be in \emph{standard form} if each cycle is written with the largest element last and the cycles are listed in increasing order according to their largest element. It is said to be in \emph{partition form} if each cycle is written with the largest element last, the cycles are listed in descending length order, and cycles of equal length are listed in increasing order according to their largest element. \end{defn} For example, for the permutation $(26)(397)(54)(1)(8)$, we have standard form $(1)(45)(26)(8)(739)$ and partition form $(739)(45)(26)(1)(8)$. Note that our definition of standard form differs from that in \cite[\S1.3]{Sta99v1} in the cyclic ordering; they are equivalent, but our convention is more convenient for the purposes of this paper. If we fix an order of the cycles (as we do when writing a permutation in standard and partition forms), the \emph{(ordered) cycle type} is the composition $\alpha \vDash n$ where the $i$th cycle has length $\alpha_i$. As alluded to in Notation~\ref{notn:z}, we have \cite[Prop.\ 1.3.2]{Sta99v1} \begin{equation*} \frac{n!}{z_\lambda} = \#\{ \sigma \in S_n \text{ of cycle type } \lambda \}. \end{equation*} We are now ready to define a subset of $S_n$ (which uses one-line notation) needed to prove Proposition~\ref{prop:consistent}. 
\begin{notn}[$\splt{\beta}{\sigma}{j}$] Let $\beta \vDash n$ and let $\sigma \in S_n$ be written in one-line notation. First partition $\sigma$ according to $\beta$ (which we draw using $|\!|$), and consider the (disjoint) words $\spl{\beta}{\sigma}=[\sigma^{1}, \dots, \sigma^{\ell}]$, where $\ell = \ell(\beta)$. Let $\splt{\beta}{\sigma}{j}=\sigma^j$. See Table~\ref{tbl:consistent}. \end{notn} \begin{defn}[consistent, $\Cons_{\alpha \preccurlyeq \beta}$]\label{defn:consistent} Fix $\alpha \preccurlyeq \beta$ compositions of $n$. Given $\sigma \in S_n$ written in one-line notation, let $\sigma^j=\splt{\beta}{\sigma}{j}$. Then, for each $i = 1, \dots, \ell$, add parentheses to $\sigma^{i}$ according to $\alpha^{(i)}$, yielding disjoint permutations $\bar{\sigma}^{i}$ (of subalphabets of $[n]$) of cycle type $\alpha^{(i)}$. If the resulting subpermutations $\bar{\sigma}^{i}$ are all in standard form, we say $\sigma$ is \emph{consistent} with $\alpha \preccurlyeq \beta$. In other words, we look at the subsequences of $\sigma$ obtained by splitting according to $\beta$ \emph{separately}, to see if, for each $j$, the $j$th subsequence is in standard form when further partitioned by $\alpha^{(j)}$. Define \begin{equation*} \Cons_{\alpha \preccurlyeq \beta} = \{\sigma \in S_n \mid \sigma \text{ is consistent with } \alpha \preccurlyeq \beta\}. \end{equation*} \end{defn} \begin{example}\label{ex:consistent} Fix $\alpha = (1,1,2,1,3,1)$ and $\beta = (2,2,5)$. 
Table~\ref{tbl:consistent} shows several examples of permutations and the partitioning process.\end{example} \begin{table}[ht] $$ \begin{array}{c@{\qquad}c@{\qquad}c@{\qquad}c} \text{permutation $\sigma$} & \text{partition by $\beta$} & \text{add $()$ by $\alpha$} & \text{$\sigma$ consistent?}\\\hline 571423689 & \underbrace{57}_{{\sigma}^{1}}|\!|\underbrace{14}_{{\sigma}^{2}}|\!|\underbrace{23689}_{{\sigma}^{3}} & \underbrace{(5)(7)}_{\bar{\sigma}^{1}}|\!|\underbrace{(14)}_{\bar{\sigma}^{2}}|\!|\underbrace{(2)(368)(9)}_{\bar{\sigma}^{3}} &\text{yes}\\ 571428369 & \underbrace{57}_{{\sigma}^{1}}|\!|\underbrace{14}_{{\sigma}^{2}}|\!|\underbrace{28369}_{{\sigma}^{3}} & \underbrace{(5)(7)}_{\bar{\sigma}^{1}}|\!|\underbrace{(14)}_{\bar{\sigma}^{2}}|\!|\underbrace{(2)(\boldsymbol{8}36)(9)}_{\bar{\sigma}^{3}} &\textbf{no}\\ 571493682 & \underbrace{57}_{{\sigma}^{1}}|\!|\underbrace{14}_{{\sigma}^{2}}|\!|\underbrace{93682}_{{\sigma}^{3}} & \underbrace{(5)(7)}_{\bar{\sigma}^{1}}|\!|\underbrace{(14)}_{\bar{\sigma}^{2}}|\!|\underbrace{(\boldsymbol{9})(36\boldsymbol{8})(\boldsymbol{2})}_{\bar{\sigma}^{3}} &\textbf{no} \end{array} $$ \caption{Examples of permutations in $S_9$ and determining if they are in $\Cons_{\alpha\preccurlyeq\beta}$ where $\alpha=(1,1,2,1,3,1)$ and $\beta=(2,2,5)$. Note how $\beta$ subtly influences consistency in the last example.}\label{tbl:consistent} \end{table} We also consider the set of all permutations consistent with a given $\alpha$ and all possible choices of (a coarser composition) $\beta$, as each will correspond to various monomial terms in the expansion of a given $\Psi_\alpha$. 
\begin{example} We now consider sets of permutations that are consistent with $\alpha=(1,2,1)$ and each coarsening of $\alpha$. The coarsenings of $\alpha = (1,2,1)$ are $(1,2,1)$, $(3,1)$, $(1,3)$, and $(4)$, and the corresponding sets of consistent permutations in $S_4$ are \begin{align*} \mathbb{C}ons_{(1,2,1) \preccurlyeq (1,2,1)}& = \{1234, 1243, 1342, 2134, 2143, 2341, 3124, 3142, 3241, 4123, 4132, 4231\}, \\\mathbb{C}ons_{(1,2,1) \preccurlyeq (1,3)} &= \{1234, 2134, 3124, 4123\},\\ \mathbb{C}ons_{(1,2,1) \preccurlyeq (3,1)} &= \{1234, 1243, 1342, 2134, 2143, 2341, 3142, 3241\}, \\\mathbb{C}ons_{(1,2,1) \preccurlyeq (4)}& = \{1234, 2134\}. \end{align*} Notice that these sets are not disjoint. \end{example} The following is a salient observation that can be seen from this example. \begin{lemma}\label{lemma:niceobs}If $\sigma$ is consistent with $\alpha\preccurlyeq \beta$ for some choice of $\beta$, then $\sigma$ is consistent with $\alpha \preccurlyeq\gamma$ for all $\alpha \preccurlyeq \gamma \preccurlyeq \beta$.\end{lemma} Note that this implies $\mathbb{C}ons_{\alpha \preccurlyeq \beta} \subseteq \mathbb{C}ons_{\alpha \preccurlyeq \alpha}$ for all $\alpha \preccurlyeq \beta$. With these examples in mind, we will use the following lemma to justify a combinatorial interpretation of $\Psi_\alpha$ in Theorem~\ref{thm:combPsi}.
\begin{lemma}\label{lem:cons} If $\alpha \boldsymbol{\Psi}reccurlyeq \beta$, we have $$ n!=|\mathbb{C}ons_{\alpha \boldsymbol{\Psi}reccurlyeq \beta}|\cdot \boldsymbol{\Psi}i(\alpha, \beta).$$ \boldsymbol{e}nd{lemma} \begin{proof}Consider the set \[A_\alpha= \bigotimes_{i=1}^{\boldsymbol{e}ll(\beta)}\left( \bigotimes_{j=1}^{\boldsymbol{e}ll(\alpha^{(i)})} \nicefrac{\mathbb{Z}}{a_j^{(i)}\mathbb{Z}}\boldsymbol{r}ight), \qquad \text{where } a_j^{(i)} = \boldsymbol{s}um_{r=1}^j \alpha_r^{(i)}.\] We have \[|A_\alpha| = \boldsymbol{\Psi}rod_{i=1}^{\boldsymbol{e}ll(\beta)} \boldsymbol{\Psi}rod_{j=1}^{\boldsymbol{e}ll(\alpha^{(i)})}a_j^{(i)} = \boldsymbol{\Psi}i(\alpha, \beta).\] Construct a map \begin{align*} \mathrm{Sh}: \mathbb{C}ons_{\alpha\boldsymbol{\Psi}reccurlyeq\beta} \times A_\alpha &\to S_n\\ (\boldsymbol{s}igma, s) \quad&\mapsto \boldsymbol{s}igma_s \boldsymbol{e}nd{align*} as follows (see also Example \boldsymbol{r}ef{ex:Rlambdabeta-identity(b)}). For $s=[s^{(i)}_j]_{i=1\ j=1}^{\boldsymbol{e}ll(\beta)\ \boldsymbol{e}ll(\alpha^{(i)})} \in A_\alpha$ and $\boldsymbol{s}igma \in \mathbb{C}ons_{\alpha\boldsymbol{\Psi}reccurlyeq\beta}$, construct a permutation $\boldsymbol{s}igma_s \in S_n$ as follows. \begin{enumerate}[1.] \item Partition $\boldsymbol{s}igma$ into words $\boldsymbol{s}igma^{1}, \dots, \boldsymbol{s}igma^{\boldsymbol{e}ll}$ according to $\beta$ so that $\boldsymbol{s}igma^{i}=\boldsymbol{s}plt{\beta}{\boldsymbol{s}igma}{i}$. \item For each $i=1, \ldots, \boldsymbol{e}ll(\beta)$, modify $\boldsymbol{s}igma^{i}$ by cycling the first $a_j^{(i)}$ values right by $s_j^{(i)}$ for $j=1, \ldots,\boldsymbol{e}ll(\alpha^{(i)})$. Call the resulting word $\boldsymbol{s}igma^{i}_s$. \item Let $\boldsymbol{s}igma_s = \boldsymbol{s}igma^{1}_s \cdots \boldsymbol{s}igma^{\boldsymbol{e}ll}_s$. \boldsymbol{e}nd{enumerate} \noindent This process is invertible as follows. Let $\tau \in S_n$ be written in one-line notation. \begin{enumerate}[1'.] 
\item Partition $\tau$ into words $\tau^{1}, \dots, \tau^{\boldsymbol{e}ll}$ according to $\beta$ such that $\tau^i=\boldsymbol{s}plt{\beta}{\tau}{i}$. \item For each $i=1, \ldots, \boldsymbol{e}ll(\beta)$, let $m_i = \boldsymbol{e}ll(\alpha^{(i)})$. Modify $\tau^{i}$ and record $s^{(i)}_j$ for $j=m_i,\ldots,1$ by cycling the first $a_j^{(i)}$ values left until the largest element is last. Let $s_j^{(i)}$ be the number of required shifts left. Call the resulting word $\boldsymbol{s}igma^{i}$. \item Let $\boldsymbol{s}igma= \boldsymbol{s}igma^{1} \cdots \boldsymbol{s}igma^{\boldsymbol{e}ll}$ and $s =[s^{(i)}_j]_{i=1\ j=1}^{\boldsymbol{e}ll(\beta)\ \boldsymbol{e}ll(\alpha^{(i)})}$. \boldsymbol{e}nd{enumerate} By construction, $s \in A_\alpha$ and $\boldsymbol{s}igma \in \mathbb{C}ons_{\alpha \boldsymbol{\Psi}reccurlyeq \beta}$. It is straightforward to verify that $\boldsymbol{s}igma_s = \tau$. Therefore $\mathrm{Sh}^{-1}$ is well-defined, so that $\mathrm{Sh}$ is a bijection, and thus $n!=|\mathbb{C}ons_{\alpha\boldsymbol{\Psi}reccurlyeq\beta}|\cdot \boldsymbol{\Psi}i(\alpha,\beta)$. \boldsymbol{e}nd{proof} \begin{example}\label{ex:Rlambdabeta-identity(b)} As an example of the construction of $\mathrm{Sh}$ in the proof of Lemma~\boldsymbol{r}ef{lem:cons}, let $\beta=(5,4) \vDash 9$, and let $\alpha=(2,3,2,2) \boldsymbol{\Psi}reccurlyeq \beta$, so that $$\alpha^{(1)} = (2,3), \quad a_1^{(1)} = 2, \quad a_2^{(1)} = 2+3=5, \quad \text{ and }$$ $$\alpha^{(2)} = (2,2), \quad a_1^{(2)} = 2, \quad a_2^{(2)} = 2+2=4. \boldsymbol{\Psi}hantom{\quad \text{ and }}$$ Fix $\boldsymbol{s}igma=267394518 \in \mathbb{C}ons_{\alpha \boldsymbol{\Psi}reccurlyeq \beta}$, and $s=(s^{(1)},s^{(2)})=((1,3),(0,1)) \in A_\alpha$. We want to determine $\boldsymbol{s}igma_s$. \begin{enumerate}[1.] 
\item Partition $\boldsymbol{s}igma$ according to $\beta$: \quad $\boldsymbol{s}igma^{1} = 26739$ and $\boldsymbol{s}igma^{2} = 4518.$ \item Cycle $\boldsymbol{s}igma^{i}$ according to $\alpha^{(i)}$: \begin{align*} \boldsymbol{s}igma^{1} = 26739 \xrightarrow{\text{take first $a_1^{(1)} = 2$ terms}} &\ \underline{26}739 \xrightarrow{\text{cycle $s_1^{(1)} = 1$ right}} \ \underline{62}739\\ \xrightarrow{\text{take first $a_2^{(1)} = 5$ terms}} &\ \underline{62739} \xrightarrow{\text{cycle $s_2^{(1)} = 3$ right}} \ \underline{73962} = \boldsymbol{s}igma^{1}_s; \boldsymbol{e}nd{align*} \begin{align*} \boldsymbol{s}igma^{2} = 4518 \xrightarrow{\text{take first $a_1^{(2)} = 2$ terms}} &\ \underline{45}18 \xrightarrow{\text{cycle $s_1^{(2)} = 0$ right}} \ \underline{45}18\\ \xrightarrow{\text{take first $a_2^{(2)} = 4$ terms}} &\ \underline{4518} \xrightarrow{\text{cycle $s_2^{(2)} = 1$ right}} \ \underline{8451} = \boldsymbol{s}igma^{2}_s. \boldsymbol{e}nd{align*} \item Combine to get $\boldsymbol{s}igma_s = \boldsymbol{s}igma^{1}_s \boldsymbol{s}igma^{2}_s = 739628451$. \boldsymbol{e}nd{enumerate} \noindent Going the other way, start with $\beta=(5,4)\vDash 9$, $\alpha=(2,3,2,2) \boldsymbol{\Psi}reccurlyeq \beta$, and $\tau = 739628451 \in S_9$ in one-line notation, and want to find $\boldsymbol{s}igma \in \mathbb{C}ons_{\alpha \boldsymbol{\Psi}reccurlyeq \beta}$ and $s \in S$ such that $\boldsymbol{s}igma_s = \tau$. \begin{enumerate}[1'.] 
\item Partition $\tau$ according to $\beta$: \quad $\tau^{1} = 73962$ and $\tau^{2} = 8451.$ \item Cycle $\tau^{i}$ into $\alpha \boldsymbol{\Psi}reccurlyeq \beta$ consistency and record shifts: {\boldsymbol{s}etlength{\fboxsep}{1.5pt} \begin{align*} \tau^{1} = 73962 \xrightarrow{\text{take first $a_2^{(1)} = 5$ terms}} &\ \boldsymbol{s}tackrel{(\xleftarrow{~3~}) }{\underline{73\fbox{$9$}62}} \xrightarrow{\text{cycle left so largest is last, and record}} \underline{6273\fbox{$9$}}, & s_2^{(1)} = 3,\\ \xrightarrow{\text{take first $a_1^{(1)} = 2$ terms}} &\ \boldsymbol{s}tackrel{(\xleftarrow{1})}{\underline{\fbox{$6$}2}}\!\!739 \xrightarrow{\text{cycle left so largest is last, and record}} \underline{2\fbox{$6$}}739 = \boldsymbol{s}igma^{1}, & s_1^{(1)} = 1;\\ \boldsymbol{e}nd{align*} \begin{align*} \tau^{2} = 8451 \xrightarrow{\text{take first $a_2^{(2)} = 4$ terms}} & \boldsymbol{s}tackrel{(\xleftarrow{~1~})}{\underline{\fbox{$8$}451}} \xrightarrow{\text{cycle left so largest is last, and record}} \underline{451\fbox{$8$}}, & s_2^{(2)} = 1,\\ \xrightarrow{\text{take first $a_1^{(2)} = 2$ terms}} &\ \boldsymbol{s}tackrel{\checkmark}{\underline{4\fbox{5}}}18 \xrightarrow{\text{cycle left so largest is last, and record}} \underline{4\fbox{$5$}}18 = \boldsymbol{s}igma^{2}, & s_1^{(2)} = 0.\\ \boldsymbol{e}nd{align*}} \item Combine to get $\boldsymbol{s}igma = \boldsymbol{s}igma^{1} \boldsymbol{s}igma^{2} = 267394518$ and $s = ((1,3),(0,1))$ as expected. \boldsymbol{e}nd{enumerate} \boldsymbol{e}nd{example} One might reasonably ask for a ``combinatorial'' description of the quasisymmetric power sums, one similar to our description of the monomial and fundamental basis of the quasisymmetric functions as in (\boldsymbol{r}ef{eq:Ms}) and (\boldsymbol{r}ef{eq:Fs}). There is not an altogether satisfactory such formula for either of the quasisymmetric power sums, although the previous lemma hints at what appears to be the best possible interpretation in the Type 1 case. 
We give the formula, and its quick proof. Before we begin, we will find the following notation useful both here and in the fundamental expansion of the Type 1 power sums. \begin{notn}[$\widehat{\alpha(\sigma)}$, $\mathbb{C}ons_{\alpha}$]\label{notn:hat} Given a permutation $\sigma$ and a composition $\alpha$, let $\widehat{\alpha(\sigma)}$ denote the coarsest composition $\beta$ with $\beta \succcurlyeq \alpha$ and $\sigma \in \mathbb{C}ons_{\alpha\preccurlyeq\beta}.$ For example, if $\alpha=(3,2,2)$ and $\sigma=1352467$, then $\widehat{\alpha(\sigma)}=(3,4)$. In addition, we write $\sigma \in \mathbb{C}ons_\alpha$ if we are considering $\beta=\alpha$.\end{notn} \begin{thm}\label{thm:combPsi} Let $m_i(\alpha)$ be the multiplicity of $i$ in $\alpha$. Then $$\Psi_{(\alpha_1,\cdots,\alpha_k)}(x_1,\cdots,x_m) =\frac{ \prod_{i=1}^n m_i(\alpha)!}{n!}\sum_{\sigma\in S_n} \sum_{\substack{1\leq i_1\leq \cdots\leq i_k\leq m\\ i_j=i_{j+1}\Rightarrow\\ \max(\splt{\alpha}{\sigma}{j})<\max(\splt{\alpha}{\sigma}{j+1})}}x_{i_1}^{\alpha_1}\cdots x_{i_k}^{\alpha_k},$$ where $\max(\splt{\alpha}{\sigma}{j})$ is the maximum of the list defined in Notation 3.4.
\boldsymbol{e}nd{thm} \begin{proof} First, by Lemmas \boldsymbol{r}ef{lem:cons} and \boldsymbol{r}ef{lemma:niceobs}, \begin{align} \Psi_\alpha &=\frac{z_\alpha}{n!}\boldsymbol{s}um_{\alpha\boldsymbol{\Psi}reccurlyeq\beta}|\mathbb{C}ons_{\alpha\boldsymbol{\Psi}reccurlyeq\beta}|M_\beta \nonumber\\ &=\frac{z_\alpha}{n!}\boldsymbol{s}um_{\boldsymbol{s}igma\in \mathbb{C}ons_\alpha}\boldsymbol{s}um_{\alpha\boldsymbol{\Psi}reccurlyeq\delta\boldsymbol{\Psi}reccurlyeq \widehat{\alpha(\boldsymbol{s}igma)}}M_\delta \label{eq:unscaled}\\&= \frac{ \boldsymbol{\Psi}rod_{i=1}^n m_i(\alpha)!}{n!}\boldsymbol{s}um_{\boldsymbol{s}igma\in \mathbb{C}ons_\alpha}\left(\boldsymbol{\Psi}rod_{i}i^{m_i(\alpha)}\boldsymbol{r}ight)\boldsymbol{s}um_{\alpha\boldsymbol{\Psi}reccurlyeq\delta\boldsymbol{\Psi}reccurlyeq \widehat{\alpha(\boldsymbol{s}igma)}}M_\delta.\nonumber\boldsymbol{e}nd{align} The first equality follows by grouping together terms according to the permutation counted rather than the monomial basis. Next, for each $\boldsymbol{s}igma\in \mathbb{C}ons_\alpha$, we can assign $\left(\boldsymbol{\Psi}rod_{i}i^{m_i(\alpha)}\boldsymbol{r}ight)$ objects by cycling each $\boldsymbol{s}igma$ (within cycles defined by $\alpha$) in all possible ways. (Thus for all $j$ we cycle $\boldsymbol{s}plt{\alpha}{\boldsymbol{s}igma}{j}$.) The result for a fixed $\alpha$ is all permutations of $\mathfrak{S}_n$ (considered in one line notation by removing the cycle marks). In particular, for any new permutation $\tau$ we may recover the original permutation $\boldsymbol{s}igma$ by cycling each $\boldsymbol{s}plt{\alpha}{\tau}{j}$, until the maximal term in each cycle is again at the end. 
Thus we may instead sum over all permutations and consider the largest element (rather than the last element) in each cycle: \begin{align*} \Psi_\alpha &=\frac{ \boldsymbol{\Psi}rod_{i=1}^n m_i(\alpha)!}{n!} \boldsymbol{s}um_{\boldsymbol{s}igma\in S_n} \boldsymbol{s}um_{\boldsymbol{s}ubstack{1\leq i_1\leq \cdots\leq i_k\leq m\\ i_j=i_{j+1}\Rightarrow\\ \max(\boldsymbol{s}plt{\alpha}{\boldsymbol{s}igma}{j})<\max(\boldsymbol{s}plt{\alpha}{\boldsymbol{s}igma}{j+1})}} x_{i_1}^{\alpha_1}\cdots x_{i_k}^{\alpha_k}. \boldsymbol{e}nd{align*} \boldsymbol{e}nd{proof} While one might hope to incorporate the multiplicities (perhaps summing over a different combinatorial object, or considering cycling parts, then sorting them by largest last element) there does not seem to be a natural way to do so with previously well known combinatorial objects; the heart of the problem is that the definition of consistency inherently uses (sub)permutations written in standard form, while $\frac{n!}{z_\alpha}$ counts permutations with cycle type $\alpha$ in partition form. This subtlety blocks simplification of formulas throughout the paper. In practice, we expect (\boldsymbol{r}ef{eq:unscaled}) to be a more useful expression because of this fact, but work out the details of the other interpretation here as it cleanly expresses this easily overlooked subtlety. \boldsymbol{s}ubsubsection{Type 1 quasisymmetric power sums refine symmetric power sums} We next turn our attention to a proof that the type 1 quasisymmetric power sums refine the symmetric power sums in a natural way. \begin{notn}[$R_{\alpha\beta}$, $\mathcal{O}_{\alpha\beta}$]\label{notn:R} For compositions $\alpha$, $\beta$, let \begin{equation*} R_{\alpha\beta} = | \mathcal{O}_{\alpha\beta}|, \text{ where } \mathcal{O}_{\alpha\beta} = \left\{ \left. 
\begin{matrix}\text{ordered set partitions}\\\text{$(B_1,\cdots, B_{\boldsymbol{e}ll(\beta)})$ of $\{1,\cdots,\boldsymbol{e}ll(\alpha)\}$}\boldsymbol{e}nd{matrix} ~\boldsymbol{r}ight| ~ \beta_j=\boldsymbol{s}um_{i\in B_j}\alpha_i \text{ for } 1\leq j\leq \boldsymbol{e}ll(\beta) \boldsymbol{r}ight\}, \boldsymbol{e}nd{equation*} i.e.\ $R_{\alpha\beta}$ is the number of ways to group the parts of $\alpha$ so that the parts in the $j$th (unordered) group sum to $\beta_j$. \boldsymbol{e}nd{notn} \begin{example} If $\alpha=(1,3,2,1)$ and $\beta=(3,4)$, then $\mathcal{O}_{\alpha\beta}$ consists of $$(\{2\}, \{1,3,4\}),\ (\{1,3\}, \{2,4\}), \quad \text{and} \quad (\{ 3,4\},\{1,2 \}),$$ corresponding, respectively, to the three possible ways of grouping parts of $\alpha$, $$(\alpha_2 , \alpha_1+\alpha_3+\alpha_4),\ (\alpha_1+\alpha_3 , \alpha_2+\alpha_4), \quad \text{and} \quad ( \alpha_3+\alpha_4 , \alpha_1+\alpha_2).$$ Therefore $R_{\alpha \beta} = 3$. \boldsymbol{e}nd{example} For partitions $\lambda, \mu$, we have \begin{equation*}\label{eq:p-in-m} p_\lambda=\boldsymbol{s}um_{\mu \vdash n}R_{\lambda \mu}m_\mu \boldsymbol{e}nd{equation*} (see for example \cite[p.297]{Sta99v2}). Further, if $\widetilde{\alpha}$ is the partition obtained by putting the parts of $\alpha$ in decreasing order as before, then \begin{equation*}\label{eq:p-in-M} R_{\alpha\beta}=R_{\widetilde{\alpha}\widetilde{\beta}}, \qquad \text{ implying }\qquad p_\lambda=\boldsymbol{s}um_{\alpha \models n}R_{\lambda \alpha}M_\alpha. \boldsymbol{e}nd{equation*} The refinement of the symmetric power sums can be established either by exploiting duality or through a bijective proof. We present the bijective proof first (using it to justify our combinatorial interpretation of the basis) and defer the simpler duality argument to \S~\boldsymbol{r}ef{sec:products}. \begin{thm}\label{thm:refine} Let $\lambda \vdash n$. 
Then \[p_\lambda = \boldsymbol{s}um_{\alpha: \widetilde{\alpha}=\lambda} \Psi_\alpha.\] \boldsymbol{e}nd{thm} \begin{cor}\label{cor:refine} $\Psi_\alpha=z_\alpha\boldsymbol{\Psi}si_\alpha=z_{\widetilde{\alpha}}\boldsymbol{\Psi}si_\alpha$ is the unique rescaling of the $\boldsymbol{\Psi}si$ basis (that is the dual basis to $\boldsymbol{\Psi}$) which refines the symmetric power sums with unit coefficients. \boldsymbol{e}nd{cor} Recall from \boldsymbol{e}qref{eq:PsiM} that for a composition $\alpha$, $$\Psi_\alpha = \boldsymbol{s}um_{\beta \boldsymbol{s}ucccurlyeq \alpha}\frac{z_\alpha}{\boldsymbol{\Psi}i(\alpha,\beta)}M_\beta.$$ Summing over $\alpha$ rearranging to $\lambda$ and multiplying on both sides by ${n!}/{z_\lambda}$, we see that to prove Theorem \boldsymbol{r}ef{thm:refine} it is sufficient to establish the following for a fixed $\beta\vDash n$. \begin{prop}\label{prop:consistent} For $\lambda \vdash n$ and $\beta \vDash n$, \[R_{\lambda\beta}\frac{n!}{z_{\lambda}}=\boldsymbol{s}um_{\boldsymbol{s}ubstack{\alpha \boldsymbol{\Psi}reccurlyeq \beta \\ \widetilde{\alpha}=\lambda}}\frac{n!}{\boldsymbol{\Psi}i(\alpha, \beta)}.\] \boldsymbol{e}nd{prop} \begin{proof} The proof is in two steps, with the first being Lemma~\boldsymbol{r}ef{lem:cons}. Let $\beta\vDash n$ and $\lambda \vdash n$. We must establish that \begin{equation}\label{R=cons}R_{\lambda\beta} \frac{n!}{z_{\lambda}} = \boldsymbol{s}um_{\boldsymbol{s}ubstack{\alpha \boldsymbol{\Psi}reccurlyeq \beta\\\widetilde{\alpha}=\lambda}}|\mathbb{C}ons_{\alpha\boldsymbol{\Psi}reccurlyeq\beta}|.\boldsymbol{e}nd{equation} Let $\mathcal{O}_{\lambda\beta}$ be the set of ordered set partitions as defined in Notation\ \boldsymbol{r}ef{notn:R}. 
For each refinement $\alpha \boldsymbol{\Psi}reccurlyeq \beta$, let $C_{\alpha}=\{(\alpha, \boldsymbol{s}igma) \mid \boldsymbol{s}igma \mbox{ is consistent with } \alpha \boldsymbol{\Psi}reccurlyeq \beta\}$ and define $ C=\bigcup_{\boldsymbol{s}ubstack{\alpha \boldsymbol{\Psi}reccurlyeq \beta \\ \widetilde{\alpha}=\lambda}} C_{\alpha}.$ Denote by $S_n^{\lambda}$ the set of permutations of $n$ of cycle type $\lambda$. Then we prove \boldsymbol{e}qref{R=cons}, by defining the map $$\mathrm{Br}: C \to \mathcal{O}_{\lambda\beta} \times S_n^{\lambda}$$ as follows (see also Example \boldsymbol{r}ef{ex:Rlambdabeta-identity(a)}), and showing that it is a bijection. Start with $(\alpha, \boldsymbol{s}igma)\in C$, with $\boldsymbol{s}igma$ written in one-line notation. \begin{enumerate}[1.] \item Add parentheses to $\boldsymbol{s}igma$ according to $\alpha$, and denote the corresponding permutation (now written in cycle notation) $\bar{\boldsymbol{s}igma}$. \item Sort the cycles of $\bar{\boldsymbol{s}igma}$ into partition form (as in Definition \boldsymbol{r}ef{def:standard-partition}), and let $c_i$ be the $i$th cycle in this ordering. \item Comparing to $\bar{\boldsymbol{s}igma}^{1}$, \dots, $\bar{\boldsymbol{s}igma}^{\boldsymbol{e}ll}$ as in Definition \boldsymbol{r}ef{defn:consistent} of consistent permutations, define $B=(B_1, \dots, B_k)$ by $j \in B_i$ when $c_j$ belongs to $\bar{\boldsymbol{s}igma}^{i}$, i.e.\ $\bar{\boldsymbol{s}igma}^{i} = \boldsymbol{\Psi}rod_{j \in B_i} c_j$. \boldsymbol{e}nd{enumerate} Define $\mathrm{Br}(\alpha, \boldsymbol{s}igma)=(B, \bar{\boldsymbol{s}igma}).$ Since $\alpha$ rearranges to $\lambda$, $\bar{\boldsymbol{s}igma}$ has (unordered) cycle type $\lambda$. And since $\boldsymbol{\Psi}rod_{j \in B_i} c_j = \bar{\boldsymbol{s}igma}^{i},$ we have $\boldsymbol{s}um_{j \in B_i}\lambda_j=\beta_i.$ Thus $\mathrm{Br}: (\alpha, \boldsymbol{s}igma) \mapsto (B, \bar{\boldsymbol{s}igma})$ is well-defined. 
Next, we show that $\mathrm{Br}$ is invertible, and therefore a bijection. Namely, fix $B = (B_1, \ldots, B_{\boldsymbol{e}ll}) \in \mathcal{O}_{\lambda \beta}$ and $\bar{\boldsymbol{s}igma} \in S_n^\lambda$, writing $\bar{\boldsymbol{s}igma} = c_1 c_2 \cdots c_k$ in partition form. Then determine $(\alpha, \boldsymbol{s}igma) \in C$ as follows. \begin{enumerate}[1'.] \item Let $\bar{\boldsymbol{s}igma}^{i} = \boldsymbol{\Psi}rod_{j \in B_i} c_j$, and sort $\bar{\boldsymbol{s}igma}^{i}$ into standard form (as a permutation of the corresponding subalphabet of $[n]$). \item Let $\alpha$ be the (ordered) cycle type of $\bar{\boldsymbol{s}igma}^{1}\bar{\boldsymbol{s}igma}^{2}\cdots\bar{\boldsymbol{s}igma}^{\boldsymbol{e}ll}$. \item Delete the parentheses. Let $\boldsymbol{s}igma$ be the corresponding permutation written in one-line notation. \boldsymbol{e}nd{enumerate} By construction, $\alpha$ refines $\beta$ and is a rearrangement of $\lambda$, and $\boldsymbol{s}igma$ is ($\alpha \boldsymbol{\Psi}reccurlyeq \beta$)-consistent. And it is straightforward to verify that this process exactly inverts $\mathrm{Br}$. Therefore $\mathrm{Br}^{-1}$ is well-defined. This implies that $\mathrm{Br}$ is a bijection and hence \boldsymbol{e}qref{R=cons} holds. Then it follows from Lemma \boldsymbol{r}ef{lem:cons} that \[R_{\lambda\beta}\frac{n!}{z_\lambda}=\boldsymbol{s}um_{\boldsymbol{s}ubstack{\alpha\boldsymbol{\Psi}reccurlyeq\beta\\\widetilde{\alpha}=\lambda}}\frac{n!}{\boldsymbol{\Psi}i(\alpha,\beta)}\] as desired. \boldsymbol{e}nd{proof} \begin{example}\label{ex:Rlambdabeta-identity(a)} As an example of the construction of $\mathrm{Br}$ in the proof of Proposition~\boldsymbol{r}ef{prop:consistent}, let $\beta=(5,4)$, $\alpha=(2,3;2,2)$ and $\boldsymbol{s}igma=267394518$. We want to determine $\mathrm{Br}(\alpha,\boldsymbol{s}igma)$. \begin{enumerate}[1.] \item Add parentheses to $\boldsymbol{s}igma$ according to $\alpha$: \ \ $\bar{\boldsymbol{s}igma} = (26)(739)(45)(18)$. 
\item Partition-sort the cycles of $\bar{\boldsymbol{s}igma}$: $\bar{\boldsymbol{s}igma} = \underbrace{(739)}_{c_1}\underbrace{(45)}_{c_2}\underbrace{(26)}_{c_3}\underbrace{(18)}_{c_4}$. \item Compare to the $\beta$-partitioning: $\underbrace{(26)(739)}_{\bar{\boldsymbol{s}igma}^{1}}|\!|\underbrace{(45)(18)}_{\bar{\boldsymbol{s}igma}^{2}}$. So $B=(\{1,3\},\{2,4\})$, since $\bar{\boldsymbol{s}igma}^{1} = c_1 c_3$ and $\bar{\boldsymbol{s}igma}^{2} = c_2 c_4$. \boldsymbol{e}nd{enumerate} \noindent Going the other way, start with $B=(\{1,3\},\{2,4\})$ and $\bar{\boldsymbol{s}igma} = (739)(45)(26)(18)$ written in partition form. \begin{enumerate}[1'.] \item Place cycles into groups according to $B$: $(739)(26) |\!| (45)(18)$. \item Sort within parts in ascending order by largest value: $(26) (739)|\!| (45)(18)$\\ Then $\alpha = (2,3,2,2)$. \item Delete parentheses to get $\boldsymbol{s}igma = 267394518$. \boldsymbol{e}nd{enumerate} \boldsymbol{e}nd{example} \boldsymbol{s}ubsection{Type 2 quasisymmetric power sums}\label{sec:type2power} In order to describe the second type of quasisymmetric power sums, we introduce the following notation. \begin{notn}[$\boldsymbol{s}pab(\alpha,\beta)$] In the following, $\boldsymbol{s}pab(\beta,\alpha) = \boldsymbol{\Psi}rod_i \boldsymbol{s}pab(\beta^{(i)})$ for $\boldsymbol{s}pab(\gamma)=\boldsymbol{e}ll(\gamma)!\boldsymbol{\Psi}rod_j\gamma_j$. 
\end{notn} As shown in~\cite{GKLLRT94}, we can write the noncommutative complete homogeneous functions in terms of the noncommutative power sums of type 2 as \[\boldsymbol{h}_\alpha = \sum_{\beta\preccurlyeq\alpha}\frac{1}{\spab(\beta,\alpha)}\ptwo_\beta.\] By duality, the basis dual to $\ptwo_\beta$ can be written as a sum of monomial symmetric functions as \[\phi_\alpha = \sum_{\beta\succcurlyeq\alpha}\frac{1}{\spab(\alpha,\beta)}M_\beta.\] We then define the type 2 quasisymmetric power sums as \[\Phi_\alpha = z_\alpha \phi_\alpha.\] A similar polynomial $P_\alpha$ is defined in~\cite{MalReu95}, and is related to $\phi_\alpha$ by $\phi_\alpha=\left(\prod_i \alpha_i\right)^{-1} P_\alpha$. Note that this means Malvenuto and Reutenauer's polynomial $P_\alpha$ is not dual to $\ptwo_\alpha$ and (by the following results) does not refine the symmetric power sums. For example, $\Phi_{322} = 2M_{322}+M_{52}+M_{34}+\frac{1}{3}M_7$ whereas $P_{322}=M_{322}+\frac{1}{2}M_{52}+\frac{1}{2}M_{34}+\frac{1}{6}M_7$. We can obtain a more combinatorial description for $\Phi_\alpha$ by rewriting the coefficients and interpreting them in terms of ordered set partitions. \begin{notn}[$\OSP(\alpha,\beta)$]\label{notn:OSP} Let $\alpha \preccurlyeq \beta$ and let $\OSP(\alpha,\beta)$ denote the ordered set partitions of $\{1,\ldots,\ell(\alpha)\}$ with block size $|B_i|=\ell(\alpha^{(i)})$. If $\alpha \not\preccurlyeq \beta$, we set $\OSP(\alpha,\beta)=\emptyset$. \end{notn} \begin{thm}\label{thm:power2} Let $\alpha\vDash n$ and let $m_i$ denote the number of parts of $\alpha$ of size $i$.
Then \[\Phi_\alpha = \binom{\boldsymbol{e}ll(\alpha)}{m_1,m_2,\ldots,m_k}^{-1}\boldsymbol{s}um_{\beta\boldsymbol{s}ucccurlyeq\alpha}|\OSP(\alpha,\beta)|M_\beta.\] \boldsymbol{e}nd{thm} \begin{proof} Given $\alpha\vDash n$, let $m_i$ denote the number of parts of $\alpha$ of size $i$. Then \begin{align*} \Phi_\alpha&=\boldsymbol{s}um_{\beta\boldsymbol{s}ucccurlyeq\alpha} \frac{z_\alpha}{\boldsymbol{s}pab(\alpha,\beta)}M_\beta\\ &= \boldsymbol{s}um_{\beta\boldsymbol{s}ucccurlyeq\alpha}\frac{z_\alpha}{\boldsymbol{\Psi}rod_j \alpha_j\boldsymbol{\Psi}rod_i (\boldsymbol{e}ll(\alpha^{(i)}))! } M_\beta\\ &=\frac{z_\alpha}{\boldsymbol{e}ll(\alpha)!\boldsymbol{\Psi}rod_j \alpha_j}\boldsymbol{s}um_{\beta\boldsymbol{s}ucccurlyeq\alpha}\frac{\boldsymbol{e}ll(\alpha)!}{\boldsymbol{\Psi}rod_i(\boldsymbol{e}ll(\alpha^{(i)}))!}M_\beta\\ &=\binom{\boldsymbol{e}ll(\alpha)}{m_1,m_2,\ldots,m_k}^{-1}\boldsymbol{s}um_{\beta\boldsymbol{s}ucccurlyeq\alpha}\frac{\boldsymbol{e}ll(\alpha)!}{\boldsymbol{\Psi}rod_i(\boldsymbol{e}ll(\alpha^{(i)}))!}M_\beta. \boldsymbol{e}nd{align*} Note that $\dfrac{\boldsymbol{e}ll(\alpha)!}{\boldsymbol{\Psi}rod_i(\boldsymbol{e}ll(\alpha^{(i)}))!}$ is the number of ordered set partitions of $\{1,\ldots,\boldsymbol{e}ll(\alpha)\}$ with block size $B_i=\boldsymbol{e}ll(\alpha^{(i)})$. Thus \[\Phi_\alpha = \binom{\boldsymbol{e}ll(\alpha)}{m_1,m_2,\ldots,m_k}^{-1}\boldsymbol{s}um_{\beta\boldsymbol{s}ucccurlyeq\alpha}|\OSP(\alpha,\beta)|M_\beta.\qedhere\] \boldsymbol{e}nd{proof} \begin{thm}{\label{thm:2refine}} The type 2 quasisymmetric power sums refine the symmetric power sums by $$p_\lambda = \boldsymbol{s}um_{\widetilde{\alpha}=\lambda}\Phi_\alpha.$$ \boldsymbol{e}nd{thm} Here the proof requires only a single (and less complex) bijection. \begin{lemma}\label{lem:type2refine} Let $\lambda\vdash n$ and $\beta \vDash n$. Let $m_i$ denote the number of parts of $\lambda$ of size $i$. 
Then \[\binom{\ell(\lambda)}{m_1,m_2,\ldots,m_k}R_{\lambda\beta} = \sum_{\substack{\alpha\preccurlyeq\beta\\\widetilde{\alpha}=\lambda}}|\OSP(\alpha,\beta)|.\] \end{lemma} \begin{proof} Let $\lambda \vdash n$, and $m_i$ denote the number of parts of $\lambda$ of size $i$, so that $\ell(\lambda) = \sum_{i=1}^{\lambda_1} m_i$. We can model a composition $\alpha$ that rearranges $\lambda$ as an ordered set partition $(A_1,\ldots,A_{\lambda_1})$ of $\{1,\cdots,\ell(\lambda)\}$ where $A_i = \{j ~|~ \alpha_j = i\}$. Thus, if $$\mathcal{A}_\lambda = \left\{\left.
\begin{matrix}\text{ordered set partitions}\\\text{$(A_1,\ldots,A_{\lambda_1})$ of $\{1,\cdots,\ell(\lambda)\}$}\end{matrix} ~\right| ~ |A_i| = m_i \right\}, $$ then the map $$\gamma: \mathcal{A}_\lambda \to \{\alpha \vDash n ~|~ \tilde{\alpha} = \lambda\}$$ defined by \begin{equation} \gamma(A) \text{ is the composition with $\gamma(A)_j=i$ for all $j \in A_i$} \label{eq:gamma(A)} \end{equation} is a natural bijection. Further, we have $|\mathcal{A}_\lambda|=\binom{\ell(\lambda)}{m_1,m_2,\ldots,m_{\lambda_1}}$. Now, fix $\beta \vDash n$. Recall, we have $$\mathcal{O}_{\lambda\beta}= \left\{\left. \begin{matrix}\text{ordered set partitions}\\\text{$(B_1,\cdots, B_{\ell(\beta)})$ of $\{1,\cdots,\ell(\lambda)\}$}\end{matrix} ~\right| ~ \beta_j=\sum_{i\in B_j}\lambda_i \text{ for } 1\leq j\leq \ell(\beta) \right\}, $$ so that $|\mathcal{O}_{\lambda\beta}| = R_{\lambda \beta}$ (Notation \ref{notn:R}). For $\alpha\preccurlyeq\beta$, we have $$\OSP(\alpha, \beta) = \left\{\left. \begin{matrix}\text{ordered set partitions}\\\text{$(C_1,\cdots, C_{\ell(\beta)})$ of $\{1,\cdots,\ell(\alpha)\}$}\end{matrix} ~\right| ~ |C_i| = \ell(\alpha^{(i)})\right\}$$ (Notation \ref{notn:OSP}).
Informally, $\mathcal{O}_{\lambda\beta}$ tells us how to build $\beta$ as a combination of parts of $\lambda$; and $\OSP(\alpha, \beta)$ tells us how to shuffle a refinement $\alpha \preccurlyeq \beta$. For an ordered set partition $P = (P_1, \dots, P_\ell)$ of $\{1, \dots, \ell(\lambda)\}$, let $p_1^{(i)}, \dots, p_{\ell(\alpha^{(i)})}^{(i)}$ be the elements of $P_i$ written in increasing order. Define $w_{P}$ to be the permutation (in one-line notation) given by $$w_{P_i} = p_1^{(i)} \cdots p_{\ell(\alpha^{(i)})}^{(i)}\qquad \text{ and } \qquad w_P = w_{P_1} \cdots w_{P_{\ell(\beta)}}.$$ We are now ready to construct a bijection $$g: \mathcal{A}_\lambda \times \mathcal{O}_{\lambda\beta} \to \bigsqcup_{\substack{\alpha\preccurlyeq\beta\\\widetilde{\alpha}=\lambda}}\{(\alpha, C) ~|~ C \in \OSP(\alpha, \beta)\}.$$ See Example \ref{ex:OSP}.
Let $(A, B) \in \mathcal{A}_\lambda \times \mathcal{O}_{\lambda\beta}$. Initially, set $\alpha' = \gamma(A)$ (where $\gamma$ is the map in \eqref{eq:gamma(A)}), and set $C\in \mathcal{O}_{\alpha'\beta}$ equal to the image of $B$ under the permutation of indices induced by $\lambda \to \gamma(A)$. Namely, if the $i$th part of $\lambda$ got placed into the $j$th part of $\alpha'$ (where parts of equal length are kept in the same relative order), then $i$ in $B$ is replaced by $j$ in $C$. Now, act by $w_C^{-1}$ on the subscripts of $\alpha'$ to get $\alpha$. The result is $\alpha \preccurlyeq \beta$, $\tilde{\alpha} = \lambda$, and $C \in \OSP(\alpha, \beta)$. Let $g((A,B)) = (\alpha, C)$. To see that this is a bijection, we show that each step in building $g((A,B))$ is invertible as follows. Take $\alpha \preccurlyeq \beta$ such that $\tilde{\alpha} = \lambda$, and let $C \in \OSP(\alpha, \beta)$. Let $\alpha'$ be the result of acting by $w_C$ on the subscripts of $\alpha$. Then we can recover $A = \gamma^{-1}(\alpha')$ from $\alpha'$; and $B$ is the image of $C$ under the permutation of indices induced by $\alpha' \to \lambda$. Namely, if the $j$th part of $\alpha'$ came from the $i$th part of $\lambda$ (where parts of equal length are kept in the same relative order), then $j$ in $C$ is replaced by $i$ in $B$.
Then $A \in \mathcal{A}_\lambda$, $B \in \mathcal{O}_{\lambda\beta}$, and setting $g^{-1}((\alpha, C)) = (A,B)$ gives $g(g^{-1}((\alpha, C))) = (\alpha, C)$ and $g^{-1}(g((A,B))) = (A,B)$. \end{proof} \begin{example}\label{ex:OSP} Fix $\lambda=(3,2,2,1,1,1,1)$ and $\beta = (5,1,4,1)$. So $m_1 = 4$, $m_2 = 2$, and $m_3 = 1$. Now consider $$A = (\{1,2,4,7\},\{3,6\},\{5\}) \in \mathcal{A}_{\lambda} \quad \text{ and } \quad B = (\{1,3\},\{4\},\{2,5,7\},\{6\}) \in \mathcal{O}_{\lambda\beta}.$$ Then $\alpha' = \gamma(A) = (1,1,2,1,3,2,1)$, corresponding to the rearrangement $$\TikZ{ \foreach \x in {1,2,...,7}{\coordinate (t\x) at (\x,1); \coordinate (b\x) at (\x,0); } \node[above] at (1,1) {$\begin{matrix}\lambda_1 \\ 3\end{matrix}$}; \node[above] at (2,1) {$\begin{matrix}\lambda_2 \\ 2\end{matrix}$}; \node[above] at (3,1) {$\begin{matrix}\lambda_3 \\ 2\end{matrix}$}; \node[above] at (4,1) {$\begin{matrix}\lambda_4 \\ 1\end{matrix}$}; \node[above] at (5,1) {$\begin{matrix}\lambda_5 \\ 1\end{matrix}$}; \node[above] at (6,1) {$\begin{matrix}\lambda_6 \\ 1\end{matrix}$}; \node[above] at (7,1) {$\begin{matrix}\lambda_7 \\ 1\end{matrix}$}; \node[below] at (1,0) {$\begin{matrix}1\\ \alpha'_1\end{matrix}$}; \node[below] at (2,0) {$\begin{matrix}1\\ \alpha'_2\end{matrix}$}; \node[below] at (3,0) {$\begin{matrix}2\\ \alpha'_3\end{matrix}$}; \node[below]
at (4,0) {$\begin{matrix}1\\ \alpha'_4\end{matrix}$}; \node[below] at (5,0) {$\begin{matrix}3\\ \alpha'_5\end{matrix}$}; \node[below] at (6,0) {$\begin{matrix}2\\ \alpha'_6\end{matrix}$}; \node[below] at (7,0) {$\begin{matrix}1\\ \alpha'_7\end{matrix}$}; \draw[thick,->] (t1) to (b5); \draw[thick,->] (t2) to (b3); \draw[thick,->] (t3) to (b6); \draw[thick,->] (t4) to (b1); \draw[thick,->] (t5) to (b2); \draw[thick,->] (t6) to (b4); \draw[thick,->] (t7) to (b7); }, \quad \text{which induces} \quad \TikZ{ \foreach \x in {1,2,...,7}{\node (t\x) at (\x,2) {$\x$}; \node (b\x) at (\x,0) {$\x$}; } \draw[thick,->] (t1) to (b5); \draw[thick,->] (t2) to (b3); \draw[thick,->] (t3) to (b6); \draw[thick,->] (t4) to (b1); \draw[thick,->] (t5) to (b2); \draw[thick,->] (t6) to (b4); \draw[thick,->] (t7) to (b7); }. $$ The image of $B$ under this induced map is $C = (\{5,6\}, \{1\}, \{2, 3, 7\}, \{4\})$. So $w_C = 5612374$, and the image of $\alpha'$ under the action of $w_C^{-1}$ on subscripts is $$w_C^{-1} : \ \TikZ{ \foreach \x in {1,2,...,7}{\coordinate (t\x) at (\x,1); \coordinate (b\x) at (\x,0); } \node[above] at (1,1) {$\begin{matrix}\alpha'_1 \\ 1\end{matrix}$}; \node[above] at (2,1) {$\begin{matrix}\alpha'_2 \\ 1\end{matrix}$}; \node[above] at (3,1) {$\begin{matrix}\alpha'_3 \\ 2\end{matrix}$}; \node[above] at (4,1) {$\begin{matrix}\alpha'_4 \\ 1\end{matrix}$}; \node[above] at (5,1) {$\begin{matrix}\alpha'_5 \\ 3\end{matrix}$}; \node[above] at (6,1) {$\begin{matrix}\alpha'_6 \\ 2\end{matrix}$}; \node[above] at (7,1) {$\begin{matrix}\alpha'_7 \\ 1\end{matrix}$}; \node[below] at (1,0) {$\begin{matrix}3\\ \alpha_1\end{matrix}$}; \node[below] at (2,0) {$\begin{matrix}2\\ \alpha_2\end{matrix}$}; \node[below] at (3,0) {$\begin{matrix}1\\ \alpha_3\end{matrix}$}; \node[below] at (4,0) {$\begin{matrix}1\\
\alpha_4\end{matrix}$}; \node[below] at (5,0) {$\begin{matrix}2\\ \alpha_5\end{matrix}$}; \node[below] at (6,0) {$\begin{matrix}1\\ \alpha_6\end{matrix}$}; \node[below] at (7,0) {$\begin{matrix}1\\ \alpha_7\end{matrix}$}; \draw[thick,->] (t5) to (b1); \draw[thick,->] (t6) to (b2); \draw[thick,->] (t1) to (b3); \draw[thick,->] (t2) to (b4); \draw[thick,->] (t3) to (b5); \draw[thick,->] (t7) to (b6); \draw[thick,->] (t4) to (b7); }.$$ And, indeed, we see that $\alpha = (3,2,1,1,2,1,1) \preccurlyeq \beta$ and $C \in \OSP(\alpha, \beta)$. We can see why it is necessary to record $\alpha$ as follows. For example, we consider inverting $g$ on the same $C$ as above, but now paired with $\alpha = (3,2,1,2,1,1,1)$. Then the image of $\alpha$ under the action of $w_C$ on subscripts is $$w_C: \ \TikZ{ \foreach \x in {1,2,...,7}{\coordinate (t\x) at (\x,1); \coordinate (b\x) at (\x,0); } \node[above] at (1,1) {$\begin{matrix}\alpha_1 \\ 3\end{matrix}$}; \node[above] at (2,1) {$\begin{matrix}\alpha_2 \\ 2\end{matrix}$}; \node[above] at (3,1) {$\begin{matrix}\alpha_3 \\ 1\end{matrix}$}; \node[above] at (4,1) {$\begin{matrix}\alpha_4 \\ 2\end{matrix}$}; \node[above] at (5,1) {$\begin{matrix}\alpha_5 \\ 1\end{matrix}$}; \node[above] at (6,1) {$\begin{matrix}\alpha_6 \\ 1\end{matrix}$}; \node[above] at (7,1) {$\begin{matrix}\alpha_7 \\ 1\end{matrix}$}; \node[below] at (1,0) {$\begin{matrix}1\\ \alpha'_1\end{matrix}$}; \node[below] at (2,0) {$\begin{matrix}2\\ \alpha'_2\end{matrix}$}; \node[below] at (3,0) {$\begin{matrix}1\\ \alpha'_3\end{matrix}$}; \node[below] at (4,0) {$\begin{matrix}1\\ \alpha'_4\end{matrix}$}; \node[below] at (5,0) {$\begin{matrix}3\\ \alpha'_5\end{matrix}$}; \node[below] at (6,0) {$\begin{matrix}2\\ \alpha'_6\end{matrix}$}; \node[below] at
(7,0) {$\begin{matrix}1\\ \alpha'_7\end{matrix}$}; \draw[thick,->] (t1) to (b5); \draw[thick,->] (t2) to (b6); \draw[thick,->] (t3) to (b1); \draw[thick,->] (t4) to (b2); \draw[thick,->] (t5) to (b3); \draw[thick,->] (t6) to (b7); \draw[thick,->] (t7) to (b4); }.$$ So $A = (\{1,3,4,7\}, \{2,6\}, \{5\})$, which is different from the $A$ we started with above. The set $B$, though, is left unchanged. \end{example} \section{Relationships between bases}\label{sec:btw} \subsection{The relationship between the type 1 and type 2 quasisymmetric power sums} To determine the relationship between the two different types of quasisymmetric power sums, we first use duality to expand the monomial quasisymmetric functions in terms of the type 2 quasisymmetric power sums. Thus, from \eqref{eq:htopsi} and duality we obtain $$M_{\beta} = \sum_{\alpha \succcurlyeq \beta} (-1)^{\ell(\beta)-\ell(\alpha)} \frac{\Pi_i \alpha_i}{\ell(\beta,\alpha)} \Phi_{\alpha}.$$ Then we expand the type 1 quasisymmetric power sums in terms of the monomial quasisymmetric functions \eqref{eq:PsiM} and apply substitution to obtain the following expansion of the type 1 quasisymmetric power sums into the type 2 quasisymmetric power sums: \begin{align*} \Psi_{\alpha} &= \sum_{\beta \succcurlyeq \alpha} \frac{z_{\alpha}}{\pi(\alpha, \beta)}M_{\beta} \\ &= \sum_{\beta \succcurlyeq \alpha} \frac{z_{\alpha}}{\pi(\alpha, \beta)} \sum_{\gamma\succcurlyeq\beta} (-1)^{\ell(\beta)-\ell(\gamma)} \frac{\Pi_i \gamma_i}{\ell(\beta, \gamma)} \Phi_{\gamma} \\ &= \sum_{\alpha \preccurlyeq \beta \preccurlyeq \gamma} (-1)^{\ell(\beta)-\ell(\gamma)} \frac{z_{\alpha} \Pi_i
\gamma_i}{\pi(\alpha, \beta) \ell(\beta, \gamma)} \Phi_{\gamma}. \end{align*} A similar process produces \begin{align*} \Phi_{\alpha} &= \sum_{\beta \succcurlyeq \alpha} \frac{z_\alpha}{sp(\alpha,\beta)} M_{\beta} \\ &= \sum_{\beta \succcurlyeq \alpha} \frac{z_\alpha}{sp(\alpha,\beta)} \sum_{\gamma\succcurlyeq\beta} (-1)^{\ell(\beta)-\ell(\gamma)} lp(\beta, \gamma) \Psi_{\gamma} \\ &= \sum_{\alpha \preccurlyeq \beta \preccurlyeq \gamma} (-1)^{\ell(\beta)-\ell(\gamma)} \frac{z_\alpha lp(\beta, \gamma)}{sp(\alpha, \beta)} \Psi_{\gamma}. \end{align*} \subsection{The relationship between monomial and fundamental quasisymmetric functions} Our next goal is to give the ``cleanest'' possible interpretation of the expansions of the quasisymmetric power sums in the fundamental basis. Towards this goal we first establish a more basic relationship between the $F$ basis and certain sums of monomials. \begin{notn}[$\alpha^c$, $\alpha\wedge\beta$, $\alpha\vee\beta$] Given $\alpha\vDash n$, let $\alpha^c=\comp((\set(\alpha))^c)$. Given a second composition $\beta$, $\alpha\wedge\beta$ denotes the finest (i.e.\ with the smallest parts) composition $\gamma$ such that $\gamma\succcurlyeq\alpha$ and $\gamma\succcurlyeq \beta$. Similarly, $\alpha\vee \beta$ denotes the coarsest composition $\delta$ such that $\delta\preccurlyeq\alpha$ and $\delta \preccurlyeq \beta$. \end{notn} \begin{example} If $\alpha=(2,3,1)$ and $\beta=(1,2,2,1)$, then $\alpha^c=(1,2, 1,2), \alpha\wedge\beta=(5,1)$, and $\alpha\vee \beta=(1,1,1,2,1)$. \end{example} \noindent The notation is motivated by the poset of sets ordered by containment (when combined with the bijection from sets to compositions).
We note that $\set(\alpha\wedge\beta)=\set(\alpha) \cap \set(\beta)$ and $\set(\alpha \vee \beta)=\set(\alpha)\cup \set(\beta)$. We begin by writing the sum (over an interval in the refinement partial order) of quasisymmetric monomial functions as a sum of distinct fundamental quasisymmetric functions. \begin{lemma}\label{lem:moninterval} Let $\alpha,\beta\vDash n$ with $\alpha\preccurlyeq\beta$. Then \[\sum_{\delta: \alpha\preccurlyeq\delta\preccurlyeq\beta} M_\delta = \sum_{\beta\vee \alpha^c\preccurlyeq\delta\preccurlyeq\beta}(-1)^{\ell(\beta)-\ell(\delta)}F_\delta.\] \end{lemma} \begin{proof} Let $\alpha, \beta\vDash n$ with $\alpha \preccurlyeq \beta$. Then \begin{align} \sum_{\alpha\preccurlyeq\delta\preccurlyeq\beta}M_\delta & =\sum_{\alpha\preccurlyeq\delta\preccurlyeq\beta}\sum_{\gamma\preccurlyeq\delta}(-1)^{\ell(\gamma)-\ell(\delta)}F_\gamma\nonumber\\ &=\sum_{\gamma\preccurlyeq\beta}(-1)^{\ell(\gamma)}F_\gamma\left(\sum_{\alpha\wedge\gamma\preccurlyeq\delta\preccurlyeq\beta}(-1)^{\ell(\delta)}\right).\label{eq:fexpansion} \end{align} Recall (see \cite{Sta99v1}) the M\"obius function for the lattice of subsets of an $(n-1)$-element set, ordered by set inclusion. If $S$ and $T$ are subsets of an $(n-1)$-element set with $T \subseteq S$, then $\mu(T,S)=(-1)^{|S-T|}$ and $(-1)^{|S-T|}= - \sum_{T\subseteq U\subset S} \mu(T,U)$.
Thus, since compositions of $n$ are in bijection with subsets of $[n-1]$ and $\ell(\delta)=|\set(\delta)|+1$, when $\alpha\wedge\gamma \neq \beta$, we can write \begin{align*} \sum_{\alpha\wedge\gamma\preccurlyeq\delta\preccurlyeq\beta} (-1)^{\ell(\delta)} &= (-1)^{\ell(\alpha\wedge\gamma)}+(-1)^{\ell(\beta)}\sum_{\alpha\wedge\gamma\prec\delta\preccurlyeq\beta}(-1)^{\ell(\delta)-\ell(\beta)} \\ &=(-1)^{\ell(\alpha\wedge\gamma)}+(-1)^{\ell(\beta)}\sum_{\alpha\wedge\gamma\prec\delta\preccurlyeq\beta}\mu(\set(\beta),\set(\delta)) \\ &=(-1)^{\ell(\alpha\wedge\gamma)}+(-1)^{\ell(\beta)+1}\mu(\set(\beta), \set(\alpha\wedge\gamma)) \\ &=(-1)^{\ell(\alpha\wedge\gamma)}+(-1)^{\ell(\beta)+1}(-1)^{\ell(\alpha\wedge\gamma)-\ell(\beta)}\\ &=0.\end{align*} We can now rewrite \eqref{eq:fexpansion} as \[\sum_{\gamma\preccurlyeq\beta}(-1)^{\ell(\gamma)}F_\gamma\left(\sum_{\alpha\wedge\gamma\preccurlyeq\delta\preccurlyeq\beta}(-1)^{\ell(\delta)}\right)=\sum_{\substack{\gamma\preccurlyeq\beta\\\beta=\gamma\wedge\alpha}} (-1)^{\ell(\gamma)+\ell(\alpha\wedge\gamma)}F_\gamma=\sum_{ \alpha^c\vee\beta \preccurlyeq \gamma\preccurlyeq \beta}(-1)^{\ell(\gamma)-\ell(\beta)}F_\gamma.\qedhere\] \end{proof} \subsection{The relationship between type 1 quasisymmetric power sums and fundamental quasisymmetric functions} Recall Notation \ref{notn:hat} for the
following. \begin{thm}\label{thm:psitoF} Let $\alpha\vDash n$. Then \[\Psi_\alpha = \frac{z_\alpha}{n!}\sum_{\gamma\succcurlyeq\alpha}|\{\sigma\in\Cons_\alpha: \widehat{\alpha(\sigma)}=\gamma\}|\sum_{\eta\succcurlyeq \alpha^c}(-1)^{\ell(\eta)-1}F_{\gamma\vee\eta}.\] \end{thm} \begin{proof} Let $\alpha\vDash n$. We use $\mathbbold{1}_{\mathcal{R}}$ to denote the characteristic function of the relation $\mathcal{R}$. Combining the quasisymmetric monomial expansion of $\Psi_\alpha$ given in \eqref{eq:PsiM}, Lemma~\ref{lem:cons}, and Lemma~\ref{lem:moninterval}, we have \begin{align*} \Psi_\alpha &=\frac{z_\alpha}{n!}\sum_{\alpha\preccurlyeq\beta}|\Cons_{\alpha\preccurlyeq\beta}|M_\beta\\ &=\frac{z_\alpha}{n!}\sum_{\sigma\in \Cons_\alpha}\sum_{\alpha\preccurlyeq\delta\preccurlyeq \widehat{\alpha(\sigma)}}M_\delta \\ &=\frac{z_\alpha}{n!}\sum_{\sigma\in\Cons_\alpha}\sum_{\alpha^c\vee \widehat{\alpha(\sigma)}\preccurlyeq\delta\preccurlyeq\widehat{\alpha(\sigma)}}(-1)^{\ell(\widehat{\alpha(\sigma)})-\ell(\delta)}F_\delta \text{ (by Lemma~\ref{lem:moninterval})} \\ &=\frac{z_\alpha}{n!}\sum_{\delta\vDash n}(-1)^{\ell(\delta)}F_\delta \sum_{\sigma\in\Cons_\alpha}(-1)^{\ell(\widehat{\alpha(\sigma)})}\mathbbold{1}_{\alpha^c\vee\widehat{\alpha(\sigma)}\preccurlyeq \delta \preccurlyeq \widehat{\alpha(\sigma)}}\\
&=\frac{z_\alpha}{n!}\sum_{\gamma\succcurlyeq\alpha}|\{\sigma\in\Cons_\alpha:\widehat{\alpha(\sigma)}=\gamma\}|\sum_{\delta \vDash n}(-1)^{\ell(\gamma)-\ell(\delta)}F_\delta \mathbbold{1}_{\alpha^c\vee\gamma\preccurlyeq \delta\preccurlyeq\gamma}, \end{align*} with the last equality holding since the compositions $\widehat{\alpha(\sigma)}$ are coarsenings of $\alpha$. It is straightforward to check that given $\gamma \succcurlyeq \alpha$ and $\delta \vDash n$, there exists $\eta \succcurlyeq \alpha^c$ such that $\delta=\gamma\vee\eta$ if and only if $\delta \succcurlyeq \alpha^c\vee\gamma$. Then \begin{align*} \Psi_\alpha&=\frac{z_\alpha}{n!}\sum_{\gamma\succcurlyeq\alpha}|\{\sigma\in \Cons_\alpha: \widehat{\alpha(\sigma)}=\gamma\}|(-1)^{\ell(\gamma)}\sum_{\eta\succcurlyeq\alpha^c}(-1)^{\ell(\gamma\vee\eta)}F_{\gamma\vee\eta}\\ &= \frac{z_\alpha}{n!}\sum_{\gamma\succcurlyeq\alpha}|\{\sigma\in\Cons_\alpha: \widehat{\alpha(\sigma)}=\gamma\}|\sum_{\eta\succcurlyeq \alpha^c}(-1)^{\ell(\eta)-1}F_{\gamma\vee\eta}. \end{align*} The final equality is established by noting that $\set(\gamma)\cap\set(\eta)=\emptyset$, so $\ell(\gamma\vee\eta)=|\set(\gamma\vee\eta)|+1=|\set(\gamma)|+|\set(\eta)|+1=\ell(\gamma)+\ell(\eta)-1$.
\end{proof} \begin{note}The $F_{\gamma \vee \eta}$'s are distinct in this sum, meaning the coefficient of $F_\delta$ is either 0 or is $$|\{ \sigma ~|~ \widehat{\alpha(\sigma)} = \gamma \}| (-1)^{\ell(\eta)-1}$$ when $\delta = \gamma \vee \eta$ for $\gamma \succcurlyeq \alpha$ and $\alpha^c \preccurlyeq \eta$. This follows from the fact that we can recover $\gamma$ and $\eta$ from $\gamma \vee \eta$ and $\alpha$, with $$\gamma=\comp(\set(\gamma \vee \eta)\cap \set(\alpha)),$$ $$\eta=\comp(\set(\gamma \vee \eta)\cap \set(\alpha)^c).$$ \end{note} \subsection{The relationship between type 2 quasisymmetric power sums and fundamental quasisymmetric functions} The expansion of $\Phi_\alpha$ into fundamental quasisymmetric functions is somewhat more straightforward. Let $m_i$ denote the number of parts of $\alpha \vDash n$ that have size $i$. \begin{thm}\label{thm:phitoF} Let $\alpha\vDash n$. Then \[\Phi_\alpha=\binom{m_1+\cdots+m_n}{m_1,\ldots,m_n}^{-1}\sum_{\gamma\vDash n} \left(\sum_{\beta\succcurlyeq(\gamma\wedge \alpha)} (-1)^{\ell(\gamma)-\ell(\beta)}|\OSP(\alpha,\beta)|\right) F_{\gamma}.\] \end{thm} \begin{proof} Let $\alpha\vDash n$.
Combining the quasisymmetric monomial expansion of $\Phi_\alpha$ and the fundamental expansion of $M_\beta$ gives \begin{align} \Phi_\alpha&=\binom{m_1+\cdots+m_n}{m_1,\ldots,m_n}^{-1}\sum_{\beta\succcurlyeq\alpha} |\OSP(\alpha,\beta)|M_\beta\\ &=\binom{m_1+\cdots+m_n}{m_1,\ldots,m_n}^{-1}\sum_{\beta\succcurlyeq\alpha} |\OSP(\alpha,\beta)|\sum_{\beta\succcurlyeq\gamma}(-1)^{\ell(\gamma)-\ell(\beta)}F_\gamma\\ &=\binom{m_1+\cdots+m_n}{m_1,\ldots,m_n}^{-1}\sum_{\gamma\vDash n}F_\gamma \left(\sum_{\beta\succcurlyeq(\gamma\wedge \alpha)} (-1)^{\ell(\gamma)-\ell(\beta)}|\OSP(\alpha,\beta)|\right).\qedhere \end{align} \end{proof} \subsection{The antipode map on quasisymmetric power sums}\label{sec:omega} \begin{defn}[transpose, $\alpha^r,\alpha^t$] Let $\alpha^r$ give the reverse of $\alpha$. Then we call $\alpha^t=(\alpha^c)^r$ the transpose of the composition $\alpha$. \end{defn} The antipode map $S: \QS\rightarrow\QS$ on the Hopf algebra of quasisymmetric functions is commonly defined by $S(F_\alpha)=(-1)^{|\alpha|}F_{\alpha^t}$. On the noncommutative symmetric functions, it is commonly defined as the anti-automorphism such that $S(\boldsymbol{e}_n)=(-1)^n\boldsymbol{h}_n$. It can equivalently be defined by $S(\boldsymbol{r}_\alpha)=(-1)^{|\alpha|}\boldsymbol{r}_{\alpha^t}$. Thus, for $f$ in $\QS$ and $g\in \NS$, $$\langle f,g\rangle=\langle S(f),S(g)\rangle.$$ It can be easier to compute $S$ on a multiplicative basis, such as $\boldsymbol{\Psi}$ or $\boldsymbol{\Phi}$, and then use duality to establish the result on the quasisymmetric side.
We start with the expansion of the $\boldsymbol{\Psi}$ and $\boldsymbol{\Phi}$ in terms of the $\boldsymbol{e}$ basis in \cite[\S 4.5]{GKLLRT94}: \begin{equation}\label{eq:powerine} \boldsymbol{\Psi}_n = \sum_{\alpha\vDash n} (-1)^{n-\ell(\alpha)}\alpha_1 \boldsymbol{e}_\alpha. \end{equation} It follows from \eqref{eq:powerinh} and \eqref{eq:powerine} that $S(\boldsymbol{\Psi}_n)=-\boldsymbol{\Psi}_n$. Then \begin{align*} S(\boldsymbol{\Psi}_{\alpha})&=S(\boldsymbol{\Psi}_{\alpha_1}\boldsymbol{\Psi}_{\alpha_2}\cdots \boldsymbol{\Psi}_{\alpha_k})\\ &=S(\boldsymbol{\Psi}_{\alpha_k})S(\boldsymbol{\Psi}_{\alpha_{k-1}})\cdots S(\boldsymbol{\Psi}_{\alpha_1})\\ &=\left(-\boldsymbol{\Psi}_{\alpha_k}\right)\cdots \left(-\boldsymbol{\Psi}_{\alpha_1}\right)\\ &=(-1)^{\ell(\alpha)}\boldsymbol{\Psi}_{\alpha^r}. \end{align*} This result also follows from the fact that $\boldsymbol{\Psi}_n$ is a primitive element. \begin{thm}\label{thm:omega} For $\alpha \vDash n$, $S(\Psi_\alpha) = (-1)^{\ell(\alpha)}\Psi_{\alpha^r}.$ \end{thm} \begin{proof} Let $\alpha \vDash n$. Then \[z_\alpha \delta_{\alpha, \beta}=\langle \Psi_\alpha, \boldsymbol{\Psi}_\beta\rangle=\langle S(\Psi_\alpha), S(\boldsymbol{\Psi}_\beta)\rangle=\langle S(\Psi_\alpha),(-1)^{\ell(\beta)}\boldsymbol{\Psi}_{\beta^r} \rangle=\langle (-1)^{\ell(\beta)}S(\Psi_\alpha),\boldsymbol{\Psi}_{\beta^r} \rangle,\] so $S(\Psi_\alpha)=(-1)^{\ell(\alpha)}\Psi_{\alpha^r}$. \end{proof} Similarly, we have that $$S(\Phi_\alpha)=(-1)^{\ell(\alpha)}\Phi_{\alpha^r}.$$ There are considerable notational differences between various authors on the names of the well known automorphisms of $\QS$ and $\NS$, in part because there are two natural maps which descend to the well known automorphism $\omega$ in the symmetric functions.
Following both \cite{GKLLRT94} and \cite{LMvW}, we use $\omega(\boldsymbol{e}_n)=\boldsymbol{h}_n$ (where $\omega$ is an anti-automorphism) and $\omega(F_\alpha)=F_{\alpha^t}$ to define (one choice of) a natural analogue of the symmetric function case. We can see, from the definition of $\omega$ and $S$ on the elementary symmetric functions, that the two maps vary by only a sign on homogeneous polynomials of a given degree. In particular, $$\omega(\Psi_\alpha)=(-1)^{|\alpha|-\ell(\alpha)}\Psi_{\alpha^r},$$ $$\omega(\Phi_\alpha)=(-1)^{|\alpha|-\ell(\alpha)}\Phi_{\alpha^r}.$$ \section{Products of quasisymmetric power sums}\label{sec:products} In contrast to the symmetric power sums, the quasisymmetric power sums are not a multiplicative basis. This is immediately evident from the fact that $\Psi_{(n)}=p_{(n)}=\Phi_{(n)}$ but the quasisymmetric power sum basis is not identical to the symmetric power sums. Thus the product of two elements of either power sum basis is more complex in the quasisymmetric setting than the symmetric setting. \subsection{Products of type 1 quasisymmetric power sums}\label{sec:product} We can exploit the duality of comultiplication in $\NS$ and multiplication in $\QS$. \begin{defn}[shuffle, $\shuffle$] Let $[a_1,\cdots ,a_n]\shuffle[b_1,\cdots,b_m]$ give the set of shuffles of $[a_1,\cdots ,a_n]$ and $[b_1,\cdots,b_m]$; that is, the set of all length $m+n$ words without repetition on $\{a_1,\cdots ,a_n\}\cup \{b_1,\cdots,b_m\}$ such that for all $i$, $a_i$ occurs before $a_{i+1}$ and $b_i$ occurs before $b_{i+1}$.
\end{defn} Comultiplication for the noncommutative symmetric power sums (type 1) is given in~\cite{GKLLRT94} by \[\Delta ( \boldsymbol{\Psi}_k) = 1 \otimes \boldsymbol{\Psi}_k + \boldsymbol{\Psi}_k \otimes 1.\] Thus \[\Delta(\boldsymbol{\Psi}_\alpha) = \prod_i \Delta (\boldsymbol{\Psi}_{\alpha_i}) = \prod_i (1\otimes \boldsymbol{\Psi}_{\alpha_i}+\boldsymbol{\Psi}_{\alpha_i}\otimes 1) = \sum_{\substack{\gamma,\beta\\\alpha \in \gamma \shuffle \beta}} \boldsymbol{\Psi}_{\gamma}\otimes \boldsymbol{\Psi}_\beta.\] \begin{notn}[$C(\alpha,\beta)$] Let $a_j$ denote the number of parts of size $j$ in $\alpha$ and $b_j$ denote the number of parts of size $j$ in $\beta$. Define $C(\alpha,\beta)=\prod_j \binom{a_j+b_j}{a_j}.$ A straightforward calculation shows that $C(\alpha,\beta) = z_{\alpha\cdot\beta}/(z_\alpha z_\beta)$. \end{notn} \begin{thm}\label{thm:productpower} Let $\alpha$ and $\beta$ be compositions. Then \[\Psi_\alpha \Psi_\beta = \frac{1}{C(\alpha,\beta)}\sum_{\gamma \in \alpha\shuffle\beta} \Psi_\gamma.\] \end{thm} \begin{proof} Let $\alpha$ and $\beta$ be compositions. Then \begin{align} \Psi_\alpha \Psi_\beta & = (z_\alpha \psi_\alpha)(z_\beta \psi_\beta)\nonumber\\ &= (z_\alpha z_\beta) (\psi_\alpha\psi_\beta). \label{eq:prod} \end{align} Since the $\psi$ are dual to the $\boldsymbol{\Psi}$, we have that $\displaystyle{\psi_\alpha\psi_\beta=\sum_{\gamma \in \alpha\shuffle\beta}\psi_\gamma}$. Note that for any rearrangement $\delta$ of $\gamma$, $z_\delta=z_\gamma$. Thus, we can rewrite \eqref{eq:prod} as \[\Psi_\alpha\Psi_\beta = \frac{z_\alpha z_\beta}{z_{\alpha\cdot\beta}}\sum_{\gamma \in \alpha \shuffle \beta}\Psi_\gamma.
\] \end{proof} In addition to this proof based on duality, we note that it is possible to prove this product rule directly using the monomial expansion of the quasisymmetric power sums. We do this by showing that the coefficients in the quasisymmetric monomial function expansions of both sides of the product formula in Theorem~\ref{thm:productpower} are the same. \begin{defn}[overlapping shuffle, $\cshuffle$] $\delta\cshuffle\eta$ is the set of \emph{overlapping shuffles} of $\delta$ and $\eta$, that is, shuffles where a part of $\delta$ and a part of $\eta$ can be added to form a single part. \end{defn} \begin{lemma}\label{lem:productpi} Let $\alpha \vDash m$, $\beta \vDash n$, and fix $\xi$ a coarsening of a shuffle of $\alpha$ and $\beta$. Then \[\binom{m+n}{m}\sum_{\substack{\delta\succcurlyeq\alpha,\eta\succcurlyeq \beta\\\text{s.t.\ }\xi\in\delta\cshuffle \eta}}\frac{m!}{\pi(\alpha,\delta)}\frac{n!}{\pi(\beta,\eta)} = \sum_{\substack{\gamma \in \alpha \shuffle \beta\\\gamma \preccurlyeq \xi}}\frac{(m+n)!}{\pi(\gamma,\xi)}.\] \end{lemma} \begin{proof} Let $\alpha \vDash m$, $\beta \vDash n$, and fix $\xi$, a coarsening of a shuffle of $\alpha$ and $\beta$. Then $\xi=(\xi_1,\ldots,\xi_k)$ where each $\xi_i$ is a (known) sum of parts of $\alpha$ or $\beta$, or both. Let $Y_m= \{D\subseteq [m+n]: |D|=m\}$ and $B_{\xi,\alpha,\beta}=\{\gamma \in \alpha\shuffle\beta:\gamma\preccurlyeq\xi\}$.
We establish a bijection \[f: Y_m\times \bigcup_{\substack{\delta\succcurlyeq\alpha,\eta\succcurlyeq\beta\\\text{s.t.\ }\xi\in\delta\cshuffle\eta}}\left(\Cons_{\alpha\preccurlyeq\delta}\times\Cons_{\beta\preccurlyeq\eta}\right)\rightarrow \bigcup_{\gamma \in B_{\xi,\alpha,\beta}} (\Cons_{\gamma\preccurlyeq\xi}\times\{\gamma\}).\] Let $(D, \sigma, \tau) \in Y_m\times \bigcup_{\substack{\delta\succcurlyeq\alpha,\eta\succcurlyeq\beta\\\text{s.t.\ }\xi\in\delta\cshuffle\eta}}\left(\Cons_{\alpha\preccurlyeq\delta}\times\Cons_{\beta\preccurlyeq\eta}\right)$. Then $D=\{i_1<i_2<\ldots<i_m\}$. To construct $(\word,\gamma)=f((D,\sigma,\tau))$: \begin{enumerate} \item Create a word $\widetilde{\sigma}$ that is consistent with $\alpha\preccurlyeq\delta$ by replacing each $j$ in $\sigma$ with $i_j$ from $D$. Similarly, use $[m+n]\setminus D$ to create $\widetilde{\tau}$ consistent with $\beta\preccurlyeq\eta$. \item Arrange the parts of $\widetilde{\sigma}$ and $\widetilde{\tau}$ in a single permutation by placing the parts corresponding to $\alpha_i$ (resp. $\beta_i$) in the location they appear in $\xi$. Finally, for all parts within a single part of $\xi$, arrange the sub-permutations so that the final elements of each sub-permutation create an increasing sequence from left to right. Note that this will keep parts of $\alpha$ in order since $\widetilde{\sigma}$ is consistent with $\alpha\preccurlyeq \delta$ and parts of $\alpha$ occurring in the same part of $\xi$ also occurred in the same part of $\delta$.
(An analogous statement is true for parts of $\beta$.) \item The resulting permutation is $\word=f((D,\sigma,\tau))$ and is an element of $\Cons_{\gamma\preccurlyeq\xi}$ where $\gamma$ is determined by the order of parts in $\word$ corresponding to $\alpha$ and $\beta$. \end{enumerate} Conversely, given $(\word',\gamma) \in \bigcup_{\gamma \in B_{\xi,\alpha,\beta}}(\Cons_{\gamma\preccurlyeq\xi}\times\{\gamma\})$, construct a triple $(D',\sigma',\tau')$ by: \begin{enumerate} \item In $\word'$, the $i$th block corresponds to the $i$th part of $\gamma$. Place the labels in the $i$th block of $\word'$ in $D'$ if the $i$th part of $\gamma$ is from $\alpha$. \item Let $\widetilde{\sigma}'$ be the subword of $\word'$ consisting of blocks corresponding to parts of $\alpha$, retaining the double-lines to show which parts of $\alpha$ were in the same part of $\xi$ to indicate $\delta\succcurlyeq \alpha$. Rewrite as a permutation in $S_m$ by standardizing in the usual way: replace the $i$th smallest entry with $i$ for $1\leq i \leq m$. The resulting permutation $\sigma'$ is consistent with $\alpha\preccurlyeq\delta$. \item Similarly construct $\tau'$ from the subword $\widetilde{\tau}'$ of $\word'$ consisting of parts corresponding to parts of $\beta$. \qedhere \end{enumerate}\end{proof} \begin{example} Let $\alpha = (2,1,1,2)$, $\beta = (\underline{2},\underline{1})$ and $\xi=(2+1+\underline{2}, \underline{1}, 1+2)$. Then $(\delta,\eta)=((2+1,1+2), (\underline{2},\underline{1}))$. Choose $D=\{1,2,5,6,7,9\}$, $\sigma = |\!|34|6|\!|2|15|\!| $, and $\tau=|\!|13|\!|2|\!|$. Then $\widetilde{\sigma} = |\!|5 6|9|\!|2|17|\!|$ and $\widetilde{\tau}=|\!|3 8|\!|4|\!|$. Then $\word = |\!|56|38|9|\!|4|\!|2|17|\!|$ and the corresponding shuffle $\gamma=(2,\underline{2},1,\underline{1},1,2)$.
Now, consider $\gamma'=(\underline{2},2,1,\underline{1},1,2)$ and $\word'=|\!|24|16|8|\!|9|\!|5|37|\!|.$ Then $\widetilde{\sigma}'=|\!|16|8|\!|5|37|\!|$ and $\widetilde{\tau}'=|\!|24|\!|9|\!|$. Then $D'=\{1,3,5,6,7,8\}$, $\sigma'=|\!|14|6|\!|3|25|\!|$, and $\tau'=|\!|12|\!|3|\!|$. \end{example} We now use Lemma~\ref{lem:productpi} to offer a more combinatorial proof of Theorem~\ref{thm:productpower}. \begin{proof}[Proof of Theorem~\ref{thm:productpower}] Let $\alpha \vDash m$ and $\beta \vDash n$. Then \begin{align} \Psi_\alpha \Psi_\beta & = \left(\sum_{\delta\succcurlyeq \alpha}\frac{z_\alpha}{\pi(\alpha,\delta)}M_\delta\right)\left(\sum_{\eta\succcurlyeq \beta} \frac{z_\beta}{\pi(\beta,\eta)}M_\eta\right)\nonumber\\ &=z_\alpha z_\beta \sum_{\delta\succcurlyeq\alpha}\sum_{\eta\succcurlyeq \beta}\frac{1}{\pi(\alpha,\delta)\pi(\beta,\eta)}M_\delta M_\eta\nonumber\\ &=\frac{z_{\alpha\cdot\beta}}{C(\alpha,\beta)}\sum_{\delta\succcurlyeq\alpha}\sum_{\eta\succcurlyeq \beta}\frac{1}{\pi(\alpha,\delta)\pi(\beta,\eta)}\sum_{\zeta \in \delta \cshuffle\eta}M_\zeta\nonumber\\ &= \frac{z_{\alpha\cdot\beta}}{C(\alpha,\beta)}\sum_{\zeta \vDash m+n}M_\zeta \left(\sum_{\substack{(\delta,\eta): \delta \succcurlyeq \alpha, \eta \succcurlyeq \beta\\\zeta \in \delta\cshuffle\eta}}\frac{1}{\pi(\alpha,\delta)\pi(\beta,\eta)}\right).\label{eq:monex} \end{align} By 
Lemma~\ref{lem:productpi} we can rewrite \eqref{eq:monex} as \begin{align*} \Psi_\alpha\Psi_\beta &=\frac{z_{\alpha\cdot\beta}}{C(\alpha,\beta)}\sum_{\zeta \vDash m+n} M_\zeta \sum_{\substack{\gamma\in\alpha\shuffle\beta\\\gamma\preccurlyeq\zeta}}\frac{1}{\pi(\gamma,\zeta)}\\ &=\frac{1}{C(\alpha,\beta)}\sum_{\gamma \in \alpha \shuffle\beta}\sum_{\zeta \succcurlyeq \gamma} \frac{z_{\gamma}}{\pi(\gamma,\zeta)}M_\zeta\\ &=\frac{1}{C(\alpha,\beta)}\sum_{\gamma \in \alpha\shuffle\beta}\Psi_\gamma.\qedhere \end{align*} \end{proof} Now that we have a product formula for the quasisymmetric power sums, a more straightforward proof can be given for Theorem~\ref{thm:refine}. \begin{proof}[Proof of Theorem~\ref{thm:refine}] We proceed by induction on $\ell(\lambda)$, the length of $\lambda$. If $\ell(\lambda)=1$, then $\lambda=(n)$ and $p_{(n)}=m_{(n)}=M_{(n)}=\Psi_{(n)}.$ (This is because $\psi_{(n)}=\frac{1}{\pi((n),(n))}M_{(n)}=\frac{1}{n}M_{(n)}$ and $\Psi_{(n)}=z_{(n)}\psi_{(n)}=n\psi_{(n)}$.) Suppose the theorem holds for partitions of length $k$ and let $\mu$ be a partition with $\ell(\mu)=k+1$. Suppose $\mu_{k+1}=j$ and let $\lambda=(\mu_1, \mu_2, \ldots, \mu_k)$. Let $m_j$ be the number of parts of size $j$ in $\mu$. 
Then, using the induction hypothesis and Theorem~\ref{thm:productpower}, we have \begin{equation}\label{ind} p_\mu=p_\lambda p_{(j)}= \left( \sum_{\substack{\alpha \vDash |\lambda|\\ \tilde{\alpha}=\lambda}} \Psi_\alpha\right)\Psi_{(j)}= \sum_{\substack{\alpha \vDash|\lambda|\\ \tilde{\alpha}=\lambda}} \left( \Psi_\alpha\Psi_{(j)}\right)=\frac{1}{m_j} \sum_{\substack{\alpha \vDash |\lambda|\\ \tilde{\alpha}=\lambda}} \sum_{\gamma \in \alpha\shuffle (j)} \Psi_\gamma.\end{equation} Here, we used the fact that, if $\tilde{\alpha}=\lambda$, then $\displaystyle C(\alpha, (j))=\binom{m_j}{m_j-1}=m_j$. Suppose $\gamma\in \alpha \shuffle (j)$ for some $\alpha \vDash |\lambda|$ such that $\tilde{\alpha}=\lambda$. Then $\gamma \vDash |\mu|$ and $\tilde{\gamma}=\mu$. Moreover, every composition $\theta\vDash |\mu|$ with $\tilde{\theta}=\mu$ belongs to $ \alpha \shuffle (j)$ for some $\alpha\vDash |\lambda|$ with $\tilde{\alpha}=\lambda$. We write $\gamma\vDash |\mu|$ with $\tilde{\gamma}=\mu$ as $\gamma^{(1)},J^{(1)},\gamma^{(2)},J^{(2)},\ldots, \gamma^{(q)},J^{(q)}$ where each $\gamma^{(i)}$ has no part equal to $j$ and each $J^{(i)}$ consists of exactly $r_i$ parts equal to $j$. We refer to $J^{(i)}$ as the $i$th block of parts equal to $j$. Here $r_i>0$ for $i=1, 2, \ldots, q-1$ and $r_q\geq 0$. Moreover, $r_1+r_2+\cdots +r_q=m_j$. Denote by $\alpha(\gamma, i)$ the composition obtained from $\gamma$ by removing the first $j$ in $J^{(i)}$ (if it exists). Then, the multiplicity of $\gamma$ in $\alpha(\gamma, i)\shuffle (j)$ equals $r_i$ (since $(j)$ can be shuffled in $r_i$ different positions in the $i$th block of parts equal to $j$ of $\alpha(\gamma, i)$ to obtain $\gamma$). 
Then, the multiplicity of $\gamma$ in $\displaystyle\bigcup\{\alpha \shuffle (j) \mid \alpha \vDash |\lambda|,\ \tilde{\alpha}=\lambda\}$ equals $m_j$ and \[p_\mu=\sum_{\substack{\beta \vDash |\mu|\\ \tilde{\beta}=\mu}} \Psi_\beta.\qedhere\] \end{proof} \subsection{Products of type 2 quasisymmetric power sums} As with the type 1 quasisymmetric power sums, since $\Delta\ptwo_k=1\otimes\ptwo_k+\ptwo_k\otimes 1$, the product rule is \begin{equation}\label{eqn:productpower2} \Phi_\alpha\Phi_\beta = \frac{1}{C(\alpha,\beta)}\sum_{\gamma\in \alpha\shuffle\beta}\Phi_\gamma.\end{equation} Again, we can give a combinatorial proof of the product rule. The proof of \eqref{eqn:productpower2} is almost identical to the proof of Theorem \ref{thm:productpower}, so we omit the details. A significant difference is the proof of the analog of Lemma \ref{lem:productpi}, so we give the analog here. \begin{lemma}Let $\alpha \vDash m$, $\beta \vDash n$, and fix $\xi$ a coarsening of a shuffle of $\alpha$ and $\beta$. 
Then \[\sum_{\substack{\delta\succcurlyeq\alpha,\eta\succcurlyeq \beta\\\text{s.t.\ }\xi\in\delta\cshuffle \eta}}\frac{1}{\spab(\alpha,\delta)}\frac{1}{\spab(\beta,\eta)} = \sum_{\substack{\gamma \in \alpha \shuffle \beta\\\gamma \preccurlyeq \xi}}\frac{1}{\spab(\gamma,\xi)}.\] \end{lemma} \begin{proof} First, note that $\prod_i\alpha_i$ and $\prod_i\beta_i$ occur in the denominator of every term in the left hand side of our desired equation, but \[\prod_i\alpha_i\prod_i\beta_i=\prod_i \gamma_i \text{ and } \ell(\alpha)+\ell(\beta)=\ell(\gamma)\] for any $\gamma$ occurring in the right hand sum. Then multiplying by $\ell(\gamma)!\prod_i\alpha_i\prod_i\beta_i$ on the left and right, we need to show \begin{align*}\binom{\ell(\alpha)+\ell(\beta)}{\ell(\alpha)}\sum_{\substack{\delta\succcurlyeq\alpha,\eta\succcurlyeq \beta\\\text{s.t.\ }\xi\in\delta\cshuffle \eta}}\frac{\ell(\alpha)!}{\prod_j\ell(\alpha^{(j)})!}\frac{\ell(\beta)!}{\prod_j\ell(\beta^{(j)})!} = \sum_{\substack{\gamma \in \alpha \shuffle \beta\\\gamma \preccurlyeq \xi}}\frac{\ell(\gamma)!}{\prod_j\ell(\gamma^{(j)})!}. 
\end{align*} Equivalently, we need to show \begin{align}\label{eq:power2eq15}\binom{\ell(\alpha)+\ell(\beta)}{\ell(\alpha)}\sum_{\substack{\delta\succcurlyeq\alpha,\eta\succcurlyeq \beta\\\text{s.t.\ }\xi\in\delta\cshuffle \eta}}|\OSP(\alpha,\delta)||\OSP(\beta,\eta)| = \sum_{\substack{\gamma \in \alpha \shuffle \beta\\\gamma \preccurlyeq \xi}}|\OSP(\gamma,\xi)|. \end{align} For a given choice of $\delta$ and $\eta$ satisfying the conditions on the left, select $S$ and $T$, ordered set partitions in $\OSP(\alpha,\delta)$ and $\OSP(\beta,\eta)$ respectively. Pick a subset $U$ of size $\ell(\alpha)$ from the first $\ell(\alpha)+\ell(\beta)$ positive integers and re-number the elements in $S$ and $T$ in order, such that the elements of $S$ are re-numbered with the elements of $U$ and the elements of $T$ are re-numbered with elements of $U^c$ to form $\tilde{S}$ and $\tilde{T}$ respectively. Going forward, consider each of the subsets as lists, with the elements listed in increasing order. Use the subsets to assign an additional value to each part of $\alpha$ or $\beta$, working in order. Say that $f(\alpha,i)=m$ if $\alpha_i$ occurs as the $k$th element in $\alpha^{(j)}$ and $m$ is the $k$th element in $\tilde{S}_j$. Similarly say that $f(\beta,i)=m$ if $\beta_i$ occurs as the $k$th element in $\beta^{(j)}$ and $m$ is the $k$th element in $\tilde{T}_j$. Note that each choice of $\delta$ and $\eta$ gives a refinement of $\xi$, with each $\xi_i$ a sum of parts of $\alpha$ and $\beta$. 
Then sort the parts of $\alpha$ and $\beta$ to create $\gamma$, such that parts of $\alpha$ occur in order and parts of $\beta$ occur in order, and the following additional rules are satisfied: Let $\alpha_i$ be one of the parts that forms $\delta_j$, which in turn is used to form $\xi_k$, and let $\beta_l$ be one of the parts that forms $\eta_m$, which in turn is used to form $\xi_n$. Then \begin{itemize} \item if $k>n$ (i.e.\ $\beta_l$ is an earlier subpart of $\xi$ than $\alpha_i$ is), $\alpha_i$ occurs after $\beta_l$, \item if $k<n$ (i.e.\ $\alpha_i$ is an earlier subpart of $\xi$ than $\beta_l$ is), $\alpha_i$ occurs before $\beta_l$, \item if $k=n$ (i.e.\ $\alpha_i$ and $\beta_l$ eventually make up the same part of $\xi$), $\alpha_i$ is left of $\beta_l$ if and only if $f(\alpha,i)<f(\beta,l)$. \end{itemize} Finally, create an element of $\OSP(\gamma,\xi)$ by placing $p$ in the $q$th part if $f(\alpha,i)=p$ (or $f(\beta,l)=p$) and $\alpha_i$ ($\beta_l$ respectively) is one of the parts used to form $\gamma^{(q)}$. Note that this map is bijective; since the parts of $\alpha$ and $\beta$ which occur in the same part of $\gamma$ are sorted by the value they are assigned in the set partition, we can recover from the set partition which integers were assigned to each part (and what $U$ was, by looking at which numbers are assigned to parts corresponding to $\alpha$). \end{proof} \begin{example}Let $\alpha=(1,2,1)$ and $\beta=(\underline{1},\underline{1},\underline{2})$. Let $\delta=(1+2,1)$, $\eta=(\underline{1},\underline{1}+\underline{2})$, and $\xi=(1+2+\underline{1},\underline{1}+\underline{2},1)$. ($\xi$ here is fixed before $\delta$ and $\eta$, but how we write it as an overlapping shuffle depends on their choice.) Finally let $S=(\{1,3\},\{2\})$, $T=(\{3\},\{1,2\})$, and $U=\{1,4,6\}$. Then $\tilde{S}=(\{1,6\},\{4\})$ and $\tilde{T}=(\{5\},\{2,3\})$. 
Next, we reorder the second and third subparts of $\xi$ to get $\gamma=(1,\underline{1},2,\underline{1},\underline{2},1)$ and the final ordered set partition is $(\{1,5,6\},\{2,3\},\{4\})$. \end{example} \subsection{The shuffle algebra} Let $V$ be a vector space with basis $\{v_a \}_{a \in \mathfrak{U}}$ where $\mathfrak{U}$ is a totally ordered set. For our purposes, $\mathfrak{U}$ will be the positive integers. The \emph{shuffle algebra} $Sh(V)$ is the Hopf algebra of the set of all words with letters in $\mathfrak{U}$, where the product is given by the shuffle product $v \shuffle w$ defined above. The shuffle algebra and $QSym$ are isomorphic as graded Hopf algebras~\cite{GriRei14}. We now describe a method for generating $QSym$ through products of the type 1 quasisymmetric power sums indexed by objects called \emph{Lyndon words}; to do this we first need several definitions. A \emph{proper suffix} of a word $w$ is a word $v$ such that $w=uv$ for some nonempty word $u$. The following total ordering on words with letters in $\mathfrak{U}$ is used to define Lyndon words. We say that $u \le_L v$ if either \begin{itemize} \item $u$ is a prefix of $v$, or \item $j$ is the smallest positive integer such that $u_j \not= v_j$ and $u_j<v_j$. \end{itemize} Otherwise $v \le_L u$. If $w=w_1 w_2 \cdots w_k$ is a nonempty word with $w_i \in \mathfrak{U}$ for all $i$, we say that $w$ is a \emph{Lyndon word} if every nonempty proper suffix $v$ of $w$ satisfies $w <_L v$. Let $\mathcal{L}$ be the set of all Lyndon words. Radford's Theorem~\cite[Theorem 3.1.1(e)]{Rad79} states that if $\{ b_a \}_{a \in \mathfrak{U}}$ is a basis for a vector space $V$, then $\{b_w\}_{w \in \mathcal{L}}$ is an algebraically independent generating set for the shuffle algebra $Sh(V)$. 
To construct a generating set for $Sh(V)$, first define the following operation (which we will call an \emph{index-shuffle}) on basis elements $b_{\alpha}$ and $b_{\beta}$: \[b_{\alpha} \underline{\shuffle} b_{\beta} = \sum_{\gamma \in \alpha \shuffle \beta} b_{\gamma}.\] Recall that \[\Psi_\alpha \Psi_\beta = \frac{1}{C(\alpha,\beta)}\sum_{\gamma \in \alpha\shuffle\beta} \Psi_\gamma,\] where $C(\alpha,\beta) = z_{\alpha\cdot\beta}/(z_\alpha z_\beta)$. Then $\Psi_\alpha \underline{\shuffle} \Psi_\beta = C(\alpha, \beta) \Psi_{\alpha} \Psi_{\beta}$. Since Radford's theorem implies that every basis element $b_{\alpha}$ can be written as a linear combination of index-shuffles of basis elements indexed by Lyndon words, every basis element $\Psi_{\alpha}$ can be written as a linear combination of products of type 1 quasisymmetric power sums indexed by Lyndon words and we have the following result. \begin{thm} The set $\{ \Psi_C \mid C \in \mathcal{L} \}$ freely generates $QSym$ as a commutative $\mathbb{Q}$-algebra. \end{thm} \begin{example} Since $231$ can be written as $23 \shuffle 1 - 2 \shuffle 13 + 132$, \[\Psi_{231} = C(23,1)\Psi_{23} \Psi_1 - C(2,13) \Psi_2 \Psi_{13} + \Psi_{132} = \Psi_{23} \Psi_1 - \Psi_2 \Psi_{13} + \Psi_{132}. \] \end{example} \section{Plethysm on the quasisymmetric power sums}\label{sec:plethysm} The symmetric power sum basis $\{p_\lambda\}_{\lambda\vdash n}$ plays a particularly important role in the language of $\Lambda$-rings. It is natural to hope that one of the quasisymmetric power sums might play the same role here, and it was this motivation that initially piqued the authors' interest in the quasisymmetric power sums. 
This seems not to be the case, so one might take the next section as a warning to similarly minded individuals that this does not appear to be a productive path for studying quasisymmetric plethysm. To explain the differences between this and the symmetric function case, we remind the reader of the symmetric function case first. \subsection{Plethysm and symmetric power sums} Recall that plethysm is a natural (indeed even universal in some well defined functorial sense) $\Lambda$-ring structure on the symmetric functions. Following the language of \cite{knutson2006lambda}, recall that a pre-$\Lambda$-ring is a commutative ring $R$ with identity and a set of operations $\lambda^i\colon R\rightarrow R$ for $i\in \{0,1,2,\ldots\}$ such that for all $r_1,r_2\in R$: \begin{itemize} \item $\lambda^0(r_1)=1$, \item $\lambda^1(r_1)=r_1$, \item $\lambda^n(r_1+r_2)=\sum_{i=0}^n \lambda^i(r_1)\lambda^{n-i}(r_2)$. \end{itemize} To define a $\Lambda$-ring, use $e^X_i$ and $e^Y_i$ for the elementary symmetric functions $e_i$ in the $X$ or $Y$ variables, and define the universal polynomials $P_n$ and $P_{m,n}$ by \[\sum_{n\geq 0}P_n(e^X_1,\cdots,e^X_n;e^Y_1,\cdots, e^Y_n)t^n=\prod_{i,j}(1-x_{i,j}t),\] and \[\sum_{n\geq 0}P_{m,n}(e^X_1,\cdots,e^X_{nm})t^n=\prod_{i_1<\cdots<i_m }(1-x_{i_1}x_{i_2}\cdots x_{i_m}t).\] Then a pre-$\Lambda$-ring is by definition a $\Lambda$-ring if \begin{itemize} \item for all $i>1$, $\lambda^i(1)=0$; \item for all $r_1,r_2\in R$, $n\geq 0$, \[\lambda^n(r_1r_2)=P_n(\lambda^1 (r_1),\cdots, \lambda^n (r_1);\lambda^1 (r_2),\cdots, \lambda^n (r_2));\] \item for all $r\in R$, $m,n\geq 0$, \[\lambda^m(\lambda^n (r))=P_{m,n}(\lambda^1 (r),\cdots, \lambda^{mn} (r)).\] \end{itemize} These operations force the $\lambda^i$ to behave like exterior powers of vector spaces (with sums and products of $\lambda^i$ corresponding to exterior powers of direct sums and tensor products of 
vector spaces), but are not always so helpful to work with directly. In the classical case, one works more easily indirectly by defining a new series of operations called the Adams operators $\Psi^n\colon R\rightarrow R$ by the relationship (for all $r\in R$) \begin{equation}\label{eq:adams}\frac{d}{dt}\log \sum_{i\geq 0}t^i\lambda^i(r)=\sum_{i=0}^\infty (-1)^i\Psi^{i+1}(r)t^i.\end{equation} Note that while we use $\Psi$ in this section for both the power sums and the Adams operators, the basis elements have subscripts and the Adams operators superscripts. Standard literature uses $\Psi$ for the Adams operators, so this follows the usual convention. Moreover, there is quite a close connection between the two, as mentioned below. \begin{thm}[\cite{knutson2006lambda}]\label{thm:adams} If $R$ is torsion free, $R$ is a $\Lambda$-ring if and only if for all $r_1,r_2\in R$, \begin{enumerate} \item \label{it:1}$\Psi^i(1)=1$, \item\label{it:2} $\Psi^i(r_1r_2)=\Psi^i(r_1)\Psi^i(r_2)$, and \item \label{it:3} $\Psi^i(\Psi^j(r_1))=\Psi^{ij}(r_1)$. \end{enumerate} \end{thm} Since \eqref{eq:adams} defining the Adams operators is equivalent to \eqref{eq:pfrome}, this suggests that simple operations on the symmetric power sum functions should give a $\Lambda$-ring. This is exactly the case for the ``free $\Lambda$-ring on one generator'', where we start with $\Lambda$ a polynomial ring in infinitely many variables $\mathbb{Z}[x_1, x_2,\ldots]$ and let (for any symmetric function $f\in\mathbb{Z}[x_1, x_2,\ldots]$) \[\lambda^i(f)=e_i[f].\] The $[\cdot]$ on the right denotes plethysm. 
One can make an equivalent statement using the Adams operators and the symmetric power sum $p_i$: \[\Psi^i(f)=p_i[f].\] This implies that for all $i\geq 0$ and $f,g\in\mathbb{Z}[x_1, x_2,\ldots]$ (symmetric functions, although $f$ and $g$ can in fact be any integral sum of monomials), \begin{enumerate} \item $p_i[1]=1$, \item $p_i[f+g]=p_i[f]+p_i[g]$, and \item $p_m[fg]=p_m[f]p_m[g]$. \end{enumerate} (Note that the first and third items follow directly from (\ref{it:1}) and (\ref{it:2}) in Theorem \ref{thm:adams}. The second follows from the additive properties of a $\Lambda$-ring.) This is the context in which plethysm is usually directly defined, such that for $f,g$ symmetric functions (and indeed with $f$ allowed much more generally to be a sum of monomials) one more generally calculates $g[f]$ by first expressing $g$ in terms of the power sums and then using the above rules, combined with the following (which allows one to define plethysm as a homomorphism on the power sums): \begin{itemize} \item For any constant $c\in \mathbb{Q}$ (or, generalizing, for $c$ in an underlying field $K$), $c[f]=c$. \item For $m\geq 1$ and $g_1,g_2$ symmetric functions, $(g_1+g_2)[f]=g_1[f]+g_2[f]$. \item For $m\geq 1$ and $g_1,g_2$ symmetric functions, $(g_1g_2)[f]=g_1[f]g_2[f]$. \end{itemize} In this context, \[s_\lambda[s_\mu]=\sum_{\nu}a_{\lambda,\mu}^\nu s_\nu\] where the $a_{\lambda,\mu}^\nu$ are the well-known plethysm coefficients and $f[x_1+\cdots +x_n]$ is just $f(x_1,\ldots,x_n)$. See \cite{LeuRem2007Computational} for a fantastic exposition, starting from this point of view. \subsection{Quasisymmetric and noncommutative symmetric plethysm} While one can modify the definition slightly to allow evaluation of $g[f]$ when $f$ is not a symmetric function, the case is not so simple when $g$ is no longer symmetric. 
Krob, Leclerc, and Thibon~\cite{KLT97Noncommutative} are the first to examine this case in detail; they begin by looking at $g$ in the noncommutative symmetric functions, but $g$ in the quasisymmetric functions can be defined from there by a natural duality. (It is not so natural to try to find an analogue directly on the quasisymmetric function side, since one needs an analogue of the elementary symmetric functions to begin.) A peculiarity of this case is that the order of the monomials in $f$ matters (as the evaluation of noncommutative symmetric functions or quasisymmetric functions depends on the order of the variables), and one can meaningfully define a different result depending on the choice of monomial ordering. As is suggested by the formalism of $\Lambda$-rings, Krob, Leclerc, and Thibon~\cite{KLT97Noncommutative} begin by essentially defining the natural analogue of a pre-$\Lambda$-ring on the homogeneous noncommutative symmetric functions, and thus by extension on the elementary noncommutative symmetric functions. They do this in a way that guarantees the properties of a pre-$\Lambda$-ring as follows. Use $A\oplus B$ for the addition of ordered noncommuting alphabets (with the convention that terms in $A$ precede terms in $B$) and $H(X;t)$ for the generating function of the homogeneous noncommutative symmetric functions on the alphabet $X$. Then define $H(A \oplus B; t)$ as follows: \[H(A\oplus B;t)=\sum_{n\geq 0}\boldsymbol{h}_{n}[A\oplus B]t^n:= H(A;t)H(B;t).\] Already, this is enough to show that plethysm on the noncommutative power sums (or by duality the quasisymmetric power sums) is not as nice. 
Using $\boldsymbol{\Psi}(X;t)$ for the generating function of the noncommutative symmetric power sums, \cite{KLT97Noncommutative} show that \[\boldsymbol{\Psi}(A\oplus B;t)=H(B;t)^{-1}\boldsymbol{\Psi}(A;t)H(B;t)+\boldsymbol{\Psi}(B;t).\] This is, of course, in contrast to the simple, easy-to-check symmetric function expansion \[p[X+Y;t] =p[X;t]+p[Y;t],\] for $p[X;t]$ the symmetric power sum generating function. Moreover, they show that the case is equally complex for the type 2 case. The takeaway from this computation is that one can define a $\Lambda$-ring or a pre-$\Lambda$-ring structure on the noncommutative symmetric functions and then define Adams operators by one of two relationships between the power sums and the elementary (or equivalently homogeneous) noncommutative symmetric functions, but the resulting relationships on the Adams operators, that is, the analogue of Theorem \ref{thm:adams}, can be far more complicated than working with plethysm directly using the elementary noncommutative symmetric functions or (by a dual definition) the monomials in the quasisymmetric functions. A succinct resource for the latter is \cite{BKNMPT01overview}. In theory, one could work in reverse, defining an operation ``plethysm'' on either the type 1 or the type 2 power sums and extending it as a homomorphism to the quasisymmetric or noncommutative symmetric functions. In practice, besides the more complicated plethysm identities on the power sums, most of the natural relationships commonly used in (commuting) symmetric function calculations are naturally generalized by plethysm as defined in \cite{KLT97Noncommutative}. (Here for example, the addition of alphabets corresponds to the coproduct, as in the symmetric function case.) Therefore the perspective of \cite{KLT97Noncommutative} seems to result in a more natural analogue of plethysm than any choice of homomorphism on the quasisymmetric power sums. 
\section{Future directions} We suggest a couple of possible directions for future research. \subsection{Murnaghan--Nakayama style rules} The Murnaghan--Nakayama rule provides a formula for the product of a power sum symmetric function indexed by a single positive integer and a Schur function, expressed in terms of Schur functions. This rule can be thought of as a combinatorial method for computing character values of the symmetric group. Tewari~\cite{Tew16} extends this rule to the noncommutative symmetric functions by producing a rule for the product of a type 1 noncommutative power sum symmetric function and a noncommutative Schur function. LoBue's formula for the product of a quasisymmetric Schur function and a power sum symmetric function indexed by a single positive integer~\cite{LoB15} can be thought of as a quasisymmetric analogue of the Murnaghan--Nakayama rule, although there are several alternative approaches worth exploring. The quasisymmetric power sum analogues, unlike the power sum symmetric functions and the noncommutative power sum symmetric functions, are not multiplicative, meaning a rule for multiplying a quasisymmetric Schur function and a quasisymmetric power sum indexed by a single positive integer does not immediately result in a rule for the product of a quasisymmetric Schur function and an arbitrary quasisymmetric power sum. It is therefore natural to seek a new rule for such a product. Also recall that there are several natural quasisymmetric analogues of the Schur functions; in addition to the quasisymmetric Schur functions, the fundamental quasisymmetric functions and the dual immaculate quasisymmetric functions can be thought of as Schur-like bases for $QSym$. Therefore it is worth investigating a rule for the product of a quasisymmetric power sum and either a fundamental quasisymmetric function or a dual immaculate quasisymmetric function. 
\subsection{Representation theoretic interpretations} The symmetric power sums play an important role in connecting representation theory to symmetric function theory via the Frobenius characteristic map $\mathcal{F}$. In particular, if $C_\lambda$ is a class function in the group algebra of $S_n$, one can define the Frobenius map by $\mathcal{F}(C_\lambda)=\frac{p_\lambda}{z_\lambda}$. With this definition, one can show that $\mathcal{F}$ maps the irreducible representation of $S_n$ indexed by $\lambda$ to the Schur function $s_\lambda$. Krob and Thibon~\cite{krob1997noncommutative} define quasisymmetric and noncommutative symmetric characteristic maps; one takes irreducible representations of the $0$-Hecke algebra to the fundamental quasisymmetric basis, the other takes indecomposable representations of the same algebra to the ribbon basis. It would be interesting to understand if these maps could be defined equivalently and usefully on the quasisymmetric and noncommutative symmetric power sums. \begin{akn} We would like to express our gratitude to BIRS and the organizers of Algebraic Combinatorixx II for bringing the authors together and enabling the genesis of this project, and to the NSF for supporting further collaboration via the second author's grant (DMS-1162010). \end{akn} \providecommand{\bysame}{\leavevmode\hbox to3em{\hrulefill}\thinspace} \providecommand{\MR}{\relax\ifhmode\unskip\space\fi MR } \providecommand{\MRhref}[2]{ \href{http://www.ams.org/mathscinet-getitem?mr=#1}{#2} } \providecommand{\href}[2]{#2} \begin{thebibliography}{10} \bibitem{ABS06} Marcelo Aguiar, Nantel Bergeron, and Frank Sottile, \emph{Combinatorial {H}opf algebras and generalized {D}ehn-{S}ommerville relations}, Compos. Math. \textbf{142} (2006), no.~1, 1--30. 
\MR{2196760} \bibitem{BBSSZ14lift} Chris Berg, Nantel Bergeron, Franco Saliola, Luis Serrano, and Mike Zabrocki, \emph{A lift of the {S}chur and {H}all-{L}ittlewood bases to non-commutative symmetric functions}, Canad. J. Math. \textbf{66} (2014), no.~3, 525--565. \MR{3194160} \bibitem{BKNMPT01overview} Karell Bertet, Daniel Krob, Michel Morvan, J-C Novelli, HD~Phan, and J-Y Thibon, \emph{An overview of $\lambda$-type operations on quasi-symmetric functions}, (2001). \bibitem{der09} Harm Derksen, \emph{Symmetric and quasi-symmetric functions associated to polymatroids}, J. Algebraic Combin. \textbf{30} (2009), no.~1, 43--86. \MR{2519849} \bibitem{Ehr96} R.~Ehrenborg, \emph{On posets and {H}opf algebras}, Advances in Mathematics \textbf{119} (1996), no.~1, 1--25. \bibitem{GKLLRT94} Israel~M Gelfand, Daniel Krob, Alain Lascoux, Bernard Leclerc, Vladimir~S Retakh, and Jean-Yves Thibon, \emph{Noncommutative symmetric functions}, Advances in Mathematics \textbf{112} (1995), no.~2, 218--348. \bibitem{gessel1984multipartite} Ira~M Gessel, \emph{Multipartite {P}-partitions and inner products of skew {S}chur functions}, Contemp. Math. \textbf{34} (1984), 289--301. \bibitem{GriRei14} D.~{Grinberg} and V.~{Reiner}, \emph{{Hopf Algebras in Combinatorics}}, ArXiv e-prints (2014). \bibitem{grinberg2014hopf} Darij Grinberg and Victor Reiner, \emph{Hopf algebras in combinatorics}, arXiv preprint arXiv:1409.8356 (2014). \bibitem{haglund2011quasisymmetric} James Haglund, Kurt Luoto, Sarah Mason, and Stephanie van Willigenburg, \emph{Quasisymmetric {S}chur functions}, Journal of Combinatorial Theory, Series A \textbf{118} (2011), no.~2, 463--490. \bibitem{knutson2006lambda} Donald Knutson, \emph{Lambda-rings and the representation theory of the symmetric group}, vol. 308, Springer, 2006. 
\bibitem{KLT97Noncommutative} Daniel Krob, Bernard Leclerc, and J-Y Thibon, \emph{Noncommutative symmetric functions {II}: {T}ransformations of alphabets}, International Journal of Algebra and Computation \textbf{7} (1997), no.~02, 181--264. \bibitem{krob1997noncommutative} Daniel Krob and Jean-Yves Thibon, \emph{Noncommutative symmetric functions {IV}: {Q}uantum linear groups and {H}ecke algebras at $q=0$}, Journal of Algebraic Combinatorics \textbf{6} (1997), no.~4, 339--376. \bibitem{LeuRem2007Computational} Nicholas~A Loehr and Jeffrey~B Remmel, \emph{A computational and combinatorial expos{\'e} of plethystic calculus}, Journal of Algebraic Combinatorics \textbf{33} (2011), no.~2, 163--198. \bibitem{LMvW} Kurt Luoto, Stefan Mykytiuk, and Stephanie van Willigenburg, \emph{An {I}ntroduction to {Q}uasisymmetric {S}chur {F}unctions: {H}opf {A}lgebras, {Q}uasisymmetric {F}unctions, and {Y}oung {C}omposition {T}ableaux}, Springer, 2013. \bibitem{MalReu95} Claudia Malvenuto and Christophe Reutenauer, \emph{Duality between quasi-symmetric functions and the {S}olomon descent algebra}, J. Algebra \textbf{177} (1995), no.~3, 967--982. \MR{1358493} \bibitem{Rad79} David~E. Radford, \emph{A natural ring basis for the shuffle algebra and an application to group schemes}, J. Algebra \textbf{58} (1979), no.~2, 432--454. \MR{540649} \bibitem{Sta99v2} Richard~P. Stanley, \emph{Enumerative combinatorics. {V}ol. 2}, Cambridge Studies in Advanced Mathematics, vol.~62, Cambridge University Press, Cambridge, 1999, With a foreword by Gian-Carlo Rota and appendix 1 by Sergey Fomin. \MR{1676282} \bibitem{Sta99v1} \bysame, \emph{Enumerative combinatorics. {V}olume 1}, second ed., Cambridge Studies in Advanced Mathematics, vol.~49, Cambridge University Press, Cambridge, 2012. 
\MR{2868112} \bibitem{Tew16} Vasu Tewari, \emph{A {M}urnaghan-{N}akayama rule for noncommutative {S}chur functions}, European J. Combin. \textbf{58} (2016), 118--143. \MR{3530625} \bibitem{LoB15} Janine~LoBue Tiefenbruck, \emph{Combinatorial properties of quasisymmetric schur functions and generalized demazure atoms}, 2015. \end{thebibliography} \end{document}
\begin{document} \title{Optimal control problems with control complementarity constraints} \subtitle{existence results, optimality conditions, and a penalty method} \author{ Christian Clason \footnote{ Universität Duisburg-Essen, Faculty of Mathematics, 45117 Essen, Germany, \email{[email protected]}, \url{https://udue.de/clason} } \and Yu Deng \footnote{ Technische Universität Bergakademie Freiberg, Faculty of Mathematics and Computer Science, 09596 Freiberg, Germany, \email{[email protected]}, \url{http://www.mathe.tu-freiberg.de/nmo/mitarbeiter/yu-deng} }\and Patrick Mehlitz \footnote{ Brandenburgische Technische Universität Cottbus-Senftenberg, Institute of Mathematics, Chair of Optimal Control, 03046 Cottbus, Germany, \email{[email protected]}, \url{https://www.b-tu.de/fg-optimale-steuerung/team/dr-patrick-mehlitz} }\and Uwe Prüfert \footnote{ Technische Universität Bergakademie Freiberg, Faculty of Mathematics and Computer Science, 09596 Freiberg, Germany, \email{[email protected]}, \url{http://www.mathe.tu-freiberg.de/nmo/mitarbeiter/uwe-pruefert} } } \maketitle \begin{abstract} A special class of optimal control problems with complementarity constraints on the control functions is studied. It is shown that such problems possess optimal solutions whenever the underlying control space is a first-order Sobolev space. After deriving necessary optimality conditions of strong stationarity-type, a penalty method based on the Fischer--Burmeister function is suggested and its theoretical properties are analyzed. Finally, the numerical treatment of the problem is discussed and results of computational experiments are presented. \end{abstract} \section{Introduction} Complementarity conditions appear in many mathematical optimization problems arising from real-world applications, and this phenomenon is not restricted to the finite-dimensional setting, see \cite{LuoPangRalph1996,Ulbrich2011,Wachsmuth2015} and the references therein. 
A prominent example for a complementarity problem in function spaces is the optimal control of the obstacle problem, see \cite{HarderWachsmuth2018} for an overview of existing literature. Mathematical problems with complementarity constraints (MPCCs) suffer from an inherent lack of regularity, see \cite[Proposition~1.1]{YeZhuZhu1997} and \cite[Lemma~3.1]{MehlitzWachsmuth2016} for the finite- and infinite-dimensional situation, respectively, which is why the construction of suitable optimality conditions, constraint qualifications, and numerical methods is a challenging task. Using so-called \emph{NCP-functions}, complementarity constraints can be transformed into possibly nonsmooth equality constraints that can be handled by, e.g., Newton-type methods, see \cite{DeLucaFacchineiKanzow2000,FacchineiFischerKanzow1998,Ulbrich2011} and the references therein. A satisfying overview of NCP-functions can be found in \cite{SunQi1999}. One of the most popular NCP-functions is the so-called \emph{Fischer--Burmeister function} $\phi\colon\R^2\to\R$ given by \begin{equation}\label{eq:FB_function} \forall a,b\in\R\colon\quad \phi(a,b):=\sqrt{a^2+b^2}-a-b, \end{equation} see \cite{Fischer1992}. Obviously, one has \[ \forall a,b\in\R\colon\quad\phi(a,b)=0\,\Longleftrightarrow\,a\geq 0\,\land\,b\geq 0\,\land ab=0, \] which (by definition) holds for all NCP-functions. Thus, NCP-functions allow the replacement of a complementarity condition by a single equality constraint. In \cite{Ulbrich2011}, it is shown that NCP-functions can be applied to solve complementarity problems in function space settings as well. In this paper, an optimal control problem with complementarity constraints on the control functions is studied. Control complementarity constraints have been the subject of several recent papers including \cite{ClarkeDePinho2010,GuoYe2016,MehlitzWachsmuth2016b,PangStewart2008}. 
Classically, such constraints arise from reformulating a bilevel optimal control problem with lower level control constraints as a single-level problem using lower level first-order optimality conditions, see \cite[Section~5]{MehlitzWachsmuth2016}. On the other hand, control complementarity constraints are closely related to switching conditions on the control functions, see \cite{ClasonItoKunisch2016,ClasonRundKunischBarnard2016,ClasonRundKunisch2017} and the references therein. Here, it will be shown that such problems possess an optimal solution if the control space is taken as $H^1(\Omega)$. Recently, optimal control problems with control constraints in first-order Sobolev spaces were studied in \cite{DengMehlitzPruefert2018a,DengMehlitzPruefert2018b}. It will also be demonstrated that the Fischer--Burmeister function can be used to design penalty methods that can be exploited to find minimizers of the corresponding optimal control problem. One major advantage of this procedure is that the resulting penalized problems are unconstrained. In contrast, simply penalizing the equilibrium condition and leaving the non-negativity conditions in the constraints would lead to the appearance of Lagrange multipliers from $H^1(\Omega)^\star$ in the necessary optimality conditions of the penalized problems, which would cause some theoretical and numerical difficulties due to the presumed high regularity of the control space, see \cite{DengMehlitzPruefert2018b}. The paper is organized as follows: In the remainder of this section, the basic notation is introduced. Afterwards, the optimal control problem is formally stated and the existence of solutions is discussed in \cref{sec:existence}. Necessary optimality conditions of strong stationarity-type are derived in \cref{sec:optimality_conditions}. \Cref{sec:num_methods} is dedicated to the theoretical investigation of a penalization procedure. 
The practical implementation of the proposed numerical method and some corresponding examples are discussed in \cref{sec:numerics} and \cref{sec:numerical_examples}, respectively. A brief summary as well as some concluding remarks are presented in \cref{sec:conclusions}. \paragraph{Basic notation} For a real Banach space $\mathcal X$, $\norm{\cdot}{\mathcal X}$ denotes its norm. The expression $\mathcal X^\star$ is used to represent the topological dual space of $\mathcal X$. Let $\dual{\cdot}{\cdot}{\mathcal X}\colon\mathcal X^\star\times\mathcal X\to\R$ be the associated dual pairing. For another Banach space $\mathcal Y$, $\linop{\mathcal X}{\mathcal Y}$ represents the Banach space of all bounded, linear operators which map from $\mathcal X$ to $\mathcal Y$. For $\mathtt{F}\in\linop{\mathcal X}{\mathcal Y}$, $\mathtt{F}^\star\in\linop{\mathcal Y^\star}{\mathcal X^\star}$ denotes its adjoint. If $\mathcal X\subset\mathcal Y$ holds true while the associated identity mapping from $\mathcal X$ into $\mathcal Y$ is continuous, then $\mathcal X$ is said to be continuously embedded into $\mathcal Y$, denoted by $\mathcal X\hookrightarrow\mathcal Y$. Recall that a set $A\subset\mathcal X$ is said to be weakly sequentially closed if all the limit points of weakly convergent sequences contained in $A$ belong to $A$ as well, and that any closed, convex set is weakly sequentially closed by Mazur's lemma. For any $A\subset\mathcal{X}$, define the polar cone \begin{align*} A^\circ&:=\left\{x^\star\in\mathcal X^\star\,\middle|\,\forall x\in A\colon\,\dual{x^\star}{x}{\mathcal X}\leq 0\right\}, \shortintertext{as well as the annihilator} A^\perp&:=\left\{x^\star\in\mathcal X^\star\,\middle|\,\forall x\in A\colon\,\dual{x^\star}{x}{\mathcal X}=0\right\}. \end{align*} By definition, $A^\perp=A^\circ\cap (-A)^\circ$ holds true. 
It is well known that $A^\circ$ is a nonempty, closed, convex cone while $A^\perp$ is a closed subspace of $\mathcal X^\star$. For an arbitrary vector $x\in\mathcal X$, set $x^\perp:=\{x\}^\perp$ for the sake of brevity. Finally, if a function $F\colon \mathcal X\to\mathcal Y$ is Fr\'{e}chet differentiable at $\bar x\in\mathcal X$, then the bounded, linear operator $F'(\bar x)\in\operatorname{lin}op{\mathcal X}{\mathcal Y}$ denotes its Fr\'{e}chet derivative at $\bar x$. \paragraph{Function spaces} For an arbitrary bounded domain $\Omega\subset\R^d$ and $p\in[1,\infty]$, $L^p(\Omega)$ denotes the usual Lebesgue space of (equivalence classes of) Lebesgue measurable functions mapping from $\Omega$ to $\R$, which is equipped with the usual norm. It is well known that for $p\in[1,\infty)$, the space $L^p(\Omega)^\star$ is isometric to $L^{p'}(\Omega)$ for $p'\in(1,\infty]$ such that $1/p+1/p'=1$. The associated dual pairing is given by \[ \forall u\in L^p(\Omega)\,\forall v\in L^{p'}(\Omega)\colon\quad \dual{v}{u}{L^p(\Omega)}:=\int_\Omega u(x)v(x)\mathrm d x. \] Recall that $L^2(\Omega)$ is a Hilbert space whose dual $L^2(\Omega)^\star$ will be identified with $L^2(\Omega)$ by means of Riesz' representation theorem. For an arbitrary function $u\in L^1(\Omega)$, $\operatorname{supp} u:=\{x\in\Omega\,|\,u(x)\neq 0\}$ denotes the support of $u$. Supposing that $A\subset\Omega$ is a Lebesgue measurable set, $\chi_A\colon\Omega\to\R$ represents the characteristic function of $A$ which is $1$ for all $x\in A$ and $0$ else. Clearly, for a bounded domain $\Omega$ and $p\in[1,\infty)$, the relation $\norm{\chi_A}{L^p(\Omega)}=|A|^{1/p}$ is obtained where $|A|$ denotes the Lebesgue measure of $A$. The Banach space of all weakly differentiable functions from $L^2(\Omega)$ whose weak derivatives belong to $L^2(\Omega)$ is denoted by $H^1(\Omega)$. 
It is equipped with the usual norm \[ \forall y\in H^1(\Omega)\colon \quad \norm{y}{H^1(\Omega)}:=\left(\norm{y}{L^2(\Omega)}^2 +\mathsmaller\sum\nolimits_{i=1}^d\norm{\partial_{x_i}y}{L^2(\Omega)}^2\right)^{1/2}. \] Clearly, $H^1(\Omega)$ is a Hilbert space. However, its dual $H^1(\Omega)^\star$ will not be identified with $H^1(\Omega)$ so that $H^1(\Omega)$, $L^2(\Omega)$, and $H^1(\Omega)^\star$ form a so-called Gelfand triple, i.e., they satisfy the relations $H^1(\Omega)\hookrightarrow L^2(\Omega)\hookrightarrow H^1(\Omega)^\star$. A detailed study of duality in Sobolev spaces can be found in \cite[Section~3]{AdamsFournier2003}. Whenever $\Omega$ satisfies the so-called cone condition, see \cite[Section~4]{AdamsFournier2003}, then the embedding $H^1(\Omega)\hookrightarrow L^2(\Omega)$ is compact, see \cite[Theorem~6.3]{AdamsFournier2003}. In this paper, $\mathtt{E}\in\linop{H^1(\Omega)}{L^2(\Omega)}$ is used to denote the latter. For later use, let $L^2_+(\Omega)\subset L^2(\Omega)$ and $H^1_+(\Omega)\subset H^1(\Omega)$ denote the nonempty, closed, and convex cones of almost everywhere nonnegative functions in $L^2(\Omega)$ and $H^1(\Omega)$, respectively. \section{Problem setting and existence of optimal solutions}\label{sec:existence} In this work, the model \textbf{o}ptimal \textbf{c}ontrol problem with \textbf{c}ontrol \textbf{c}omplementarity \textbf{c}onstraints \begin{equation}\label{eq:OCCC}\tag{OC$^4$} \left\{ \begin{aligned} \tfrac{1}{2}\norm{\mathtt{D}[y]-y_\text d}{\mathcal D}^2+J(u,v)&\,\rightarrow\,\min_{y,u,v}\\ \mathtt{A}[y]-\mathtt{B}[u]-\mathtt{C}[v]&\,=\,0\\ (u,v)&\,\in\,\C \end{aligned} \right. 
\end{equation} is studied, where for some $\alpha_1,\alpha_2\geq 0$ and $\varepsilon\geq 0$, \[ \forall u,v\in H^1(\Omega) \colon\quad J(u,v):=\tfrac{\alpha_1}{2}\norm{u}{L^2(\Omega)}^2+\tfrac{\alpha_2}{2}\norm{v}{L^2(\Omega)}^2 +\tfrac{\varepsilon}{2}\left(\norm{u}{H^1(\Omega)}^2+\norm{v}{H^1(\Omega)}^2\operatorname{ri}ght), \] and $\C$ denotes the complementarity set \[ \C:=\left\{(w,z)\in H^1(\Omega)^2\,\middle|\,0\leq w(x)\perp z(x)\geq 0\text{ a.e. on }\Omega\operatorname{ri}ght\}. \] Observing that $\mathtt{A}$ can represent a differential operator, one can interpret \eqref{eq:OCCC} as an optimal control problem with complementarity constraints on the control functions that can be used to model switching requirements on the controls. In the context of ordinary differential equations, optimal control problems with mixed control-state complementarity constraints have been studied in \cite{ClarkeDePinho2010,GuoYe2016,PangStewart2008} recently. In \cite{HarderWachsmuth2018,HarderWachsmuth2018b,MehlitzWachsmuth2016b}, the interested reader can find some theoretical investigations of optimization problems with complementarity constraints with respect to the function spaces $L^2(\Omega)$ and $H^1_0(\Omega)$. Recently, optimal control problems with switching constraints related to \eqref{eq:OCCC} have been studied in \cite{ClasonRundKunischBarnard2016,ClasonRundKunisch2017}. For the remainder of this work, the following standing assumptions on the problem \eqref{eq:OCCC} are postulated. \begin{assumption}\label{ass:OCass} The domain $\Omega\subset\R^d$ is nonempty, bounded, and satisfies the cone condition. Its boundary will be denoted by $\operatorname{bd}\Omega$. Let the observation space $\mathcal D$ as well as the state space $\mathcal Y$ be Hilbert spaces. The target $y_\textup{d}\in \mathcal D$ will be fixed. 
The operator $\mathtt{A}\in\operatorname{lin}op{\mathcal Y}{\mathcal Y^\star}$ is an isomorphism while $\mathtt{B},\mathtt{C}\in\operatorname{lin}op{H^1(\Omega)}{\mathcal Y^\star}$ and $\mathtt{D}\in\operatorname{lin}op{\mathcal Y}{\mathcal D}$ are arbitrarily chosen. Finally, $\varepsilon>0$ holds. \end{assumption} Let $\mathtt{S}\in\operatorname{lin}op{H^1(\Omega)^2}{\mathcal D}$ be the control-to-observation operator which maps any pair of controls $(u,v)\in H^1(\Omega)^2$ to $\mathtt{D}[y]$, where $y\in\mathcal Y$ is the associated uniquely determined solution of the state equation \[ \mathtt{A}[y]-\mathtt{B}[u]-\mathtt{C}[v]=0. \] Then, $\mathtt{S}$ is a well-defined continuous linear operator since $\mathtt{A}$ is assumed to be an isomorphism. In the following, the existence of optimal solutions to \eqref{eq:OCCC} is discussed. First, the overall $H^1$-setting needed for the further theoretical treatment of \eqref{eq:OCCC} is analyzed in \cref{sec:existenceH1}. Some comments on the setting where controls come from $L^2(\Omega)$ are presented in \cref{sec:existenceL2}. \subsection{First-order Sobolev spaces}\label{sec:existenceH1} Since the objective function of \eqref{eq:OCCC} is continuously Fr\'{e}chet differentiable, convex, and bounded from below, the only critical point for existence is the weak sequential closedness of the complementarity set $\C$. \begin{lemma}\label{lem:closedness_of_complementarity_set} The set $\C$ is closed. \end{lemma} \begin{proof} Let $\{(u_k,v_k)\}_{k\in\N}\subset\C$ be a sequence converging to $(\bar u,\bar v)\in H^1(\Omega)^2$. Due to the continuity of the embedding $H^1(\Omega)\hookrightarrow L^2(\Omega)$, the strong convergences $u_k\to\bar u$ and $v_k\to\bar v$ hold in $L^2(\Omega)$. In particular, these convergences hold (at least along a subsequence) pointwise almost everywhere. Due to the closedness of the set $\{(a,b)\in\R^2\,|\,0\leq a\perp b\geq 0\}$, the desired result follows. 
\end{proof} Although $\C$ is a nonconvex set, the compactness of the embedding $H^1(\Omega)\hookrightarrow L^2(\Omega)$ can be used in order to show that $\C$ is weakly sequentially closed. \begin{lemma}\label{lem:weak_sequential_closedness_of_complementarity_set} The set $\C$ is weakly sequentially closed. \end{lemma} \begin{proof} First, a similar proof as for \cref{lem:closedness_of_complementarity_set} shows that the complementarity set in $L^2(\Omega)$ given by \begin{equation}\label{eq:complementarity_set_L2} \begin{aligned}[t] \widetilde\C :=& \left\{(w,z)\in L^2(\Omega)^2\,\middle|\,0\leq w(x)\perp z(x)\geq 0\text{ a.e. on }\Omega\operatorname{ri}ght\}\\ =& \left\{(w,z)\in L^2_+(\Omega)^2\,\middle|\, \dual{w}{z}{L^2(\Omega)}=0\operatorname{ri}ght\} \end{aligned} \end{equation} is closed as well. Next, choose a sequence $\{(u_k,v_k)\}_{k\in\N}\subset\C$ converging weakly to $(\bar u,\bar v)\in H^1(\Omega)^2$. Exploiting $u_k\rightharpoonup\bar u$ and $v_k\rightharpoonup\bar v$ as well as the compactness of the embedding $ H^1(\Omega)\hookrightarrow L^2(\Omega)$, there is a subsequence of $\{(u_k,v_k)\}_{k\in\N}$ that converges strongly to $(\bar u,\bar v)$ in $L^2(\Omega)^2$. Due to the closedness of $\widetilde\C$ in $L^2(\Omega)^2$, $(\bar u,\bar v)\in\widetilde\C\cap H^1(\Omega)^2$ holds, and, consequently, $(\bar u,\bar v)$ is already an element of $\C$. Thus, $\C$ is weakly sequentially closed. \end{proof} As a corollary, the existence of optimal solutions to \eqref{eq:OCCC} is obtained. \begin{corollary}\label{cor:existence} The problem \eqref{eq:OCCC} possesses an optimal solution. \end{corollary} \begin{proof} The objective functional of \eqref{eq:OCCC} is continuously Fr\'{e}chet differentiable, convex, and (due to $\varepsilon>0$) coercive. Furthermore, by \cref{lem:weak_sequential_closedness_of_complementarity_set}, the complementarity set $\C$ is weakly sequentially closed, and so is the feasible set induced by the PDE constraint. 
Hence, the claim follows by application of Tonelli's direct method. \end{proof} \subsection{Lebesgue spaces}\label{sec:existenceL2} In the remainder of this section, the existence of optimal controls in $L^2(\Omega)$ is investigated. In this case, the corresponding model problem is given by \begin{equation} \label{eq:problem_l2} \tag{OC$_{L^2}$} \left\{ \begin{aligned} \tfrac{1}{2}\norm{\mathtt{D}[y]-y_\text d}{\mathcal D}^2 +\tfrac{\alpha_1}{2}\norm{u}{L^2(\Omega)}^2 +\tfrac{\alpha_2}{2}\norm{v}{L^2(\Omega)}^2&\,\rightarrow\,\min_{y,u,v}\\ \mathtt{A}[y]-\tilde{\mathtt{B}}[u]-\tilde{\mathtt{C}}[v]&\,=\,0\\ (u,v) &\,\in\,\widetilde \C \end{aligned} \right. \end{equation} where the complementarity set $\widetilde\C$ has been defined in \eqref{eq:complementarity_set_L2}. Furthermore, $\tilde{\mathtt{B}},\tilde{\mathtt{C}}\in\linop{L^2(\Omega)}{\mathcal Y^\star}$ need to be chosen. As already shown in the proof of \cref{lem:weak_sequential_closedness_of_complementarity_set}, $\widetilde\C$ is closed. However, $\widetilde \C$ is in general \emph{not} weakly sequentially closed, as the following example shows. \begin{example}\label{ex:counterex_weakly_sequentially_closed} For any $k\in\mathbb N$, define the two open sets \[ \begin{aligned} P_k&:=\left\{x\in\R^d\,\middle|\,\mathsmaller\prod\nolimits_{j=1}^d\sin(k\pi x_j)>0\right\},\\ Q_k&:=\left\{x\in\R^d\,\middle|\,\mathsmaller\prod\nolimits_{j=1}^d\sin(k\pi x_j)<0\right\}. \end{aligned} \] Now, set $u_k:=\chi_{\Omega\cap P_k}$ and $v_k:=\chi_{\Omega\cap Q_k}$. Obviously, $(u_k,v_k)\in\widetilde\C$ holds true for all $k\in\mathbb N$. Furthermore, the sequence $\{(u_k,v_k)\}_{k\in\mathbb N}\subset L^2(\Omega)^2$ converges weakly to the point $(\tfrac{1}{2}\chi_\Omega,\tfrac{1}{2}\chi_\Omega)$, which does not belong to $\widetilde\C$. Thus, $\widetilde\C$ is not weakly sequentially closed. 
\end{example} It may still happen that there exists an optimal solution of the complementarity-constrained problem \eqref{eq:problem_l2}, as illustrated by the following example. For $\mathcal D:=L^2(\Omega)$ and $\mathcal Y:=H^1(\Omega)$, consider the elliptic optimal control problem \begin{equation}\label{eq:OCCC_easy} \left\{ \begin{aligned} \tfrac{1}{2}\norm{\mathtt{E}[y]-y_\text d}{L^2(\Omega)}^2 +\tfrac{\alpha_1}{2}\norm{u}{L^2(\Omega)}^2 +\tfrac{\alpha_2}{2}\norm{v}{L^2(\Omega)}^2&\,\operatorname{ri}ghtarrow\,\min_{y,u,v}&&&\\ -\nabla \cdot(\mathbf C\nabla y)+\mathbf a y&\,=\,\chi_{\Omega_u}u+\chi_{\Omega_v}v&\quad&\text{a.e. on }\Omega&\\ \vec{\mathbf n}\cdot(\mathbf C\nabla y)+\mathbf qy&\,=\,0&&\text{a.e. on }\operatorname{bd}\Omega&\\ (u,v)&\,\in\,\widetilde \C&&& \end{aligned} \operatorname{ri}ght. \end{equation} where we recall that $\mathtt{E}$ represents the natural embedding from $H^1(\Omega)$ into $L^2(\Omega)$, $\alpha_1,\alpha_2>0$ are constants, and $\mathbf C\in L^\infty(\Omega;S^d(\R))$ (where $S^d(\R)$ denotes the set of real symmetric $d\times d$ matrices) satisfies the condition of uniform ellipticity, i.e., \begin{equation}\label{eq:uniform_ellipticity} \exists c_0>0\;\forall x\in \Omega \;\forall \xi \in \R^d\colon \quad \xi^\top \mathbf{C}(x)\xi\geq c_0\vert\xi\vert_2^2. \end{equation} Moreover, $\mathbf a\in L^\infty(\Omega)$ and $\mathbf q\in L^\infty(\operatorname{bd}\Omega)$ are nonnegative and satisfy $\norm{\mathbf a}{L^\infty(\Omega)}+\norm{\mathbf q}{L^\infty(\operatorname{bd}\Omega)}>0$, and $\Omega_u,\Omega_v\subset\Omega$ are measurable sets of positive measure satisfying $\Omega_u\cup\Omega_v=\Omega$. Here, the PDE constraint is interpreted in the weak sense. It is well known that the associated differential operator $\mathtt A$ is elliptic, see \cite[Section~6]{Evans2010}, and, thus, an isomorphism. \begin{proposition}\label{prop:trivial_existence} The problem \eqref{eq:OCCC_easy} possesses an optimal solution. 
\end{proposition} \begin{proof} Assume without loss of generality that $\alpha_1\leq \alpha_2$; the other case can be handled analogously. Consider then the surrogate optimal control problem \begin{equation}\label{eq:surrogateOC} \left\{ \begin{aligned} \tfrac{1}{2}\norm{\mathtt{E}[y]-y_\text d}{L^2(\Omega)}^2 +\tfrac{1}{2}\norm{ \left(\sqrt{\alpha_1}\chi_{\Omega_u}+\sqrt{\alpha_2}\chi_{\Omega_v\setminus\Omega_u}\operatorname{ri}ght)z }{L^2(\Omega)}^2&\,\operatorname{ri}ghtarrow\,\min_{y,z}&&&\\ -\nabla \cdot(\mathbf C\nabla y)+\mathbf a y&\,=\,z&\quad&\text{a.e. on }\Omega&\\ \vec{\textbf{n}}\cdot(\mathbf C\nabla y)+\mathbf qy&\,=\,0&&\text{a.e. on }\operatorname{bd}\Omega&\\ z&\,\in\,L^2_+(\Omega).&&& \end{aligned} \operatorname{ri}ght. \end{equation} Note that its objective is equivalent to \[ H^1(\Omega)\times L^2(\Omega) \ni(y,z)\mapsto \tfrac{1}{2}\norm{\mathtt{E}[y]-y_\text d}{L^2(\Omega)}^2 +\tfrac{\alpha_1}{2}\norm{\chi_{\Omega_u}z}{L^2(\Omega)}^2 +\tfrac{\alpha_2}{2}\norm{\chi_{\Omega_v\setminus\Omega_u}z}{L^2(\Omega)}^2 \in\R. \] The ellipticity of the underlying PDE in \eqref{eq:surrogateOC} implies that the associated control-to-observation operator $\check{\mathtt{S}}\colon L^2(\Omega)\to L^2(\Omega)$ is linear and continuous, see \cite[Section~6.2]{Evans2010}. Observing that $\Omega_u\cup\Omega_v=\Omega$ holds by assumption, the reduced objective functional \[ L^2(\Omega)\ni z \mapsto \tfrac{1}{2}\norm{\check{\mathtt{S}}[z]-y_\text d}{L^2(\Omega)}^2 +\tfrac{\alpha_1}{2}\norm{\chi_{\Omega_u}z}{L^2(\Omega)}^2 +\tfrac{\alpha_2}{2}\norm{\chi_{\Omega_v\setminus\Omega_u}z}{L^2(\Omega)}^2\in\R \] is convex, continuous, and coercive. This shows that the optimal control problem \eqref{eq:surrogateOC} possesses an optimal solution $(\bar y,\bar z)\in H^1(\Omega)\times L^2(\Omega)$ with objective value $\bar m\in\R$.\\ Let $(y,u,v)\in H^1(\Omega)\times L^2(\Omega)\times L^2(\Omega)$ be feasible to \eqref{eq:OCCC_easy}. 
Defining $z:=\chi_{\Omega_u}u+\chi_{\Omega_v\setminus \Omega_u}v$, $(y,z)$ is feasible for \eqref{eq:surrogateOC}. Then, the estimate \begin{multline*} \tfrac{1}{2}\norm{\mathtt{E}[y]-y_\text d}{L^2(\Omega)}^2 +\tfrac{\alpha_1}{2}\norm{u}{L^2(\Omega)}^2 +\tfrac{\alpha_2}{2}\norm{v}{L^2(\Omega)}^2\\ \begin{aligned} &\geq\tfrac{1}{2}\norm{\mathtt{E}[y]-y_\text{d}}{L^2(\Omega)}^2 +\tfrac{\alpha_1}{2}\norm{\chi_{\Omega_u}u}{L^2(\Omega)}^2 +\tfrac{\alpha_2}{2}\norm{\chi_{\Omega_v\setminus\Omega_u}v}{L^2(\Omega)}^2\\ &=\tfrac{1}{2}\norm{\mathtt{E}[y]-y_\text{d}}{L^2(\Omega)}^2 +\tfrac{\alpha_1}{2}\norm{\chi_{\Omega_u}z}{L^2(\Omega)}^2 +\tfrac{\alpha_2}{2}\norm{\chi_{\Omega_v\setminus\Omega_u}z}{L^2(\Omega)}^2 \geq\bar m \end{aligned} \end{multline*} is obtained. In particular, the objective value of \eqref{eq:OCCC_easy} is bounded from below by $\bar m$. Define $\bar u:=\chi_{\Omega_u}\bar z$ and $\bar v:=\chi_{\Omega_v\setminus\Omega_u}\bar z$. Then, $(\bar y,\bar u,\bar v)$ is feasible to \eqref{eq:OCCC_easy} since $\bar y$ is the state associated with $\bar z$ and $\chi_{\Omega_u}\bar u+\chi_{\Omega_v}\bar v=\bar z$ holds true. Moreover, the relation \begin{multline*} \tfrac{1}{2}\norm{\mathtt{E}[\bar y]-y_\text d}{L^2(\Omega)}^2 +\tfrac{\alpha_1}{2}\norm{\bar u}{L^2(\Omega)}^2 +\tfrac{\alpha_2}{2}\norm{\bar v}{L^2(\Omega)}^2\\ =\tfrac{1}{2}\norm{\mathtt{E}[\bar y]-y_\text d}{L^2(\Omega)}^2 +\tfrac{\alpha_1}{2}\norm{\chi_{\Omega_u}\bar z}{L^2(\Omega)}^2 +\tfrac{\alpha_2}{2}\norm{\chi_{\Omega_v\setminus\Omega_u}\bar z}{L^2(\Omega)}^2=\bar m \end{multline*} follows. Thus, $(\bar y,\bar u,\bar v)$ is an optimal solution of \eqref{eq:OCCC_easy}. \end{proof} Note that the proof of \cref{prop:trivial_existence} yields a strategy for the solution of \eqref{eq:OCCC_easy} by means of standard arguments from optimal control by solving the surrogate problem \eqref{eq:surrogateOC}. 
\section{Optimality conditions}\label{sec:optimality_conditions} Consider the so-called state-\emph{reduced} problem \begin{equation}\label{eq:OCCC_reduced} \left\{ \begin{aligned} \tfrac{1}{2}\norm{\mathtt{S} [u,v]-y_\text{d}}{\mathcal D}^2+J(u,v)&\,\operatorname{ri}ghtarrow\,\min_{u,v}\\ (u,v)&\,\in\,\C \end{aligned} \operatorname{ri}ght. \end{equation} which is equivalent to \eqref{eq:OCCC} by definition of the control-to-observation operator $\mathtt{S} $. Using the embedding operator $\mathtt{E}\colon H^1(\Omega)\to L^2(\Omega)$, \eqref{eq:OCCC_reduced} can be stated equivalently as \begin{equation}\label{eq:OCCC_as_MPCC} \left\{ \begin{aligned} \tfrac{1}{2}\norm{\mathtt{S} [u,v]-y_\text{d}}{\mathcal D}^2+J(u,v)&\,\operatorname{ri}ghtarrow\,\min_{u,v}\\ \mathtt{E}[u]&\,\in\, L^2_+(\Omega)\\ \mathtt{E}[v]&\,\in\, L^2_+(\Omega)\\ \dual{\mathtt{E}[u]}{\mathtt{E}[v]}{L^2(\Omega)}&\,=\,0 \end{aligned} \operatorname{ri}ght. \end{equation} which is a generalized MPCC in the Banach space $L^2(\Omega)$. It was shown in \cite[Lemma~3.1]{MehlitzWachsmuth2016} that Robinson's constraint qualification, see \cite[Section~2.3.4]{BonnansShapiro2000} for its definition, some discussion, and suitable references to the literature, does not hold at the feasible points of this problem. Moreover, since $\mathtt{E}$ is not surjective, the constraint qualifications needed to show that locally optimal solutions of this problem satisfy MPCC-tailored stationarity conditions (e.g., the weak or strong stationarity conditions) are not satisfied, see \cite{MehlitzWachsmuth2016,Wachsmuth2015} for details. On the other hand, it is still possible to derive necessary optimality conditions for \eqref{eq:OCCC_reduced} using a standard trick from finite-dimensional MPCC theory: Define appropriate surrogate problems which do not contain a complementarity constraint anymore and handle them with the classical KKT conditions in Banach spaces. 
In order to formulate an appropriate surrogate problem, let $(\bar u,\bar v)\in H^1(\Omega)^2$ be a feasible point of \eqref{eq:OCCC_reduced} and define the measurable sets \begin{align} I^{+0}(\bar u,\bar v)&:=\{x\in\Omega\,|\,\bar u(x)>0\,\land\,\bar v(x)=0\}, \label{eq:I+0}\\ I^{0+}(\bar u,\bar v)&:=\{x\in\Omega\,|\,\bar u(x)=0\,\land\,\bar v(x)>0\}, \label{eq:I0+}\\ I^{00}(\bar u,\bar v)&:=\{x\in\Omega\,|\,\bar u(x)=0\,\land\,\bar v(x)=0\}. \label{eq:I00} \end{align} Noting that $L^2(\Omega)$ is a space of equivalence classes, it should be mentioned that these sets are well-defined up to sets of Lebesgue measure zero. This will be taken into account in the following. If $(\bar u,\bar v)$ is a locally optimal solution of \eqref{eq:OCCC_reduced}, then it is also a locally optimal solution of the auxiliary problems \begin{equation}\label{eq:rNLP1}\tag{rNLP$_{\bar u}$} \left\{ \begin{aligned} \tfrac{1}{2}\norm{\mathtt{S} [u,v]-y_\text{d}}{\mathcal D}^2+J(u,v)&\,\rightarrow\,\min_{u,v}&&&\\ u&\,\geq\,0&\quad&\text{a.e. on }I^{+0}(\bar u,\bar v)&\\ u&\,=\,0&\quad&\text{a.e. on }I^{0+}(\bar u,\bar v)\cup I^{00}(\bar u,\bar v)&\\ v&\,\geq 0&&\text{a.e. on }I^{0+}(\bar u,\bar v)\cup I^{00}(\bar u,\bar v)&\\ v&\,=\,0&&\text{a.e. on }I^{+0}(\bar u,\bar v)& \end{aligned} \right. \end{equation} and \begin{equation}\label{eq:rNLP2}\tag{rNLP$_{\bar v}$} \left\{ \begin{aligned} \tfrac{1}{2}\norm{\mathtt{S} [u,v]-y_\text{d}}{\mathcal D}^2+J(u,v)&\,\rightarrow\,\min_{u,v}&&&\\ u&\,\geq\,0&\quad&\text{a.e. on }I^{+0}(\bar u,\bar v)\cup I^{00}(\bar u,\bar v)&\\ u&\,=\,0&\quad&\text{a.e. on }I^{0+}(\bar u,\bar v)&\\ v&\,\geq 0&&\text{a.e. on }I^{0+}(\bar u,\bar v)&\\ v&\,=\,0&&\text{a.e. on }I^{+0}(\bar u,\bar v)\cup I^{00}(\bar u,\bar v) \end{aligned} \right. \end{equation} since their respective feasible sets are smaller than $\C$ but contain $(\bar u,\bar v)$. 
By standard notion, see \cite{PangFukushima1999,ScheelScholtes2000,Wachsmuth2015}, \eqref{eq:rNLP1} and \eqref{eq:rNLP2} are referred to as \emph{restricted nonlinear problems}. Furthermore, the corresponding \emph{relaxed nonlinear problem} is introduced by means of \begin{equation}\label{eq:RNLP}\tag{RNLP} \left\{ \begin{aligned} \tfrac{1}{2}\norm{\mathtt{S} [u,v]-y_\text{d}}{\mathcal D}^2+J(u,v)&\,\operatorname{ri}ghtarrow\,\min_{u,v}&&&\\ u&\,\geq\,0&\quad&\text{a.e. on }I^{+0}(\bar u,\bar v)\cup I^{00}(\bar u,\bar v)&\\ u&\,=\,0&\quad&\text{a.e. on }I^{0+}(\bar u,\bar v)&\\ v&\,\geq 0&&\text{a.e. on }I^{0+}(\bar u,\bar v)\cup I^{00}(\bar u,\bar v) &\\ v&\,=\,0&&\text{a.e. on }I^{+0}(\bar u,\bar v). \end{aligned} \operatorname{ri}ght. \end{equation} Observe that the feasible points $(u,v)\in H^1(\Omega)^2$ of \eqref{eq:RNLP} do not necessarily satisfy the complementarity condition $(u,v)\in\C$. Combining standard techniques from finite-dimensional MPCC theory and optimization in Banach spaces, the following result is obtained, see also \cite[Theorems~3.1 and 5.2]{Wachsmuth2015}. It should be noted that due to the appearance of the two control variables $u$ and $v$ in \eqref{eq:OCCC_reduced}, there will be two Lagrange multipliers $\mu$ and $\nu$ corresponding to $u$ and $v$, respectively, in the stationarity system as well. In particular, the pair $(\mu,\nu)\in H^1(\Omega)^\star\times H^1(\Omega)^\star$ may be identified with a functional from $(H^1(\Omega)^2)^\star$. \begin{theorem}\label{thm:S_stationarity} Let $(\bar u,\bar v)\in H^1(\Omega)^2$ be a locally optimal solution of \eqref{eq:OCCC_reduced}. Then, there exist multipliers $\mu,\nu\in H^1(\Omega)^\star$ satisfying \begin{subequations}\label{eq:SSt} \begin{align} \label{eq:SSt1} &0=\mathtt{S} ^\star\bigl[\mathtt{S} [\bar u,\bar v]-y_\textup{d}\bigr]+J'(\bar u,\bar v)+(\mu,\nu),\\ \label{eq:SSt2} &\mu\in \left\{ z\in H^1(\Omega)\,\middle| \begin{aligned} &z\geq 0&&\text{a.e. 
on }I^{+0}(\bar u,\bar v)\cup I^{00}(\bar u,\bar v)\\ &z=0&&\text{a.e. on }I^{0+}(\bar u,\bar v) \end{aligned} \operatorname{ri}ght\}^\circ,\\ \label{eq:SSt3} &\dual{\mu}{\bar u}{ H^1(\Omega)}=0,\\ \label{eq:SSt4} &\nu\in \left\{ z\in H^1(\Omega)\,\middle| \begin{aligned} &z\geq 0&&\text{a.e. on }I^{0+}(\bar u,\bar v)\cup I^{00}(\bar u,\bar v)\\ &z=0&&\text{a.e. on }I^{+0}(\bar u,\bar v) \end{aligned} \operatorname{ri}ght\}^\circ,\\ \label{eq:SSt5} &\dual{\nu}{\bar v}{ H^1(\Omega)}=0. \end{align} \end{subequations} \end{theorem} \begin{proof} Introducing the cones \begin{align*} \mathcal K_{+0}&:=\left\{z\in H^1(\Omega)\,\middle| \begin{aligned} &z\geq 0&&\text{a.e. on }I^{+0}(\bar u,\bar v)\\ &z=0 &&\text{a.e. on }I^{0+}(\bar u,\bar v)\cup I^{00}(\bar u,\bar v) \end{aligned}\operatorname{ri}ght\},\\ \mathcal K_{0+,00}&:=\left\{z\in H^1(\Omega)\,\middle| \begin{aligned} &z\geq 0&&\text{a.e. on }I^{0+}(\bar u,\bar v)\cup I^{00}(\bar u,\bar v)\\ &z=0&&\text{a.e. on }I^{+0}(\bar u,\bar v) \end{aligned}\operatorname{ri}ght\}, \end{align*} \eqref{eq:rNLP1} is equivalent to \[ \left\{ \begin{aligned} \tfrac{1}{2}\norm{\mathtt{S} [u,v]-y_\text{d}}{\mathcal D}^2+J(u,v)&\,\operatorname{ri}ghtarrow\,\min_{u,v}\\ u&\,\in\,\mathcal K_{+0}\\ v&\,\in\,\mathcal K_{0+,00}. \end{aligned} \operatorname{ri}ght. \] Since $(\bar u,\bar v)$ is a locally optimal solution of \eqref{eq:rNLP1}, there exist multipliers $\mu^1,\nu^1\in H^1(\Omega)^\star$ which satisfy the corresponding KKT conditions \begin{equation}\label{eq:KKTrNLP1} \left\{ \begin{aligned} &0=\mathtt{S} ^\star\bigl[\mathtt{S} [\bar u,\bar v]-y_\text{d}\bigr]+J'(\bar u,\bar v)+(\mu^1,\nu^1),\\ &\mu^1\in\mathcal K_{+0}^\circ\cap\bar u^\perp,\\ &\nu^1\in\mathcal K_{0+,00}^\circ\cap\bar v^\perp, \end{aligned} \operatorname{ri}ght. \end{equation} see \cite[Theorem~3.9]{BonnansShapiro2000}. 
Considering \eqref{eq:rNLP2} in a similar way, there exist $\mu^2,\nu^2\in H^1(\Omega)^\star$ which satisfy \begin{equation}\label{eq:KKTrNLP2} \left\{ \begin{aligned} &0=\mathtt{S} ^\star\bigl[\mathtt{S} [\bar u,\bar v]-y_\text{d}\bigr]+J'(\bar u,\bar v)+(\mu^2,\nu^2),\\ &\mu^2\in\mathcal K_{+0,00}^\circ\cap\bar u^\perp,\\ &\nu^2\in\mathcal K_{0+}^\circ\cap\bar v^\perp, \end{aligned} \right. \end{equation} where \begin{align*} \mathcal K_{+0,00}&:=\left\{z\in H^1(\Omega)\,\middle| \begin{aligned} &z\geq 0&&\text{a.e. on }I^{+0}(\bar u,\bar v)\cup I^{00}(\bar u,\bar v)\\ &z=0 &&\text{a.e. on }I^{0+}(\bar u,\bar v) \end{aligned}\right\},\\ \mathcal K_{0+}&:=\left\{z\in H^1(\Omega)\,\middle| \begin{aligned} &z\geq 0&&\text{a.e. on }I^{0+}(\bar u,\bar v)\\ &z=0&&\text{a.e. on }I^{+0}(\bar u,\bar v)\cup I^{00}(\bar u,\bar v) \end{aligned}\right\}. \end{align*} Combining the respective first condition in \eqref{eq:KKTrNLP1} and \eqref{eq:KKTrNLP2} yields $\mu^1=\mu^2$ and $\nu^1=\nu^2$. Since $\mathcal K_{+0,00}^\circ\cap\bar u^\perp$ is a subset of $\mathcal K_{+0}^\circ\cap\bar u^\perp$ while $\mathcal K_{0+,00}^\circ\cap\bar v^\perp$ is a subset of $\mathcal K_{0+}^\circ\cap\bar v^\perp$, the desired result is obtained by setting $\mu:=\mu^2$ and $\nu:=\nu^1$. \end{proof} Note that the system \eqref{eq:SSt} coincides with the KKT conditions of \eqref{eq:RNLP}. In this regard, it is reasonable to call the conditions \eqref{eq:SSt} a \emph{strong stationarity-type system}. \begin{remark}\label{rem:multipliers} It is difficult to give an explicit characterization of the multipliers $\mu,\nu\in H^1(\Omega)^\star$. Assume that $\Omega$ has a Lipschitz boundary. Introducing $\mathcal H_\mathcal A:=\{z\in H^1(\Omega)\,|\,z=0\;\text{a.e.
on }\mathcal A\}$ for a fixed measurable set $\mathcal A\subset \Omega$ and using the relation $H^1_+(\Omega)^\circ=H^1(\Omega)^\star\cap\mathcal M_-(\overline\Omega)$, see \cite[Lemma~3.1]{DengMehlitzPruefert2018b}, it holds that \[ \mu \in\left(H^1_+(\Omega)\cap\mathcal H_{I^{0+}(\bar u,\bar v)}\right)^\circ =\operatorname{cl}\left(H^1(\Omega)^\star\cap\mathcal M_-(\overline\Omega)+\mathcal H_{I^{0+}(\bar u,\bar v)}^\perp\right) \] where $\mathcal M_-(\overline\Omega)$ denotes the set of all finite, nonpositive Borel measures on $\overline\Omega$. A similar result can be obtained to characterize $\nu$. However, due to the appearance of the closure as well as the annihilator of the subspace $\mathcal H_{I^{0+}(\bar u,\bar v)}$, this characterization is of limited practical use; in particular, it cannot be deduced that $\mu$ and $\nu$ are measures. Applying the machinery of capacity theory, see \cite{AttouchButtazzoMichaille2006,BonnansShapiro2000}, a more advanced approach to the characterization of $\mu$ and $\nu$ can be attempted. For this purpose, one could strengthen the constraints in \eqref{eq:rNLP1}, \eqref{eq:rNLP2}, and \eqref{eq:RNLP} to hold \emph{quasi-everywhere} on the respective subdomains, i.e., the respective conditions hold up to sets of $H^1$-capacity zero. Then, one needs to find explicit expressions for the polar cone associated with sets of type \[ \left\{z\in H^1(\Omega)\,\middle|\, \begin{aligned} z\geq 0&\quad\text{quasi-everywhere on }\mathcal A\\ z=0 &\quad\text{quasi-everywhere on }\Omega\setminus \mathcal A \end{aligned} \right\} \] where $\mathcal A\subset\Omega$ is measurable. The price one has to pay when using this approach is a less restrictive stationarity system than \eqref{eq:SSt}. In particular, the polar cones from \eqref{eq:SSt2} and \eqref{eq:SSt4} would be replaced by larger ones.
\end{remark} In order to state necessary optimality conditions of strong stationarity-type that avoid the appearance of multipliers and allow a numerical implementation, one can exploit the definition of the polar cone in the system \eqref{eq:SSt}. \begin{corollary}\label{cor:consequences_of_S_Stationarity} Let $(\bar u,\bar v)\in H^1(\Omega)^2$ be a locally optimal solution of \eqref{eq:OCCC_reduced}. Then, the condition \[ 0=\dual{\mathtt{S} [\bar u,\bar v]-y_\textup{d}}{\mathtt{S} [\bar u,\bar v]}{\mathcal D}+J'(\bar u,\bar v)[\bar u,\bar v] \] holds, and for any pair $(z_u,z_v)\in H^1_+(\Omega)\times H^1_+(\Omega)$, \[ \left. \begin{aligned} &\operatorname{supp} z_u\subset I^{+0}(\bar u,\bar v)\cup I^{00}(\bar u,\bar v)\\ &\operatorname{supp} z_v\subset I^{0+}(\bar u,\bar v)\cup I^{00}(\bar u,\bar v) \end{aligned} \right\} \,\Longrightarrow\, \dual{\mathtt{S} ^\star\bigl[\mathtt{S} [\bar u,\bar v]-y_\textup{d}\bigr] +J'(\bar u,\bar v)}{(z_u,z_v)}{ H^1(\Omega)^2}\geq 0. \] \end{corollary} \begin{proof} Due to \cref{thm:S_stationarity}, there exist $\mu,\nu\in H^1(\Omega)^\star$ satisfying \eqref{eq:SSt}. Testing \eqref{eq:SSt1} with $(\bar u,\bar v)$ while exploiting \eqref{eq:SSt3}, \eqref{eq:SSt5}, and the definition of the adjoint operator, the first statement of the corollary follows. The second statement is a consequence of \eqref{eq:SSt1}, \eqref{eq:SSt2}, and \eqref{eq:SSt4}. \end{proof} \begin{remark}\label{rem:MPCC_S_Stationarity} According to standard terminology for MPCCs, the necessary optimality conditions \eqref{eq:SSt} are of strong stationarity-type, see, e.g., \cite[Definition~5.1]{Wachsmuth2015} and \cite[Definition~2.7]{Ye2005}.
Recall that a feasible point $(\bar u,\bar v)\in H^1(\Omega)^2$ of \eqref{eq:OCCC_reduced} and thus of \eqref{eq:OCCC_as_MPCC} is a strongly stationary point of \eqref{eq:OCCC_as_MPCC} in the sense of \cite[Definition~5.1]{Wachsmuth2015} if and only if there are multipliers $(\mu,\nu)\in L^2(\Omega)^2$ satisfying \begin{subequations}\label{eq:classical_S_stationarity} \begin{align} \label{eq:classical_S_stationarity_1} &0=\mathtt{S} ^\star\bigl[\mathtt{S} [\bar u,\bar v]-y_\textup d\bigr]+J'(\bar u,\bar v)+(\mathtt{E},\mathtt{E})^\star[\mu,\nu],\\ \label{eq:classical_S_stationarity_2} &\mu=0\quad \text{a.e. on }I^{+0}(\bar u,\bar v),\\ \label{eq:classical_S_stationarity_3} &\nu=0\quad \text{a.e. on }I^{0+}(\bar u,\bar v),\\ \label{eq:classical_S_stationarity_4} &\mu\leq 0\,\land\,\nu\leq 0\quad \text{a.e. on }I^{00}(\bar u,\bar v), \end{align} \end{subequations} see also \cite[Definition~4.1]{MehlitzWachsmuth2016b}. If $\C$ is replaced by $\widetilde \C$ and $\varepsilon =0$ is taken in the definition of $J$ (in which case $\mathtt{E}$ is the identity mapping), the systems \eqref{eq:SSt} and \eqref{eq:classical_S_stationarity} are equivalent. However, for $\C$ and $\varepsilon >0$, the necessary optimality conditions \eqref{eq:SSt} are weaker than \eqref{eq:classical_S_stationarity}, which can be seen as follows: It is clear that whenever $(\tilde\mu,\tilde\nu)\in L^2(\Omega)^2$ satisfy the classical strong stationarity conditions \eqref{eq:classical_S_stationarity}, then the multipliers $\mu:=\mathtt{E}^\star[\tilde\mu]$ and $\nu:=\mathtt{E}^\star[\tilde\nu]$ satisfy \eqref{eq:SSt}. On the other hand, by means of \cref{thm:S_stationarity}, the multipliers appearing in the system \eqref{eq:SSt} may come from $H^1(\Omega)^\star\setminus L^2(\Omega)$ in general. \end{remark} \begin{remark}\label{rem:applicability} In this section, only the property of $\mathtt{S}$ to be a bounded, linear operator has been exploited. 
Thus, the optimality conditions obtained in \cref{thm:S_stationarity} and \cref{cor:consequences_of_S_Stationarity} are applicable in many different situations, e.g., in case where $\mathtt{S}$ is the control-to-observation operator associated with a linear elliptic equation where $u$ and $v$ only operate on some subdomain, or for a linear parabolic equation where the controls $u$ and $v$ only depend on time. The latter problems are closely related to the switching-constrained problems examined in \cite{ClasonItoKunisch2016,ClasonRundKunischBarnard2016,ClasonRundKunisch2017}. It should be noted that similar necessary optimality conditions can be derived if $\mathtt{S}\colon H^1(\Omega)^2\to\mathcal D$ is Fr\'{e}chet differentiable but not necessarily linear. \end{remark} \section{Penalization of complementarity constraints}\label{sec:num_methods} In order to find optimal solutions of \eqref{eq:OCCC}, an obvious idea would be to penalize the violation of the equilibrium condition \begin{equation}\label{eq:equilibrium_condition} u(x)v(x)\,=\,0\quad\text{a.e. on }\Omega \end{equation} in \eqref{eq:OCCC}. This is related to the approaches used in \cite{ClasonItoKunisch2016,ClasonRundKunischBarnard2016,ClasonRundKunisch2017} for the treatment of switching-constrained optimal control problems. However, the resulting penalized problem would still involve inequality constraints for the controls in $H^1(\Omega)$, and thus the associated KKT conditions would involve Lagrange multipliers from $H^1(\Omega)^\star\cap\mathcal M_-(\overline\Omega)$, see \cite[Section~5]{DengMehlitzPruefert2018b} for details. This, however, may provoke theoretical and numerical difficulties that should be avoided here. To get around these issues, the penalization of the overall complementarity constraint using the Fischer--Burmeister function is proposed here, which leads to penalized problems in which the only constraint is the state equation. 
\subsection{Penalty term}\label{sec:penalty_terms} Let $\phi:\R^2\to\R$ denote the Fischer--Burmeister function introduced in \eqref{eq:FB_function} and let the mapping $\Phi\colon L^2(\Omega)^2\to L^2(\Omega)$ be the associated Nemytskii operator defined by \[ \forall (w,z)\in L^2(\Omega)^2\,\forall x\in\Omega\colon\quad \Phi(w,z)(x):=\phi(w(x),z(x)). \] This operator is well-defined since for all $w,z\in L^2(\Omega)$, one has \[ \begin{aligned} \norm{\Phi(w,z)}{L^2(\Omega)} &\leq \left(\int_\Omega\bigl(w^2(x)+z^2(x)\bigr)\mathrm d x\right)^{1/2}+\norm{w}{L^2(\Omega)}+\norm{z}{L^2(\Omega)}\\ &\leq \left(\int_\Omega\bigl(|w(x)|+|z(x)|\bigr)^2\mathrm dx\right)^{1/2}+\norm{w}{L^2(\Omega)}+\norm{z}{L^2(\Omega)}\\ &\leq 2\left(\norm{w}{L^2(\Omega)}+\norm{z}{L^2(\Omega)}\right)<+\infty, \end{aligned} \] i.e., $\Phi$ maps from $L^2(\Omega)^2$ to $L^2(\Omega)$, see also \cite[Section~3.3]{Ulbrich2011}. For a detailed introduction to the theory of superposition operators in Lebesgue spaces, the interested reader is referred to \cite{AppellZabrejko1990,GoldbergKampowskyTroeltzsch1992}. The violation of the complementarity constraint $(u,v)\in\C$ can then be penalized using the functional $F\colon H^1(\Omega)^2\to\R^+_0$ defined by \begin{equation}\label{eq:penalty} \forall (u,v)\in H^1(\Omega)^2\colon\quad F(u,v):=\tfrac{1}{2}\int_\Omega\phi^2(u(x),v(x))\mathrm dx=\tfrac{1}{2}\norm{\Phi(\mathtt{E}[u],\mathtt{E}[v])}{L^2(\Omega)}^2. \end{equation} Recall that $\mathtt{E}\in\mathbb{L}[H^1(\Omega), L^2(\Omega)]$ represents the natural embedding $H^1(\Omega)\hookrightarrow L^2(\Omega)$. It is obvious that $\Phi$ cannot be Fr\'{e}chet differentiable since $\phi$ is not smooth. In contrast, $F$ is a continuously Fr\'{e}chet differentiable mapping. \begin{lemma}\label{lem:subdifferential_L2_penalization} Let $(\bar u,\bar v)\in H^1(\Omega)^2$ be arbitrarily chosen.
Then, $F$ is continuously Fr\'{e}chet differentiable at $(\bar u,\bar v)$. The associated Fr\'{e}chet derivative is given by \[ \forall(\delta^u,\delta^v)\in H^1(\Omega)^2\colon \quad F'(\bar u,\bar v)[\delta^u,\delta^v] = \int_\Omega\phi(\bar u(x),\bar v(x))\bigl(\eta_{\bar u}(x)\delta^u(x)+\eta_{\bar v}(x)\delta^v(x)\bigr)\mathrm dx, \] where $\eta_{\bar u},\eta_{\bar v}\in L^\infty(\Omega)$ are defined by \begin{subequations}\label{eq:char_subdiff_8} \begin{align} \forall x\in\Omega\colon\quad \eta_{\bar u}(x) & = \begin{cases} \tfrac{\bar u(x)}{\sqrt{\bar u(x)^2+\bar v(x)^2}}-1 & \text{if }x\notin I^{00}(\bar u,\bar v),\\ 0 & \text{if }x\in I^{00}(\bar u,\bar v), \end{cases}\\ \forall x\in\Omega\colon\quad \eta_{\bar v}(x) & = \begin{cases} \tfrac{\bar v(x)}{\sqrt{\bar u(x)^2+\bar v(x)^2}}-1 & \text{if }x\notin I^{00}(\bar u,\bar v),\\ 0 & \text{if }x\in I^{00}(\bar u,\bar v), \end{cases} \end{align} \end{subequations} and $I^{00}(\bar u,\bar v)$ is defined by \eqref{eq:I00}. \end{lemma} \begin{proof} Let $f\colon\R^2\to\R$ be given by \[ \forall (a,b)\in\R^2\colon\quad f(a,b):=\tfrac{1}{2}\phi(a,b)^2. \] One can check that $f$ is continuously differentiable with gradient \[ \forall (a,b)\in\R^2\colon\quad \nabla f(a,b)= \begin{cases} \phi(a,b)\begin{pmatrix}\tfrac{a}{\sqrt{a^2+b^2}}-1\\\tfrac{b}{\sqrt{a^2+b^2}}-1\end{pmatrix} & \text{if }(a,b)\neq(0,0),\\ \begin{pmatrix}0\\0\end{pmatrix} & \text{if }(a,b)=(0,0). \end{cases} \] Clearly, the Nemytskii-operator $\mathcal F$ associated with $f$ maps from $L^2(\Omega)^2$ to $L^1(\Omega)$, since $\Phi$ maps $L^2(\Omega)^2$ to $L^2(\Omega)$. Noting that $a/\sqrt{a^2+b^2}\in[-1,1]$ and $b/\sqrt{a^2+b^2}\in[-1,1]$ hold for all $(a,b)\in\R^2\setminus\{(0,0)\}$, the Nemytskii operator associated with $\nabla f$ maps from $L^2(\Omega)^2$ to $L^2(\Omega)^2$. Applying \cite[Theorems~4 and 7]{GoldbergKampowskyTroeltzsch1992}, $\mathcal F\colon L^2(\Omega)^2\to L^1(\Omega)$ is continuously Fr\'{e}chet differentiable. 
Furthermore, \[ \forall x\in\Omega\colon \quad \mathcal F'(w,z)[\delta_w,\delta_z](x) = \nabla_a f(w(x),z(x))\delta_w(x)+\nabla_b f(w(x),z(x))\delta_z(x) \] for any $(w,z),(\delta_w,\delta_z)\in L^2(\Omega)^2$. Define $\mathtt{L}\in\mathbb{L}[L^1(\Omega), \R]$ by $\mathtt{L}[w]:=\int_\Omega w(x)\mathrm dx$. Then, $F=\mathtt{L}\circ\mathcal F\circ(\mathtt{E},\mathtt{E})$. Since all involved mappings are continuously Fr\'{e}chet differentiable, the assertion of the lemma follows by exploiting the chain rule for Fr\'{e}chet differentiable functions, see \cite[Theorem~2.20]{Troeltzsch2009}. \end{proof} \begin{remark}\label{rem:other_penalty_terms} As the penalty functional $F$ is smooth, it cannot lead to exact penalization of the complementarity constraints, see, e.g., \cite[Theorem~5.9]{GeigerKanzow2002}. Although \cref{sec:numerical_examples} demonstrates that a penalty method using $F$ behaves well in numerical practice, in principle any other NCP-function, see \cite{SunQi1999} for an overview, can be used to construct similar penalty methods. One possible alternative would be to use $F_1\colon H^1(\Omega)^2\to\R^+_0$ given by \[ \forall (u,v)\in H^1(\Omega)^2\colon \quad F_1(u,v):=\int_\Omega|\phi(u(x),v(x))|\mathrm dx =\norm{\tilde\Phi(\mathtt{E}[u],\mathtt{E}[v])}{L^1(\Omega)}, \] where $\tilde\Phi\colon L^2(\Omega)^2\to L^1(\Omega)$ is the mapping $\mathtt{E}_{L^2\to L^1}\circ\Phi$ where $\mathtt{E}_{L^2\to L^1}$ represents the continuous embedding $L^2(\Omega)\hookrightarrow L^1(\Omega)$. This leads to a nonsmooth but Lipschitz continuous mapping. Another approach would be to exploit the so-called \emph{smoothed} Fischer--Burmeister function $\phi_\theta\colon\R^2\to\R$ given by \[ \forall (a,b)\in\R^2\colon \quad \phi_\theta(a,b):=\sqrt{a^2+b^2+2\theta}-a-b, \] which is continuously differentiable for any $\theta>0$, see \cite{Kanzow1996}.
Using \cite[Theorems~4 and~7]{GoldbergKampowskyTroeltzsch1992}, one can check that the associated Nemytskii operator $\tilde \Phi_\theta\colon L^2(\Omega)^2\to L^1(\Omega)$ is continuously Fr\'{e}chet differentiable. Define $F_{1,\theta}\colon H^1(\Omega)^2\to\R^+_0$ by means of \[ \forall (u,v)\in H^1(\Omega)^2\colon \quad F_{1,\theta}(u,v):=\int_\Omega|\phi_\theta(u(x),v(x))|\mathrm dx =\norm{\tilde \Phi_\theta(\mathtt{E}[u],\mathtt{E}[v])}{L^1(\Omega)}. \] Clearly, $F_{1,0}$ corresponds to $F_1$. For $\theta>0$ this approach can be seen as a mixture of a penalty and a smoothing method. However, it needs to be noted that $F_{1,\theta}$ is nonsmooth even for positive values of $\theta$. \end{remark} \subsection{Existence, convergence results, and optimality conditions}\label{sec:convergence_results} Using the penalty functional $F$ defined in \eqref{eq:penalty} to penalize the complementarity constraints in \eqref{eq:OCCC_reduced} leads to the family of penalized problems \begin{equation}\label{eq:penalized_problem}\tag{P$_k$} \tfrac{1}{2}\norm{\mathtt{S} [u,v]-y_\text{d}}{\mathcal D}^2+J(u,v)+\sigma_k F(u,v)\,\rightarrow\,\min_{u,v}, \end{equation} where $\{\sigma_k\}_{k\in\N}\subset\R^+$ is a sequence of positive real numbers tending to infinity as $k\to\infty$. The first question is about the existence of solutions of \eqref{eq:penalized_problem}. \begin{proposition}\label{prop:existence_P_sigma} For any $\sigma_k>0$, the penalized problem \eqref{eq:penalized_problem} possesses an optimal solution. \end{proposition} \begin{proof} Let $\{(u_l,v_l)\}_{l\in\N}\subset H^1(\Omega)^2$ be a minimizing sequence for \eqref{eq:penalized_problem} and let $\bar m\in\overline{\R}$ be the corresponding infimal value. Since $J$ is, due to $\varepsilon>0$, coercive and bounded from below, this sequence is bounded in $H^1(\Omega)^2$ and, thus, possesses a weakly convergent subsequence (without relabeling) with weak limit $(\bar u,\bar v)\in H^1(\Omega)^2$.
Due to the compactness of $H^1(\Omega)\hookrightarrow L^2(\Omega)$, the strong convergences $u_l\to\bar u$ and $v_l\to\bar v$ hold in $L^2(\Omega)$. Noting that the operator $\Phi$ is continuous on $L^2(\Omega)^2$, see \cite[Theorem~4]{GoldbergKampowskyTroeltzsch1992}, it follows that \[ \lim_{l\rightarrow\infty}F(u_l,v_l)=F(\bar u,\bar v). \] Thus, the continuity of $\mathtt{S} $ and the weak lower semicontinuity of norms imply that \begin{multline*} \tfrac{1}{2}\norm{\mathtt{S} [\bar u,\bar v]-y_\text{d}}{\mathcal D}^2+J(\bar u,\bar v)+\sigma_kF(\bar u,\bar v)\\ \begin{aligned} &\leq \liminf_{l\rightarrow\infty}\left(\tfrac{1}{2}\norm{\mathtt{S} [u_l,v_l]-y_\text{d}}{\mathcal D}^2+J(u_l,v_l)\right) +\sigma_k\lim_{l\rightarrow\infty}F(u_l,v_l)\\ &=\liminf_{l\rightarrow\infty}\left(\tfrac{1}{2}\norm{\mathtt{S} [u_l,v_l]-y_\text{d}}{\mathcal D}^2+J(u_l,v_l) +\sigma_kF(u_l,v_l)\right)=\bar m, \end{aligned} \end{multline*} i.e., $(\bar u,\bar v)$ is a global minimizer of \eqref{eq:penalized_problem}. \end{proof} Next, the convergence of solutions of \eqref{eq:penalized_problem} as $\sigma_k\to\infty$ is addressed. \begin{proposition}\label{prop:strong_convergence_of_surrogate_solutions} Fix a sequence $\{\sigma_k\}_{k\in\N}\subset\R^+$ tending to infinity as $k\to\infty$. For any $k\in\N$, let $(u_k,v_k)\in H^1(\Omega)^2$ be a global minimizer of \eqref{eq:penalized_problem}. Then, $\{(u_k,v_k)\}_{k\in\N}$ contains a subsequence converging strongly in $H^1(\Omega)^2$ to a point $(\bar u,\bar v)\in\C$ such that $(\bar y,\bar u,\bar v)$, where $\bar y\in\mathcal Y$ is the state associated with $(\bar u,\bar v)$, is an optimal solution of \eqref{eq:OCCC}. Moreover, any subsequence of $\{(u_k,v_k)\}_{k\in\N}$ converging weakly to some $(\bar u,\bar v)$ in $ H^1(\Omega)^2$ produces a global minimizer of \eqref{eq:OCCC} in the above sense.
\end{proposition} \begin{proof} For any $k\in\N$, the estimate \[ \tfrac{1}{2}\norm{\mathtt{S} [u_k,v_k]-y_\text{d}}{\mathcal D}^2+J(u_k,v_k) +\sigma_kF(u_k,v_k)\leq \tfrac{1}{2}\norm{y_\text{d}}{\mathcal D}^2 \] follows from the feasibility of $(0,0)\in H^1(\Omega)^2$ for \eqref{eq:penalized_problem}. Thus, since $J$ is coercive and bounded from below while $F$ only takes nonnegative values, $\{(u_k,v_k)\}_{k\in\N}$ is bounded and therefore contains a weakly convergent subsequence (which, as all further subsequences, will not be relabeled). Recalling the compactness of $H^1(\Omega)\hookrightarrow L^2(\Omega)$, the sequence $\{(u_k,v_k)\}_{k\in\N}$ converges strongly to $(\bar u,\bar v)$ in $L^2(\Omega)^2$ and thus pointwise almost everywhere at least along a subsequence. Furthermore, the relation \[ 0\leq \norm{\Phi(\mathtt{E}[u_k],\mathtt{E}[v_k])}{L^2(\Omega)}\leq \sqrt{\tfrac{1}{\sigma_k}}\norm{y_\text{d}}{\mathcal D}\to 0 \] is obtained as $k\rightarrow\infty$. Consequently, at least along a subsequence, $\{\Phi(\mathtt{E}[u_k],\mathtt{E}[v_k])\}_{k\in\N}$ converges pointwise a.e. to $0$. By definition of $\Phi$, $(\bar u,\bar v)\in\C$ follows. Now choose $(u,v)\in\C$ arbitrarily. Since this point is feasible to \eqref{eq:penalized_problem}, it follows for any $k\in\N$ that \[ \begin{aligned} \tfrac{1}{2}\norm{\mathtt{S} [u,v]-y_\text{d}}{\mathcal D}^2+J(u,v) & \geq \tfrac{1}{2}\norm{\mathtt{S} [u_k,v_k]-y_\text{d}}{\mathcal D}^2+J(u_k,v_k)+\sigma_kF(u_k,v_k)\\ &\geq \tfrac{1}{2}\norm{\mathtt{S} [u_k,v_k]-y_\text{d}}{\mathcal D}^2+J(u_k,v_k).
\end{aligned} \] Thus, using the weak lower semicontinuity of the functionals, one obtains \begin{equation*} \begin{aligned} \tfrac{1}{2}\norm{\mathtt{S} [\bar u,\bar v]-y_\text d}{\mathcal D}^2+J(\bar u,\bar v) &\leq\liminf_{k\to\infty}\left(\tfrac{1}{2}\norm{\mathtt{S} [u_k,v_k]-y_\text d}{\mathcal D}^2+J(u_k,v_k)\right)\\ &\leq\limsup_{k\to\infty}\left(\tfrac{1}{2}\norm{\mathtt{S} [u_k,v_k]-y_\text d}{\mathcal D}^2+J(u_k,v_k)\right)\\ &\leq \limsup_{k\to\infty}\left(\tfrac{1}{2}\norm{\mathtt{S} [u_k,v_k]-y_\text d}{\mathcal D}^2 +J(u_k,v_k)+\sigma_kF(u_k,v_k)\right)\\ &\leq\tfrac{1}{2}\norm{\mathtt{S} [u,v]-y_\text d}{\mathcal D}^2+J(u,v) \end{aligned} \end{equation*} for all $(u,v)\in\C$. Consequently, $(\bar u,\bar v)$ is a global minimizer of the state-reduced problem \eqref{eq:OCCC_reduced}. Choosing $u:=\bar u$ and $v:=\bar v$ in the above estimate, one obtains \[ \tfrac{1}{2}\norm{\mathtt{S} [u_k,v_k]-y_\text d}{\mathcal D}^2+J(u_k,v_k) \to\tfrac{1}{2}\norm{\mathtt{S} [\bar u,\bar v]-y_\text d}{\mathcal D}^2+J(\bar u,\bar v), \] and $J(u_k,v_k)\to J(\bar u,\bar v)$ follows by \cref{lem:sum_of_real_sequences}. Since $u_k\to\bar u$ and $v_k\to\bar v$ in $L^2(\Omega)$, the definition of $J$ and $\varepsilon>0$ imply that \[ \norm{u_k}{H^1(\Omega)}^2+\norm{v_k}{H^1(\Omega)}^2 \to \norm{\bar u}{H^1(\Omega)}^2+\norm{\bar v}{H^1(\Omega)}^2. \] Now, applying \cref{lem:sum_of_real_sequences} once more yields \[ \norm{u_k}{H^1(\Omega)}^2\to\norm{\bar u}{H^1(\Omega)}^2,\qquad \norm{v_k}{H^1(\Omega)}^2\to\norm{\bar v}{H^1(\Omega)}^2. \] Combining this with the weak convergences $u_k\rightharpoonup\bar u$ and $v_k\rightharpoonup\bar v$ in $H^1(\Omega)$, the convergences $u_k\to\bar u$ and $v_k\to\bar v$ in $H^1(\Omega)$ follow since the latter is a Hilbert space. This yields the first assertion.
If $\{(u_k,v_k)\}_{k\in\N}$ contains a subsequence converging weakly to some $(\bar u,\bar v)\in H^1(\Omega)^2$ in $H^1(\Omega)^2$, then the above arguments can be partially repeated to show that $(\bar u,\bar v)$ is a global minimizer of \eqref{eq:OCCC_reduced}. This completes the proof. \end{proof} An obvious advantage of \eqref{eq:penalized_problem} is that it is a smooth and unconstrained problem, allowing the straightforward derivation of necessary optimality conditions. Hence, the following result is a direct consequence of Fermat's rule and \cref{lem:subdifferential_L2_penalization}. \begin{proposition}\label{prop:NOC_surrogate_L2_penalization} For fixed $\sigma_k>0$, let $(u_k,v_k)\in H^1(\Omega)^2$ be a locally optimal solution of \eqref{eq:penalized_problem}. Then, the corresponding functions $\eta_{u_k},\eta_{v_k}\in L^\infty(\Omega)$ defined as in \eqref{eq:char_subdiff_8} satisfy \[ 0=\mathtt{S} ^\star\bigl[\mathtt{S} [u_k,v_k]-y_\textup{d}\bigr] +J'(u_k,v_k) +\sigma_k(\mathtt{E},\mathtt{E})^\star[\Phi(\mathtt{E}[u_k],\mathtt{E}[v_k])\eta_{u_k},\Phi(\mathtt{E}[u_k],\mathtt{E}[v_k])\eta_{v_k}]. \] \end{proposition} \begin{remark} Similar results as in this section can be shown for the penalty terms induced by the nonsmooth functionals $F_1$ and $F_{1,\theta_k}$ given in \cref{rem:other_penalty_terms} using the continuity of the associated Nemytskii operators $\tilde \Phi$ and $\tilde \Phi_{\theta_k}$ as well as calculus rules for Clarke's generalized derivative, see \cite{Clarke:1990a}. Obtaining a convergence result as in \cref{prop:strong_convergence_of_surrogate_solutions} for $F_{1,\theta_k}$ additionally requires choosing $\sigma_k$ and $\theta_k$ such that $\sigma_k\sqrt{\theta_k}\to 0$ as $k\to\infty$.
\end{remark} \begin{remark} Using the boundedness of the solutions and passing to subsequences, it is possible by pointwise inspection to take the limit $k\to\infty$ in the optimality system from \cref{prop:NOC_surrogate_L2_penalization} and derive the existence of multipliers $\mu,\nu\in H^1(\Omega)^\star$ which satisfy the polarity relations from \cref{thm:S_stationarity} with respect to the index sets $I^{+0}(\bar u,\bar v)$ and $I^{0+}(\bar u,\bar v)$. This can be seen as a natural extension of the so-called weak stationarity concept, see \cite[Definition~4.1]{MehlitzWachsmuth2016b}, to \eqref{eq:OCCC_reduced}. However, it does not seem to be possible to infer the polarity relations for $\mu$ and $\nu$ on $I^{00}(\bar u,\bar v)$ found in the strong stationarity system from \cref{thm:S_stationarity}. Noting that our penalty approach is related to Scholtes' relaxation technique for the numerical solution of finite-dimensional MPCCs which yields so-called Clarke-stationary points in general, see \cite[Section~3.1]{HoheiselKanzowSchwartz2013} for details, this observation does not seem to be too surprising since Clarke-stationarity is much weaker than strong stationarity. \end{remark} \section{Numerical treatment}\label{sec:numerics} This section deals with the numerical implementation of the penalization technique described in \cref{sec:num_methods} following a ``first-discretize-then-optimize approach'' based on a finite element discretization. In order to concentrate on the complementarity constraint, the state equation is chosen as the elliptic model problem \begin{equation}\label{eq:PDE}\tag{PDE} \left\{ \begin{aligned} -\nabla\cdot(\mathbf{C}\nabla y)+\mathbf{a}y&\,=\,\mathbf bu+\mathbf cv && \text{a.e. on } \Omega\\ \vec{\mathbf{n}}\cdot(\mathbf{C}\nabla y)&\,=\,0&&\text{a.e. on }\operatorname{bd} \Omega. \end{aligned} \right.
\end{equation} Here, $\Omega\subset \R^d$ is a domain with Lipschitz boundary $\operatorname{bd}(\Omega)$, $\mathbf{C}\in L^\infty(\Omega;S^d(\R))$ satisfies the condition of uniform ellipticity \eqref{eq:uniform_ellipticity}, and the functions $\mathbf{a}, \mathbf{b}, \mathbf{c}\in L^\infty(\Omega)$ do not vanish while $\mathbf a$ is additionally nonnegative, see also \cref{sec:existenceL2}. Set $\mathcal{D}:=L^2(\Omega)$ and $\mathcal{Y}:=H^1(\Omega)$. The operator $\mathtt{D}:=\mathtt{E}$ represents the natural embedding $H^1(\Omega)\hookrightarrow L^2(\Omega)$. Note that the weak formulation of the associated state equation can be written in the abstract form $\mathtt{A}[y]-\mathtt{B}[u]-\mathtt{C}[v]=0$, where the bounded, linear operators $\mathtt{A},\mathtt{B},\mathtt{C} \in \mathbb{L}[H^1(\Omega), H^1(\Omega)^\star] $ are given for all $y,u,v,w\in H^1(\Omega)$ as \begin{align*} \dual{\mathtt{A}[y]}{w}{H^1(\Omega)}&:=\int_{\Omega}(\mathbf{C}(x)\nabla y(x))\cdot \nabla w(x)\mathrm{d}x+\int_{\Omega}\mathbf{a}(x)y(x)w(x)\mathrm{d}x, \\ \dual{\mathtt{B}[u]}{w}{H^1(\Omega)}&:=\int_{\Omega}\mathbf{b}(x)u(x) w(x)\mathrm{d}x,\\ \dual{\mathtt{C}[v]}{w}{H^1(\Omega)}&:=\int_{\Omega}\mathbf{c}(x)v(x) w(x)\mathrm{d}x. \end{align*} It can be checked that the operator $\mathtt{A}$ is elliptic and self-adjoint under the postulated assumptions, see, e.g., \cite[Section~6]{Evans2010}. The operators $\mathtt{B}$ and $\mathtt{C}$ are self-adjoint as well. \subsection{Finite element discretization}\label{sec:discrete_NOC} While the discretization of \eqref{eq:penalized_problem} is rather standard, some notation needs to be introduced for the sake of the following subsection. Let the domain $\Omega$ be discretized by a suitable tessellation $\Omega_\Delta$, where $n_p$ denotes the number of vertices and $n_e$ the number of elements in $\Omega_\Delta$. All functions from $H^1(\Omega)$ ($y$, $u$, $v$, and $p$) are represented by finite elements from $\mathcal{P}^1(\Omega_\Delta)$. 
The corresponding coefficient vectors are denoted by $\vec{y}$, $\vec{u}$, $\vec{v}$, and $\vec{p}$, respectively. The set of test functions $H^1(\Omega)$ is represented by the same basis functions. The coefficient functions $\mathbf{C}$, $\mathbf{a}$, $\mathbf{b}$, and $\mathbf{c}$ as well as the desired state $y_\text d$ are assumed to be chosen from $L^\infty(\Omega)$ and discretized by functions from $\mathcal{P}^0(\Omega_\Delta)$; their discrete approximations are denoted by $C$, $\vec{a}$, $\vec{b}$, $\vec{c}$, and $\vec{y}_\text{d}$, respectively. The matrix $E_{10}\in\R^{n_e\times n_p}$ realizes the discrete projection of $\mathcal{P}^1$ approximations into $\mathcal P^0$ and corresponds to the natural embedding operator $\mathtt{E}\colon H^1(\Omega)\to L^2(\Omega)$. The mass matrices $M_0(1)$ and $M_1(1)$ correspond to the finite element spaces $\mathcal{P}^0(\Omega_\Delta)$ and $\mathcal{P}^1(\Omega_\Delta)$, respectively. The stiffness matrix associated with the constant coefficient $1$ (i.e., $\mathbf{C}$ is the identity in $\R^{d\times d}$) is denoted by $K(1)$. A detailed description of this discretization and the specific forms of these matrices can be found in \cite{DengMehlitzPruefert2018a}. The main difficulty when discretizing \eqref{eq:penalized_problem} lies in the handling of the penalty term $F(u,v)$. Since the Fischer--Burmeister function is penalized with respect to the space $L^2(\Omega)$, the mass matrix $M_0(1)$ can be used to evaluate integrals over all elements. Interpreting powers and square roots of a vector in a componentwise fashion, a reasonable discretization of $F(u,v)$ is given by \[ \tilde F(\vec{u},\vec{v}) = \tfrac{1}{2} \left(\sqrt{(E_{10}\vec{u})^2+(E_{10}\vec{v})^2}-E_{10}\vec{u}-E_{10}\vec{v}\right)^{\top} M_0(1) \left(\sqrt{(E_{10}\vec{u})^2+(E_{10}\vec{v})^2}-E_{10}\vec{u}-E_{10}\vec{v}\right) \] for all $\vec u,\vec v\in\R^{n_p}$.
The appearance of $E_{10}$ is motivated by the proof of \cref{lem:subdifferential_L2_penalization}, where the penalty functional $F$ has been represented as the composition of three differentiable mappings: The natural embedding $\mathtt{E}\colon H^1(\Omega)\to L^2(\Omega)$, the Nemytskii-operator associated with the squared Fischer--Burmeister function (as a mapping from $L^2(\Omega)^2$ to $L^1(\Omega)$), and a linear integral operator. This discretization strategy leads to the finite-dimensional problem associated with \eqref{eq:penalized_problem} given by \begin{equation}\label{eq:discretized_penalized_problem} \left\{ \begin{aligned} \tfrac{1}{2}(E_{10}\vec{y}-\vec{y}_\text{d})^{\top}M_0(1)(E_{10}\vec{y}-\vec{y}_\text{d}) +\tfrac{\alpha_1}{2}\vec{u}^{\top}M_1(1)\vec{u}+\tfrac{\alpha_2}{2}\vec{v}^{\top}M_1(1)\vec{v}\qquad&\\ +\tfrac{\varepsilon}{2}\vec{u}^{\top}(M_1(1)+K(1))\vec{u} +\tfrac{\varepsilon}{2}\vec{v}^{\top}(M_1(1)+K(1))\vec{v}+\sigma_k\tilde F(\vec{u},\vec{v}) &\,\rightarrow\,\min_{\vec y,\vec u,\vec v}\\ (M_{1}(\vec a)+K(C))\vec{y}-M_{1}(\vec b)\vec{u}-M_{1}(\vec c)\vec{v}&\,=\,0. \end{aligned} \right.
\end{equation} For the optimality conditions, one first observes that the quadratic function $\tilde F$ is differentiable everywhere and that its derivative at $(\vec{u},\vec{v})$ is given by \begin{equation}\label{eq:FB_First_Order_deriv} \tilde F'(\vec{u},\vec{v})= \begin{pmatrix} E_{10}^{\top}\text{diag}\left(T_u(\vec{u},\vec{v})\right) M_0(1)\left(\sqrt{(E_{10}\vec{u})^2+(E_{10}\vec{v})^2}-E_{10}\vec{u}-E_{10}\vec{v}\right)\\ E_{10}^{\top}\text{diag}\left(T_v(\vec{u},\vec{v})\right) M_0(1)\left(\sqrt{(E_{10}\vec{u})^2+(E_{10}\vec{v})^2}-E_{10}\vec{u}-E_{10}\vec{v}\right) \end{pmatrix}, \end{equation} where the vectors $T_u(\vec{u},\vec{v}),T_v(\vec{u},\vec{v})\in\R^{n_e}$ are defined for all $i\in\{1,\ldots,n_e\}$ as \[ \begin{aligned} T_u(\vec{u},\vec v)_i&:= \begin{cases} \frac{(E_{10}\vec u)_i}{\sqrt{(E_{10}\vec{u})_i^2+(E_{10}\vec{v})_i^2}}-1 &\text{if }(E_{10}\vec u)_i\neq 0\text{ or }(E_{10}\vec v)_i\neq 0,\\ 0 &\text{if }(E_{10}\vec u)_i=(E_{10}\vec v)_i=0, \end{cases} \\ T_v(\vec{u},\vec v)_i&:= \begin{cases} \frac{(E_{10}\vec v)_i}{\sqrt{(E_{10}\vec{u})_i^2+(E_{10}\vec{v})_i^2}}-1 &\text{if }(E_{10}\vec u)_i\neq 0\text{ or }(E_{10}\vec v)_i\neq 0,\\ 0 &\text{if }(E_{10}\vec u)_i=(E_{10}\vec v)_i=0. \end{cases} \\ \end{aligned} \] Note that the case $(E_{10}\vec u)_i=(E_{10}\vec v)_i=0$ corresponds to the \emph{biactive} case, i.e., where the discretized controls $\vec u$ and $\vec v$ (interpreted in the discretized counterpart of $L^2(\Omega)$, i.e., elementwise) are zero at the same time.
Combining \eqref{eq:discretized_penalized_problem} and \eqref{eq:FB_First_Order_deriv}, it is now possible to obtain the following KKT system for the problem \eqref{eq:discretized_penalized_problem}: \begin{subequations}\label{eq:NOC_discrete} \begin{align} E_{10}^{\top}M_0(1)E_{10}\vec{y}-E_{10}^{\top}M_{0}(1)\vec{y}_\text{d}-(M_{1}(\vec{a})+K(C))\vec{p}&=0\\ \left[\alpha_1M_1(1)+\varepsilon\left(M_1(1)+K(1)\right)\right]\vec{u} +\sigma_k \tilde F'_{\vec u}(\vec{u}, \vec{v}) +M_{1}(\vec{b})\vec{p}&=0\\ \left[\alpha_2M_1(1)+\varepsilon\left(M_1(1)+K(1)\right)\right]\vec{v} +\sigma_k \tilde F'_{\vec v}(\vec{u}, \vec{v}) +M_{1}(\vec{c})\vec{p}&=0\\ -(M_{1}(\vec{a})+K(C))\vec{y}+M_{1}(\vec{b})\vec{u}+M_{1}(\vec{c})\vec{v}&=0. \end{align} \end{subequations} Recall that $\vec{p}$ represents the discretized adjoint state and can also be considered as a multiplier related to the discretized state equation. Since the function $\tilde F'$ is nonsmooth but Lipschitz continuous, the nonlinear system \eqref{eq:NOC_discrete} can be solved using a damped semismooth Newton-type method, see \cite{QiSun1999}. Note that the domain of nonsmoothness associated with the mapping $\tilde F'\colon\R^{n_p}\times\R^{n_p}\to\R^{n_p}\times\R^{n_p}$ is given by \[ \{(\vec u,\vec v)\in\R^{n_p}\times\R^{n_p}\,|\,\exists i\in\{1,\ldots,n_e\}\colon\,(E_{10}\vec u)_i=(E_{10}\vec v)_i=0\}. \] A particular Newton derivative can then be chosen as an element of Clarke's generalized Jacobian, see \cite{Clarke:1990a}, associated with $\tilde F'$ at $(\vec u, \vec v)$ that is zero at indices corresponding to biactive components of $(E_{10}\vec u,E_{10}\vec v)$. This choice will be used in the proposed method. Next, due to the well-known local convergence behavior of Newton's method, the initialization of $\vec{u}$ and $\vec{v}$ for the numerical solution of \eqref{eq:NOC_discrete} has to be taken into consideration.
For that purpose, consider the (infinite-dimensional) problem \begin{equation}\label{eq:OCPC}\tag{OCNC} \left\{ \begin{aligned} \tfrac12\norm{\mathtt{E}[y]-y_\text d}{L^2(\Omega)}^2+J(u,v)&\,\rightarrow\,\min_{y,u,v}&&&\\ -\nabla\cdot(\mathbf{C}\nabla y)+\mathbf{a}y&\,=\,\mathbf bu+\mathbf cv && \text{a.e. on } \Omega&\\ \vec{\mathbf{n}}\cdot(\mathbf{C}\nabla y)&\,=\,0&&\text{a.e. on }\operatorname{bd} \Omega&\\ u,v&\,\geq\,0&&\text{a.e. on }\Omega& \end{aligned} \right. \end{equation} which results from \eqref{eq:OCCC} by omitting the equilibrium condition \eqref{eq:equilibrium_condition} and merely imposing nonnegativity constraints. Note that \eqref{eq:OCPC} is convex and can be solved globally by combining a penalty algorithm and a semismooth Newton method, see \cite{DengMehlitzPruefert2018b}. The associated global minimizer is uniquely determined. If its solution already satisfies the equilibrium condition \eqref{eq:equilibrium_condition}, then a global minimizer of \eqref{eq:OCCC} has already been detected. The discretized counterpart of \eqref{eq:OCPC} can be derived similarly as stated above. The associated (discrete) optimal solution $(\vec y_0,\vec u_0,\vec v_0)$ will be used as the starting vector of the semismooth Newton-type method. An abstract description of the proposed numerical method for the computational solution of \eqref{eq:OCCC} is presented in \cref{alg:pathfollowing}. In step \textbf{S2} of this algorithm, $\norm{\cdot}{M}$ denotes a weighted Euclidean norm which represents the discretized $H^1$-norm, see \cite{DengMehlitzPruefert2018b} for details. \begin{algorithm}[h] \begin{description} \item [{S0}] Let $\{\sigma_{k}\}_{k\in\N}$ be a sequence of positive penalty parameters with $\sigma_{k}\to\infty$ as $k\to\infty$. Let a tolerance $\text{eps}>0$ be given. Let $(\vec y_0,\vec u_0,\vec v_0)$ be the (discrete) optimal solution associated with \eqref{eq:OCPC}.
Compute $\vec p_{0}$ as a solution of the discretized adjoint equation with source $E_{10} \vec y_{0}-\vec y_\text d$. Set $k:=1$. \item [{S1}] Solve the discretized KKT system \eqref{eq:NOC_discrete} for fixed $\sigma_{k}$ by a damped, semismooth Newton-type method with starting point $(\vec y_{k-1},\vec{u}_{k-1},\vec{v}_{k-1},\vec p_{k-1})$. Let $(\vec y_k,\vec u_{k},\vec v_{k},\vec p_k)$ be the associated solution. \item [{S2}] If $\norm{(\vec u_k,\vec v_k)-(\vec u_{k-1},\vec v_{k-1})}{M}<\text{eps}$ holds true, then return $(\vec u_{k}, \vec v_{k})$. Otherwise, set $k:=k+1$ and go to \textbf{S1}. \end{description} \caption{Abstract algorithm\label{alg:pathfollowing}} \end{algorithm} \subsection{Checking strong stationarity}\label{sec:stationarity_test} It has to be noted that in step \textbf{S1} of \cref{alg:pathfollowing}, one generally only computes critical points to \eqref{eq:discretized_penalized_problem}. Since the penalty functional $F$ defined in \eqref{eq:penalty} is not convex, these cannot be guaranteed to be global minimizers of \eqref{eq:discretized_penalized_problem} and therefore the convergence result of \cref{prop:strong_convergence_of_surrogate_solutions} does not apply. It is therefore sensible to verify whether the output is at least a strongly stationary point of \eqref{eq:OCCC_reduced} in the sense of \cref{cor:consequences_of_S_Stationarity}, since the local minimizers of \eqref{eq:OCCC_reduced} can be found among its strongly stationary points. Note that available first-order methods for the numerical solution of complementarity problems mainly compute so-called Clarke- or Mordukhovich-stationary points and that these stationarity notions are weaker than strong stationarity, see, e.g., \cite{HoheiselKanzowSchwartz2013} for a discussion of the finite-dimensional situation. 
Thus, checking strong stationarity is recommendable even if a directly discretized version of \eqref{eq:OCCC_reduced} is solved using the available techniques from finite-dimensional MPCC-theory. A possible approach for verifying strong stationarity is described in the following. Let $(y, u,v)\in H^1(\Omega)^3$ be feasible to \eqref{eq:OCCC}. If this point is a local minimizer, then \cref{cor:consequences_of_S_Stationarity} implies that \begin{equation} \label{eq:stationary_equality} \dual{y-y_\text{d}}{y}{L^2(\Omega)} +\alpha_1\dual{u}{u}{L^2(\Omega)} +\alpha_2\dual{v}{v}{L^2(\Omega)} +\varepsilon\dual{u}{u}{H^1(\Omega)} +\varepsilon\dual{v}{v}{H^1(\Omega)} =0 \end{equation} and that \begin{equation} \label{eq:stationary_inequality} \dual{y-y_\text{d}}{z_y}{L^2(\Omega)} +\alpha_1\dual{u}{z_{u}}{L^2(\Omega)} +\alpha_2\dual{v}{z_{v}}{L^2(\Omega)} +\varepsilon\dual{u}{z_{u}}{H^1(\Omega)} +\varepsilon\dual{v}{z_{v}}{H^1(\Omega)} \geq 0 \end{equation} for any pair $(z_u,z_v)\in H^1_+(\Omega)^2$ with \[ \operatorname{supp} z_u\subset I^{+0}(u,v)\cup I^{00}(u,v),\qquad \operatorname{supp} z_v\subset I^{0+}(u,v)\cup I^{00}(u,v), \] where $z_y\in H^1(\Omega)$ is the solution of the state equation $\mathtt{A}[z_y]-\mathtt{B}[z_u]-\mathtt{C}[z_v]=0$. Using the same discretization technique as described in \cref{sec:discrete_NOC}, a discrete counterpart to \eqref{eq:stationary_equality} is \begin{equation}\label{eq:test_discrete_first_condition} \begin{aligned}[t] \Theta:=\vec{y}^{\top}E_{10}^\top M_0(1)E_{10}\vec{y}-\vec{y}^{\top}E_{10}^\top M_{0}(1)\vec{y}_\text d &+\alpha_1\vec{u}^{\top}M_1(1)\vec{u} +\alpha_2\vec{v}^{\top}M_1(1)\vec{v}\\ &+\varepsilon\vec{u}^{\top}(K(1)+M_1(1))\vec{u} +\varepsilon\vec{v}^{\top}(K(1)+M_1(1))\vec{v} =0. \end{aligned} \end{equation} Clearly, a certain tolerance for the violation of \eqref{eq:test_discrete_first_condition} needs to be imposed in practice. 
The numerical verification of condition \eqref{eq:stationary_inequality} requires an appropriate choice of discrete test functions $\vec z_u,\vec z_v$ for given discretized controls $(\vec{u},\vec{v})$ in the finite element space $\mathcal{P}^1(\Omega_\Delta)$. Considering the employed finite element discretization of \eqref{eq:OCCC_reduced}, one particular choice is from the set of basis functions associated with $\mathcal P^1(\Omega_\Delta)$. Since the support of each of these ``hat functions'' covers all elements adjoining a single vertex, a corresponding elementwise approximation of the sets $I^{+0}(u,v)$, $I^{0+}(u,v)$, and $I^{00}(u,v)$, see \eqref{eq:I+0}, \eqref{eq:I0+}, and \eqref{eq:I00}, respectively, is required as well. This can be defined using the projection of $\vec u,\vec v$ from $\mathcal{P}^1(\Omega_\Delta)$ to $\mathcal{P}^0(\Omega_\Delta)$ using the matrix $E_{10}$, which will be denoted by $\vec{u}^0:=E_{10}\vec u$ and $\vec{v}^0:=E_{10}\vec v$, respectively. This leads to the corresponding discrete sets \[ \begin{aligned} I^{+0}(\vec{u},\vec{v})&:=\left\{ i\in\{1,\ldots,n_e\}\,\middle|\, \vec{u}^0_i>0 \text{ and } \vec{v}^0_i=0 \right\},\\ I^{00}(\vec{u},\vec{v})&:=\left\{ i\in\{1,\ldots,n_e\}\,\middle|\, \vec{u}^0_i=0 \text{ and } \vec{v}^0_i=0 \right\},\\ I^{0+}(\vec{u},\vec{v}) &:=\left\{ i\in\{1,\ldots,n_e\}\,\middle|\, \vec{u}^0_i=0 \text{ and } \vec{v}^0_i>0 \right\}.
\end{aligned} \] For any pair of basis vectors $(\vec{z}_{u},\vec{z}_{v})$ whose support is contained in $I^{+0}(\vec{u},\vec{v})\cup I^{00}(\vec{u},\vec{v})$ and $I^{0+}(\vec{u},\vec{v})\cup I^{00}(\vec{u},\vec{v})$, respectively, one can then check whether \begin{equation}\label{eq:test_discrete} \begin{aligned}[t] \Sigma(\vec{z}_{u},\vec{z}_{v}):=\vec{z}_y^{\top}E_{10}^{\top}M_{0}(1)E_{10}\vec{y} & -\vec{z}_{y}^{\top}E_{10}^{\top} M_{0}(1)\vec{y}_{\text{d}} +\alpha_{1}\vec{u}^{\top}M_{1}(1)\vec{z}_{u} +\alpha_{2}\vec{v}^{\top}M_{1}(1)\vec{z}_{v}\\ & +\varepsilon\vec{u}^{\top}\left(K(1)+M_1(1)\right)\vec{z}_{u} +\varepsilon\vec{v}^{\top}\left(K(1)+M_1(1)\right)\vec{z}_{v}\geq0, \end{aligned} \end{equation} where the state $\vec{z}_{y}$ associated with $(\vec{z}_{u},\vec{z}_{v})$ is obtained via \[ (M_{1}(\vec{a})+K(C))\vec{z}_{y}=M_{1}(\vec{b})\vec{z}_{u}+M_{1}(\vec{c})\vec{z}_{v}. \] In numerical practice, a certain tolerance with respect to negative values of $\Sigma(\vec z_u,\vec z_v)$ is necessary since \cref{alg:pathfollowing} involves a penalty procedure and hence yields, in general, only \emph{almost} feasible points for \eqref{eq:OCCC}. Rather than testing for nonnegativity, it is thus checked whether $\Sigma(\vec z_u,\vec z_v)$ is larger than a given negative tolerance. \section{Numerical examples}\label{sec:numerical_examples} The proposed numerical method from \cref{sec:numerics} is illustrated by means of three experiments. These examples are of an academic nature and constructed in such a way that the different features of the stationarity test are visualized. In the first example, \cref{alg:pathfollowing} computes globally optimal controls, and thus the results of the corresponding stationarity test provide a first benchmark for a \emph{numerically passed} stationarity test. Examples 2 and 3 provide nontrivial situations where the stationarity test is passed and failed, respectively.
Recall that whenever the stationarity test fails, the considered point cannot be a local minimizer of the underlying complementarity-constrained program, see \cref{cor:consequences_of_S_Stationarity}. Let $\Omega:=(0,1)^{2}\subset\R^{2}$. For all examples in this section, let $\mathbf C$ be the identity matrix in $\R^{2\times 2}$ and let $\mathbf a\equiv 1$, $\mathbf b=\chi_{\Omega_u} $, as well as $\mathbf c=\chi_{\Omega_v}$ hold where $\Omega_{u}:=\{(x_{1},x_{2})\in\Omega\,|\,x_{2}<0.25\}$ and $\Omega_{v}:=\{(x_{1},x_{2})\in\Omega\,|\,x_{2}>0.75\}$ are fixed subdomains of $\Omega$. The values $\alpha_1=\alpha_2=0$ are fixed for this section. Furthermore, $\varepsilon:=10^{-8}$ is used for all experiments. The implementation is carried out using the object oriented finite element \textsc{matlab} class library OOPDE, see \cite{Pruefert2015}. In order to construct examples where the controls are independent of $x_2$, cf. \cite[Section~6]{ClasonItoKunisch2016} where parabolic problems were considered and the controls only depend on time, the problem \eqref{eq:OCCC} will be equipped with the additional restrictions \begin{equation}\label{eq:gradient_constraints} \partial_{x_2}u=\partial_{x_2}v=0\qquad\text{a.e. on }\Omega. \end{equation} These constraints realize controls depending only on $x_{1}$ and being constant with respect to $x_{2}$ while allowing to use the same finite element space for the discretization of $u$, $v$, and $y$. Note that the additional constraints do not influence the complementarity constraints (which are now imposed on $\Omega$ rather than $(0,1)$). Due to these additional gradient constraints, structured grids on the discretized domain $\Omega_\Delta$ are preferentially used for the following examples. 
On unstructured grids, which can be created by local refinement of an arbitrary set of triangles of a structured mesh, the use of basis functions from $\mathcal{P}^1(\Omega_\Delta)$ forces the resulting controls to be globally affine, see \cite[Section~7.2]{DengMehlitzPruefert2018a} for details. This issue can be solved by choosing basis functions from $\mathcal P^2(\Omega_\Delta)$. A detailed discussion of optimal control problems with gradient constraints can be found in \cite{DengMehlitzPruefert2018a}. To compare results, the solutions of the control problem \eqref{eq:OCPC} without complementarity constraints (equipped with the additional constraints \eqref{eq:gradient_constraints}) will be considered. Recall that optimal controls $(u,v)\in H^1(\Omega)^2$ of \eqref{eq:OCPC} additionally fulfilling the equilibrium condition \eqref{eq:equilibrium_condition} solve \eqref{eq:OCCC} as well, and that these controls are used as starting points for solving \eqref{eq:OCCC}. Since the computed controls are nearly constant with respect to $x_2$, only $u(x_1,0)$ and $v(x_1,0)$ are plotted for the sake of easier comparison. To evaluate the satisfaction of the complementarity conditions, the maximal absolute value of the Fischer--Burmeister function applied componentwise to $(\vec u^0,\vec v^0)$ is reported. Furthermore, $\Sigma(\vec z_u,\vec z_v)$ from \eqref{eq:test_discrete} is checked with a tolerance \begin{equation}\label{eq:definition_tolerance} \mathrm{tol} := 0.01 \left|\min\nolimits_{(\vec z_u,\vec z_v)\text{ feasible test pair}} \Sigma(\vec z_u,\vec z_v)\right|, \end{equation} and the number as well as distribution of pairs $(\vec z_u,\vec z_v)$ for which $\Sigma(\vec z_u,\vec z_v)>\mathrm{tol}$ (``numerically positive''), $|\Sigma(\vec z_u,\vec z_v)|\leq \mathrm{tol}$ (``numerically zero''), or $\Sigma(\vec z_u,\vec z_v)<-\mathrm{tol}$ (``numerically negative'') holds is given.
\paragraph{Example 1} In this example, the desired state is given by the discontinuous function \[ y_{\text d}(x):=\begin{cases} 3 & \text{for }x\in[(0.25,0.75)\times(0,0.25)]\cup[(0,0.5)\times(0.75,1)]\\ 1 & \text{otherwise}. \end{cases} \] The optimal controls of problem \eqref{eq:OCPC} are already (numerically) complementary, see \cref{fig:ex4:ocpc}, and thus provide a globally optimal solution of \eqref{eq:OCCC}. Correspondingly, they coincide with the controls computed for \eqref{eq:OCCC}, see \cref{fig:ex4:occc}, for which the maximal absolute value of the Fischer--Burmeister function is $2.08\cdot 10^{-5}$. With the tolerance chosen as $\mathrm{tol}=2.27\cdot10^{-10}$, $5789$ pairs are labeled as numerically positive, $228$ as numerically zero, and $544$ as numerically negative, see \cref{fig:ex4:test}. Thus, only $8.3\%$ of all tested pairs belong to the latter category. Note that $\Theta=-1.65\cdot 10^{-7}$ holds for the constant defined in \eqref{eq:test_discrete_first_condition}. Observing that \cref{alg:pathfollowing} computes the globally optimal solution of \eqref{eq:OCCC} in this example, the above data represent an \emph{approximately} passed stationarity test. 
\definecolor{mycolor1}{rgb}{0.231674, 0.318106, 0.544834} \definecolor{mycolor2}{rgb}{0.369214, 0.788888, 0.382914} \renewcommand{0.9}{0.9} \begin{figure} \caption{solution of \eqref{eq:OCPC} \label{fig:ex4:ocpc}} \caption{solution of \eqref{eq:OCCC} \label{fig:ex4:occc}} \caption{Example 1: computed controls} \label{fig:ex4_sol} \end{figure} \begin{figure} \caption{$\Sigma(\vec z_u,\vec z_v)$} \label{fig:ex4:comp} \caption{pairs marked numerically positive (white), numerically zero (gray), numerically negative (black)} \label{fig:ex4:test} \caption{Example 1: values of stationarity test and distribution of failed pairs} \label{fig:ex4_stat} \end{figure} \paragraph{Example 2} Here, the desired state is chosen to be the (weak) solution of the elliptic boundary value problem \[ \left\{ \begin{aligned} -\Delta y(x) &\,=\,0&\qquad&\text{a.e. on }\Omega&\\ y(x) &\,=\,2\max\{0;x_1\cos(0.75\pi x_{1})\}&&\text{a.e. on }\Gamma_{1}&\\ y(x) &\,=\,0.25&&\text{a.e. on }\Gamma_{2}&\\ \vec{\mathbf n}(x)\cdot\nabla y(x)&\,=\,0&&\text{a.e. on }\Gamma_3& \end{aligned} \right. \] where $\Gamma_{1}:=[0,1]\times\{0\}$, $\Gamma_{2}:=[0,1]\times\{1\}$, and $\Gamma_3:=\{0,1\}\times[0,1]$ are fixed. The optimal controls of the associated problem \eqref{eq:OCPC} do not fulfill the complementarity condition but already provide a biactive set, see \cref{fig:ex1:ocpc}. On the other hand, the computed solution for \eqref{eq:OCCC} approximately satisfies the complementarity condition, see \cref{fig:ex1:occc}, with a maximal absolute value of the Fischer--Burmeister function of approximately $3.58\cdot 10^{-6}$.
\begin{figure} \caption{solution of \eqref{eq:OCPC} \label{fig:ex1:ocpc}} \caption{solution of \eqref{eq:OCCC} \label{fig:ex1:occc}} \caption{Example 2: computed controls} \label{fig:ex1_sol} \end{figure} \begin{figure} \caption{$\Sigma(\vec z_u,\vec z_v)$} \label{fig:ex1:comp} \caption{pairs marked numerically positive (white), numerically zero (gray), numerically negative (black)} \label{fig:ex1:test} \caption{Example 2: values of stationarity test and distribution of failed pairs} \label{fig:ex1_stat} \end{figure} The minimal value of $\Sigma(\vec z_u,\vec z_v)$ was approximately $-1.62\cdot 10^{-6}$, cf. \cref{fig:ex1:comp}. Accordingly, the tolerance for the stationarity test was chosen as $\mathrm{tol} = 1.617\cdot 10^{-8}$. This leads to $4000$ pairs $(\vec z_u,\vec z_v)$ marked as ``numerically positive'', $2256$ as ``numerically zero'', and $305$ as ``numerically negative'' and thus failing the strong stationarity test \eqref{eq:test_discrete}, see \cref{fig:ex1:test}. These amount to approximately $4.7\%$ of the total number $6561$ of pairs. Note that pairs where the stationarity test fails correlate with those basis functions associated with nodes where the subdomains $I^{+0}(\vec u,\vec v)$ and $I^{0+}(\vec u,\vec v)$ meet. Finally, $\Theta=-2.01\cdot 10^{-9}$ holds. \paragraph{Example 3} In the last experiment, the desired state is given by $y_{\text d}\equiv 1.5$. The optimal controls for the problem \eqref{eq:OCPC} are nearly constant functions, see \cref{fig:ex2:ocpc}. The controls for the problem \eqref{eq:OCCC} computed via \cref{alg:pathfollowing} are complementary, see \cref{fig:ex2:occc}. The maximal absolute value of the Fischer--Burmeister function is $2.02\cdot 10^{-6}$.
\begin{figure} \caption{solution of \eqref{eq:OCPC} \label{fig:ex2:ocpc}} \caption{solution of \eqref{eq:OCCC} \label{fig:ex2:occc}} \caption{Example 3: computed controls} \label{fig:ex2_sol} \end{figure} \begin{figure} \caption{$\Sigma(\vec z_u,\vec z_v)$} \label{fig:ex2:comp} \caption{pairs marked numerically positive (white), numerically zero (gray), numerically negative (black)} \label{fig:ex2:test} \caption{Example 3: values of stationarity test and distribution of failed pairs} \label{fig:ex2_stat} \end{figure} Using the tolerance $\mathrm{tol}=1.11\cdot 10^{-7}$ leads to $0$ numerically positive, $5328$ numerically zero, and $1233$ numerically negative pairs, see \cref{fig:ex2:test}. These are more than $18.5\%$ of all tested pairs. In this example, $\Theta=-3.35\cdot 10^{-10}$ holds true. \paragraph{Summary} The results of the numerical experiments are summarized in \cref{tab:summary}, where ``complementarity'' refers to the maximal absolute value of the elementwise Fischer--Burmeister function. Noting that Experiment 1 provides a benchmark for a passed stationarity test, a computed solution of \eqref{eq:OCCC} is considered as \emph{approximately passing} the strong stationarity test if $|\Theta|\leq\sqrt{\mathrm{tol}}$ holds for $\Theta$ defined in \eqref{eq:test_discrete_first_condition} and the tolerance defined in \eqref{eq:definition_tolerance}, while the number of numerically negative tested pairs is at most $10\%$ of the total number of tested pairs.
\begin{table}[t] \centering \begin{tabular}{rccc} \toprule & Example 1 & Example 2 & Example 3 \\ \midrule $y_{\text{d}}$ & in $L^{2}(\Omega)$ & in $H^{1}(\Omega)$ & constant \\ $\varepsilon$ & $10^{-8}$ & $10^{-8}$ & $10^{-8}$ \\ \midrule complementarity & $2.08\cdot 10^{-5}$ & $3.58\cdot 10^{-6}$ & $2.02\cdot10^{-6}$ \\ $\text{tol}$ & $2.27\cdot 10^{-10}$ & $1.62\cdot 10^{-8}$ & $1.11\cdot 10^{-7}$ \\ $\Theta$ & $-1.65\cdot 10^{-7}$ & $-2.01\cdot 10^{-9}$ & $-3.35\cdot 10^{-10}$ \\ num.\ neg.\ pairs & $8.3\%$ & $4.7\%$ & $18.5\%$ \\ stationarity test & passed & passed & failed \\ \bottomrule \end{tabular} \caption{summary of experiments} \label{tab:summary} \end{table} It has to be mentioned that more experiments with the same parameter settings of the above three examples were implemented for unstructured grids. In \cref{alg:pathfollowing}, the inner iteration implements a damped Newton method to compute the optimal solution of the KKT system \eqref{eq:NOC_discrete} with the fixed penalty parameter $\sigma_k$, which increases in every outer loop. All experiments show that there is no significant correlation between the number of (inner) Newton iterations and the mesh size. However, the solutions calculated on unstructured grids differ significantly from those ones obtained on structured grids, and this phenomenon is not restricted to the use of basis functions from $\mathcal{P}^1(\Omega_\Delta)$. The reason behind this fact may be the inherent nonconvexity of the optimal control problem \eqref{eq:OCCC}, which causes the existence of several local minimizers (and thus strongly stationary points). This also explains the observed fact that the output of \cref{alg:pathfollowing} heavily relies on the initial guess for the controls. \section{Conclusions}\label{sec:conclusions} Optimal control problems with complementarity constraints on the controls admit solutions if the controls are chosen from a first-order Sobolev space. 
Although necessary optimality conditions of strong stationarity-type can be derived in this case, the explicit characterization of the associated Lagrange multipliers is difficult and remains the topic of further research. However, a penalty method based on the Fischer--Burmeister function can be formulated that ensures convergence to a global minimizer of the original complementarity-constrained problem. In theory, this requires computing global minimizers of the penalized problems, and it has to be investigated whether an adapted method based on KKT points is theoretically possible. Nevertheless, numerical examples illustrate that combined with a computable check for a discrete strong stationarity-type condition, this approach leads to a numerical procedure that in many cases results in nearly strongly stationary points. In light of prominent literature which deals with the numerical treatment of finite-dimensional complementarity problems, see \cite{HoheiselKanzowSchwartz2013} and the references therein, this seems to be the best to be hoped for. \appendix \section{A helpful lemma} In the proof of \cref{prop:strong_convergence_of_surrogate_solutions}, the following lemma is used twice. \begin{lemma}\label{lem:sum_of_real_sequences} Let $\{\alpha_k\}_{k\in\N},\{\beta_k\}_{k\in\N}\subset\R$ be sequences such that $\alpha_k+\beta_k\to\alpha+\beta$ holds where $\alpha,\beta\in\R$ satisfy \[ \alpha\leq\liminf_{k\to\infty}\alpha_k,\qquad\beta\leq\liminf_{k\to\infty}\beta_k. \] Then, the convergences $\alpha_k\to\alpha$ and $\beta_k\to\beta$ are valid. \end{lemma} \begin{proof} The assumptions imply that \begin{align*} \alpha \leq\liminf_{k\to\infty}\alpha_k &\leq\limsup_{k\to\infty}\alpha_k =\limsup_{k\to\infty}(\alpha_k+\beta_k-\beta_k)\\ &=\lim_{k\to\infty}(\alpha_k+\beta_k)+\limsup_{k\to\infty}(-\beta_k) \leq \alpha+\beta -\beta =\alpha, \end{align*} which implies that $\alpha_k\to\alpha$. Now, $\beta_k\to\beta$ follows from $\alpha_k+\beta_k\to\alpha+\beta$.
\end{proof} \section*{Acknowledgments} The authors sincerely thank Frank Heyde for fruitful discussions about the explicit form of the generalized second-order derivative of the discretized squared Fischer--Burmeister function. Furthermore, the authors appreciate the comments of two anonymous reviewers which helped to improve the presentation of the obtained results. This work is partially supported by the DFG grants \emph{Parameter Identification in Models With Sharp Phase Transitions} and \emph{Analysis and Solution Methods for Bilevel Optimal Control Problems} under the respective grant numbers CL\,487/2-1 and DE\,650/10-1 within the Priority Program SPP 1962 (Non-smooth and Complementarity-based Distributed Parameter Systems: Simulation and Hierarchical Optimization). \printbibliography \end{document}
\begin{document} \title{Invariants from the Linking Number} \author{H. A. Dye } \maketitle \begin{abstract} We explore a family of invariants obtained from linking numbers. This is a family of Kauffman finite type invariants. \end{abstract} \section{Introduction} Given a virtual knot $K$, smoothing a crossing vertically (as shown in Figure \ref{fig:smooth}) results in a 2-component link $L$. \begin{figure} \caption{Vertical Smoothing} \label{fig:smooth} \end{figure} We compute the linking number of the link $L$ by summing the sign of the crossings that involve edges from two different components. For classical knots, the linking number is always divisible by two. In the virtual case, this does not occur, and we obtain linking numbers that are congruent to both zero and one mod two. This allows us to compute a new type of invariant that can be computed directly from either the Gauss code or signed chord diagrams, and is a Kauffman finite type invariant of virtual links and knots. This invariant is related to the invariants of Henrich \cite{allison} and Manturov \cite{m1}, \cite{m2}. Recall that a virtual link diagram is a decorated immersion of $n$ copies of $S^1$ into the plane that contains two types of crossings, classical crossings (indicated by over-under markings) and virtual crossings (indicated by a solid encircled X). Two virtual link diagrams are said to be equivalent if they are related by a sequence of classical and virtual Reidemeister moves. These moves are illustrated in Figures \ref{fig:moves} and \ref{fig:vmoves}. In this paper, virtual knots and virtual knot diagrams will simply be referred to as knots and knot diagrams for convenience. \begin{figure} \caption{Classical Reidemeister move} \label{fig:moves} \end{figure} \begin{figure} \caption{Virtual Reidemeister move} \label{fig:vmoves} \end{figure} Let $K$ denote a virtual knot diagram and let $c$ denote a crossing of $K$.
We denote the sign of the crossing as $sgn(c)$ and the sign of the crossing is evaluated as shown in Figure \ref{fig:sgn}. The writhe of a virtual knot $K$, $w(K)$, is defined to be: \begin{equation*} w(K) = \sum_{c \in K} sgn(c). \end{equation*} \begin{figure} \caption{Crossing sign} \label{fig:sgn} \end{figure} Fix an orientation of the virtual knot $K$ and choose a crossing $c$. Let $K_c$ denote the two component link diagram (with an inherited orientation) obtained by smoothing the crossing $c$ vertically. Let $K_i$ denote the $i^{th}$ component of $K_c$ and let $K_1 \cap K_2 $ denote the set of crossings in $K_c$ that contain one strand from each component. We define $ L(K_c) $ to be a linking number of the two components where \begin{equation*} L(K_c) = \sum_{d \in K_1 \cap K_2 } sgn(d). \end{equation*} Further, let $ \bar{L} (K_c) = L(K_c) \bmod 2$. \begin{rem} The classical definition of linking number either divides $ L(K_c) $ by two or sums over crossings where $K_1$ is the overcrossing and $K_2$ is the undercrossing or vice versa. \end{rem} We define $ \gamma (K) $ to be a sum over all crossings in $K$: \begin{equation} \label{definvar} \gamma (K) = \sum_{c} t^{\bar{L} (K_c)} sgn(c) \end{equation} Note that for any virtual knot diagram $K$, $ \gamma (K) $ has the form $ a + b t $ where $a, b \in \mathbb{Z}$. That is, $ \gamma (K) $ is an element of a free left $ \mathbb{Z}$ module over the set $ \{1, t\} $. We define $ \bar{ \gamma} (K) $: \begin{equation} \bar{ \gamma} (K) = \gamma (K) \bmod 2. \end{equation} \begin{thm}[Invariance] The sums $ \gamma(K) $ and $\bar { \gamma} (K) $ are invariant under all classical and virtual Reidemeister moves except Reidemeister move I.\end{thm} \textbf{Proof:} We use chord diagrams to show the invariance of $ \gamma (K)$. An introduction to chord diagrams can be found in \cite{gpv}. We note that $ \gamma (K) $ is not invariant under the Reidemeister I move.
The introduction of a single Reidemeister I type twist is indicated on a chord diagram by an isolated chord. Smoothing along an isolated chord produces an unlinked two component link diagram, contributing $ \pm 1 $ to the sum, as shown in Figure \ref{fig:r1chord}. \begin{figure} \caption{Reidemeister I move} \label{fig:r1chord} \end{figure} A Reidemeister II move in a knot diagram introduces two crossings with opposite sign. \begin{figure} \caption{Reidemeister II move} \label{fig:r2chord} \end{figure} The two possible chord diagrams obtained after the introduction of a Reidemeister II move are shown in Figure \ref{fig:r2chord}. To obtain the chord diagram corresponding to the removal of a Reidemeister II move, simply remove the pair of chords. In the first type of Reidemeister II move (shown at the top of Figure \ref{fig:r2chord}) smoothing vertically along the negative edge results in a link ($K_{n}$) with linking number $L+1$ and smoothing along the positive edge results in a link ($K_{+}$) with linking number $L-1 $. Hence \begin{equation*} \bar{L} (K_{n}) = \bar{L} (K_{+}). \end{equation*} Now \begin{equation*} \gamma (K) = \sum_{c} t^{\bar{L} (K_c) } sgn(c) \end{equation*} and \begin{equation*} \gamma (K) = t^{\bar{L} (K_+)} - t^{\bar{L} (K_{n})} + \sum_{c \neq +,n } t^{\bar{L} (K_c) } sgn(c). \end{equation*} In the second type of Reidemeister II move (shown at the bottom of Figure \ref{fig:r2chord}), smoothing along the positive and negative edges produces links with linking number $L$. Because the edges have opposite sign, these contributions cancel out in the sum $ \gamma (K)$. There are two sets of chord diagrams of the Reidemeister III moves, based on the directionality of the edges. We will refer to these sets as type 1 and type 2, Figures \ref{fig:r3chordt1} and \ref{fig:r3chordt2} respectively. In these diagrams, numbers are used to distinguish individual chords while letters indicate (possible) arrangements of endpoints of chords. 
On the left hand side of Figure \ref{fig:r3chordt1}, we show the chord diagrams corresponding to both sides of a Reidemeister III move, type 1. On the right hand side, we show schematics of the chord diagrams of the links obtained by vertically smoothing each of the three crossings involved in a Reidemeister III move. \begin{figure} \caption{Reidemeister III move, type 1} \label{fig:r3chordt1} \end{figure} \begin{figure} \caption{Reidemeister III move, type 2} \label{fig:r3chordt2} \end{figure} For a vertical smoothing of edge $i$, note that the linking numbers of the resultant links (although the links themselves may not be equivalent) are equivalent modulo two. As a result, both sides make the same contribution to $ \gamma (K)$. The same argument applies to the second type of Reidemeister III move as shown in Figure \ref{fig:r3chordt2}. As a result, $ \gamma (K) $ and $\bar{ \gamma } (K) $ are both invariant under the classical and virtual Reidemeister moves.\vrule height5pt width3pt depth.5pt We immediately realize the following propositions. \begin{prop} For a virtual knot $K$, $ \gamma (K) = a + (2k) t$ where $a$ and $k$ are integers. \end{prop} \textbf{Proof:} Let $K$ be a virtual knot diagram and $C_K$ denote a chord diagram corresponding to $K$. The invariant $ \gamma (K)$ counts, up to sign, the number of chords that are intersected by an even number of chords and the number of chords that are intersected by an odd number of chords. The simplest chord diagram contains zero chords and has $0$ even chords and $0$ odd chords. A diagram with $1$ chord contains $1$ even chord and $0$ odd chords. We assume that a diagram with $n$ chords has an even number of odd chords, denoted by $k_o$. Let $k_e $ denote the number of even chords. Suppose that we introduce a single new chord to the diagram. We have four cases based on the parity of the number of even and odd chords intersected.
\textbf{Case 1:} The new chord intersects both an even number of odd and even chords, denoted $2o$ and $2e$. We note that the new chord is an even chord. Now: \begin{gather*} k_o \rightarrow k_o + 2e - 2o \\ k_e \rightarrow k_e - 2e + 2o+1 \end{gather*} As a result, the total number of odd chords has changed by an even number. \textbf{Case 2:} The new chord intersects an even number of odd chords and an odd number of even chords, denoted $2o$ and $2e+1$. We note that the new chord is an odd chord. Now: \begin{gather*} k_o \rightarrow k_o - 2o + (2e+1) + 1 \\ k_e \rightarrow k_e + 2o -(2e + 1) \end{gather*} The total number of odd chords has changed by an even number. \textbf{Case 3:} The new chord intersects an odd number of odd chords and an even number of even chords. This also changes the total number of odd chords by an even number. The computation is analogous. \textbf{Case 4:} The new chord intersects an odd number of odd chords and an odd number of even chords. The total number of odd chords changes by an even number.\vrule height5pt width3pt depth.5pt \begin{prop} For a virtual knot $K$, $ \gamma (K)|_{t=1} $ is the writhe. In particular, for a classical knot diagram, $ \gamma (K) = w(K)$. \end{prop} \textbf{Proof:} Clear. \vrule height5pt width3pt depth.5pt \begin{prop} \label{fintype} Let $K$ denote a virtual knot diagram with a positive crossing $l$. Let $K'$ denote a virtual knot diagram where the crossing $l$ is switched to a negative crossing. Then $ \gamma (K) - \gamma (K') = \pm 2 t^{\bar{L} (K_l) } $ and $ \bar{ \gamma }(K) - \bar{ \gamma} (K') =0 $. \end{prop} \textbf{Proof:} Let $K$ denote a knot diagram with a positive crossing $l$. Let $K'$ denote a knot diagram where the crossing $l$ is switched to a negative crossing. For any crossing $c$ except $l$, the smoothed diagrams $K_c$ and $K'_c $ will differ only at the crossing $l$. If the crossing $l$ does not involve both components then it is clear that $ \bar{L} (K_c) = \bar{L} (K'_c) $. 
However, if the crossing $l$ does involve both components of the smoothed link diagram then $ L (K_c) = m+1 $ and $L (K'_c) = m-1 $ for some $m \in \mathbb{Z}$. Hence, $ \bar{L} (K_c) = \bar{L} (K'_c) $. If the crossing $l$ is smoothed then $ \bar{L} (K_l) = \bar{L} (K'_l) $. Then \begin{gather} \label{f1} \gamma (K) = \sum_{c \in K} t^{\bar{L}(K_c)} sgn(c) \\ = t^{ \bar{L} (K_l) }+ \sum_{c \in K, c \neq l} t^{ \bar{L} (K_c)} sgn (c). \end{gather} Similarly: \begin{gather} \label{f2} \gamma (K') = \sum_{c \in K'} t^{\bar{L}(K'_c)} sgn(c) \\ = -t^{\bar{L} (K'_l)} + \sum_{c \in K', c \neq l} t^{ \bar{L} (K'_c)} sgn (c). \end{gather} Combining equations \eqref{f1} and \eqref{f2}, we obtain \begin{equation*} \gamma (K) - \gamma (K') = 2 t^{\bar{L} (K_l) } \end{equation*} and \begin{equation*} \bar{ \gamma } (K) - \bar{ \gamma } (K') = 0. \vrule height5pt width3pt depth.5pt \end{equation*} \begin{cor} Let $K$ denote a virtual knot diagram with a positive crossing $l$. Let $K'$ denote a virtual knot diagram where the crossing $l$ is switched to a negative crossing. Then $ \gamma (K) + \gamma (K') \equiv 0 $ modulo 2. \end{cor} Let $S$ denote a subset of the classical crossings in the virtual knot diagram $D$. Let $D_S$ denote the diagram obtained by switching the set of crossings in $S$. Let $|S|$ denote the cardinality of $S$. Recall that a finite type invariant $v$ has degree $n$ if for $ |S| =n $ then $ v(D) = (-1)^{|S|} v(D_S) $ \cite{kvirt}. By this definition, Proposition \ref{fintype} shows that $ \bar{ \gamma} (K) $ is a Kauffman finite type invariant of degree one \cite{gpv} \cite{allison}. \section{Extending the invariant} We can extend $\bar{\gamma} $ to form a sequence of invariants by smoothing pairs of crossings and summing the knot diagrams obtained in this manner. A chord has odd parity (denoted by a $\bullet $ in diagrams) if an odd number of chords intersect the chord. Otherwise, a chord has even parity (denoted by a $ \circ $ in diagrams). 
Parity is invariant under the classical and virtual Reidemeister moves. (Note that this is the parity defined by Manturov in \cite{m2}.) Let $p$ denote a pair of intersecting chords with opposite parity and let $P$ denote the set of all pairs of intersecting chords with opposite parity. The virtual knot diagram obtained by smoothing the crossings in the diagram that correspond to the pair of chords is $K_p$. We define $ \varsigma(K) $, a formal sum of diagrams obtained by smoothing the original diagram along selected pairs of crossings. We select pairs of crossings that correspond to intersecting chords with opposite parity. \begin{equation} \varsigma(K) = \sum_{p \in P} K_p. \end{equation} We now define $ \bar{\gamma}_2 (K) = \gamma (t^2 \varsigma(K) ) \pmod{2}$. \begin{rem}For each diagram $K_p$, we obtain a polynomial of the form $ a + b t$ for $a,b \in \mathbb{Z}$. The evaluation of $ \gamma (t^2 \varsigma(K) ) $ is $ \sum_{p \in P} t^2 \gamma (K_p)$. This becomes $ \sum_{p \in P} t^2 (a_p + b_p t) $. We consider this sum mod 2 in order to obtain invariance under the Reidemeister moves. Unfortunately, since $ b_p $ is always even, $ \bar{ \gamma}_2 (K) $ is either $0$ or $ t^2$. \end{rem} We prove that $ \bar{ \gamma}_2 $ is invariant under the classical and virtual Reidemeister moves. \begin{thm} For a virtual knot diagram $K$, $\bar{ \gamma }_2 (K) $ is invariant under the Reidemeister moves. \end{thm} \textbf{Proof:} Let $K$ be a virtual knot diagram and let $C(K)$ represent the corresponding chord diagram. In the chord diagrams for this proof, we observe the following conventions. Enumerated, signed edges represent the chords corresponding to the crossings involved in the studied moves. The labeled edge $g$ represents a generic edge that intersects the enumerated edges. The letters $a,b,c,d,e$ represent sequences of chord endpoints and are used to calculate and compare $\gamma (K)$ from different chord diagrams. 
Note that a crossing introduced by a Reidemeister I move has even parity. The edge is isolated and does not intersect any edges. Hence, it does not contribute to $\varsigma(K)$. (Notice that if $K$ is the unknot with a single twist, $ \varsigma (K) $ is the empty sum and $ \bar{ \gamma}_2 (K) = 0 $.) We now consider the Reidemeister II move. A Reidemeister II move introduces two oppositely signed crossings. In a chord diagram, these crossings are represented as a pair of edges with the same parity. Without loss of generality we will assume that the crossings have even parity as shown in Figure \ref{fig:g2r2t1}. \begin{figure} \caption{Reidemeister II move} \label{fig:g2r2t1} \end{figure} In the diagram, the edges $1$ and $2$ represent the crossings involved in the Reidemeister II. An edge $g$ with opposite parity will either intersect both $1$ and $2$ or intersect neither. The smoothing pairs $(1,g)$ and $(2,g)$ both contribute to the sum $\varsigma(K)$. Note that $ \bar{ \gamma} (K_{ (1,g)}) = \bar{ \gamma } (K_{ (2,g) }) $ for a net contribution of zero to $ \bar{ \gamma}_2 (K) $. For the second type of Reidemeister II move, we obtain an analogous pair of diagrams. We consider the Reidemeister III move. There are two sets of chord diagrams corresponding to the Reidemeister III move and two cases for the parity of the chords involved. We first consider the case where all three chords in the Reidemeister III move have the same parity. Without loss of generality, we let all three chords have even parity. In the computation of $ \varsigma (K) $, we do not obtain any diagrams from pairs of chords in the Reidemeister III move. A contribution to $ \varsigma (K) $ would involve one chord from the Reidemeister III move and a chord of different parity that intersects the Reidemeister III move chords. In Figure \ref{fig:g2r3t1}, we examine a type 1 Reidemeister move where all three chords have even parity. We consider the chord $g$ as shown in Figure \ref{fig:g2r3t1}. 
The chord diagrams obtained by smoothing the pairs $(1,g)$ and $(2,g) $ contribute to $ \varsigma $. We observe that $ \gamma (K_{(i,g)} ) = \gamma ( K'_{(i,g)} )$. We observe that in the case of other smoothing pairs, the Reidemeister III move remains intact. \begin{figure} \caption{Reidemeister III move, type 1} \label{fig:g2r3t1} \end{figure} In Figures \ref{fig:g2r3t2} and \ref{fig:g2r3t3}, we consider the Reidemeister III move, type 2 where all chords have even parity. There are two potential ways for a chord $g$ with opposite parity to intersect the Reidemeister III move. \begin{figure} \caption{Reidemeister III move, type 2} \label{fig:g2r3t2} \end{figure} \begin{figure} \caption{Reidemeister III move, type 2, case 2} \label{fig:g2r3t3} \end{figure} In Figures \ref{fig:diffparityg2r3t1}, \ref{fig:diffparityg2r3t2}, and \ref{fig:diffparityg2r3t3}, we consider the Reidemeister III move where the chords have differing parity. The most interesting case is shown in Figure \ref{fig:diffparityg2r3t1}. From $K$, we obtain two diagrams $K_{(1,2)} $ and $K_{ (2,3)} $ which differ only by a Reidemeister I move. Note that $ \bar{ \gamma} (K_{ (1,2)} ) = \bar{ \gamma} (K_{(2,3)}) $, and as a result these terms cancel out in $ \bar{ \gamma}_2 $. From $K'$, we obtain no diagrams, resulting in the same net contribution to $ \bar{ \gamma}_2 $. \begin{figure} \caption{Reidemeister III move, type 1, edges with different parity} \label{fig:diffparityg2r3t1} \end{figure} In Figures \ref{fig:diffparityg2r3t2} and \ref{fig:diffparityg2r3t3}, we examine the cases obtained from a type 2 Reidemeister III move where the edges have different parity. 
\begin{figure} \caption{Reidemeister III move, type 2, edges with different parity} \label{fig:diffparityg2r3t2} \end{figure} \begin{figure} \caption{Reidemeister III move, type 2, case 2, edges with different parity} \label{fig:diffparityg2r3t3} \end{figure} We observe that the net contributions are equivalent and $ \bar{ \gamma}_2 $ is invariant under the classical and virtual Reidemeister moves.\vrule height5pt width3pt depth.5pt \begin{prop} The invariant $ \bar{ \gamma}_2 $ is a finite type invariant of degree one. \end{prop} \textbf{Proof:} We consider the knot diagrams $K$ and $K'$ which are related by switching exactly one crossing, say $s$. To compute $\varsigma$, we expand pairs of crossings with opposite parity. If these pairs contain $s$, we denote the diagrams obtained from $K$ and $K'$ as $K_{p_i} $ and $K'_{p_i} $ respectively. Note that $K_{p_i} $ and $K'_{p_i} $ are the same diagram, so that $ \gamma (K_{p_i}) = \gamma (K'_{p_i}) $. If the pair does not contain $s$, we denote the diagrams obtained from $K$ and $K'$ as $K_{q_i} $ and $K'_{q_i} $ respectively. Note that $K_{q_i} $ and $K'_{q_i} $ differ by exactly one crossing. Now by Proposition \ref{fintype}, $ \gamma (K_{q_i}) - \gamma (K'_{q_i}) \equiv 0$ modulo two. Hence $ \bar{ \gamma}_2 (K) - \bar{ \gamma}_2 (K') = 0 $ and $ \bar{ \gamma}_2$ is a finite type invariant of degree one.\vrule height5pt width3pt depth.5pt \section{Examples} We compute a variety of examples of the invariants $ \gamma $ and $ \gamma_n $ in this section. \subsection{Trefoil} The trefoil shown in Figure \ref{fig:tref} has writhe $-3$. \begin{figure} \caption{Trefoil} \label{fig:tref} \end{figure} We observe that $ \gamma (K)= -3 $ which is equivalent to the writhe. \subsection{Virtual Trefoil} \begin{figure} \caption{Virtual Trefoil} \label{fig:vtref} \end{figure} The virtual trefoil, with writhe $-2$, is shown in Figure \ref{fig:vtref}. The value of the $ \gamma $ invariant is $-2t$ and $ \gamma (K) |_{t=1} = -2 $. 
\subsection{Miyazawa's Knot} \begin{figure} \caption{Miyazawa's Knot} \label{fig:miya} \end{figure} In Figure \ref{fig:miya} is Miyazawa's knot and $ \gamma (K) = 2 + 2t $. \subsection{A knot with $ \gamma (K) = 2k+3 + (2n+2)t$} \begin{figure} \caption{Knot with $\gamma (K) = (2k+3) + (2n+2) t $} \label{fig:arbknot} \end{figure} For the diagram shown in Figure \ref{fig:arbknot}, complete the labeled tangles with $2k+1 $ and $2n$ positive twists, respectively. This produces a virtual knot diagram with $\gamma (K) = (2k+3) + (2n+2) t $ for $n,k>0 $. \subsection{Computing $\gamma_2 $} \begin{figure} \caption{A knot with non-zero $ \gamma_2 $.} \label{fig:gamma2knot} \end{figure} For the knot shown in Figure \ref{fig:gamma2knot}, the $ \gamma $ invariant is $ 2 + 4t $ and the $ \bar{ \gamma}_2 $ is $t^2$. \section{Conclusion} The invariants $ \gamma $ and $ \bar{\gamma}_2 $ can be extended by taking additional characteristics of the chord diagram into consideration. We can extend the invariants by either enhancing the parity or considering formal sums of diagrams obtained by smoothing pairs of crossings. To ehance parity, we could consider over/under markings that are usually indicated on the chord diagrams by the presence of arrowheads at one end of the chord. Counting the chord intersections using the arrowheads to mark orientations will augment $ L (K_c) $. In the second case, it may be possible to extend $\gamma $ by smoothing particular quadruples of crossings. These ideas will be investigated in future work. \end{document}
\begin{document} \title{A General Framework for Uncertainty Quantification via Neural SDE-RNN\\ } \author{ Shweta Dahale, \IEEEmembership{Graduate Student Member,~IEEE}, Sai Munikoti, \IEEEmembership{Member,~IEEE}, and \\ Balasubramaniam Natarajan, \IEEEmembership{Senior Member,~IEEE} \\ \thanks{S. Dahale and B. Natarajan are with the Electrical and Computer Engineering, Kansas State University, Manhattan, KS-66506, USA (e-mail: [email protected], [email protected]). S.Munikoti is with the Data Science and Machine Intelligence group, Pacific Northwest National Laboratory, USA, (e-mail: [email protected]). This material is based upon work supported by the Department of Energy, Office of Energy Efficiency and Renewable Energy (EERE), Solar Energy Technologies Office, under Award Number DE-EE0008767. }} \maketitle \begin{abstract} Uncertainty quantification is a critical yet unsolved challenge for deep learning, especially for the time series imputation with irregularly sampled measurements. To tackle this problem, we propose a novel framework based on the principles of recurrent neural networks and neural stochastic differential equations for reconciling irregularly sampled measurements. We impute measurements at any arbitrary timescale and quantify the uncertainty in the imputations in a principled manner. Specifically, we derive analytical expressions for quantifying and propagating the epistemic and aleatoric uncertainty across time instants. Our experiments on the IEEE 37 bus test distribution system reveal that our framework can outperform state-of-the-art uncertainty quantification approaches for time-series data imputations. \end{abstract} \begin{IEEEkeywords} Neural ordinary differential equations, uncertainty propagation, linearization approximation, imputation \end{IEEEkeywords} \section{Introduction} Irregularly sampled multivariate time-series data are found in several domains spanning from bio-medical systems to the smart grid. 
This is primarily the outcome of two factors. The first source is the inherent system design where data generation happens at different rates. For instance, in a smart grid, different sensors sense and transmit measurements at different time scales, i.e., advanced metering infrastructure (AMI) samples at 15-min intervals, supervisory control and data acquisition (SCADA) sensors at 1-min intervals, etc.\cite{dahale2022recursive}. The second factor corresponds to system malfunctions that cause frequent unavailability of data samples. For example, communication impairments in the smart grid result in loss of data, further aggravating this issue \cite{dahale2022bayesian}. It is important to reconcile such multi-scale intermittent measurements at a common timescale in order to enhance the situational awareness of the grid. Typically, this kind of irregularly sampled data is first imputed (i.e., missing values are estimated) to a uniform scale so that an effective analysis can be carried out with the processed data. To this end, several methods have been proposed in the literature such as linear interpolation \cite{gomez2014state}, kNN \cite{al2016state} and multitask Gaussian processes \cite{dahale2022recursive,9637824}. Recently, neural ODEs (NeuODES) have been shown to be a very effective framework for handling irregularly sampled data \cite{chen2018neural, dahale2022latent, 9180216, lu2021neural}. This is primarily due to the fact that NeuODES can estimate values in a continuous space (using dynamics) unlike the discrete space (proportional to the number of layers) in a vanilla neural network. However, most of the data imputation frameworks solely offer a point estimate of the missing value, ignoring their uncertainties. It is crucial to provide confidence scores with predictions so that an informed decision can be made while deployed in real-world applications. 
Uncertainty quantification (UQ) of the imputed measurements can be helpful in various ways, including (i) variance-informed state estimation where variance (confidence interval) of the imputed measurement's can be used to modulate the inputs of the estimator \cite{dahale2022bayesian}. For instance, high variance input is disregarded or corrected before being fed to the estimator. (ii) Effective data sampling where UQ helps in selecting appropriate measurements from high or low-fidelity sensors. Uncertainty in deep neural network-based models such as NeuODES arises due to two components. The first factor is with respect to the model parameters and it arises due to the model either being blind to some portion of the data distribution which has not been seen while it's training or due to its over-parameterization. There are various ways to handle this and can be quantified in a computationally efficient manner. On the other hand, the second class of uncertainty is related to the input data, also known as aleatoric uncertainty. It is inherent in nature and cannot be alleviated by any means of model engineering \cite{gal2016dropout}. Modeling and quantifying these uncertainties are critical with regard to time series imputation since the sensors are often associated with noise, and one cannot cover the entire data distribution while training the models. There exist a few works in the literature that quantify uncertainty in NeuODES-based frameworks, \cite{li2020scalable, jia2019neural, hegde2018deep,herrera2020neural}. Authors in \cite{herrera2020neural} proposed a neural Jump ODE framework for modeling the conditional expectation of a stochastic process in an irregularly observed time series. However, most of these works only retrieve the epistemic part and ignore the aleatoric part. It is important to incorporate the aleatoric part, especially for critical complex systems where sensor-generated data is pivotal in various operational decision-making. 
Therefore, we propose a novel framework to quantify the uncertainties in a comprehensive manner. In this regard, we first formulate an SDE-RNN framework that combines the stochastic differential equation (SDE) form of NeuODES and recurrent neural network (RNN) for imputation. Specifically, SDE is chosen since it can propagate both mean (prediction) and variances (uncertainty) along the NeuODES framework in an efficient manner. Furthermore, RNN helps us to capture the inputs at observed time instances, and effectively carry forward the information to future states via its memory cell. Altogether, their combination offers an powerful and efficient approach to quantify and propagate both uncertainties from the input to final predictions. The main contributions of this paper are summarized below: \begin{itemize} \item We formulate a novel SDE-RNN which combines the principle of stochastic differential equations and neural networks to model the irregularly sampled time series. \item The proposed SDE-RNN approach allows us to quantify both aleatoric and epistemic uncertainty in the imputed measurements. Analytical results capturing these sources of uncertainty have been derived. We derive the theoretical expressions of uncertainty propagation for Gated Recurrent Unit (GRU) model. \item Simulation results on the power distribution test system (IEEE 37 network) demonstrate the effectiveness of the proposed approach compared to the classic baseline (RNN + MC dropout). \end{itemize} The paper is organized as follows. Section \RomanNumeralCaps{2} presents a background of NeuODES with a literature review. Section \RomanNumeralCaps{3} formulates our fundamental SDE-RNN framework followed by the detailed process of uncertainty quantification in Section \RomanNumeralCaps{4}. Experiments are discussed in Section \RomanNumeralCaps{5} with conclusions and future work in Section \RomanNumeralCaps{6}. 
\section{Background and Related work} This section provides an overview of the UQ literature for unevenly sampled time series imputation models. Additionally, the mathematical background of neural ODES, which serves as a fundamental module in our proposed approach, is provided. \subsection{Related work} Recurrent neural networks (RNN) form the first choice for modeling high dimensional, regularly sampled time series data. The success of RNNs is due to its memory and cell state modules that can capture long range dependencies. A natural extension of RNNs to unevenly sampled time series data is to divide the timeline into equally-sized intervals \cite{lipton2016directly}, \cite{che2018recurrent}, and impute or aggregate observations using averages. This pre-processing stage maligns the information, particularly about the timing of measurements, as they constitute a piece of important information about the data. Another approach uses a simple exponential decay between observations, and updates the hidden states accordingly \cite{che2018recurrent}. But the states of RNN-decay model approach zero if the time gaps between observations are high. However, Neural ODE, which is a novel framework combining deep learning principles and differential equations has been found to be suitable for modeling irregularly sampled time series \cite{chen2018neural}. Neural ODEs can systematically accommodate continuous time series data using a technique that captures the continuous nature of hidden states. Recently, \cite{rubanova2019latent} combines neural ODEs with RNN, leading to a method referred to as ODE-RNN. The hidden states in these ODE-RNN models obey an ODE between consecutive observations and are only updated at observed time instances. The ODE-RNN models are suitable for sparse observations and thus very effective for imputation and one-step prediction. Authors in \cite{li2020scalable} propose a Latent SDE with an adjoint method for continuous time series modeling. 
Neural SDEs driven by stochastic processes with jumps is proposed in \cite{jia2019neural} to learn continuous and discrete dynamic behavior. Generative models built from SDEs whose drift and diffusion coefficients are samples from a Gaussian process are introduced in \cite{hegde2018deep}. A neural Jump ODE framework for modeling the conditional expectation of a stochastic process in an irregularly observed time series is proposed in \cite{herrera2020neural}. However, all neural ODE driven methodologies only offer a point estimate and fail to quantify the uncertainty associated with the estimates. UQ approaches in general neural networks are mainly categorized into two classes: Bayesian and Ensemble methods. Bayesian approaches, such as Bayesian neural nets proposed in \cite{denker1990transforming} quantify the uncertainty by imposing probability distributions over model parameters. Though, these approaches provide a principled way of uncertainty quantification, the exact inference of the parameter posteriors is intractable. Also, specifying the priors for the parameters of deep neural networks becomes challenging when size of the network increases. To deal with these challenges, approximation methods like variational inference \cite{blundell2015weight}, Laplace approximation \cite{ritter2018scalable}, Assumed density filtering \cite{munikoti2023general} and stochastic gradient MCMC \cite{li2016preconditioned} are used. Authors in \cite{gal2016dropout} proposed to use Monte-Carlo Dropout (MC-dropout) during inference to quantify the uncertainty in neural networks. However, MC dropout is computationally expensive for larger network and only capture epistemic uncertainty. Non-Bayesian approaches have also been used for UQ but they also demand a large computational effort \cite{lakshminarayanan2017simple}. 
A different route has been taken in \cite{kong2020sde} where SDEs are used to quantify both aleatoric and epistemic uncertainty by training the SDE model based on out-of-distribution (OOD) training data. However, this approach fails to evaluate uncertainty in a principled manner. \subsection{Background: Neural ODE} Consider time-series sensor data $\{x_i, t_i\}_{i=0}^{N}$, where the measurements $\{ x_i\}_{i=0}^{N} \in \mathbb{R}^d$ are obtained at times $\{ t_i\}_{i=0}^{N}$. The goal of time-series imputation is to reconcile the unevenly sampled measurements at the finest time resolution. Neural ODE exploits the continuous-time dynamics of variables from input state to final predictions, unlike a standard deep neural network which only performs a limited number of transformations depending on the number of layers chosen in the architecture. The state transitions in RNNs are generalized to continuous time dynamics specified by a neural network. We denote this model as ODE-RNN. The hidden state in ODE-RNN is a solution to an ODE initial-value problem, given as: \begin{equation} \frac{d\mathbf{h}_t}{dt} = f(\mathbf{h}_t, \theta) \label{eq:2} \end{equation} where, $f$ is a neural network parameterized by $\theta$ that defines the ODE dynamics. $\mathbf{h}_t$ is the hidden state of the Neural ODE. Thus, starting from an initial point $\mathbf{h}_0$, the transformed state at any time $t_i$ is given by integrating the ODE forward in time, given as, \begin{equation} \begin{aligned} \mathbf{h}_i = \mathbf{h}_0 + \int_{t_0}^{t_i} \frac{d\mathbf{h}_t}{dt} dt \\ \mathbf{h}_i = ODESolve (\mathbf{h}_0, (t_0, t_i), f) \end{aligned} \label{eq:3} \end{equation} Equation \eqref{eq:3} can be solved numerically using any ODE solver (e.g., Euler's method). In order to train the parameters of the ODE function $f$, an adjoint sensitivity approach is proposed in \cite{chen2018neural}. 
This approach computes the derivatives of the loss function with respect to the model parameters $\theta$ by solving a second augmented ODE backward in time. Some of the advantages of using Neural ODE solvers over other conventional approaches are: (1) \textit{Memory efficiency:} The adjoint sensitivity approach allows us to train the model with constant memory cost independent of the layers in the ODE function $f$; (2) \textit{Adaptive computation:} Adaptive number of updation steps by ODE solver is more effective than fixed number of steps in vanilla DNN. (3) \textit{Effective formulation:} Finally, the continuous dynamics of the hidden states can naturally incorporate data which arrives at arbitrary times. Based on the foundations of NeuODEs, ODE-RNN was developed. ODE-RNN is autoregressive in nature unlike the generative form of NeuODES \cite{rubanova2019latent}. It models the irregularly sampled time series by applying ODE and RNN interchangeably through a time series. The states evolve continuously using the ODE model, while they are updated using the RNN cell at the instances where the measurements are available. The function $f$ at time $t$ is described in a neural ODE with initial hidden state $h_t$: \begin{equation} \dot{\mathbf{h}} = f(\mathbf{h}_t, \theta) \end{equation} \begin{equation} o_t = c(\mathbf{h}_t, \theta_c) \end{equation} where, $\mathbf{h} \in \mathbb{R}^m$ is the hidden state of the data, $\dot{\mathbf{h}} = \frac{d\mathbf{h}}{dt}$ is the derivative of the hidden state, $o \in \mathbb{R}^d$ is the output of the ODE-RNN. $f:\mathbb{R}^m \to \mathbb{R}^m $ and $c:\mathbb{R}^m \to \mathbb{R}^d$ are the neural ODE operator and output function parameterized by neural network parameters $\theta$ and $\theta_c$, respectively. 
The hidden states are modeled using a Neural ODE, where they obey the solution of an ODE as, \begin{equation} \mathbf{h}_{i}^{'} = ODESolve (\mathbf{h}_{i-1}, (t_{i-1}, t_{i}), f) \end{equation} Let us represent the RNN cell function by $v(\cdot)$ with parameters $\theta_v$. At each observation $x_i$, the hidden states are updated by an RNN as, \begin{equation} {\mathbf{h}_{i}} = v(\mathbf{h}_{i}^{'},x_i, \theta_v). \label{rnncell} \end{equation} The ODE-RNN approach proposed in \cite{rubanova2019latent} provides imputations but fails to quantify the uncertainty associated with them. Therefore, we offer a novel SDE-RNN framework, which, in addition to the predictions, is capable of quantifying the uncertainties in a principled manner. \section{Proposed SDE-RNN Approach} In this section, we describe the key elements of the proposed SDE-RNN approach. Our approach involves the modification of ODE-RNN to include a stochastic differential equation (SDE-RNN) to capture the evolution of the hidden states (as opposed to an ODE). The stochastic differential equation that governs the dynamics of the hidden states corresponds to, \begin{equation} d\mathbf{h}_t = f(\mathbf{h}_t, t) dt + g(\mathbf{h}_t, t) d\mathbf{B}_t \label{sde} \end{equation} where, $f$ and $g$ are the drift and diffusion functions, respectively. The transformations $f$ and $g$ are carried out through neural networks. $\mathbf{B}_t$ represents the Brownian motion with the distribution $ d\mathbf{B}_t \sim \mathcal{N}(0, \mathbf{Q} \Delta t)$ where $\mathbf{Q}$ is the diffusion matrix. $g(\mathbf{h}_t, t)$ denotes the variance of the Brownian motion and represents the epistemic uncertainty in the hidden state dynamics. Both the drift and diffusion functions are nonlinear, thanks to the nonlinear transformation of the neural networks $f$ and $g$. Therefore, the SDE in (\ref{sde}) is nonlinear. 
The hidden states in the SDE-RNN model are the solution of the SDE in (\ref{sde}), i.e., \begin{equation} \mathbf{h}_{i}^{'} = SDESolve (\mathbf{h}_{i-1}, (t_{i-1}, t_{i}), f, g) \end{equation} Here, SDESolve represents the SDE Solver. It is important to note that states are updated based on observations $x_i$ at time $t_i$ using the RNN model. Using this SDE-RNN approach, we can quantify the uncertainty in the hidden and output states, as discussed in the following subsection. \begin{figure*} \caption{Propagation of uncertainty. $x_{i} \label{fig:framework1} \end{figure*} \subsection{Uncertainty quantification} We propose a novel uncertainty propagation approach in the SDE-RNN model. The proposed method quantifies the uncertainty in a holistic manner accounting for both aleatoric and epistemic, without relying on prior specification of model parameters and complicated Bayesian inference. The SDE-RNN approach propagates the uncertainty from the noisy input observations to the final outputs using the CVRNN model and the SDE model. Here, we refer to the CVRNN model as a modified form of RNN, that propagates the mean and uncertainty arising from the previous hidden states and the inputs to the next hidden state. Fig. \ref{fig:framework1} and Algorithm \ref{SDERNN_approach} illustrate the overall uncertainty propagation in the SDE-RNN framework. We can initialize the mean and covariance of the hidden state as zero and then update them continuously. The CVRNN model updates the hidden states at the time when an observation is available along with its associated uncertainty. This approach accounts for the uncertainty in both the input observation and the previous hidden states. SDE model captures the epistemic uncertainty of the SDE-RNN approach. In the following subsections, we elaborate the uncertainty propagation in both the CVRNN model and SDE model. \subsubsection{CVRNN Model} The RNN cell in Eq. 
(\ref{rnncell}) updates the hidden states at time instants where measurements are available. The dynamics of a general RNN cell are modeled by, \begin{equation} {\mathbf{h}_{i}} = v(\mathbf{h}_{i-1}, x_i, \theta_v), \end{equation} \begin{equation} o_i = c(\mathbf{h}_i, \theta_c). \end{equation} Assume the input $x_i$ is corrupted with additive Gaussian noise given as \begin{equation} \Tilde{x}_i = {x}_i + w_i, \end{equation} where $w_i \sim \mathcal{N}(0, \Sigma_i)$ with $\Sigma_i$ representing the covariance matrix of the measurement. These noisy observations are fed to the RNN model, where the hidden states and the outputs become random variables corresponding to, \begin{equation} {\Tilde{\mathbf{h}}_i} = v(\Tilde{\mathbf{h}}_{i-1}, \Tilde{x}_i , \theta_v), \label{eq8} \end{equation} \begin{equation} \Tilde{o}_i = c(\Tilde{\mathbf{h}}_i, \theta_c). \end{equation} Let $\hat{\mathbf{h}}_i = \operatorname{\mathbb{E}}[\Tilde{\mathbf{h}_i}]$ and let $\hat{P}_i$ denote the estimate of the covariance matrix $P_i = \operatorname{\mathbb{E}} \{(\Tilde{\mathbf{h}_i} - \hat{\mathbf{h}_i})(\Tilde{\mathbf{h}_i} - \hat{\mathbf{h}_i})^\intercal \}$. Based on (\ref{eq8}), the linearization around $\hat{\mathbf{h}}_{i-1}$ implies that, \begin{equation} \label{eq1} \begin{split} {\Tilde{\mathbf{\mathbf{h}}}_i} & = v(\Tilde{\mathbf{h}}_{i-1}, \Tilde{x}_i) \\ & = v(\hat{\mathbf{h}}_{i-1} + \delta h_{i-1}, \hat{x}_i + w_i) \\ & = v(\hat{\mathbf{h}}_{i-1}, \hat{x}_i) + \Delta_h v \delta \mathbf{h}_{i-1} + \Delta_x v w_i + o(\delta \mathbf{h}_{i-1}^2, w_i^2) \end{split} \end{equation} where, $\Delta_h v$ and $\Delta_x v$ are calculated at operating points $(\hat{\mathbf{h}}_{i-1},x_i)$. According to the transformation of uncertainty \cite{amini2021robust} given in Theorem 1, we can evaluate the expected value and covariance of $\Tilde{\mathbf{h}}_{i}$. 
\begin{theorem} \textit{Transformation of uncertainty:} Consider a recurrent neural network represented by $v(\cdot)$ with neural network parameters $\theta_v$ and hidden states given by ${\Tilde{\mathbf{h}}_i} = v(\Tilde{\mathbf{h}}_{i-1}, \Tilde{x}_i , \theta_v)$. The input $\Tilde{x}_i$ is corrupted with noise $w_i \sim \mathcal{N}(0, \Sigma_i)$. The estimation of the expected value and covariance matrix of the hidden state ${\Tilde{\mathbf{h}}_i}$ at time $i$ can be recursively calculated as, \begin{equation} \hat{\mathbf{h}}_i = v(\hat{\mathbf{h}}_{i-1}, x_i) \label{hiddenstates_uncertainty} \end{equation} \begin{equation} \hat{\mathbf{P}}_i = (\Delta_h v) \hat{\mathbf{P}}_{i-1} (\Delta_h v)^\intercal + (\Delta_x v) \Sigma_i (\Delta_x v)^\intercal \label{hiddencovariance_uncertainty} \end{equation} \end{theorem} where the expected value of the previous hidden state $ \operatorname{\mathbb{E}}[\Tilde{\mathbf{h}}_{i-1}] = \hat{\mathbf{h}}_{i-1} $ and its associated covariance matrix ${\hat{\mathbf{P}}_{i-1}}$ are given. Thus, the mean and covariance can be computed in a CVRNN framework with noisy input measurement data at time $i$ using (\ref{hiddenstates_uncertainty}) and (\ref{hiddencovariance_uncertainty}). We discuss the SDE model in the following subsection. \subsubsection{SDE Model} A neural SDE model aims to capture the epistemic uncertainty with Brownian motion and propagate it to the next time step. It also aims to capture the uncertainties arising from the previously obtained hidden states and their co-variances. A neural SDE can be expressed using the dynamical equation in Eq. (\ref{sde}). As this SDE is nonlinear, its statistics can be computed by adopting certain approximations. Linearized approximation of SDEs \cite{sarkka2019applied} is a technique which computes the statistics by linearizing the drift and diffusion function around a certain point. 
Using the Taylor series, the drift function $f(\mathbf{h},t)$ around the mean $\mathbf{m}$ is linearized as, \begin{equation} f(\mathbf{h},t) \approx f(\mathbf{m},t) + \mathbf{F}_h(\mathbf{m},t) (\mathbf{h}-\mathbf{m}) \end{equation} The diffusion function is linearized as, \begin{equation} g(\mathbf{h},t) \approx g(\mathbf{m},t) + \mathbf{G}_h(\mathbf{m},t) (\mathbf{h}-\mathbf{m}) \end{equation} A linearized approximation to estimate the mean and covariance of an SDE (\ref{sde}) can be obtained by integrating the following differential equations from the initial conditions $\mathbf{m}(t_0) = \operatorname{\mathbb{E}}[\mathbf{h}(t_0)]$ and $\mathbf{P}(t_0) = \mathrm{Cov}[\mathbf{h}(t_0)]$ to the target time $t$: \begin{equation} \frac{d\mathbf{m}}{dt} = f(\mathbf{m},t), \label{sdemean} \end{equation} \begin{equation} \frac{d\mathbf{P}}{dt} = \mathbf{P} \mathbf{F}_h^\intercal (\mathbf{m},t) + \mathbf{F}_h(\mathbf{m},t)\mathbf{P} + \mathbf{G}_h(\mathbf{m},t) \mathbf{Q} \mathbf{G}_h^\intercal(\mathbf{m},t) \label{sdecovariance} \end{equation} Thus, (\ref{sdemean}) and (\ref{sdecovariance}) capture the evolution of both the mean and covariance of the hidden states. After the hidden states are updated, they are transformed by another neural network to get final predictions (see Fig.~\ref{fig:framework1}). The estimation of the mean and the covariance of the output is obtained according to the transformation of uncertainty (Theorem 1) as, \begin{equation} \hat{o}_i = c(\hat{\mathbf{h}}_{i}) \label{outputstates_uncertainty} \end{equation} \begin{equation} \hat{R}_i = (\Delta_h c) \hat{\mathbf{P}}_{i} (\Delta_h c)^\intercal \label{outputcovariance_uncertainty} \end{equation} Algorithm \ref{SDERNN_approach} illustrates the working mechanism of the proposed SDE-RNN approach. As discussed earlier, the hidden states $\mathbf{m}_i$ are updated at times $i$ where measurements are present. The availability of the measurements is indicated by the vector mask $\in \{0,1\}$. 
The imputed measurements along-with the associated variances are indicated by $\{o_i\}_{i=0}^N$ and $\{R_i\}_{i=0}^N$ respectively. \begin{algorithm} \KwInput{Datapoints $\{ x_i\}_{i=1}^{N}$ and the corresponding times $\{ t_i\}_{i=1}^{N}$, drift function $f$, diffusion function $g$\\} Initialization: $\mathbf{h}_{0}$, $\mathbf{P}_{0}$ \begin{algorithmic}[1] \FOR{$i = 1,..,N$} \STATE ${\mathbf{h}_{i}^{'}, \mathbf{P}_{i}^{'}} = SDESolve(\mathbf{h}_{i-1}, \mathbf{P}_{i-1}, (t_{i-1}, t_{i}), f, g)$ \STATE ${\mathbf{h}_{i}, \mathbf{P}_{i}} = CVRNN(\mathbf{h}_{i}^{'},\mathbf{P}_{i}^{'}, x_i)$ \STATE $\mathbf{h}_{i}$ = mask $\times$ $\mathbf{h}_{i}$ + (1-mask) $\times$ $\mathbf{h}_{i}^{'}$ \STATE $\mathbf{P}_{i}$ = mask $\times$ $\mathbf{P}_{i}$ + (1-mask) $\times$ $\mathbf{P}_{i}^{'}$ \STATE $\{o_{i}, R_{i}\}_{i=1}^{N} = c(\mathbf{h}_{i} ,\mathbf{P}_{i} )$ \ENDFOR \STATE \textbf{return} $\{o_i\}_{i=0}^N$, $\{R_i\}_{i=0}^N$ \end{algorithmic} \caption{SDE-RNN Approach} \label{SDERNN_approach} \end{algorithm} \subsection{Uncertainty propagation for GRU} The CVRNN model discussed in the previous section can be any form of the recurrent neural network; for example, it could be either RNN, Long Short Term Memory (LSTM), or Gated Recurrent Unit (GRU) model whose hidden states are given as, \begin{equation} {h_{t}} = v(h_{t-1}, x_t, \theta_v), \end{equation} In this section, we will derive the uncertainty propagation for a GRU model by using the transformation of uncertainty as discussed in Theorem 1. To begin with, we need to compute the $\Delta_h v$ and $\Delta_x v$. 
In a GRU model, the hidden states are given as, \begin{equation} h_t = z_t \circ h_{t-1} + (1-z_t) \circ h_{t}^{'} \label{hidden_gru} \end{equation} \begin{equation} h_{t}^{'} = \text{tanh}(W_{in} x_t + b_{in} + r_t(W_{hn} h_{t-1} + b_{hn})) \end{equation} \begin{equation} z_t = \sigma (W_{iz} x_t + b_{iz} + W_{hz} h_{t-1} + b_{hz}) \end{equation} \begin{equation} r_t = \sigma (W_{ir} x_t + b_{ir} + W_{hr} h_{t-1} + b_{hr}) \end{equation} Here, $x_t$, $z_t$ and $r_t$ are the inputs, update and reset gates of the GRU model, respectively. \text{tanh} and $\sigma$ are the Tanh and Sigmoid activation functions, respectively. $\circ$ represents the Hadamard product. The gradient of the hidden states (\ref{hidden_gru}) at time $t$ computed at operating points $(\hat{h_{t-1}}, x_t)$ are given as, \begin{equation} \Delta_h v = \frac{\partial z_t}{\partial h_{t-1}} h_{t-1} + z_t \frac{\partial h_{t-1}}{\partial h_{t-1}} + \frac{\partial (1-z_t)}{\partial h_{t-1}} h_{t}^{'} + \frac{\partial h_{t}^{'} }{\partial h_{t-1}} (1-z_t) \label{gradient_eq} \end{equation} The derivative of the sigmoid function is denoted as $ \sigma^{'} (x) = \sigma (x) (1- \sigma (x))$. 
Each gradient in (\ref{gradient_eq}) is given as, \begin{equation} \frac{\partial z_t}{\partial h_{t-1}} = \sigma^{'} (W_{iz} x_t + b_{iz} + W_{hz} h_{t-1} + b_{hz}) \circ W_{hz} \end{equation} \begin{equation} \frac{\partial r_t}{\partial h_{t-1}} = diag(r_t (1-r_t)) \circ W_{hr} \end{equation} \begin{equation} \frac{\partial h_{t}^{'} }{\partial h_{t-1}} = diag(1-h_t^{'2}) \circ \{\frac{\partial r_t}{\partial h_{t-1}} (W_{hn} h_{t-1} + b_{hn}) +W_{hn} r_t \} \end{equation} Similarly, $\Delta_x v$ is given as, \begin{equation} \Delta_x v = \frac{\partial z_t}{\partial x_{t}} h_{t-1} + \frac{\partial (1-z_t)}{\partial x_{t}} h_{t}^{'} + \frac{\partial h_{t}^{'}}{\partial x_{t}} (1-z_t) \label{gradient_eq2} \end{equation} Each gradient in (\ref{gradient_eq2}) is given as, \begin{equation} \frac{\partial z_t}{\partial x_{t}} = diag(z_t (1-z_t)) \circ W_{iz} \end{equation} \begin{equation} \frac{\partial r_t}{\partial x_{t}} = diag(r_t (1-r_t)) \circ W_{ir} \end{equation} \begin{equation} \frac{\partial h_{t}^{'}}{\partial x_{t}} = diag(1-h_t^{'2}) \circ (W_{in} + \frac{\partial r_t}{\partial x_{t}} (W_{hn} h_{t-1} + b_{hn})) \end{equation} Using Eq. (\ref{gradient_eq}) and (\ref{gradient_eq2}), the covariance matrix of the hidden states $P_t$ at time $t$ is given by (\ref{hiddencovariance_uncertainty}). \section{Experimental results} We validated the proposed SDE-RNN approach for imputations on irregularly sampled measurements from the power distribution network (IEEE 37 bus test system). It is important to note that the proposed framework can be applied to any irregularly sampled time series data. \subsection{Data pre-processing} The irregularly sampled measurements are considered from two sensors, namely the smart meters and SCADA sensors. The smart meter measurements consist of active and reactive power injection for a 24-hr duration. 
This 24-hr load profile consists of a mixture of load profiles, i.e., industrial/commercial load profiles \cite{carmona2013fast}, and residential loads \cite{al2016state}. Reactive power profiles are obtained by assuming a power factor of $0.9$ lagging. The SCADA measurements are the voltage magnitude measurements obtained by executing load flows on the test network. The SCADA measurements are assumed at a subset of node locations. The aggregated smart meter data are averaged over 15-minute intervals while the SCADA measurements are sampled at 1-min interval. Gaussian noise with $0$ mean and standard deviation equal to $10\%$ of the actual power values is added to the smart meter data to mimic real-world measurement noise. The smart meter and SCADA measurements constitute our training dataset. The sensors data is represented as a list of records, where each record represents the information about the time-series data with the format given as, \textit{record = [measurement type, values, times, mask]}. Here, time-series data at each node of the IEEE 37 bus network represents one \textit{record}. The \textit{measurement type} denotes the sensor type, i.e., $P,Q,$ or $V$. \textit{Values} $\in \mathbb{R}^{N \times 1}$ represent the sensor measurements with \textit{times} $\in \mathbb{R}^{N}$ as the corresponding time instants. \textit{Mask} $\in \mathbb{R}^{N \times 1}$ represents the availability of the corresponding measurements. The dataset is further normalized between [0,1] intervals and takes the union of all time points across different nodes in the dataset that are irregularly sampled. \subsection{Model Specifications} We use the GRU cell with hidden size 5 to encode the observations. The drift function of the SDE is a feedforward neural network with 1 layer and 100 units. The diffusion function is a feedforward neural network with 1 layer, 100 units, and a sigmoid activation function. We consider Ito SDE with diagonal noise of Brownian motion. 
Python is used for coding the entire framework with the support of PyTorch’s torchsde package \cite{li2020scalable}. We consider the output neural network as a 1-layer feedforward network with an input size of 5 and an output size of 1. We set the learning rate to 0.01, batch size as 10 and report the loss as mean squared error (MSE). We compare the efficacy of the proposed SDE-RNN with the classic GRU approach. The classic GRU approach consists of a GRU model with 3 input features (observations, times, and mask) and 5 output features. The predictions are obtained by transforming the output of the GRU cell via another feedforward neural network with 2 layers. The first layer is a neural network with output features 100 followed by the Tanh activation function. We then employ a dropout rate of 0.3 after this activation function followed by another feedforward neural network of input features 100 and output feature 1. The uncertainty estimates provided by the SDE-RNN is compared with that of Monte-Carlo dropout approach in classic RNN using the Expected normalized calibration error (ENCE) metric \cite{levi2022evaluating}. Calibration error is typically used to evaluate uncertainty estimates since they are not associated with any groundtruth values. In a well calibrated model, average confidence score should match with the average model accuracy \cite{guo2017calibration}. Thus, in a classification setting, calibration implies that whenever a forecaster assigns a probability of $p$ to an event, that event should occur about $p\%$ of the time. On the other hand, in a regression setting, calibration signifies that the prediction $y_t$ should fall in a $c\%$ (e.g., $90\%$) confidence interval approximately $c\%$ (e.g., $90\%$) of the time. 
The expected normalized calibration error (ENCE) is based on a similar regression setting that for each value of uncertainty, measured through the standard deviation $\sigma$, the expected mistake (measured in MSE) matches the predicted error $\sigma^2$, i.e., \begin{equation} E_{x,y} [(\mu(x) - y)^2| \sigma(x)^2 = \sigma^2] = \sigma^2 \end{equation} ENCE metric is evaluated using the binning approach, where we assume that the number of bins $N$ divides the number of time points $T$. We divide the indices of the examples to $N$ bins, $\{ B_j\}_{j=1}^N$, such that $B_j =\{(j-1)\frac{T}{N}+1,..., j\frac{T}{N}\}$. Each bin represents an interval in the standard deviation axis: $[min_{t \in B_{j}} \{\sigma_t\}, max_{t \in B_{j}} \{\sigma_t\}]$. Expected Normalized Calibration Error ($ENCE$) is calculated as, \begin{equation} ENCE = \sqrt{\frac{1}{N} \sum_{j=1}^{N} \frac{|mVAr(j) - RMSE(j)|}{mVAr(j)}} \end{equation} where, the root of the mean variance ($mVAr (j)$) at bin $j$ is, \begin{equation} mVAr(j) = \sqrt{\frac{1}{| B_j|} \sum_{t \in B_j} \sigma_t^2}, \end{equation} and root mean squared error ($RMSE(j)$) at bin $j$ is, \begin{equation} RMSE(j) = \sqrt{\frac{1}{| B_j|} \sum_{t \in B_j} (y_t - \hat{y}_t)^2}. \end{equation} It is expected that the $mVAr$ will equal the $RMSE$ for each bin, i.e., the plot of $RMSE$ as a function of $mVAr$ should be an identity function. However, in reality, the models are not well calibrated and the plot is not an identity function. Thus, a model is said to provide better uncertainty estimates if they have a lower ENCE. We compare the uncertainty estimation of our SDE-RNN approach with the classic RNN in the following subsection. 
\begin{figure} \caption{Uncertainty quantification in SDE-RNN approach- node 1} \label{fig:imputation_node1} \end{figure} \begin{figure} \caption{Uncertainty quantification in SDE-RNN approach- node 12} \label{fig:imputation_node12} \end{figure} \subsection{Imputation:} In the first experiment, the smart meter measurements are imputed at a 1-min interval using the proposed SDE-RNN approach. The training and test dataset contains the available AMI (15-minute) and SCADA (1-minute interval) measurements. In addition, in the test dataset, we introduce missing data as a percentage of the total number of time instants (minutes) in the 24-hr interval (i.e., 1440 time instants). The prediction and uncertainty estimates provided by the SDE-RNN model are shown in Fig.\ref{fig:imputation_node1} and Fig. \ref{fig:imputation_node12} for nodes 1 and 12, respectively. Here, $5\%$ of total interpolated points (i.e., 1440 data points) are observed. It can be observed that the epistemic uncertainty increases as we move away from the observed values. For instance in Fig. \ref{fig:imputation_node1}, the sparse region of $10^{th}$-$12^{th}$ hr depicts less confidence compared to data-rich regions of $5^{th}$-$9^{th}$ hr. The aleatoric uncertainty due to the sensor noise is well captured by the SDE-RNN model, reflected by the uncertainty estimates at the observations. The uncertainty estimates at the observation times is nonzero and present due to the noisy input data, which is the aleatoric uncertainty part. In the second experiment, we compare the uncertainty estimates provided by the SDE-RNN with the classic GRU + MC dropouts approach. We assume the number of bins is 5. Table \ref{imputation_table} shows the performance of both these approaches for different percentages of missing data in the test dataset. We provide the test dataset's mean squared error (MSE) and ENCE. 
It is evident from the Table that the SDE-RNN approach offers better accuracy and uncertainty estimates than the classic GRU model for all the levels of missingness in the test dataset. \begin{table}[] \caption{Imputation results for different percentages of missing data in test dataset} \begin{tabular}{|l|lll|lll|} \hline Metric & \multicolumn{3}{l|}{MSE} & \multicolumn{3}{l|}{ENCE} \\ \hline \begin{tabular}[c]{@{}l@{}}Missing \\ data (\%) \end{tabular} & \multicolumn{1}{l|}{40\%} & \multicolumn{1}{l|}{60\%} & 80\% & \multicolumn{1}{l|}{40\%} & \multicolumn{1}{l|}{60\%} & 80\% \\ \hline SDE-RNN & \multicolumn{1}{l|}{0.0005} & \multicolumn{1}{l|}{0.0008} & 0.0013 & \multicolumn{1}{l|}{57.45} & \multicolumn{1}{l|}{49.11} & 54.35 \\ \hline Classic-GRU & \multicolumn{1}{l|}{0.0506} & \multicolumn{1}{l|}{0.0978} & 0.1527 & \multicolumn{1}{l|}{534.05} & \multicolumn{1}{l|}{419.06} & 457.92 \\ \hline \end{tabular} \label{imputation_table} \end{table} \section{Conclusion and Future work} This paper proposes a neural SDE-RNN framework for imputing the multi-timescale measurements and quantifying the uncertainty associated with them. We capture both the aleatoric and epistemic uncertainty and propagate it across different time instances through neural SDE-RNN modules. Simulation results (ENCE, MSE) on the power distribution (IEEE 37 bus) test system validate the effectiveness of our approach. As a part of our future work, we aim to compare the epistemic uncertainty obtained through our model with that of conventional approaches such as Monte-Carlo dropouts, Deep Ensemble, etc. \end{document}
\begin{document} \title[Special termination for log canonical pairs]{Special termination for log canonical pairs} \author{Vladimir Lazi\'c} \address{Fachrichtung Mathematik, Campus, Geb\"aude E2.4, Universit\"at des Saarlandes, 66123 Saarbr\"ucken, Germany} \email{[email protected]} \author{Joaqu\'in Moraga} \address{UCLA Mathematics Department, Box 951555, Los Angeles, CA 90095-1555, USA} \email{[email protected]} \author{Nikolaos Tsakanikas} \address{EPFL SB MATH CAG, MA C3 595 (B\^atiment MA), Station 8, 1015 Lausanne, Switzerland} \email{[email protected]} \thanks{ Lazi\'c was supported by the DFG-Emmy-Noether-Nachwuchsgruppe ``Gute Strukturen in der h\"oherdimensionalen birationalen Geometrie". We would like to thank O.\ Fujino and K.\ Hashizume for useful discussions related to this work. \newline \indent 2010 \emph{Mathematics Subject Classification}: 14E30.\newline \indent \emph{Keywords}: Minimal Model Program, termination of flips, special termination. } \begin{abstract} We prove the special termination for log canonical pairs and its generalisation in the context of generalised pairs. \end{abstract} \maketitle \setcounter{tocdepth}{1} \tableofcontents \section{Introduction} The first goal of this paper is to give a rigorous and complete proof of the following result, which is a natural step towards proving one of the big open problems in the Minimal Model Program (MMP) in characteristic zero -- the termination of flips. \begin{thm}\label{thm:main} Assume the termination of flips for $ \mathbb{Q} $-factorial klt pairs of dimension at most $ n-1 $. Let $(X_1,B_1)$ be a quasi-projective log canonical pair of dimension $n$ which is projective over a normal quasi-projective variety $Z$. 
Consider a sequence of flips over $Z$: \begin{center} \begin{tikzcd}[column sep = 2em, row sep = 2.25em] (X_1,B_1) \arrow[dr, "\theta_1" swap] \arrow[rr, dashed, "\pi_1"] && (X_2,B_2) \arrow[dl, "\theta_1^+"] \arrow[dr, "\theta_2" swap] \arrow[rr, dashed, "\pi_2"] && (X_3,B_3) \arrow[dl, "\theta_2^+"] \arrow[rr, dashed, "\pi_3"] && \dots \\ & Z_1 && Z_2 \end{tikzcd} \end{center} Then there exists a positive integer $N$ such that \[ \Exc(\theta_i)\cap\nklt(X_i,B_i)=\emptyset \text{ for all } i\geq N . \] \end{thm} The result is usually referred to as \emph{special termination for log canonical pairs}; in other words, in any sequence of flips of a log canonical pair, the flipping (and thus also the flipped) locus will avoid the non-klt locus of the pair. The theorem has its origins in \cite{Sho92} and was stated in this form in \cite{Sho03,Sho04}; however only a sketch of a proof in a special case is given. On the state of the art, see comments in \cite[Section 4.2]{Fuj07a} and \cite[Section 5]{Fuj11}. The only complete proof of special termination is in \cite{Fuj07a} for \emph{dlt pairs}; in that case, special termination says that the flipping locus is eventually disjoint from the round-down of the boundary of the dlt pair. Even though that statement suffices in many applications, it seems that the generalisation to log canonical pairs is necessary if one wants to attack the termination of flips. In particular, special termination as in Theorem \ref{thm:main} was used in \cite{Bir07}. Our second goal is to prove a form of special termination in the context of g-pairs. This is a recently introduced category which includes the usual pairs, see Section \ref{Sec:prelim} for details. \begin{thm}\label{thm:main_g} Assume the termination of flips for NQC $ \mathbb{Q} $-factorial klt g-pairs of dimension at most $ n-1 $. Let $(X_1,B_1+M_1)$ be a quasi-projective NQC log canonical g-pair of dimension $n$ which is projective over a normal quasi-projective variety $Z$. 
Consider a sequence of flips over $Z$: \begin{center} \begin{tikzcd}[column sep = 0.8em, row sep = 1.75em] (X_1,B_1+M_1) \arrow[dr, "\theta_1" swap] \arrow[rr, dashed, "\pi_1"] && (X_2,B_2+M_2) \arrow[dl, "\theta_1^+"] \arrow[dr, "\theta_2" swap] \arrow[rr, dashed, "\pi_2"] && (X_3,B_3+M_3) \arrow[dl, "\theta_2^+"] \arrow[rr, dashed, "\pi_3"] && \dots \\ & Z_1 && Z_2 \end{tikzcd} \end{center} Then there exists a positive integer $N$ such that \[ \Exc(\theta_i)\cap\nklt(X_i,B_i+M_i)=\emptyset \text{ for all } i\geq N . \] \end{thm} As demonstrated in recent papers \cite{Mor18,HL22,HM20,LT22}, understanding the Minimal Model Program of g-pairs is indispensable even if one is interested only in results involving the usual pairs or even only varieties. We expect Theorem \ref{thm:main_g} to play a prominent role in future developments on the existence of minimal models and the termination of flips. \section{Preliminaries}\label{Sec:prelim} Throughout the paper we work over an algebraically closed field of cha\-rac\-te\-ris\-tic zero. All morphisms are projective. Given a projective morphism $f\colon X\to Z $ between normal varieties and two $ \mathbb{R} $-Cartier divisors $ D_1 $ and $ D_2 $ on $ X $, we say that $ D_1 $ and $ D_2 $ are \emph{numerically equivalent over $ Z $}, denoted by $ D_1 \equiv_Z D_2 $, if $ D_1\cdot C = D_2 \cdot C $ for any curve $ C $ contained in a fibre of $ f $. We say that $ D_1 $ and $ D_2 $ are \emph{$\mathbb{R}$-linearly equivalent over $ Z $}, denoted by $ D_1 \sim_{\mathbb{R},Z} D_2 $, if there exists an $\mathbb{R}$-Cartier $\mathbb{R}$-divisor $G$ on $Z$ such that $D_1\sim_\mathbb{R} D_2+f^*G$. An $\mathbb{R}$-divisor $D$ on a variety $X$ over $Z$ is an \emph{NQC divisor} if it is a non-negative linear combination of $\mathbb{Q}$-Cartier divisors on $X$ which are nef over $ Z $. The acronym NQC stands for \emph{nef $\mathbb{Q}$-Cartier combinations}. 
\begin{dfn} Let $X$ be a normal variety which is projective over a normal variety $Z$ and let $ D $ be an $ \mathbb{R} $-Cartier divisor on $ X $. An \emph{NQC weak Zariski decomposition} of $ D $ over $ Z $ consists of a projective birational morphism $ f \colon W \to X $ from a normal variety $ W $ and a numerical equivalence $ f^* D \equiv_Z P + N $ such that $ P $ is an NQC divisor and $ N $ is an effective $\mathbb{R}$-Cartier divisor on $W$. \end{dfn} \begin{dfn} Let $X$ and $Y$ be normal varieties and let $\varphi \colon X\dashrightarrow Y$ be a \emph{birational contraction}, i.e.\ the map $ \varphi^{-1} $ contracts no divisors. Let $D$ be an $\mathbb{R}$-Cartier $\mathbb{R}$-divisor on $X$ and assume that $\varphi_*D$ is $\mathbb{R}$-Cartier. Then $\varphi$ is \emph{$D$-nonpositive} if there exists a smooth resolution of indeterminacies $(p,q)\colon W\to X\times Y$ of $\varphi$ such that $$ p^*D\sim_\mathbb{R} q^* \varphi_*D + E,$$ where $E$ is an effective $q$-exceptional $\mathbb{R}$-Cartier divisor on $W$. \end{dfn} \subsection{The relative stable base locus} Let $ X \to Z $ be a projective morphism of normal varieties and let $ D $ be an $ \mathbb{R} $-divisor on $ X $. The \emph{$ \mathbb{R} $-linear system} associated with $ D $ over $ Z $ is \[ |D/Z|_\mathbb{R} := \{ G \geq0 \mid G \sim_{\mathbb{R},Z} D \} , \] and the \emph{stable base locus} of $ D $ over $ Z $ is defined as \[ \sB(D/Z) := \bigcap_{E \in |D/Z|_\mathbb{R}} \Supp E . \] \begin{lem}\label{lem:baselocus} Let $f\colon Y\to X$ and $g\colon X\to Z$ be projective birational morphisms between normal varieties. Let $D$ be an $\mathbb{R}$-Cartier $\mathbb{R}$-divisor on $X$. Then $\sB(f^*D/Z)=f^{-1}\big(\sB(D/Z)\big)$. \end{lem} \begin{proof} It suffices to show that $f^*|D/Z|_\mathbb{R}=|f^*D/Z|_\mathbb{R}$. It is clear that $f^*|D/Z|_\mathbb{R}\subseteq |f^*D/Z|_\mathbb{R}$. For the converse inclusion, let $ G \in | f^*D/Z |_\mathbb{R} $. 
We may write $ f^* f_* G = G + E $, where $ E $ is $ f $-exceptional. There exists an $ \mathbb{R} $-Cartier $\mathbb{R}$-divisor $ L $ on $ Z $ such that \[ G \sim_\mathbb{R} f^*D + (g \circ f)^*L = f^* ( D+g^*L ) , \] and thus \[ E \sim_\mathbb{R} f^* ( f_*G - D - g^*L ) . \] Therefore $ E = 0 $ by the Negativity Lemma \cite[Lemma 3.39(1)]{KM98}, and consequently $ f^* f_* G = G $. This proves that $f^*|D/Z|_\mathbb{R}\supseteq |f^*D/Z|_\mathbb{R}$ and completes the proof. \end{proof} \subsection{Generalised pairs} For the definitions and basic results on the singularities of pairs and the MMP we refer to \cite{KM98}. Below we discuss briefly generalised pairs, abbreviated as g-pairs; for futher information we refer to \cite[Section 4]{BZ16}, and in particular to \cite[\S 2.1 and \S 3.1]{HL22} for properties of dlt g-pairs. \begin{dfn} A \emph{generalised pair} or \emph{g-pair} $(X,B+M)$ consists of a normal variety $ X $ equipped with projective morphisms $$ X' \overset{f}{\longrightarrow} X \longrightarrow Z , $$ where $ f $ is birational and $ X' $ is normal, $B$ is an effective $ \mathbb{R} $-divisor on $X$, and $M'$ is an $\mathbb{R}$-Cartier divisor on $X'$ which is nef over $Z $ such that $ f_* M' = M $ and $ K_X + B + M $ is $ \mathbb{R} $-Cartier. We say often that the g-pair $(X,B+M)$ is given by the data $X'\to X\to Z$ and $M'$. Moreover, if $M'$ is an NQC divisor on $ X' $, then the g-pair $(X/Z,B+M)$ is an \emph{NQC g-pair}. Finally, we say that a g-pair $ (X,B+M) $ admits an NQC weak Zariski decomposition over $ Z $ if the divisor $ K_X + B + M $ has an NQC weak Zariski decomposition over $ Z $. \end{dfn} For simplicity, we denote such a g-pair only by $ (X/Z,B+M) $, but we implicitly remember the whole g-pair structure. Additionally, we note that the definition is flexible with respect to $X'$ and $M'$: if $ g \colon Y \to X' $ is a projective birational morphism from a normal variety $ Y $, then we may replace $X'$ with $Y$ and $M'$ with $ g^*M'$. 
Hence, we may always assume that $f \colon X'\to X$ in the above definition is a sufficiently high birational model of $ X $. \begin{dfn} Let $ (X,B+M) $ be a g-pair with data $ X' \overset{f}{\to} X \to Z $ and $ M' $. We can then write $$ K_{X'} + B' + M' \sim_\mathbb{R} f^* ( K_X + B + M ) $$ for some $ \mathbb{R} $-divisor $ B' $ on $ X' $. Let $ E $ be a divisorial valuation over $ X $ which is a prime divisor on $ X' $; its centre on $X$ is denoted by $c_X(E)$. The \emph{discrepancy of $ E $} with respect to $ (X,B+M) $ is $ a (E, X, B+M) := {-} \mult_E B' $. The g-pair $ (X,B+M) $ is: \begin{enumerate} \item[(a)] \emph{klt} if $a (E, X, B+M) > -1 $ for all divisorial valuations $E$ over $X$, \item[(b)] \emph{log canonical} if $a (E, X, B+M) \geq -1 $ for all divisorial valuations $E$ over $X$, \item[(c)] \emph{dlt} if it is log canonical, if there exists an open subset $U\subseteq X$ such that the pair $(U,B|_U)$ is log smooth, and if $a(E,X,B+M) = {-}1$ for some divisorial valuation $E$ over $X$, then the set $c_X(E)\cap U$ is non-empty and it is a log canonical centre of $(U,B|_U)$. \end{enumerate} If $ (X,B+M) $ is a log canonical g-pair, then: \begin{enumerate} \item[(i)] an irreducible subvariety $ S $ of $ X $ is a \emph{log canonical centre} of $ (X,B+M) $ if there exists a divisorial valuation $ E $ over $ X $ such that $ c_X(E) = S $ and $ a(E,X,B+M) = -1 $, \item[(ii)] the \emph{non-klt locus} of $ (X,B+M) $, denoted by $ \nklt(X,B+M) $, is the union of all log canonical centres of $ (X,B+M) $. \end{enumerate} When $M'=0$, one recovers the definitions of singularities of usual pairs. \end{dfn} It is clear from the definition that if $ (X,B+M) $ is a dlt g-pair with $ \lfloor B \rfloor = 0 $, then $ (X,B+M) $ is klt. 
If $ (X,B+M) $ is a $ \mathbb{Q} $-factorial dlt g-pair, then by definition and by \cite[Remark 4.2.3]{BZ16}, the underlying pair $ (X,B) $ is $ \mathbb{Q} $-factorial dlt and the log canonical centres of $ (X,B+M) $ coincide with those of $ (X,B) $. In particular \[ \nklt(X,B+M) = \nklt(X,B) = \Supp \lfloor B \rfloor \] by \cite[Proposition 3.9.2]{Fuj07a}. We will use this repeatedly in the paper without explicit mention. We adopted the definition dlt g-pairs from \cite{HL22}, which behaves well under restrictions to log canonical centres and under operations of an MMP; such operations are analogous to those in the standard setting, see \cite[Section 4]{BZ16} or \cite[\S3.1]{HL22} for details. \begin{notation} We will use the following notation throughout the paper. Let $(X,B+M)$ be a dlt g-pair and let $S$ be a log canonical centre of $(X,B+M)$. We define a dlt g-pair $(S,B_S+M_S)$ by adjunction, i.e.\ by the formula $$K_S + B_S +M_S = (K_X + B +M)|_S$$ as in \cite[Proposition 2.8]{HL22}. \end{notation} The next result is \cite[Proposition 3.9]{HL22} and is used frequently in the paper. \begin{lem} Let $(X,B+M)$ be a log canonical g-pair with data $ X' \overset{f}{\to} X \to Z $ and $ M' $. Then, after possibly replacing $f$ with a higher model, there exist a $\mathbb{Q}$-factorial dlt g-pair $(Y,\Delta+N)$ with data $ X' \overset{g}{\to} Y \to Z $ and $ M' $, and a projective birational morphism $\pi \colon Y \to X$ such that $$K_Y + \Delta + N \sim_\mathbb{R} \pi^*(K_X + B +M).$$ The g-pair $(Y,\Delta+N)$ is a \emph{dlt blowup} of $(X,B+M)$. \end{lem} \begin{rem}\label{rem:nklt_dlt_blow-up} If $ f \colon (Y,\Delta+N) \to (X,B+M) $ is a dlt blowup of a log canonical g-pair $ (X,B+M) $, then by a suitable analogue of \cite[Lemma 2.30]{KM98} we have $$ \nklt(X,B+M) = f\big(\nklt(Y,\Delta+N)\big) = f\big( \Supp \lfloor \Delta \rfloor \big). $$ In particular, the number of log canonical centres of a given log canonical g-pair is finite. 
\end{rem} \subsection{Monotonicity of discrepancies} Parts (i) and (iii) of the following result are a version of the so-called monotonicity lemma for g-pairs. Parts (ii) and (iv) will also be needed below, when we deal with the \emph{difficulty} of g-pairs. \begin{lem} \label{lem:discrep} Let $(X,B+M)$ and $(X',B'+M')$ be g-pairs such that there exists a diagram \begin{center} \begin{tikzcd} & Z \arrow[dl, "g" swap] \arrow[dr, "g'"] \\ X \arrow[rr, "\varphi", dashed] \arrow[dr, "f" swap] && X' \arrow[dl, "f'"] \\ & Y , \end{tikzcd} \end{center} where $Y$ and $Z$ are normal varieties, all morphisms are proper birational, $ K_{X'} + B' + M' $ is $ f' $-nef, and there exists a nef $ \mathbb{R} $-Cartier $ \mathbb{R} $-divisor $ M_Z $ on $ Z $ with $ g_* M_Z = M $ and $ g'_* M_Z = M' $. \begin{enumerate} \item[(i)] Assume that $B'=\varphi_*B+E$, where $E$ is the sum of all prime divisors which are contracted by $\varphi^{-1}$, and that $$a(F,X,B+M)\leq a(F,X',B'+M')$$ for every $\varphi$-exceptional divisor $F$ on $X$. Then for any geometric valuation $ F $ over $ X $ we have \[ a(F,X,B+M) \leq a(F,X',B'+M') . \] \item[(ii)] Under the assumptions of (i), assume additionally that $(X,B+M)$ is dlt and let $S$ be a log canonical centre of $(X,B+M)$. Assume that $\varphi$ is an isomorphism at the generic point of $S$ and define $S'$ as the strict transform of $S$ on $X'$. Then for any geometric valuation $ F $ over $ S $ we have \[ a(F,S,B_S+M_S) \leq a(F,S',B_{S'}+M_{S'}) . \] \item[(iii)] Assume that $ -(K_X + B + M) $ is $ f $-nef and that $ f_* B = f'_* B' $. Then for any geometric valuation $ F $ over $ Y $ we have \[ a(F,X,B+M) \leq a(F,X',B'+M') , \] and strict inequality holds if either \begin{enumerate} \item[(a)] $ -(K_X + B + M) $ is $ f $-ample and $ f $ is not an isomorphism above the generic point of $ c_Y(F) $, or \item[(b)] $ K_{X'} + B' + M' $ is $ f' $-ample and $ f' $ is not an isomorphism above the generic point of $ c_Y(F) $. 
\end{enumerate} In particular, if $(X,B+M)$ is log canonical and if either (a) or (b) holds, then $F$ is not a log canonical centre of $(X',B'+M')$. \item[(iv)] Assume that $ -(K_X + B + M) $ is $ f $-nef, that $ f_* B = f'_* B' $ and that $(X,B+M)$ is dlt. Let $S$ be a log canonical centre of $(X,B+M)$, assume that $\varphi$ is an isomorphism at the generic point of $S$ and define $S'$ as the strict transform of $S$ on $X'$. Let $T$ be the normalisation of $f(S)$, so that we have the following diagram: \begin{center} \begin{tikzcd}[column sep = 2em, row sep = 2.25em] (S,B_S+M_S) \arrow[dr, "f|_S" swap] \arrow[rr, dashed, "\varphi|_S"] && (S',B_{S'}+M_{S'}) \arrow[dl, "f'|_{S'}"] \\ & T. & \end{tikzcd} \end{center} Then for any geometric valuation $ F $ over $ T $ we have \[ a(F,S,B_S+M_S) \leq a(F,S',B_{S'}+M_{S'}) , \] and strict inequality holds if either \begin{enumerate} \item[(a)] $ -(K_X + B + M) $ is $ f $-ample and $ f|_S $ is not an isomorphism above the generic point of $ c_T(F) $, or \item[(b)] $ K_{X'} + B' + M' $ is $ f' $-ample and $ f'|_S $ is not an isomorphism above the generic point of $ c_T(F) $. \end{enumerate} In particular, if either (a) or (b) holds, then $F$ is not a log canonical centre of $(S',B_{S'}+M_{S'})$. \end{enumerate} \end{lem} \begin{proof} The proofs of (i) and (iii) are analogous to the proofs of \cite[Lemma 3.38 and Proposition 3.51]{KM98} and we provide the details for the benefit of the reader. By possibly replacing $ Z $ by a higher birational model, we may additionally assume that $ c_Z(F) $ is a divisor on $ Z $. Set $ h := f \circ g = f' \circ g' $. Then \begin{align*} K_Z + M_Z &\sim_\mathbb{R} g^* (K_X + B + M) + \sum a(F_i,X,B+M) F_i \\ &\sim_\mathbb{R} (g')^* (K_{X'} + B' + M') + \sum a(F_i,X',B'+M') F_i . 
\end{align*} Consider the $ \mathbb{R} $-Cartier $ \mathbb{R} $-divisor \begin{align} H&:=\sum \big( a(F_i,X,B+M) - a(F_i,X',B'+M') \big) F_i \label{eq:78}\\ & \sim_\mathbb{R} (g')^* (K_{X'} + B' + M') - g^* (K_X + B + M).\notag \end{align} Under the assumptions of (i) the divisor $H$ is $g$-nef and $g_*H\leq0$, hence $H\leq0$ by \cite[Lemma 3.39(1)]{KM98}. Under the assumptions of (iii) the divisor $H$ is $ h $-nef and $ h_*H=0 $ since $ f_* B = f'_* B' $, hence $H\leq0$ by \cite[Lemma 3.39(1)]{KM98}. This yields (i) and the first statement of (iii). If the case (a) or the case (b) of (iii) holds, then $ H $ is not numerically $ h $-trivial over the generic point $\eta$ of $ c_Y(F) $. Then \cite[Lemma 3.39(2)]{KM98} implies that $ h^{-1}(\eta) \subseteq \Supp H $ and therefore that $ F \subseteq \Supp H $. This yields the second statement of (iii). For (ii) and (iv), by \cite[Lemma 2.45]{KM98} there is a sequence of blowups of $S$ along the centres of $F$ such that the centre of $F$ becomes a divisor. By considering these blowups as blowups of $X$ and possibly blowing up further, we may assume that the centre $c_{S_Z}(F)$ is a divisor, where $S_Z$ is the strict transform of $S$ on $Z$, and that there exist finitely many prime divisors $\widehat{F}_i$ on $Z$ such that $\widehat{F}_i|_{S_Z}=c_{S_Z}(F)$ and $c_T(F)=c_Y\big(\widehat{F}_i\big)|_T$ for each such $\widehat{F}_i$. Then (ii) follows by restricting the relation \eqref{eq:78} to $S_Z$: indeed, $S_Z\nsubseteq \Supp H$ since $\varphi$ is an isomorphism at the generic point of $S$. In the case (a) of (iv) we have then that $f$ is not an isomorphism above the generic point of each $c_Y\big(\widehat{F}_i\big)$, so (iv) follows from (iii) as in the proof of (ii) above. We obtain (iv) in the case (b) analogously, by first blowing up along the centres of $F$ on $S'$ instead. 
\end{proof} \subsection{Minimal models and canonical models} \begin{dfn} Let $(X,B +M)$ be a log canonical g-pair with data $ X' \overset{f}{\to} X \to Z $ and $ M' $ and consider a birational map $ \varphi \colon (X,B+M) \dashrightarrow (Y,B_Y+M_Y)$ over $Z$ to a $\mathbb{Q}$-factorial g-pair $ (Y/Z,B_Y+M_Y) $. We may assume that $X'$ is a high enough model so that the map $\varphi\circ f$ is a morphism. Then $\varphi$ is a \emph{minimal model in the sense of Birkar-Shokurov over $Z$} of the g-pair $(X,B+M)$ if $ B_Y =\varphi_*B+E$, where $E$ is the sum of all prime divisors which are contracted by $\varphi^{-1}$, if $M_Y=(\varphi\circ f)_*M'$, if the divisor $K_Y+B_Y+M_Y$ is nef over $Z$ and if $$a(F,X,B+M) < a(F,Y,B_Y+M_Y)$$ for any prime divisor $ F $ on $ X $ which is contracted by $\varphi $. Note that then the g-pair $ (Y,B_Y+M_Y)$ is log canonical by Lemma \ref{lem:discrep}(i). If, moreover, the map $\varphi$ is a birational contraction, but $Y$ is not necessarily $\mathbb{Q}$-factorial if $X$ is not $\mathbb{Q}$-factorial (and $Y$ is $\mathbb{Q}$-factorial if $X$ is $\mathbb{Q}$-factorial), then $\varphi$ is a \emph{minimal model} of $(X/Z,B+M)$. \end{dfn} For the differences among these notions of a minimal model, see \cite[\S 2.2]{LT22}; note that here we allow a minimal model in the sense of Birkar-Shokurov to be log canonical and not only dlt, which is in alignment with the definitions in \cite{Hash18a,Hash19b}. \begin{rem}\label{rem:models} Let $(X,B +M)$ be a log canonical g-pair and let $ \varphi \colon (X,B+M) \dashrightarrow (Y,B_Y+M_Y)$ be a minimal model of $(X,B +M)$ over $Z$. Then any dlt blowup of $(Y,B_Y+M_Y)$ is a minimal model in the sense of Birkar-Shokurov of $(X,B +M)$ over $Z$. 
\end{rem} \begin{dfn} Assume that we have a commutative diagram \begin{center} \begin{tikzcd} (X,B+M) \arrow[rr, "\varphi", dashed] \arrow[dr, "f" swap] && (X',B'+M') \arrow[dl, "f'"] \\ & Y \end{tikzcd} \end{center} where $(X,B+M)$ and $(X',B'+M')$ are g-pairs and $Y$ is normal, $f$ and $f'$ are projective birational morphisms and $\varphi$ is a birational contraction, $ \varphi_* B = B' $, and $ M $ and $ M' $ are pushforwards of the same nef $\mathbb{R}$-divisor on a common birational model of $X$ and $X'$. If $(X,B+M)$ is log canonical, if $ K_{X'} + B' + M' $ is ample over $Y$ and if $a(F,X,B+M)\leq a(F,X',B'+M')$ for every $\varphi$-exceptional prime divisor $F$ on $X$, then $(X',B'+M')$ is a \emph{log canonical model of $(X,B+M)$}. \end{dfn} The following result shows how a minimal model and a log canonical model of a g-pair are related. \begin{lem} \label{lem:maptoCM} Let $ (X/Z,B+M) $ be a log canonical g-pair, let $ (X^m,B^m+M^m) $ be a minimal model of $ (X,B+M) $ over $ Z $ and let $ (X^\textit{lc},B^\textit{lc}+M^\textit{lc}) $ be a log canonical model of $ (X,B+M) $ over $ Z $. Then there exists a birational morphism $ \alpha \colon X^m \to X^\textit{lc} $ such that \[ K_{X^m} + B^m+M^m \sim_\mathbb{R} \alpha^* (K_{X^\textit{lc}} + B^\textit{lc}+M^\textit{lc}) . \] In particular, $ K_{X^m} + B^m+M^m $ is semiample over $ Z $ and there exists a unique log canonical model of $(X,B+M)$, up to isomorphism. \end{lem} \begin{proof} The proof is analogous to the proof of \cite[Lemma 4.8.4]{Fuj17} and we provide the details for the benefit of the reader. Let $W$ be a common resolution of $X$, $X^m$ and $X^\textit{lc}$, together with morphisms $p\colon W\to X$, $q\colon W\to X^m$ and $r\colon W\to X^\textit{lc}$. 
We may write \[ p^*(K_X + B + M) \sim_\mathbb{R} q^*(K_{X^m} + B^m+M^m) + F \] and \[ p^*(K_X + B + M) \sim_\mathbb{R} r^*(K_{X^\textit{lc}} + B^\textit{lc}+M^\textit{lc}) + G , \] where $ F$ is effective and $ q $-exceptional and $ G$ is effective and $ r $-exceptional, see Lemma \ref{lem:discrep}. Therefore, \[ q^*(K_{X^m} + B^m+M^m) + F \sim_\mathbb{R} r^*(K_{X^\textit{lc}} + B^\textit{lc}+M^\textit{lc}) + G . \] Note that $ q_*(G-F)\geq0 $ and $ -(G-F) $ is $ q $-nef, and that $ r_*(F-G)\geq0 $ and $ -(F-G) $ is $ r $-nef. This implies that $ F=G $ by the Negativity lemma \cite[Lemma 3.39]{KM98}, and therefore, \begin{equation}\label{eq:rig} q^*(K_{X^m} + B^m+M^m) = r^*(K_{X^\textit{lc}} + B^\textit{lc}+M^\textit{lc}). \end{equation} Let $ C $ be a curve on $ W $ which is contracted by $ q $. Then \begin{align*} 0 &= q^*(K_{X^m} + B^m+M^m) \cdot C = r^*(K_{X^\textit{lc}} + B^\textit{lc}+M^\textit{lc}) \cdot C \\ &= (K_{X^\textit{lc}} + B^\textit{lc}+M^\textit{lc}) \cdot r_* C , \end{align*} hence $ C $ is contracted by $ r $ as $ K_{X^\textit{lc}} + B^\textit{lc}+M^\textit{lc} $ is ample over $ Z $. Thus, by the Rigidity lemma \cite[Lemma 1.15]{Deb01} there exists a birational morphism $ \alpha \colon X^m \to X^\textit{lc} $ such that $ r=\alpha \circ q $, and the first statement follows from \eqref{eq:rig}. Assume that there exists another log canonical model $(Y,B_Y+M_Y)$ of $(X,B+M)$. Then analogously as above, there exists a birational morphism $\beta\colon X^\textit{lc}\to Y$ such that \[ K_{X^\textit{lc}} + B^\textit{lc}+M^\textit{lc} \sim_\mathbb{R} \beta^* (K_{Y} + B_Y+M_Y) . \] Since the divisor $K_{X^\textit{lc}} + B^\textit{lc}+M^\textit{lc}$ is ample over $Z$, the map $\beta$ must be an isomorphism. 
\end{proof} \begin{dfn} Assume that we have a commutative diagram \begin{center} \begin{tikzcd} (X,B+M) \arrow[rr, "\varphi", dashed] \arrow[dr, "f" swap] && (X',B'+M') \arrow[dl, "f'"] \\ & Y \end{tikzcd} \end{center} where $(X,B+M)$ and $(X',B'+M')$ are g-pairs and $Y$ is normal, $f$ and $f'$ are projective birational morphisms and $\varphi$ is an isomorphism in codimension one, $ \varphi_* B = B' $, and $ M $ and $ M' $ are pushforwards of the same nef $\mathbb{R}$-divisor on a common birational model of $X$ and $X'$. \begin{enumerate} \item[(a)] If $ -(K_X + B + M) $ is ample over $Y$ and if $ K_{X'} + B' + M' $ is ample over $Y$, then the diagram is an \emph{ample small quasi-flip}. \item[(b)] An ample small quasi-flip is a \emph{flip} if $f$ and $f'$ are isomorphisms in codimension one and if $ \rho(X/Y) = \rho(X'/Y) =1 $. \end{enumerate} \end{dfn} We recall that flips for log canonical pairs exist by \cite[Corollary 1.2]{Bir12a} or \cite[Corollary 1.8]{HX13}. On the other hand, the existence of flips for g-pairs is not known yet in full generality. We refer, however, to \cite[Section 4]{BZ16} and \cite[\S 3.1]{HL22} for known special cases. The following result follows from \cite[Lemmas 4.3.8 and 4.9.3]{Fuj17}. \begin{lem} \label{lem:klt_lc} The termination of flips for $ \mathbb{Q} $-factorial klt pairs of dimension at most $ d $ implies the termination of flips for log canonical pairs of dimension $ d $. \end{lem} \subsection{The difficulty} In this subsection we follow closely \cite{HL22} and we include the details for the benefit of the reader. The \emph{difficulty} stands as a collective noun for various invariants related to the discrepancies of a pair or a g-pair, which behave well under the operations of the Minimal Model Program. The first version of the difficulty was introduced in \cite{Sho85}. The version below was defined in \cite{HL22}. 
\begin{dfn}\label{dfn:difficulty} Let $(X,B + M)$ be an NQC $\mathbb{Q}$-factorial dlt g-pair with data $ X' \to X \to Z $ and $ M' $. We may write $B=\sum_{i=1}^k b_i B_i$ with prime divisors $B_i$ and $b_i\in (0,1]$, and $ M' =\sum_{i=1}^\ell \mu_i M_i'$ with $ M_i'$ Cartier divisors which are nef over $ Z $ and $ \mu_i \in (0, +\infty) $. Let $S$ be a log canonical centre of $(X,B +M)$. Set $b = \{b_1,\dots,b_k\}$, $\mu = \{\mu_1,\dots,\mu_\ell\}$, and $$\mathcal S(b,\mu) = \bigg\{\frac{m-1}{m} + \sum_{i=1}^k\frac{r_ib_i}{m} + \sum_{i=1}^\ell\frac{s_i\mu_i}{m}\leq 1 \ \Big|\ m \in \mathbb{N}_{>0}, r_i , s_i \in\mathbb{N}\bigg\}.$$ Note that the coefficients of $B_S$ belong to the set $\mathcal S(b,\mu)$ by the proof of \cite[Proposition 4.9]{BZ16}. For each $\alpha\in\mathcal S(b,\mu)$ set $$ d_{<-\alpha}(S,B_S +M_S) = \#\big\{E \mid a(E,S,B_S +M_S) < {-} \alpha, \ c_S(E) \nsubseteq \Supp \lfloor B_S \rfloor \big\}$$ and $$ d_{\leq -\alpha}(S,B_S +M_S) = \#\big\{E \mid a(E,S,B_S +M_S) \leq {-} \alpha, \ c_S(E) \nsubseteq \Supp \lfloor B_S \rfloor \big\}.$$ The \emph{difficulty} of the g-pair $(S,B_S + M_S)$ is defined as $$ d_{b,\mu}(S,B_S +M_S) = \sum_{\alpha\in\mathcal S(b,\mu)} \Big(d_{<-\alpha}(S,B_S +M_S)+d_{\leq-\alpha}(S,B_S +M_S)\Big).$$ \end{dfn} \begin{lem}\label{lem:finite} In the notation from Definition \ref{dfn:difficulty}: \begin{enumerate} \item[(i)] there exists $\gamma\in(0,1)$ such that $a(E,S,B_S +M_S)\geq-\gamma$ for each geometric valuation $E$ over $S$ such that $c_S(E) \nsubseteq \Supp \lfloor B_S \rfloor$; \item[(ii)] the set $\mathcal S(b,\mu)\cap[0,\gamma]$ is finite; \item[(iii)] we have $$d_{b,\mu}(S,B_S +M_S) <+\infty.$$ \end{enumerate} \end{lem} \begin{proof} Let $S'\stackrel{\sigma}{\to} S\to Z$ and $M'_S$ be the data of the g-pair $(S,B_S+M_S)$. Consider the set $U:=S\setminus\Supp\lfloor B_S\rfloor$ and let $U':=\sigma^{-1}(U)$. Then we obtain the klt g-pair $(U,B_S|_U+M_S|_U)$ with data $U'\to U\to U$ and $M'_S|_{U'}$. 
Define $B'_S$ by the equation $$K_{S'}+B'_S+M'_S\sim_\mathbb{R} \sigma^*(K_S+B_S+M_S).$$ Then for each geometric valuation $E$ over $S$ such that $c_S(E) \nsubseteq \Supp \lfloor B_S \rfloor$ we have \begin{align*} a(E,S,B_S +M_S)&=a(E,U,B_S|_U +M_S|_U)\\ &=a(E,U',B'_S|_{U'} +M'_S|_{U'})=a(E,U',B'_S|_{U'}), \end{align*} hence \begin{equation}\label{eq:55} d_{<-\alpha}(S,B_S +M_S) = \#\big\{E \mid a(E,U',B'_S|_{U'}) < {-} \alpha\big\} \end{equation} and \begin{equation}\label{eq:56} d_{\leq -\alpha}(S,B_S +M_S) = \#\big\{E \mid a(E,U',B'_S|_{U'}) \leq {-} \alpha\big\}. \end{equation} Since the pair $(U',B'_S|_{U'})$ is klt, by \eqref{eq:55} and \eqref{eq:56} there exists $\gamma\in(0,1)$ such that (i) holds, and in particular: $$d_{<-\alpha}(S,B_S +M_S) =d_{\leq-\alpha}(S,B_S +M_S) =0\quad \text{if }\alpha>\gamma.$$ On the other hand, $d_{<-\alpha}(S,B_S +M_S) $ and $d_{\leq-\alpha}(S,B_S +M_S) $ are finite for any $\alpha\in\mathcal S(b,\mu)$ by \eqref{eq:55} and \eqref{eq:56} and by \cite[Proposition 2.36(2)]{KM98}. Since the set $\mathcal S(b,\mu)\cap[0,\gamma]$ is finite by \cite[Lemma 7.4.4]{Kol92}, (ii) and (iii) follow. \end{proof} \begin{pro}\label{pro:monoton} Assume the notation from Definition \ref{dfn:difficulty}. Consider a flip \begin{center} \begin{tikzcd}[column sep = 0.8em, row sep = 1.75em] (X,B+M) \arrow[dr, "\theta" swap] \arrow[rr, dashed, "\pi"] && (X^+,B^++M^+) \arrow[dl, "\theta^+"] \\ & Z & \end{tikzcd} \end{center} Assume that $\pi$ is an isomorphism at the generic point of $S$ and define $S^+$ as the strict transform of $S$ on $X^+$. Moreover, assume that $\pi|_S$ is an isomorphism along $\Supp \lfloor B_S\rfloor$. Then the following hold. \begin{enumerate} \item[(i)] We have $$ d_{b,\mu}(S,B_S +M_S) \geq d_{b,\mu}(S^+,B_{S^+} +M_{S^+}). 
$$ \item[(ii)] If there exists a geometric valuation $E$ over $S$ such that $c_S(E)$ is a divisor but $c_{S^+}(E)$ is not a divisor, then there exists $\alpha_0\in\mathcal S(b,\mu)\setminus\{1\}$ such that $$d_{\leq-\alpha_0}(S,B_S +M_S) > d_{\leq-\alpha_0}(S^+,B_{S^+} +M_{S^+}).$$ \item[(iii)] If there exists a geometric valuation $E$ over $S$ such that $c_S(E)$ is not a divisor but $c_{S^+}(E)$ is a divisor, then there exists $\alpha_0\in\mathcal S(b,\mu)\setminus\{1\}$ such that $$d_{<-\alpha_0}(S,B_S +M_S) > d_{<-\alpha_0}(S^+,B_{S^+} +M_{S^+}).$$ \item[(iv)] If $\pi|_S$ is not an isomorphism in codimension $1$, then $$ d_{b,\mu}(S,B_S +M_S) > d_{b,\mu}(S^+,B_{S^+} +M_{S^+}). $$ \end{enumerate} \end{pro} \begin{proof} Part (i) follows immediately from Lemma \ref{lem:discrep}. For (ii), note that $c_S(E)\not\subseteq\Supp\lfloor B_S\rfloor$ and $c_{S^+}(E)\not\subseteq\Supp\lfloor B_{S^+}\rfloor$ since $\pi|_S$ is an isomorphism along $\Supp \lfloor B_S\rfloor$. Then there exists $\alpha_0\in\mathcal S(b,\mu)\setminus\{1\}$ such that, by Lemma \ref{lem:discrep}, $$-\alpha_0=a(E,S,B_S +M_S)< a(E,S^+,B_{S^+} +M_{S^+}),$$ and (ii) follows. For (iii), as above we again have $c_S(E)\not\subseteq\Supp\lfloor B_S\rfloor$ and $c_{S^+}(E)\not\subseteq\Supp\lfloor B_{S^+}\rfloor$. Then there exists $\alpha_0\in\mathcal S(b,\mu)\setminus\{1\}$ such that, by Lemma \ref{lem:discrep}, $$a(E,S,B_S +M_S)< a(E,S^+,B_{S^+} +M_{S^+})=-\alpha_0,$$ and (iii) follows. Part (iv) is an immediate consequence of (ii) and (iii). \end{proof} \section{Lifting a sequence of quasi-flips} The following result allows us to pass from a sequence of (quasi-)flips of log canonical pairs to a sequence of flips of dlt pairs. \begin{lem}\label{lem:lifting} Let $ (X_1,B_1) $ be a quasi-projective log canonical pair over a quasi-projective variety $Z$. 
Consider a sequence of small ample quasi-flips over $Z$: \begin{center} \begin{tikzcd}[column sep = 2em, row sep = 2.25em] (X_1,B_1) \arrow[dr, "\theta_1" swap] \arrow[rr, dashed, "\pi_1"] && (X_2,B_2) \arrow[dl, "\theta_1^+"] \arrow[dr, "\theta_2" swap] \arrow[rr, dashed, "\pi_2"] && (X_3,B_3) \arrow[dl, "\theta_2^+"] \arrow[rr, dashed, "\pi_3"] && \dots \\ & Z_1 && Z_2 \end{tikzcd} \end{center} Then there exists a diagram \begin{center} \begin{tikzcd}[column sep = 2em, row sep = large] (Y_1,\Delta_1) \arrow[d, "f_1" swap] \arrow[rr, dashed, "\rho_1"] && (Y_2,\Delta_2) \arrow[d, "f_2" swap] \arrow[rr, dashed, "\rho_2"] && (Y_3,\Delta_3) \arrow[d, "f_3" swap] \arrow[rr, dashed, "\rho_3"] && \dots \\ (X_1,B_1) \arrow[dr, "\theta_1" swap] \arrow[rr, dashed, "\pi_1"] && (X_2,B_2) \arrow[dl, "\theta_1^+"] \arrow[dr, "\theta_2" swap] \arrow[rr, dashed, "\pi_2"] && (X_3,B_3) \arrow[dl, "\theta_2^+"] \arrow[rr, dashed, "\pi_3"] && \dots \\ & Z_1 && Z_2 \end{tikzcd} \end{center} where, for each $i \geq 1$, the map $ \rho_i \colon Y_i \dashrightarrow Y_{i+1} $ is a $(K_{Y_i}+\Delta_i)$-MMP over $ Z_i $ and the map $f_i $ is a dlt blowup of the pair $ (X_i,B_i) $. In particular, the sequence on top of the above diagram is an MMP for a $\mathbb{Q}$-factorial dlt pair $ (Y_1,\Delta_1) $. \end{lem} \begin{proof} Let $ f_1 \colon (Y_1,\Delta_1) \to (X_1,B_1) $ be a dlt blowup of $ (X_1,B_1) $. By Remark \ref{rem:models} the pair $ (X_1,B_1) $ has a minimal model in the sense of Birkar-Shokurov over $ Z_1 $, hence $ (Y_1,\Delta_1) $ has a minimal model in the sense of Birkar-Shokurov over $ Z_1 $ by \cite[Lemma 2.15]{Hash19b}. Therefore, by \cite[Theorem 1.9(ii),(iii)]{Bir12a} there exists a $ (K_{Y_1} + \Delta_1) $-MMP with scaling of an ample divisor over $Z_1$ which terminates with a minimal model $ (Y_2,\Delta_2) $ of $ (Y_1,\Delta_1) $ over $Z_1$. 
Since $(X_2,B_2)$ is a log canonical model of $(Y_1,\Delta_1)$ over $ Z_1 $, by Lemma \ref{lem:maptoCM} there exists a morphism $f_2\colon Y_2\to X_2 $ such that $ K_{Y_2} + \Delta_2 \sim_\mathbb{R} f_2^*(K_{X_2} + B_2) $. In particular, the pair $ (Y_2,\Delta_2) $ is a dlt blowup of $ (X_2,B_2) $. By continuing this process analogously, we obtain the required diagram. \end{proof} The analogue of Lemma \ref{lem:lifting} in the context of g-pairs is the following: \begin{lem}\label{lem:lifting_g} Assume the existence of minimal models for smooth varieties of dimension at most $ n-1 $. Let $ (X_1,B_1+M_1) $ be a quasi-projective NQC log canonical g-pair of dimension $ n $ which is projective over a quasi-projective variety $Z$. Consider a sequence of small ample quasi-flips over $Z$: \begin{center} \begin{tikzcd}[column sep = 0.8em, row sep = 1.75em] (X_1,B_1+M_1) \arrow[dr, "\theta_1" swap] \arrow[rr, dashed, "\pi_1"] && (X_2,B_2+M_2) \arrow[dl, "\theta_1^+"] \arrow[dr, "\theta_2" swap] \arrow[rr, dashed, "\pi_2"] && (X_3,B_3+M_3) \arrow[dl, "\theta_2^+"] \arrow[rr, dashed, "\pi_3"] && \dots \\ & Z_1 && Z_2 \end{tikzcd} \end{center} Then there exists a diagram \begin{center} \begin{tikzcd}[column sep = 0.8em, row sep = large] (Y_1,\Delta_1+N_1) \arrow[d, "f_1" swap] \arrow[rr, dashed, "\rho_1"] && (Y_2,\Delta_2+N_2) \arrow[d, "f_2" swap] \arrow[rr, dashed, "\rho_2"] && (Y_3,\Delta_3+N_3) \arrow[d, "f_3" swap] \arrow[rr, dashed, "\rho_3"] && \dots \\ (X_1,B_1+M_1) \arrow[dr, "\theta_1" swap] \arrow[rr, dashed, "\pi_1"] && (X_2,B_2+M_2) \arrow[dl, "\theta_1^+"] \arrow[dr, "\theta_2" swap] \arrow[rr, dashed, "\pi_2"] && (X_3,B_3+M_3) \arrow[dl, "\theta_2^+"] \arrow[rr, dashed, "\pi_3"] && \dots \\ & Z_1 && Z_2 \end{tikzcd} \end{center} where, for each $i \geq 1$, the map $ \rho_i \colon Y_i \dashrightarrow Y_{i+1} $ is a $(K_{Y_i}+\Delta_i+N_i)$-MMP over $ Z_i $ and the map $f_i $ is a dlt blowup of the g-pair $ (X_i,B_i+M_i) $. 
In particular, the sequence on top of the above diagram is an MMP for an NQC $\mathbb{Q}$-factorial dlt g-pair $ (Y_1,\Delta_1+N_1) $. \end{lem} \begin{proof} Let $ f_1 \colon (Y_1,\Delta_1+N_1) \to (X_1,B_1+M_1) $ be a dlt blowup of the g-pair $ (X_1,B_1+M_1) $. By the definition of an ample small quasi-flip and by Lemma \ref{lem:discrep}, the g-pair $ (X_2,B_2+M_2) $ is a minimal model of $ (X_1,B_1+M_1) $ over $ Z_1 $, hence the g-pair $ (X_1,B_1+M_1) $ has a minimal model in the sense of Birkar-Shokurov over $ Z_1 $ by Remark \ref{rem:models}. Hence, $ (X_1,B_1+M_1) $ has an NQC weak Zariski decomposition over $ Z_1 $ by \cite[Proposition 5.1]{HL22}, and it follows by \cite[Remark 2.11]{LT22} that $ (Y_1,\Delta_1+N_1) $ has an NQC weak Zariski decomposition over $ Z_1 $. Therefore, by \cite[Theorem 4.4(ii)]{LT22} there exists a $ (K_{Y_1} + \Delta_1+N_1) $-MMP with scaling of an ample divisor over $Z_1$ which terminates with a minimal model $ (Y_2,\Delta_2+N_2) $ of $ (Y_1,\Delta_1+N_1) $ over $Z_1$. Since $(X_2,B_2+M_2)$ is a log canonical model of $(Y_1,\Delta_1+N_1)$ over $ Z_1 $, by Lemma \ref{lem:maptoCM} there exists a morphism $f_2\colon Y_2\to X_2 $ such that $ K_{Y_2} + \Delta_2 + N_2 \sim_\mathbb{R} f_2^*(K_{X_2} + B_2 + M_2) $. In particular, the g-pair $ (Y_2,\Delta_2 + N_2) $ is a dlt blowup of the g-pair $ (X_2,B_2+M_2) $. By continuing this process analogously, we obtain the required diagram. \end{proof} \section{Special termination for pairs} \label{sect:SpecialTerm} \begin{lem}\label{lem:technical} Let $f\colon Y\to X$ be a projective birational morphism between normal varieties. Assume that we have a diagram \begin{center} \begin{tikzcd}[column sep = 1em, row sep = normal] Y \arrow[d, "f" swap] \arrow[rr, dashed, "\mu"] && W \arrow[d] \\ X \arrow[rr, "\theta"] && Z \end{tikzcd} \end{center} where $ \theta $ is birational and $\mu$ is an isomorphism in codimension one. 
Let $D_X$ be an $\mathbb{R}$-Cartier $\mathbb{R}$-divisor on $X$, set $D_Y:=f^*D_X$ and $D_W:=\mu_*D_Y$, and assume that $D_W$ is $\mathbb{R}$-Cartier. Let $V_X\subseteq X$ and $V_Y\subseteq Y$ be closed subsets such that $f(V_Y)=V_X$. Assume that: \begin{enumerate} \item[(i)] the map $\mu$ is $D_Y$-nonpositive, \item[(ii)] $D_W$ is semiample over $Z$, \item[(iii)] $V_Y$ is contained in the locus in $Y$ where the map $\mu$ is an isomorphism, \item[(iv)] the set $\Exc(\theta)$ is covered by curves $\gamma$ which are contracted by $ \theta $ and such that $D_X\cdot\gamma<0$. \end{enumerate} Then $\Exc(\theta)\cap V_X=\emptyset$. \end{lem} \begin{proof} Arguing by contradiction, assume that there exists $ x \in \Exc(\theta) \cap V_X $ and set $ F := f^{-1}(x) $. We first claim that \begin{equation}\label{eq:6} F \subseteq \sB(D_Y/Z). \end{equation} To this end, by (iv) we may find a curve $ \gamma \subseteq \Exc(\theta) $ passing through $ x $, contracted by $ \theta $ and such that $ D_X \cdot \gamma < 0 $. But then for each $H \in | D_X /Z |_\mathbb{R}$ we have $ H \cdot \gamma < 0 $, and thus $ x \in \gamma \subseteq \Supp H $. This implies that $ x \in \sB(D_X/Z) $, and by Lemma \ref{lem:baselocus} we infer that \[ F \subseteq f^{-1} \big( \sB(D_X/Z) \big) = \sB(D_Y/Z) , \] as desired. Now, since $ F \cap V_Y \neq \emptyset $, from \eqref{eq:6} we obtain \begin{equation}\label{eq:5} V_Y \cap \sB(D_Y/Z) \neq \emptyset . \end{equation} Define $V_W:=\mu(V_Y)$ and note that $ V_W $ is well-defined by (iii). We claim that $$ V_W \cap \sB(D_W/Z)\neq\emptyset ,$$ which would then contradict (ii) and finish the proof. To this end, by (iii) there exists a resolution of indeterminacies $ (p,q) \colon T \to Y\times W $ of the map $ \mu $ such that $p$ and $q$ are isomorphisms over some neighbourhoods of $V_Y$ and $V_W$, respectively. 
\begin{center} \begin{tikzcd} & T \arrow[dl, "p" swap] \arrow[dr, "q"] \\ Y \arrow[rr, dashed, "\mu"] && W \end{tikzcd} \end{center} Then by (i) there exists an effective $q$-exceptional $\mathbb{R}$-divisor $E_T$ on $T$ such that \[ p^* D_Y \sim_\mathbb{R} q^* D_W + E_T . \] Fix $ G_W \in |D_W /Z |_\mathbb{R} $ and set $$G_Y:=p_*(q^* G_W + E_T)\in |D_Y/Z|_\mathbb{R} . $$ We then have $$ p^* G_Y = q^* G_W + E_T $$ by the Negativity Lemma \cite[Lemma 3.39]{KM98}. Since $ V_Y \cap \Supp G_Y \neq\emptyset $ by \eqref{eq:5}, we obtain $$ \emptyset\neq p^{-1}(V_Y) \cap \Supp (p^* G_Y)=p^{-1}(V_Y)\cap \Supp(q^* G_W + E_T) ,$$ hence $ p^{-1}(V_Y) \cap q^{-1}(\Supp G_W)\neq\emptyset $, as $ p^{-1}(V_Y) $ does not intersect $ \Supp E_T $ by construction. Thus, as $V_W=q\big(p^{-1}(V_Y)\big)$, we have $$ V_W\cap \Supp G_W\neq\emptyset ,$$ and the claim follows. \end{proof} \begin{proof}[Proof of Theorem \ref{thm:main}] By Lemma \ref{lem:klt_lc} we may assume the termination of flips for $ \mathbb{Q} $-factorial dlt pairs of dimension at most $ n-1 $. By Lemma \ref{lem:lifting} there exists a diagram \begin{center} \begin{tikzcd}[column sep = 2em, row sep = large] (Y_1,\Delta_1) \arrow[d, "f_1" swap] \arrow[rr, dashed, "\rho_1"] && (Y_2,\Delta_2) \arrow[d, "f_2" swap] \arrow[rr, dashed, "\rho_2"] && (Y_3,\Delta_3) \arrow[d, "f_3" swap] \arrow[rr, dashed, "\rho_3"] && \dots \\ (X_1,B_1) \arrow[dr, "\theta_1" swap] \arrow[rr, dashed, "\pi_1"] && (X_2,B_2) \arrow[dl, "\theta_1^+"] \arrow[dr, "\theta_2" swap] \arrow[rr, dashed, "\pi_2"] && (X_3,B_3) \arrow[dl, "\theta_2^+"] \arrow[rr, dashed, "\pi_3"] && \dots \\ & Z_1 && Z_2 \end{tikzcd} \end{center} where the sequence of rational maps $\rho_i$ is a composition of steps of a $ (K_{Y_1} + \Delta_1) $-MMP. By relabelling, we may assume that this MMP is a sequence of flips, and by \cite[Theorem 4.2.1]{Fuj07a} we may assume that the flipping locus avoids the non-klt locus at each step in this MMP. 
We conclude by applying Lemma \ref{lem:technical} for $X=X_1$, $Y=Y_1$, $D_X=K_{X_1}+B_1$, $D_Y=K_{Y_1}+\Delta_1$, $V_X=\nklt(X_1,B_1)$ and $V_Y=\nklt(Y_1,\Delta_1)$, taking Remark \ref{rem:nklt_dlt_blow-up} into account. \end{proof} \section{Special termination for g-pairs} \begin{thm}\label{thm:specterm_g-pairs} Assume the termination of flips for NQC $ \mathbb{Q} $-factorial dlt g-pairs of dimension at most $ n-1 $. Let $ (X_1,B_1+M_1) $ be a quasi-projective NQC $ \mathbb{Q} $-factorial dlt g-pair of dimension $ n $, which is projective over a normal quasi-projective variety $Z$. Consider a sequence of flips over $Z$: \begin{center} \begin{tikzcd}[column sep = 0.8em, row sep = 1.75em] (X_1,B_1+M_1) \arrow[dr, "\theta_1" swap] \arrow[rr, dashed, "\pi_1"] && (X_2,B_2+M_2) \arrow[dl, "\theta_1^+"] \arrow[dr, "\theta_2" swap] \arrow[rr, dashed, "\pi_2"] && (X_3,B_3+M_3) \arrow[dl, "\theta_2^+"] \arrow[rr, dashed, "\pi_3"] && \dots \\ & Z_1 && Z_2 \end{tikzcd} \end{center} Then there exists a positive integer $N$ such that \[ \Exc(\theta_i)\cap\nklt(X_i,B_i+M_i)=\emptyset \text{ for all } i\geq N . \] \end{thm} \begin{proof} We follow closely the proofs of \cite[Theorem 4.5]{HL22} and \cite[Theorem 4.2.1]{Fuj07a}. We prove by induction on $d$ the following claim, and at the end of the proof we show how it implies the theorem. \emph{Claim.} For each nonnegative integer $d$ there exists a positive integer $N_d$ such that the restriction of $\theta_i$ to each log canonical centre of dimension at most $d$ is an isomorphism for all $i\geq N_d$. To prove the Claim, recall first that the number of log canonical centres of any log canonical g-pair is finite. At step $i$ of the MMP as above, if a log canonical centre of $(X_i,B_i+M_i)$ belongs to $\Exc(\theta_i)$, then the number of log canonical centres of $(X_{i+1},B_{i+1}+M_{i+1})$ is smaller than the number of log canonical centres of $(X_i,B_i+M_i)$ by Lemma \ref{lem:discrep}. 
Thus, there exists a positive integer $N_0$ such that the set $\Exc(\theta_i)$ does not contain any log canonical centre of $(X_i,B_i+M_i)$ for $i\geq N_0$. By relabelling, we may assume that $N_0=1$. In particular, this proves the Claim for $d=0$. Therefore, we may assume that for each $i\geq1$, the map $\pi_i$ is an isomorphism at the generic point of each log canonical centre of $(X_i,B_i+M_i)$. Let $d$ be a positive integer. By induction and by relabelling, we may assume that each map $ \pi_i $ is an isomorphism along every log canonical centre of dimension at most $ d -1 $. Now, we consider a log canonical centre $ S_1 $ of $ (X_1,B_1+M_1) $ of dimension $ d $. We obtain a sequence of birational maps $\pi_i|_{S_i}\colon S_i\dashrightarrow S_{i+1}$, where $ S_i $ is the strict transform of $ S_1 $ on $ X_i $. Every log canonical centre of the NQC dlt g-pair $ (S_i,B_{S_i} + M_{S_i}) $ is a log canonical centre of $(X_i,B_i + M_i)$, and hence by induction, each map $\pi_i$ is an isomorphism along $\Supp \lfloor B_{S_i}\rfloor$. Then by Proposition \ref{pro:monoton} and since the difficulty takes values in $\mathbb{N}$, after relabelling the indices we may assume that $ S_i $ and $ S_{i+1} $ are isomorphic in codimension $1$ for every $ i $. Moreover, by relabelling the indices we may assume that $(\pi_i|_{S_i})_*B_{S_i}=B_{S_{i+1}}$: indeed, this is equivalent to saying that we have \begin{equation}\label{eq:79} a(E,S_i,B_{S_i} +M_{S_i}) = a(E,S_{i+1},B_{S_{i+1}}+M_{S_{i+1}}) \end{equation} for each component of $B_{S_i}$ and $B_{S_{i+1}}$. Since $\pi_i$ is an isomorphism along $\Supp \lfloor B_{S_i}\rfloor$, the equation \eqref{eq:79} is clear if $E\subseteq \Supp \lfloor B_{S_i}\rfloor$. Note that in general we have $a(E,S_i,B_{S_i} +M_{S_i}) \leq a(E,S_{i+1},B_{S_{i+1}}+M_{S_{i+1}})$ by Lemma \ref{lem:discrep}. 
If $E\nsubseteq \Supp \lfloor B_{S_i}\rfloor$, then by Lemma \ref{lem:finite} there exists a finite subset $\Gamma\subseteq\mathcal S(b,\mu)$, which is independent of the index $i$, such that $a(E,S_i,B_{S_i} +M_{S_i})\in\Gamma$. Therefore, after relabelling the indices we may assume that \eqref{eq:79} holds. For every $ i \geq 1 $ we denote by $ T_i $ the normalisation of $ \theta_i(S_i) $. By Lemma \ref{lem:lifting_g} there exists a diagram \begin{center} \begin{tikzcd}[column sep = 0.65em, row sep = 2.5em] (W_1,\Delta_1+N_1) \arrow[d, "f_1" swap] \arrow[rr, dashed, "\rho_1"] && (W_2,\Delta_2+N_2) \arrow[d, "f_2" swap] \arrow[rr, dashed, "\rho_2"] && (W_3,\Delta_3+N_3) \arrow[d, "f_3" swap] \arrow[rr, dashed, "\rho_3"] && \dots \\ (S_1,B_{S_1}+M_{S_1}) \arrow[dr, "\theta_1|_{S_1}" swap] \arrow[rr, dashed, "\pi_1|_{S_1}"] && (S_2,B_{S_2}+M_{S_2}) \arrow[dl, "\theta_1^+|_{S_2}"] \arrow[dr, "\theta_2|_{S_2}" swap] \arrow[rr, dashed, "\pi_2|_{S_2}"] && (S_3,B_{S_3}+M_{S_3}) \arrow[dl, "\theta_2^+|_{S_3}"] \arrow[rr, dashed] && \dots \\ & T_1 && T_2 \end{tikzcd} \end{center} where the sequence of rational maps $\rho_i$ yields an MMP for the NQC $ \mathbb{Q} $-factorial dlt g-pair $ (W_1, \Delta_1+N_1) $. By the assumptions of the theorem, this MMP terminates, so by relabelling, we may assume that $$ (W_i,\Delta_i+N_i) = (W_{i+1}, \Delta_{i+1} + N_{i+1}) \quad \text{for all } i\geq1 .$$ Since $-(K_{W_i}+\Delta_i+N_i)$ is nef over $T_i$ and $K_{W_{i+1}}+\Delta_{i+1}+N_{i+1}$ is nef over $T_i$ by construction, we obtain that $K_{W_i}+\Delta_i+N_i$ is numerically trivial over $T_i$ for each $i$. In particular, $K_{S_i}+B_{S_i}+M_{S_i}$ and $K_{S_{i+1}}+B_{S_{i+1}}+M_{S_{i+1}}$ are numerically trivial over $T_i$ for each $i$, and thus $\theta_i |_{S_i}$ and $\theta_i^+ |_{S_{i+1}}$ contract no curves. Therefore, $\theta_i |_{S_i}$ and $\theta_i^+ |_{S_{i+1}}$ are isomorphisms, and consequently all maps $\pi_i|_{S_i}$ are isomorphisms. This finishes the proof of the Claim. 
Finally, we show that the Claim implies the Theorem: indeed, the Claim shows that $\lfloor B_i\rfloor$ does not contain any flipping or flipped curves for all $i\geq N_{n-1}$. Thus, if $\Exc(\theta_i)\cap\lfloor B_i\rfloor\neq\emptyset$ for some $i\geq N_{n-1}$, then there is a curve $C$ contracted by $\theta_i$ with $C\cdot\lfloor B_i\rfloor > 0$. But then $C^+\cdot\lfloor B_{i+1}\rfloor < 0$ for every curve $C^+$ contracted by $\theta_i^+$, hence $C^+\subseteq\lfloor B_{i+1}\rfloor$, a contradiction. \end{proof} The analogue of Lemma \ref{lem:klt_lc} in the context of g-pairs is the following: \begin{lem} \label{lem:reduction_g-term} The termination of flips for quasi-projective NQC $\mathbb{Q}$-factorial klt g-pairs of dimension at most $ d $ which are projective over a normal quasi-projective variety $Z$ implies the termination of flips for quasi-projective NQC log canonical g-pairs of dimension $ d $ over $Z$. \end{lem} \begin{proof} By induction, we may assume the termination of flips for NQC $ \mathbb{Q} $-factorial dlt g-pairs of dimension at most $ d-1 $. 
Consider a sequence of flips starting from an NQC log canonical g-pair $ (X_1,B_1+M_1) $ of dimension $ d $: \begin{center} \begin{tikzcd}[column sep = 0.8em, row sep = 1.75em] (X_1,B_1+M_1) \arrow[dr, "\theta_1" swap] \arrow[rr, dashed, "\pi_1"] && (X_2,B_2+M_2) \arrow[dl, "\theta_1^+"] \arrow[dr, "\theta_2" swap] \arrow[rr, dashed, "\pi_2"] && (X_3,B_3+M_3) \arrow[dl, "\theta_2^+"] \arrow[rr, dashed, "\pi_3"] && \dots \\ & Z_1 && Z_2 \end{tikzcd} \end{center} By Lemma \ref{lem:lifting_g} there exists a diagram \begin{center} \begin{tikzcd}[column sep = 0.8em, row sep = large] (Y_1,\Delta_1+N_1) \arrow[d, "f_1" swap] \arrow[rr, dashed, "\rho_1"] && (Y_2,\Delta_2+N_2) \arrow[d, "f_2" swap] \arrow[rr, dashed, "\rho_2"] && (Y_3,\Delta_3+N_3) \arrow[d, "f_3" swap] \arrow[rr, dashed, "\rho_3"] && \dots \\ (X_1,B_1+M_1) \arrow[dr, "\theta_1" swap] \arrow[rr, dashed, "\pi_1"] && (X_2,B_2+M_2) \arrow[dl, "\theta_1^+"] \arrow[dr, "\theta_2" swap] \arrow[rr, dashed, "\pi_2"] && (X_3,B_3+M_3) \arrow[dl, "\theta_2^+"] \arrow[rr, dashed, "\pi_3"] && \dots \\ & Z_1 && Z_2 \end{tikzcd} \end{center} where the sequence of rational maps $\rho_i$ is a composition of steps in an MMP for an NQC $\mathbb{Q}$-factorial dlt g-pair $ (Y_1,\Delta_1 + N_1) $. It suffices to show that this MMP terminates; we may assume that this sequence consists only of flips. By Theorem \ref{thm:specterm_g-pairs} and by relabelling, we may also assume that in this sequence the flipping locus at each step avoids the non-klt locus. Consequently, this sequence of flips is also a sequence of flips for the NQC $\mathbb{Q}$-factorial klt g-pair $ \big(Y_1,(\Delta_1 - \lfloor \Delta_1 \rfloor) +N_1 \big) $, which terminates by assumption. \end{proof} Finally, we obtain the analogue of Theorem \ref{thm:main} in the context of g-pairs. 
\begin{proof}[Proof of Theorem \ref{thm:main_g}] The proof is analogous to that of Theorem \ref{thm:main}, by replacing Lemma \ref{lem:klt_lc} by Lemma \ref{lem:reduction_g-term}, Lemma \ref{lem:lifting} by Lemma \ref{lem:lifting_g}, and \cite[Theorem 4.2.1]{Fuj07a} by Theorem \ref{thm:specterm_g-pairs}. \end{proof} \end{document}
\begin{document} \begin{abstract} For a finite alphabet $\mathcal{A}$ and shift $X\subseteq\mathcal{A}^{\mathbb{Z}}$ whose factor complexity function grows at most linearly, we study the algebraic properties of the automorphism group $\mathcal{A}ut(X)$. For such systems, we show that every finitely generated subgroup of $\mathcal{A}ut(X)$ is virtually $\mathbb{Z}^d$, in contrast to the behavior when the complexity function grows more quickly. With additional dynamical assumptions we show more: if $X$ is transitive, then $\mathcal{A}ut(X)$ is virtually $\mathbb{Z}$; if $X$ has dense aperiodic points, then $\mathcal{A}ut(X)$ is virtually $\mathbb{Z}^d$. We also classify all finite groups that arise as the automorphism group of a shift. \end{abstract} \maketitle \section{Introduction} Given a finite alphabet $\mathcal{A}$, a shift system $(X,\sigma)$ is a closed set $X\subseteq\mathcal{A}^\mathbb{Z}$ that is invariant under the left shift $\sigma\colon \mathcal{A}^\mathbb{Z}\to\mathcal{A}^\mathbb{Z}$ and its automorphism group $\mathcal{A}ut(X)$ is the group of homeomorphisms of $X$ that commute with $\sigma$ (these notions are made precise in Section~\ref{sec:notation}). For general shift systems, while $\mathcal{A}ut(X)$ is countable, it can be quite complicated: for the full shift~\cite{Hedlund2} or for mixing shifts of finite type~\cite{BLR}, $\mathcal{A}ut(X)$ is not finitely generated and is not amenable (see also~\cite{BK, KRW, FF, ward, hochman}). The assumption of topological mixing can be used to construct a rich collection of subgroups of the automorphism group. For example, the automorphism group contains isomorphic copies of all finite groups, the direct sum of countably many copies of $\mathbb{Z}$, and the free group on two generators. In these examples, the topological entropy is positive, and the complexity function $P_X(n)$, which counts the number of nonempty cylinder sets of length $n$ taken over all elements $x\in X$, grows quickly. 
When the complexity function of a shift system grows slowly, the automorphism group is often much simpler and the main goal of this paper is to study the algebraic properties of $\mathcal{A}ut(X)$ in this setting. In contrast to mixing shifts, we study general shifts of low complexity, without an assumption of minimality or transitivity. We show that the automorphism group of any shift of low complexity is amenable, yet its behavior can still be quite complicated. As $P_X(n)$ is non-decreasing, boundedness is the slowest possible growth property that $P_X(n)$ can have. As expected, this case is simple: the Morse-Hedlund Theorem~\cite{MH} implies that if there exists $n\in\mathbb{N}$ such that $P_X(n)\leq n$, then $X$ is comprised entirely of periodic points. Thus $\mathcal{A}ut(X)$ is a finite group (and we classify all finite groups that arise in this way in Section~\ref{sec:periodic2}). It follows that if $(X,\sigma)$ is a shift for which $P_X(n)/n\xrightarrow{n\to\infty}0$, then $|\mathcal{A}ut(X)|<\infty$. It is thus natural to study shifts for which $P_X(n) > n$ for all $n\in\mathbb{N}$. The first nontrivial growth rate that such a system can have is linear, by which we mean $$ 0<\limsup_{n\to\infty}\frac{P_X(n)}{n}<\infty. $$ In previous work~\cite{CK3}, we studied the algebraic properties of $\mathcal{A}ut(X)$ for transitive shifts of subquadratic growth and showed that $\mathcal{A}ut(X)/\langle\sigma\rangle$ is a periodic group. In particular, this holds for transitive shifts of linear growth. Periodic groups, however, can be quite complicated: for example, a periodic group need not be finitely generated, and there are finitely generated, nonamenable periodic groups. In this paper, we study $\mathcal{A}ut(X)$ for general (not necessarily transitive) shifts of linear growth. In the transitive case, we prove a stronger result than is implied by~\cite{CK3}, showing that $\mathcal{A}ut(X)/\langle\sigma\rangle$ is finite. 
However, the main novelty of this work is that our techniques remain valid even without the assumption of transitivity. Depending on dynamical assumptions on the system, shift systems with linear growth exhibit different behavior. Our most general result is: \begin{theorem}\label{thm:main} Suppose $(X,\sigma)$ is a shift system for which there exists $k\in\mathbb{N}$ such that $$ \limsup_{n\to\infty}P_X(n)/n<k. $$ Then every finitely generated subgroup of $\mathcal{A}ut(X)$ is virtually $\mathbb{Z}^d$ for some $d<k$. \end{theorem} Let $[\sigma]$ denote the full group of a shift $(X,\sigma)$ (see Section~\ref{sec:full-group} for the definition). With the additional assumption that $(X,\sigma)$ has a dense set of aperiodic points, we have: \begin{theorem} \label{th:finitely-generated} Suppose $(X,\sigma)$ is a shift system for which there exists $k\in\mathbb{N}$ such that $$ \limsup_{n\to\infty}P_X(n)/n<k. $$ If $X$ has a dense set of aperiodic points, then $\mathcal{A}ut(X)\cap[\sigma]\cong\mathbb{Z}^d$ for some $d<k$ and $\mathcal{A}ut(X)/\mathcal{A}ut(X)\cap[\sigma]$ is finite. In particular, $\mathcal{A}ut(X)$ is virtually $\mathbb{Z}^d$. \end{theorem} For a shift $(X, \sigma)$, let $\langle\sigma\rangle$ denote the subgroup of $\mathcal{A}ut(X)$ generated by $\sigma$. With the additional assumption that $(X,\sigma)$ is topologically transitive, meaning there exists a point whose orbit is dense in $X$, we show: \begin{theorem} \label{thm:transitive} Suppose $(X,\sigma)$ is a transitive shift system for which $$ 0<\limsup_{n\to\infty}P_X(n)/n<\infty. $$ Then $\mathcal{A}ut(X)/\langle\sigma\rangle$ is finite. In particular, $\mathcal{A}ut(X)$ is virtually $\mathbb{Z}$. 
\end{theorem} For minimal shifts, meaning shifts such that every point has dense orbit, we show (note the growth condition on the complexity only assumes $\liminf$ instead of $\limsup$): \begin{theorem} \label{theorem:minimal} Suppose $(X,\sigma)$ is a minimal shift for which there exists $k\in\mathbb{N}$ satisfying $$ \liminf_{n\to\infty}P_X(n)/n<k. $$ Then $\mathcal{A}ut(X)/\langle\sigma\rangle$ is finite and $|\mathcal{A}ut(X)/\langle\sigma\rangle|<k$. \end{theorem} For periodic minimal shifts, it is easy to see that $\mathcal{A}ut(X)\cong\mathbb{Z}/n\mathbb{Z}$ where $n$ is the minimal period. Salo and T\"orm\"a~\cite{SaTo} asked if the automorphism group of any linearly recurrent shift is virtually $\mathbb{Z}$. Linearly recurrent shifts are minimal and the factor complexity function grows at most linearly, and so Theorem~\ref{theorem:minimal} gives an affirmative answer to their question. Roughly speaking, the proof of Theorem~\ref{thm:main} splits into two parts. We start by studying shifts with a dense set of aperiodic points in Section~\ref{sec:aperiodic}, showing that the automorphism group is locally a group of polynomial growth, with the polynomial growth rate depending on the linear complexity assumption on the shift. We sharpen this result to understand transitive shifts of linear growth, leading to the proof of Theorem~\ref{thm:transitive} in Section~\ref{subsec:transitive}. We then combine this with information on existence of aperiodic points, completing the proof of Theorem~\ref{thm:main} in Section~\ref{sec:general-linear}. The proof of Theorem~\ref{theorem:minimal} in Section~\ref{sec:minimal} proceeds in a different manner, relying on a version of a lemma of Boshernitzan used to bound the number of ergodic probability measures on a shift with linear growth, which we use to bound the number of words in the language of the system that have multiple extensions. For some of these results, we are able to give examples showing that they are sharp. 
These examples are included in Section~\ref{sec:examples}. While writing up these results, we became aware of related work by Donoso, Durand, Maass, and Petit~\cite{DDMP}. While some of the results obtained are the same, the methods are different and each method leads to new open directions. \section{Background and notation}\label{sec:notation} \subsection{Shift systems} We assume throughout that $\mathcal{A}$ is a fixed finite set endowed with the discrete topology. If $x\in\mathcal{A}^{\mathbb{Z}}$, we denote the value of $x$ at $n\in\mathbb{Z}$ by $x(n)$. The metric $d(x,y):=2^{-\inf\{|n|\colon x(n)\neq y(n)\}}$ generates the product topology on $\mathcal{A}^{\mathbb{Z}}$ and endowed with this metric, $\mathcal{A}^\mathbb{Z}$ is a compact metric space; henceforth we assume this metric structure on $\mathcal{A}^{\mathbb{Z}}$. The {\em left shift} $\sigma\colon\mathcal{A}^{\mathbb{Z}}\to\mathcal{A}^{\mathbb{Z}}$ is the map defined by $(\sigma x)(n):=x(n+1)$ and is a homeomorphism from $\mathcal{A}^{\mathbb{Z}}$ to itself. If $X\subseteq\mathcal{A}^{\mathbb{Z}}$ is a closed, $\sigma$-invariant subset, then the pair $(X,\sigma)$ is called a {\em subshift of $\mathcal{A}^{\mathbb{Z}}$}, or just a {\em shift of $\mathcal{A}^{\mathbb{Z}}$}. If the alphabet $\mathcal{A}$ is clear from the context, we refer to $(X,\sigma)$ as just a {\em shift}. The set $$ \mathcal{O}(x):=\{\sigma^nx\colon n\in\mathbb{Z}\} $$ is the {\em orbit of $x$} and we use $\overline{\mathcal{O}}(x)$ to denote its closure. The shift $(X,\sigma)$ is {\em transitive} if there exists some $x\in X$ such that $\overline{\mathcal{O}}(x) = X$ and it is {\em minimal} if $\overline{\mathcal{O}}(x) = X$ for all $x\in X$. A point $x\in X$ is {\em periodic} if there exists some $n\in\mathbb{N}$ such that $\sigma^nx = x$ and otherwise it is said to be {\em aperiodic}. 
\subsection{Complexity of shifts} For a shift $(X,\sigma)$ and $w=(a_{-m+1},\dots,a_{-1},a_0,a_1,$ $\dots,a_{m-1})\in\mathcal{A}^{2m+1}$, the {\em central cylinder set $[w]_0$ determined by $w$} is defined to be $$ [w]_0:=\left\{x\in X\colon x(n)=a_n\text{ for all }-m<n<m\right\}. $$ The collection of central cylinder sets forms a basis for the topology of $X$. If $w=(a_0,\dots,a_{m-1})\in\mathcal{A}^m$, then the {\em one sided cylinder set $[w]_0^+$ determined by $w$} is given by $$ [w]_0^+:=\left\{x\in X\colon x(n)=a_n\text{ for all }0\leq n<m\right\}. $$ For $m\in\mathbb{N}$, define the set of {\em words $\mathcal{L}_m(X)$ of length $m$ in $X$} by $$ \mathcal{L}_m(X):=\left\{w\in\mathcal{A}^m\colon[w]_0^+\neq\emptyset\right\} $$ and define the {\em language $\mathcal{L}(X)$} of $X$ to be $\mathcal{L}(X):=\bigcup_{m=1}^{\infty}\mathcal{L}_m(X)$. For $w\in\mathcal{L}(X)$, we denote the length of $w$ by $|w|$. A word in $x\in X$ is also referred to as a {\em factor} of $x$. A measure of the complexity of $X$ is the {\em (factor) complexity function} $P_X\colon\mathbb{N}\to\mathbb{N}$, which counts the number of words of length $n$ in the language of $X$: $$ P_X(n):=|\mathcal{L}_n(X)|. $$ If $P_x(n)$ is the complexity function of a fixed $x\in X$, meaning it is the number of configurations in a block of size $n$ in $x$, then $P_X(n) \geq \sup_{x\in X}P_x(n)$, with equality holding when $X$ is a transitive shift. \subsection{The automorphism group of a shift} Let ${\rm Hom}(X)$ denote the group of homeomorphisms from $X$ to itself. If $h_1,\dots,h_n\in{\rm Hom}(X)$, then $\langle h_1,\dots,h_n\rangle$ denotes the subgroup of ${\rm Hom}(X)$ generated by $h_1,\dots,h_n$. Thus the shift $\sigma\in{\rm Hom}(X)$ and its centralizer in ${\rm Hom}(X)$ is called the {\em automorphism group} of $(X,\sigma)$. We denote the automorphism group of $(X,\sigma)$ by $\mathcal{A}ut(X)$ and endow it with the discrete topology. 
A map $\varphi\colon X\to X$ is a {\em sliding block code} if there exists $R\in\mathbb{N}$ such that for any $w\in\mathcal{L}_{2R+1}(X)$ and any $x,y\in[w]_0$, we have $(\varphi x)(0)=(\varphi y)(0)$. Any number $R\in\mathbb{N}\cup\{0\}$ for which this property holds is called a {\em range} for $\varphi$. The {\em minimal range} of $\varphi$ is its smallest range. If $\varphi\colon X\to X$ is a sliding block code of range $R$, there is a natural map (which, by abuse of notation, we also denote by $\varphi$) taking $\bigcup_{m=2R+1}^{\infty}\mathcal{L}_m(X)$ to $\mathcal{L}(X)$. To define this extension of $\varphi$, let $m>2R$ and let $w=(a_0,\dots,a_{m-1})\in\mathcal{A}^m$. For $0\leq i<m-2R$, choose $x_i\in[(a_i,\dots,a_{i+2R})]_0$ and define $$ \varphi(w):=\bigl((\varphi x_0)(0),(\varphi x_1)(0),\dots,(\varphi x_{m-2R-1})(0)\bigr). $$ Therefore if $w$ is a word of length at least $2R+1$, then $\varphi(w)$ is a word of length $|w|-2R$. The elements of $\mathcal{A}ut(X)$ have a concrete characterization: \begin{theorem}[Curtis-Hedlund-Lyndon Theorem~\cite{Hedlund2}] \label{th:CHL} If $(X,\sigma)$ is a shift, then any element of $\mathcal{A}ut(X)$ is a sliding block code. \end{theorem} For $R\in\mathbb{N}\cup\{0\}$, we let $\mathcal{A}ut_R(X)\subseteq \mathcal{A}ut(X)$ denote the automorphisms of $(X,\sigma)$ for which $R$ is a (not necessarily minimal) range. Thus $\mathcal{A}ut(X)=\bigcup_{R=0}^{\infty}\mathcal{A}ut_R(X)$. We observe that if $\varphi_1\in\mathcal{A}ut_{R_1}(X)$ and $\varphi_2\in\mathcal{A}ut_{R_2}(X)$, then $\varphi_1\circ\varphi_2\in\mathcal{A}ut_{R_1+R_2}(X)$. In general, the automorphism group of a shift can be complicated, but Theorem~\ref{th:CHL} implies that $\mathcal{A}ut(X)$ is always countable. 
\subsection{Automorphisms and the full group} \label{sec:full-group} The {\em full group} $[\sigma]$ of a shift $(X,\sigma)$ is the subgroup of ${\rm Hom}(X)$ comprised of the orbit preserving homeomorphisms: $$ [\sigma]:=\left\{\psi\in{\rm Hom}(X):\psi(x)\in\mathcal{O}(x)\text{ for all }x\in X\right\}. $$ Thus if $\psi\in[\sigma]$, then there is a function $k_{\psi}\colon X\to\mathbb{Z}$ such that $\psi(x)=\sigma^{k_{\psi}(x)}(x)$ for all $x\in X$. It follows from the definitions that the group $\mathcal{A}ut(X)\cap[\sigma]$ is the centralizer of $\sigma$ in $[\sigma]$. We note two basic facts about $\mathcal{A}ut(X)\cap[\sigma]$ which we will need in order to study $\mathcal{A}ut(X)/\mathcal{A}ut(X)\cap[\sigma]$ in Lemma~\ref{lemma:aperiodic-finite}. \begin{lemma} \label{lemma:normal} If $(X,\sigma)$ is a shift, then $\mathcal{A}ut(X)\cap[\sigma]$ is normal in $\mathcal{A}ut(X)$. \end{lemma} \begin{proof} Let $\varphi\in\mathcal{A}ut(X)$ and suppose $\psi\in\mathcal{A}ut(X)\cap[\sigma]$. Let $k_{\psi}\colon X\to\mathbb{Z}$ be a function such that $\psi(x)=\sigma^{k_{\psi}(x)}(x)$ for all $x\in X$. Fix $x\in X$ and observe that since $\varphi$ and $\sigma$ commute, $$ \varphi\circ\psi\circ\varphi^{-1}(x)=\varphi\circ\sigma^{k_{\psi}(\varphi^{-1}(x))}\circ\varphi^{-1}(x)=\sigma^{k_{\psi}(\varphi^{-1}(x))}(x). $$ As this holds for any $x\in X$, it follows that $\varphi\circ\psi\circ\varphi^{-1}\in\mathcal{A}ut(X)\cap[\sigma]$. Since $\varphi\in\mathcal{A}ut(X)$ and $\psi\in\mathcal{A}ut(X)\cap[\sigma]$ are arbitrary, we have $$\mathcal{A}ut(X)\cap[\sigma]=\varphi\cdot\left(\mathcal{A}ut(X)\cap[\sigma]\right)\cdot\varphi^{-1}$$ for all $\varphi\in\mathcal{A}ut(X)$. So $\mathcal{A}ut(X)\cap[\sigma]$ is normal in $\mathcal{A}ut(X)$. \end{proof} \begin{lemma}\label{lemma:abelian} If $(X,\sigma)$ is a shift, then $\mathcal{A}ut(X)\cap[\sigma]$ is abelian. \end{lemma} \begin{proof} Suppose $\varphi_1, \varphi_2\in\mathcal{A}ut(X)\cap[\sigma]$. 
For $i=1,2$, let $k_{\varphi_i}\colon X\to\mathbb{Z}$ be functions such that $\varphi_i(x)=\sigma^{k_{\varphi_i}(x)}(x)$ for all $x\in X$. For any $x\in X$, \begin{eqnarray*} \varphi_1\circ\varphi_2(x)&=&\varphi_1\circ\sigma^{k_{\varphi_2}(x)}(x)=\sigma^{k_{\varphi_2}(x)}\circ\varphi_1(x) \\ &=&\sigma^{k_{\varphi_2}(x)}\circ\sigma^{k_{\varphi_1}(x)}(x)=\sigma^{k_{\varphi_1}(x)}\circ\sigma^{k_{\varphi_2}(x)}(x) \\ &=&\sigma^{k_{\varphi_1}(x)}\circ\varphi_2(x)=\varphi_2\circ\sigma^{k_{\varphi_1}(x)}(x) \\ &=&\varphi_2\circ\varphi_1(x). \end{eqnarray*} Therefore $\varphi_1\circ\varphi_2=\varphi_2\circ\varphi_1$. \end{proof} \subsection{Summary of group theoretic terminology} For convenience, we summarize the algebraic properties that we prove $\mathcal{A}ut(X)$ may have. We say that a group $G$ is {\em locally $P$} if every finitely generated subgroup of $G$ has property $P$. The group $G$ is {\em virtually $H$} if $G$ contains $H$ as a subgroup of finite index. The group $G$ is {\em $K$-by-$L$} if there exists a normal subgroup $H$ of $G$ which is $K$ and such that the quotient $G/H$ is $L$. \section{Shifts of linear growth with a dense set of aperiodic points} \label{sec:transitive} \subsection{Cassaigne's characterization of linear growth} Linear growth can be characterized in terms of the (first) difference of the complexity function: \begin{theorem}[Cassaigne~\cite{C}]\label{theorem:cassaigne} A shift $(X,\sigma)$ satisfies $p_X(n)=O(n)$ if and only if the difference function $p_X(n+1)-p_X(n)$ is bounded. \end{theorem} \begin{definition} Let $w=(a_0,\dots,a_{|w|-1})\in\mathcal{L}_{|w|}(X)$. For fixed $m\in\mathbb{N}$, we say that $w$ {\em extends uniquely $m$ times to the right} if there is exactly one word $\widetilde{w}=(b_0,\dots,b_{|w|+m-1})\in\mathcal{L}_{|w|+m}(X)$ such that $a_i=b_i$ for all $0\leq i<|w|$. \end{definition} \begin{corollary}\label{corollary:extend} Assume $(X,\sigma)$ satisfies $p_X(n)=O(n)$. 
Then for any $m,n\in\mathbb{N}$, the number of words of length $n$ that do not extend uniquely $m$ times to the right is at most $B m$, where $B=\max_{n\in\mathbb{N}}\bigl(p_X(n+1)-p_X(n)\bigr)$. \end{corollary} Note that it follows from Cassaigne's Theorem that $B$ is finite. \begin{proof} For any $N\in\mathbb{N}$, the quantity $p_X(N+1)-p_X(N)$ is an upper bound on the number of words of length $N$ that do not extend uniquely to the right. For any word $w$ of length $n$ which does not extend uniquely $m$ times to the right, there exists $0\leq k<m$ such that $w$ extends uniquely $k$ times to the right, but not $k+1$ times. For fixed $k$, the number of words for which this is the case is at most the number of words of length $n+k$ that do not extend uniquely to the right. So the number of words of length $n$ that fail to extend uniquely $m$ times to the right is at most \begin{equation*} \sum_{k=1}^m\bigl(p_X(n+k)-p_X(n+k-1)\bigr)\leq Bm. \qedhere \end{equation*} \end{proof} \subsection{Assuming a dense set of aperiodic points} \label{sec:aperiodic} We start by considering shifts with a dense set of aperiodic points. This assumption holds in particular when the shift has no isolated points: if $X$ has no isolated points then, for any fixed period, the set of periodic points with that period has empty interior. Then the Baire Category Theorem implies that the set of all periodic points has empty interior. In particular, the set of aperiodic points is dense. The two assumptions are equivalent if the set of aperiodic points is nonempty. \begin{lemma}\label{lemma:k-transitive} Suppose $(X,\sigma)$ is a shift with a dense set of aperiodic points and there exists $k\in\mathbb{N}$ such that $$ \limsup_{n\to\infty}\frac{P_X(n)}{n}<k. $$ Then there exist $x_1,\dots,x_{k-1}\in X$ such that $$ X=\overline{\mathcal{O}}(x_1)\cup\overline{\mathcal{O}}(x_2)\cup\dots\cup\overline{\mathcal{O}}(x_{k-1}). $$ \end{lemma} \begin{proof} Suppose not and let $x_1\in X$. 
Since $\overline{\mathcal{O}}(x_1)\neq X$, there is a word $w_1\in\mathcal{L}(X)$ such that $[w_1]_0^+\cap\overline{\mathcal{O}}(x_1)=\emptyset$. Choose $x_2\in X$ with $x_2\in[w_1]_0^+$. Let $i<k$ and suppose that we have constructed $x_1,\dots,x_i\in X$ and $w_1,\dots,w_{i-1}\in\mathcal{L}(X)$ such that $[w_{j_1}]_0^+\cap\overline{\mathcal{O}}(x_{j_2})=\emptyset$ whenever $j_2\leq j_1$. Since $\overline{\mathcal{O}}(x_1)\cup\dots\cup\overline{\mathcal{O}}(x_i)\neq X$, there is a word $w_i\in\mathcal{L}(X)$ such that $[w_i]_0^+\cap\bigl(\overline{\mathcal{O}}(x_1)\cup\dots\cup\overline{\mathcal{O}}(x_i)\bigr)=\emptyset$. Let $x_{i+1}\in[w_i]_0^+$ and we continue this construction until $i=k$. Let $N>\max_{1\leq i<k}|w_i|$ be a fixed large integer (to be specified later). Since $x_1$ is aperiodic, there are at least $N+1$ distinct factors of length $N$ in $\overline{\mathcal{O}}(x_1)$. Therefore there are at least $N+1$ distinct factors of length $N$ in $X$ which do not contain the words $w_1,\dots,w_{k-1}$. We claim that for $1\leq i<k$, there are at least $N-|w_i|$ distinct factors in $\overline{\mathcal{O}}(x_{i+1})$ which contain the word $w_i$ but do not contain any of the words $w_{i+1},w_{i+2},\dots,w_{k-1}$. Assuming this claim, then for any sufficiently large $N$ we have $p_X(N)\geq kN-\sum_{i=1}^{k-1}|w_i|$, a contradiction of the complexity assumption. We are left with proving the claim. Let $1\leq i<k$ be fixed. By construction, the word $w_i$ appears in $\overline{\mathcal{O}}(x_{i+1})$ but $[w_j]_0^+\cap\overline{\mathcal{O}}(x_{i+1})=\emptyset$ for any $j>i$. If $w_i$ appears syndetically in $x_{i+1}$ then so long as $N$ is sufficiently large, every factor of $x_{i+1}$ of length $N$ contains the word $w_i$. In this case, since $x_{i+1}$ is aperiodic, there are at least $N+1$ distinct factors in $\overline{\mathcal{O}}(x_{i+1})$ which contain $w_i$ but not $w_j$ for any $j>i$. 
Otherwise $w_i$ does not appear syndetically in $x_{i+1}$ and so there are arbitrarily long factors in $x_{i+1}$ which do not contain $w_i$. Since $w_i$ appears at least once in $x_{i+1}$, it follows that there are arbitrarily long words which appear in $x_{i+1}$ which contain exactly one occurrence of $w_i$ and we can assume that $w_i$ occurs as either the rightmost or leftmost subword. Without loss, we assume that there exists a word $w$ of length $N$ which contains $w_i$ as its rightmost subword and has no other occurrences of $w_i$. Choose $j\in\mathbb{Z}$ such that $$ w=\bigl(x_{i+1}(j),x_{i+1}(j+1),\dots,x_{i+1}(j+|w|-1)\bigr). $$ By construction, if $0\leq s<|w|-|w_i|$ then the word $$ w^{(s)}:=\bigl(x_{i+1}(j+s),x_{i+1}(j+s+1),\dots,x_{i+1}(j+s+|w|-1)\bigr) $$ is a word of length $N$ for which the smallest $t\in\{0,\dots,|w|-|w_i|\}$ such that $$ w_i=\bigl(x_{i+1}(j+t),x_{i+1}(j+t+1),\dots,x_{i+1}(j+t+|w_i|-1)\bigr) $$ is $t=|w|-|w_i|-s$. Therefore, the words $w^{(s)}$ are pairwise distinct and each contains $w_i$ as a subword. By construction, they do not contain $w_j$ for any $j>i$, thus establishing the claim. \end{proof} \begin{proposition}\label{prop:polynomial} Suppose that $(X,\sigma)$ is a shift with a dense set of aperiodic points and there exists $k\in\mathbb{N}$ such that $$ \limsup_{n\to\infty}\frac{P_X(n)}{n}<k. $$ Then $\mathcal{A}ut(X)$ is locally a group of polynomial growth with polynomial growth rate at most $k-1$. Moreover, if $q\in\mathbb{N}$ is the smallest cardinality of a set $x_1,\dots,x_q\in X$ such that $\mathcal{O}(x_1)\cup\mathcal{O}(x_2)\cup\cdots\cup\mathcal{O}(x_q)$ is dense in $X$, then the polynomial growth rate of any finitely generated subgroup of $\mathcal{A}ut(X)$ is at most $q$. \end{proposition} In Section~\ref{sec:large-poly-growth}, we give an example showing that the growth rate given in this proposition is optimal. 
\begin{proof} By Lemma~\ref{lemma:k-transitive}, there exist $y_1,\dots,y_{k-1}\in X$ such that the union of the orbits $\mathcal{O}(y_1)\cup\mathcal{O}(y_2)\cup\cdots\cup\mathcal{O}(y_{k-1})$ is dense in $X$. Let $x_1,\dots,x_q\in X$ be a set of minimum cardinality for which $\mathcal{O}(x_1)\cup\mathcal{O}(x_2)\cup\cdots\cup\mathcal{O}(x_q)$ is dense. For $1\leq i\leq q$, define the constant $$ C_i:=\inf\{|w|\colon\text{$w$ is a subword of $x_i$ and $[w]_0^+$ contains precisely one element}\} $$ and define $C_i:=0$ if no such subword exists. Define \begin{equation}\label{eq:constant} C:=\max_{1\leq i\leq q}C_i. \end{equation} Fix $R\in\mathbb{N}$. For $i=1,\ldots, q$, let $\tilde{w}_i$ be a factor of $x_i$ such that \begin{enumerate} \item $|\tilde{w}_i|\geq3R+1$; \item \label{it:two} for all $u\in\mathcal{L}_{2R+1}(X)$, there exists $i$ such that $u$ is a factor of $\tilde{w}_i$. \end{enumerate} Note that~\eqref{it:two} is possible since $\mathcal{O}(x_1)\cup\mathcal{O}(x_2)\cup\cdots\cup\mathcal{O}(x_q)$ is dense. Without loss of generality, we can assume that there exists $M_1\geq0$ such that $[\tilde{w}_i]_0^+$ contains precisely one element for all $i\leq M_1$ and contains at least two elements for all $i>M_1$ (otherwise reorder $x_1,\dots,x_q$). For each $i>M_1$, either there exists $a\geq0$ such that $\tilde{w}_i$ extends uniquely to the right $a$ times but not $a+1$ times, or there exists $a\geq0$ such that $\tilde{w}_i$ extends uniquely to the left $a$ times but not $a+1$ times. Again, reordering if necessary, we can assume that there exists $M_2\geq M_1$ such that the former occurs for all $M_1<i\leq M_2$ and the latter occurs when $i>M_2$. For $i=1, \ldots, q$, we define words $w_1,\dots,w_q$ as follows: \begin{enumerate} \item For $i=1, \ldots, M_1$, the set $[\tilde{w}_i]_0^+$ contains precisely one element. This must be a shift of $x_i$, and without loss, we can assume it is $x_i$ itself. 
In this case, we define $u_i$ to be the shortest subword of $x_i$ with the property that $[u_i]_0^+$ contains precisely one element and define $w_i$ to be the (unique) extension $2R+2$ times both to the right and to the left of $u_i$. Observe that if $\varphi, \varphi^{-1}\in\mathcal{A}ut_R(X)$, then $\varphi^{-1}(\varphi(w_i))=u_i$. Since $\varphi^{-1}$ is injective and sends every element of $[\varphi(w_i)]_0^+$ to the one point set $[u_i]_0^+$, it follows that $[\varphi(w_i)]_0^+$ contains precisely one element and the word $\varphi(w_i)$ uniquely determines the word $\varphi(\tilde{w}_i)$. Moreover, $|u_i|\leq C$, where $C$ is the constant in~\eqref{eq:constant}, and so $|w_i|\leq C+4R+4$. \item For $i=M_1+1, \dots, M_2$, there exists $a_i\geq0$ such that $\tilde{w}_i$ extends uniquely to the right $a_i$ times but not $a_i+1$ times. Define $w_i$ to be the (unique) word of length $|\tilde{w}_i|+a_i$ which has $\tilde{w}_i$ as its leftmost factor. By choice of the ordering, $w_i$ does not extend uniquely to its right. \item For $i=M_2+1, \ldots, q$, there exists $a_i\geq0$ such that $\tilde{w}_i$ extends uniquely to the left $a_i$ times but not $a_i+1$ times. Define $w_i$ to be the (unique) word of length $|\tilde{w}_i|+a_i$ which has $\tilde{w}_i$ as its rightmost factor. By choice of the ordering, $w_i$ does not extend uniquely to its left. \end{enumerate} For $\varphi\in \mathcal{A}ut_R(X)$, we have that $\varphi(w_i)$ determines the word $\varphi(\tilde{w}_i)$ and so the block code determines what $\varphi$ does to every word in $\mathcal{L}_{2R+1}(X)$. Thus the map $\Phi\colon\mathcal{A}ut_R(X)\to\mathcal{L}_{|w_1|-2R}(X)\times\mathcal{L}_{|w_2|-2R}(X)\times\cdots\times\mathcal{L}_{|w_q|-2R}(X)$ defined by $$ \Phi(\varphi)=\bigl(\varphi(w_1),\varphi(w_2),\dots,\varphi(w_q)\bigr) $$ is injective. 
We claim that for $1\leq i\leq q$, we have \begin{equation}\label{eq:words} \left|\left\{\varphi(w_i)\colon\varphi,\varphi^{-1}\in\mathcal{A}ut_R(X)\right\}\right|\leq Bk(C+2)(R+1) , \end{equation} where $B$ is the constant appearing in Corollary~\ref{corollary:extend} and $C$ is the constant in~\eqref{eq:constant}. Before proving the claim, we show how to deduce the proposition from this estimate. It follows from~\eqref{eq:words} that $|\{\Phi(\varphi)\colon\varphi, \varphi^{-1}\in\mathcal{A}ut_R(X)\}|\leq(Bk(C+2))^q(R+1)^q$. Since $\Phi$ is injective, it follows that we have the bound \begin{equation}\label{eq:growth-estimate} |\{\varphi\in\mathcal{A}ut_R(X)\colon\varphi^{-1}\in\mathcal{A}ut_R(X)\}|\leq(Bk(C+2))^q(R+1)^q. \end{equation} Given $\varphi_1,\dots,\varphi_m\in\mathcal{A}ut(X)$, choose $R\in\mathbb{N}$ such that $\varphi_1,\dots,\varphi_m,\varphi_1^{-1},\dots,\varphi_m^{-1}\in\mathcal{A}ut_R(X)$. Then for any $n\in\mathbb{N}$, any $e_1,\dots,e_n\in\{-1,1\}$, and any $f_1,\dots,f_n\in\{1,\dots,m\}$, we have $$ \varphi_{f_1}^{e_1}\circ\varphi_{f_2}^{e_2}\circ\cdots\circ\varphi_{f_n}^{e_n}\in\mathcal{A}ut_{nR}(X). $$ In particular, if $\mathcal{S}:=\{\varphi_1,\dots,\varphi_m,\varphi_1^{-1},\dots,\varphi_m^{-1}\}$ is a (symmetric) generating set for $\langle\varphi_1,\dots,\varphi_m\rangle$, then any reduced word of length $n$ (with respect to $\mathcal{S}$) is an element of $\{\varphi\in\mathcal{A}ut_{nR}(X)\colon\varphi^{-1}\in\mathcal{A}ut_{nR}(X)\}$. By~\eqref{eq:growth-estimate}, there are at most $(Bk(C+2))^q(nR+1)^q$ such words. Therefore $\langle\varphi_1,\dots,\varphi_m\rangle$ is a group of polynomial growth and its polynomial growth rate is at most $q$. This holds for any finitely generated subgroup of $\mathcal{A}ut(X)$ (where the parameter $R$ depends on the subgroup and choice of generating set, but $B$, $C$, $k$, and $q$ depend only on the shift $(X,\sigma)$). As $q\leq k-1$, the proposition follows. 
We are left with showing that~\eqref{eq:words} holds. There are three cases to consider, depending on the interval in which $i$ lies. \begin{enumerate} \item Suppose $1\leq i\leq M_1$. Then $|w_i|\leq C+4R+4$ and so $\varphi(w_i)$ is a word of length $C+2R+2$. Therefore, there are $$ p_X(C+2R+2)\leq k\cdot(C+2R+2)\leq k(C+2)(R+1) $$ possibilities for the word $\varphi(w_i)$. \item \label{case:two} Suppose $M_1<i\leq M_2$. Then $w_i$ does not extend uniquely to its right. If $\varphi\in\mathcal{A}ut(X)$ is such that $\varphi, \varphi^{-1}\in\mathcal{A}ut_R(X)$, then the word $\varphi(w_i)\in\mathcal{L}_{|w_i|-2R}(X)$ cannot extend uniquely $R+1$ times to its right (as otherwise this extended word would have length $2R+1$ and applying $\varphi^{-1}$ to it would show that there is only one possible extension of $w_i$ to its right). By Corollary~\ref{corollary:extend}, there are at most $B(R+1)$ such words. Therefore $\{\varphi(w_i)\colon\varphi,\varphi^{-1}\in\mathcal{A}ut_R(X)\}$ has at most $B(R+1)$ elements. \item Suppose $i>M_2$. Then $w_i$ does not extend uniquely to its left. As in Case~\eqref{case:two}, if $\varphi\in\mathcal{A}ut(X)$ is such that $\varphi, \varphi^{-1}\in\mathcal{A}ut_R(X)$, then $\varphi(w_i)$ cannot extend uniquely $R+1$ times to its left. By Corollary~\ref{corollary:extend}, there are at most $B(R+1)$ such words. Therefore $\{\varphi(w_i)\colon\varphi,\varphi^{-1}\in\mathcal{A}ut_R(X)\}$ has at most $B(R+1)$ elements. \end{enumerate} This establishes~\eqref{eq:words}, and thus the proposition. \end{proof} \subsection{The automorphism group of a transitive shift} \label{subsec:transitive} \begin{lemma}\label{lemma:transitive-finite-index} Suppose $(X,\sigma)$ is a transitive shift and there exists $k\in\mathbb{N}$ such that $$ \limsup_{n\to\infty}\frac{P_X(n)}{n}<k. $$ If $x_0\in X$ has a dense orbit, then the set $$ \left\{\varphi(x_0)\colon\varphi\in\mathcal{A}ut(X)\right\} $$ is contained in the union of finitely many distinct orbits. 
\end{lemma} \begin{proof} Suppose not. Let $\varphi_1,\varphi_2,\ldots\in\mathcal{A}ut(X)$ be such that $\varphi_i(x_0)\notin\mathcal{O}(\varphi_j(x_0))$ whenever $i\neq j$. For $N\in\mathbb{N}$, let $R(N)$ be the smallest integer such that we have $\varphi_1,\dots,\varphi_N,\varphi_1^{-1},\dots,\varphi_N^{-1}\in\mathcal{A}ut_{R(N)}(X)$. For $1\leq i\leq N$, $n\in\mathbb{N}$, and $-n\leq j\leq n$, we have $\varphi_i^{\pm1}\circ\sigma^j\in\mathcal{A}ut_{R(N)+n}(X)$. As automorphisms take aperiodic points to aperiodic points, for fixed $i$, the set $$ \{\varphi_i\circ\sigma^j\colon-n\leq j\leq n\} $$ contains $2n+1$ elements. If $i_1\neq i_2$ and $-n\leq j_1,j_2\leq n$, then $\varphi_{i_1}\circ\sigma^{j_1}(x_0)\notin\mathcal{O}(\varphi_{i_2}\circ\sigma^{j_2}(x_0))$. Thus the set $$ \{\varphi_i\circ\sigma^j\colon 1\leq i\leq N\text{ and }-n\leq j\leq n\} $$ contains $2Nn+N$ elements. Therefore, $$ |\{\varphi\in\mathcal{A}ut_{R(N)+n}(X)\colon\varphi^{-1}\in\mathcal{A}ut_{R(N)+n}(X)\}|\geq2Nn+N. $$ It follows that $$ \limsup_{R\to\infty}\frac{|\{\varphi\in\mathcal{A}ut_R(X)\colon\varphi^{-1}\in\mathcal{A}ut_R(X)\}|}{R}\geq 2N. $$ Since $N\in\mathbb{N}$ was arbitrary, we have \begin{equation}\label{eq:slow-growth} \limsup_{R\to\infty}\frac{|\{\varphi\in\mathcal{A}ut_R(X)\colon\varphi^{-1}\in\mathcal{A}ut_R(X)\}|}{R}=\infty. \end{equation} On the other hand, since $(X,\sigma)$ is transitive, the parameter $q$ in the conclusion of Proposition~\ref{prop:polynomial} is $1$. Then by~\eqref{eq:growth-estimate}, we have $$|\{\varphi\in\mathcal{A}ut_R(X)\colon\varphi^{-1}\in\mathcal{A}ut_R(X)\}|\leq Bk(C+2)(R+1), $$ where $B,k,C$ are as in Proposition~\ref{prop:polynomial}, which depend only on the shift $(X,\sigma)$ and not on $R$. This estimate holds for any $R\in\mathbb{N}$, a contradiction of~\eqref{eq:slow-growth}. 
\end{proof} We use this to complete the proof of Theorem~\ref{thm:transitive}, characterizing the automorphism group of transitive shifts of linear growth: \begin{proof}[Proof of Theorem~\ref{thm:transitive}] Assume $(X,\sigma)$ is a transitive shift satisfying $$\limsup_{n\to\infty}P_X(n)/n<k$$ for some $k\in\mathbb{N}$. An automorphism in a transitive shift is determined by the image of a point whose orbit is dense, and so Lemma~\ref{lemma:transitive-finite-index} implies that the group $\mathcal{A}ut(X)/\mathcal{A}ut(X)\cap[\sigma]$ is finite (Lemma~\ref{lemma:normal} implies that $\mathcal{A}ut(X)\cap[\sigma]$ is normal in $\mathcal{A}ut(X)$). However, the only orbit preserving automorphisms in a transitive shift are elements of $\langle\sigma\rangle$, since such an automorphism acts like a power of the shift on a point whose orbit is dense. \end{proof} Theorem~\ref{thm:transitive} shows that if $(X,\sigma)$ is transitive and has low enough complexity, then $\mathcal{A}ut(X)$ is highly constrained. One might hope to have a converse to this theorem: if $(X,\sigma)$ is transitive and is above some ``complexity threshold'' then $\mathcal{A}ut(X)$ is nontrivial. In Section~\ref{sec:no-complexity-threshold}, we give an example showing that no such converse holds. \subsection{The automorphism group of a shift with dense aperiodic points} \label{sec:dense-aperiodic} \begin{lemma}\label{lemma:aperiodic-finite} Suppose $(X,\sigma)$ has a dense set of aperiodic points and there exists $k\in\mathbb{N}$ such that $$ \limsup_{n\to\infty}\frac{P_X(n)}{n}<k. $$ Let $x_1,\dots,x_q\in X$ be a set (of minimal cardinality) such that $\mathcal{O}(x_1)\cup\cdots\cup\mathcal{O}(x_q)$ is dense in $X$. Then for each $1\leq i\leq q$, the set $$ \left\{\varphi(x_i)\colon\varphi\in\mathcal{A}ut(X)\right\} $$ is contained in the union of finitely many distinct orbits. 
\end{lemma} \begin{proof} By minimality of the set $\{x_1,\dots,x_q\}$, we have $$ x_i\notin\bigcup_{j\neq i}\overline{\mathcal{O}}(x_j) $$ for any $1\leq i\leq q$. Therefore there exists $w_i\in\mathcal{L}(X)$ such that $[w_i]_0^+\cap\mathcal{O}(x_i)\neq\emptyset$ but $[w_i]_0^+\cap\bigcup_{j\neq i}\overline{\mathcal{O}}(x_j) =\emptyset$. This implies that $[w_i]_0^+\subseteq\overline{\mathcal{O}}(x_i)$. Let $\varphi\in\mathcal{A}ut(X)$ and note that $\varphi$ is determined by $\varphi(x_1),\dots,\varphi(x_q)$. If for some $1\leq i\leq q$ we have $\mathcal{O}(\varphi(x_j))\cap[w_i]_0^+=\emptyset$ for all $j$, then $\varphi(X)\cap[w_i]_0^+=\emptyset$ and $\varphi$ is not surjective, a contradiction. Therefore, for each $i$ there exists $1\leq j_i\leq q$ such that $\mathcal{O}(\varphi(x_{j_i}))\cap[w_i]_0^+\neq\emptyset$. By construction, if $\mathcal{O}(\varphi(x_{j_i}))\cap[w_i]_0^+\neq\emptyset$, then $\varphi(x_{j_i})\in\overline{\mathcal{O}}(x_i)$ and so $\mathcal{O}(\varphi(x_{j_i}))\cap[w_k]_0^+=\emptyset$ for any $k\neq i$. That is, the map $i\mapsto j_i$ is a permutation on the set $\{1,2,\dots,q\}$. Let $\pi_{\varphi}\in S_q$, where $S_q$ is the symmetric group on $q$ letters, denote this permutation. Let $$ H:=\{\pi_{\varphi}\colon\varphi\in\mathcal{A}ut(X)\}\subseteq S_q. $$ For each $h\in H$, choose $\varphi_h\in\mathcal{A}ut(X)$ such that $h=\pi_{\varphi_h}$. Then if $\varphi\in\mathcal{A}ut(X)$ and $h=\pi_{\varphi}$, the permutation induced by $\varphi_h^{-1}\circ\varphi$ is the identity. It follows that $\varphi_h^{-1}\circ\varphi$ preserves each of the sets $\overline{\mathcal{O}}(x_1),\dots,\overline{\mathcal{O}}(x_q)$. Consequently, for each $1\leq i\leq q$, the restriction of $\varphi_h^{-1}\circ\varphi$ to $\overline{\mathcal{O}}(x_i)$ is an automorphism of the (transitive) subsystem $\left(\overline{\mathcal{O}}(x_i),\sigma\right)$. 
By Lemma~\ref{lemma:transitive-finite-index}, the set $\{\psi(x_i)\colon\psi\in\mathcal{A}ut(\overline{\mathcal{O}}(x_i))\}$ is contained in the union of finitely many distinct orbits. Therefore, the set $\{\varphi_{\pi_{\varphi}}^{-1}\circ\varphi(x_i)\colon\varphi\in\mathcal{A}ut(X)\}$ is contained in the union of finitely many distinct orbits. Since $$ \{\varphi(x_i)\colon\varphi\in\mathcal{A}ut(X)\}\subseteq\bigcup_{h\in H}\varphi_h\left(\{\varphi_{\pi_{\varphi}}^{-1}\circ\varphi(x_i)\colon\varphi\in\mathcal{A}ut(X)\}\right) $$ and automorphisms take orbits to orbits, it follows that $\{\varphi(x_i)\colon\varphi\in\mathcal{A}ut(X)\}$ is contained in the union of finitely many distinct orbits. \end{proof} \begin{lemma}\label{lemma:rank} Let $(X,\sigma)$ be a shift with a dense set of aperiodic points and assume that there exists $k\in\mathbb{N}$ such that $$ \limsup_{n\to\infty}\frac{P_X(n)}{n}<k. $$ Then $\mathcal{A}ut(X)\cap[\sigma]\cong\mathbb{Z}^d$ for some $d<k$. \end{lemma} \begin{proof} By Lemmas~\ref{lemma:normal} and~\ref{lemma:abelian}, $\mathcal{A}ut(X)\cap[\sigma]$ is abelian and normal in $\mathcal{A}ut(X)$. By Lemma~\ref{lemma:k-transitive}, there exist points $x_1,\dots,x_{k-1}\in X$ such that $\mathcal{O}(x_1)\cup\cdots\cup\mathcal{O}(x_{k-1})$ is dense in $X$. If $\varphi\in\mathcal{A}ut(X)\cap[\sigma]$, then there exist $e_1(\varphi),\dots,e_{k-1}(\varphi)\in\mathbb{Z}$ such that $\varphi(x_i)=\sigma^{e_i(\varphi)}(x_i)$ for all $1\leq i\leq k-1$. As an automorphism is determined by the images of $x_1,\dots,x_{k-1}$, the map $\varphi\mapsto(e_1(\varphi),\dots,e_{k-1}(\varphi))$ is an injective homomorphism from $\mathcal{A}ut(X)\cap[\sigma]$ to $\mathbb{Z}^{k-1}$. \end{proof} \begin{proof}[Proof of Theorem~\ref{th:finitely-generated}] By Lemma~\ref{lemma:k-transitive}, there exist $x_1,\dots,x_{k-1}\in X$ such that $\mathcal{O}(x_1)\cup\cdots\cup\mathcal{O}(x_{k-1})$ is dense in $X$. 
If $\varphi\in\mathcal{A}ut(X)$, then $\varphi$ is determined by the values of $\varphi(x_1),\dots,\varphi(x_{k-1})$. By Lemma~\ref{lemma:aperiodic-finite}, the set $\{\varphi(x_i)\colon\varphi\in\mathcal{A}ut(X)\}$ is contained in the union of finitely many distinct orbits in $X$. Therefore, modulo orbit preserving automorphisms, there are only finitely many choices for $\varphi(x_1),\dots,\varphi(x_{k-1})$. It follows that the group $\mathcal{A}ut(X)/\mathcal{A}ut(X)\cap[\sigma]$ is finite. By Lemma~\ref{lemma:rank}, $\mathcal{A}ut(X)\cap[\sigma]\cong\mathbb{Z}^d$ for some $d<k$. \end{proof} \section{General shifts of linear growth} \label{sec:general-linear} \begin{lemma}\label{lemma:per-finite} Suppose $(X,\sigma)$ is a shift and $w\in\mathcal{L}(X)$ is such that $[w]_0^+$ is infinite. Then there exists aperiodic $x_w\in X$ such that $x_w\in[w]_0^+$. \end{lemma} \begin{proof} Either $w$ occurs syndetically in every element of $[w]_0^+$ with a uniform bound on the gap, or there exists a sequence of elements of $[w]_0^+$ along which the gaps between occurrences of $w$ grow. In the first case, the subsystem $$ \overline{\left\{\sigma^ix\colon x\in[w]_0^+\text{, }i\in\mathbb{Z}\right\}} $$ is infinite and so contains an aperiodic point $x_w$. Since $w$ occurs syndetically with the same bound in every element of $[w]_0^+$, it also occurs syndetically in any limit taken along elements of $[w]_0^+$, and in particular in $x_w$. In the second case, let $y_w\in[w]_0^+$ be a limit of such a sequence. Then there is an element $x_w\in\overline{\mathcal{O}(y_w)}\cap[w]_0^+$ for which either $w$ occurs only finitely many times or infinitely many times with gaps tending to infinity in the semi-infinite word $\{x_w(n)\colon n\geq0\}$, or the same behavior occurs in the semi-infinite word $\{x_w(n)\colon n\leq0\}$. In either case, $x_w$ is aperiodic. 
\end{proof} We use this to complete the proof of Theorem~\ref{thm:main}, characterizing the finitely generated subgroups of a shift of linear growth: \begin{proof}[Proof of Theorem~\ref{thm:main}] Let $(X,\sigma)$ be a shift and assume there exists $k\in\mathbb{N}$ such that $$ \limsup_{n\to\infty}\frac{P_X(n)}{n}<k. $$ Let $$ X_{NP}:=\overline{\left\{x\in X\colon\sigma^i(x)\neq x\text{ for all }i\neq0\right\}} $$ be the closure of the set of aperiodic points in $X$. As automorphisms take aperiodic points to aperiodic points, every element of $\mathcal{A}ut(X)$ preserves $X_{NP}$. Consequently, restriction to $X_{NP}$ defines a natural homomorphism $h\colon\mathcal{A}ut(X)\to\mathcal{A}ut(X_{NP})$. Let $\varphi_1,\dots,\varphi_N\in\mathcal{A}ut(X)$ and choose $R\in\mathbb{N}$ such that $\varphi_1,\dots,\varphi_N,\varphi_1^{-1},\dots,\varphi_N^{-1}\in\mathcal{A}ut_R(X)$. By Lemma~\ref{lemma:k-transitive}, there exists a set $x_1,\dots,x_{k-1}\in X_{NP}$ such that $$ \mathcal{O}(x_1)\cup\cdots\cup\mathcal{O}(x_{k-1}) $$ is dense in $X_{NP}$. Let $\{x_1,\dots,x_q\}\subseteq X_{NP}$ be a set of minimal cardinality with the property that $$ \mathcal{O}(x_1)\cup\cdots\cup\mathcal{O}(x_q) $$ is dense in $X_{NP}$. Then for any $\varphi\in\langle\varphi_1,\dots,\varphi_N\rangle$, the restriction of $\varphi$ to $X_{NP}$ is determined by $\varphi(x_1),\dots,\varphi(x_q)$. By Lemma~\ref{lemma:aperiodic-finite}, for each $1\leq j\leq q$, the set $$ \{\varphi(x_j)\colon\varphi\in\langle\varphi_1,\dots,\varphi_N\rangle\} $$ is contained in the union of finitely many distinct orbits. Therefore there exists a finite collection of automorphisms $\psi_1,\dots,\psi_M\in\langle\varphi_1,\dots,\varphi_N\rangle$ such that for any $\varphi\in\langle\varphi_1,\dots,\varphi_N\rangle$, there exists $1\leq t(\varphi)\leq M$ such that for all $1\leq j\leq q$, we have $$ \varphi(x_j)\in\mathcal{O}(\psi_{t(\varphi)}(x_j)). 
$$ Thus the restriction of $\psi_{t(\varphi)}^{-1}\circ\varphi$ to $X_{NP}$ is orbit preserving. Let $$ K:=\{\varphi\in\langle\varphi_1,\dots,\varphi_N\rangle\colon\text{the restriction of $\varphi$ to $X_{NP}$ is orbit preserving}\}. $$ Clearly $K$ is a subgroup of $\langle\varphi_1,\dots,\varphi_N\rangle$. For each $1\leq i\leq N$, we have that $\varphi_i$ is a block code of range $R$. Let $$ \mathcal{W}_R:=\left\{w\in\mathcal{L}_{2R+1}(X)\colon[w]_0^+\cap X_{NP}=\emptyset\right\}. $$ Then by Lemma~\ref{lemma:per-finite}, the set $$ Y:=\bigcup_{w\in\mathcal{W}_R}[w]_0^+ $$ is finite. Since every element of $Y$ is periodic and automorphisms preserve the minimal period of periodic points, the ($\langle\varphi_1,\dots,\varphi_N\rangle$-invariant) set $$ Z:=\left\{\varphi_{i_1}^{e_1}\circ\cdots\circ\varphi_{i_S}^{e_S}(y)\colon i_1,\dots,i_S\in\{1,\dots,N\}\text{, }e_1,\dots,e_S\in\{-1,1\},S\in\mathbb{N}\text{, }y\in Y\right\} $$ is finite. For any $1\leq i\leq N$, the restriction of $\varphi_i$ to $X_{NP}$ uniquely determines the restriction of $\varphi_i$ to $X\setminus Z$ (since all words of length $2R+1$ that occur in elements of $X\setminus Z$ also occur in $X_{NP}$). Since $\varphi_1,\dots,\varphi_N$ are automorphisms that preserve $Z$, they take elements of $X\setminus Z$ to elements of $X\setminus Z$. Thus for any $\varphi\in\langle\varphi_1,\dots,\varphi_N\rangle$, the restriction of $\varphi$ to $X_{NP}$ uniquely determines the restriction of $\varphi$ to $X\setminus Z$. In particular, this holds for all $\varphi\in K$. So there exists a finite collection of automorphisms $\alpha_1,\dots,\alpha_T\in K$ such that for all $\varphi\in K$, there is an integer $1\leq s(\varphi)\leq T$ such that $\alpha_{s(\varphi)}^{-1}\circ\varphi$ acts trivially on $Z$. 
With the functions $t(\varphi)$ and $s(\varphi)$ defined as above, we have that for any $\varphi\in\langle\varphi_1,\dots,\varphi_N\rangle$, the automorphism $$ \alpha^{-1}_{s\left(\psi^{-1}_{t(\varphi)}\circ\varphi\right)}\circ\psi^{-1}_{t(\varphi)}\circ\varphi $$ acts trivially on $Z$ and its restriction to $X_{NP}$ is orbit preserving. Define $H\subseteq\langle\varphi_1,\dots,\varphi_N\rangle$ to be the subgroup of elements $\varphi\in\langle\varphi_1,\dots,\varphi_N\rangle$ such that $\varphi$ acts trivially on $Z$ and the restriction of $\varphi$ to $X_{NP}$ is orbit preserving. Every element of $H$ is uniquely determined by its restriction to $X_{NP}$, and so $H$ is isomorphic to a subgroup of $\mathcal{A}ut(X_{NP})\cap[\sigma]$. By Lemma~\ref{lemma:rank}, this subgroup is isomorphic to $\mathbb{Z}^d$ for some $d<k$. On the other hand, for any $\varphi\in\langle\varphi_1,\dots,\varphi_N\rangle$, there exist $1\leq t\leq M$ and $1\leq s\leq T$ such that $\alpha_s^{-1}\circ\psi_t^{-1}\circ\varphi\in H$. Therefore $H$ has finite index in $\langle\varphi_1,\dots,\varphi_N\rangle$. Finally, if $\varphi\in H$, then there is a function $k\colon X_{NP}\to\mathbb{Z}$ such that for all $x\in X_{NP}$ we have $\varphi(x)=\sigma^{k(x)}(x)$. Thus if $\psi\in\langle\varphi_1,\dots,\varphi_N\rangle$ and $x\in X_{NP}$, we have $$ \psi\circ\varphi\circ\psi^{-1}(x)=\psi\circ\sigma^{k(\psi^{-1}(x))}\circ\psi^{-1}(x)=\sigma^{k(\psi^{-1}(x))}(x) $$ and if $z\in Z$, we have $$ \psi\circ\varphi\circ\psi^{-1}(z)=z. $$ Therefore $\psi\circ\varphi\circ\psi^{-1}\in H$ and so $H$ is a normal subgroup of $\langle\varphi_1,\dots,\varphi_N\rangle$. It follows that $\langle\varphi_1,\dots,\varphi_N\rangle$ is virtually $\mathbb{Z}^d$ for some $d<k$. \end{proof} \section{Minimal shifts of linear growth} \label{sec:minimal} For minimal shifts, we need more information on the words that are uniquely extendable: \begin{definition} For $x\in X$, define $$ x_R:=\{y\in X\colon y(i)=x(i)\text{ for all }i\geq0\}. 
$$ For $x,y\in X$, we write $x\sim_Ry$ if $x_R=y_R$ and define $X_R:=X/\!\sim_R$ to be $X$ modulo this relation. \end{definition} It is easy to check that $\sim_R$ is an equivalence relation on $X$ and so $X_R$ is well defined. We view $(X_R,\sigma)$ as a one sided shift. If $\varphi\in\mathcal{A}ut(X)$, then $\varphi$ is a block code (say of range $N$) and so determines an endomorphism on $(X_R,\sigma)$ as follows: if $y\in x_R$ and $N\in\mathbb{N}$ is the minimal range of $\varphi$, then $\varphi(x_R):=\left(\sigma^N\circ\varphi(y)\right)_R$. It is easy to check that $\varphi(x_R)$ is well defined. \begin{definition} For $x\in X$, we say that $x_R$ is {\em uniquely left extendable} if it has a unique preimage in $X_R$ and {\em nonuniquely left extendable} otherwise. If $w\in\mathcal{L}_n(X)$ is a word of length $n$ in the language of $X$, we say that $w$ is {\em uniquely left extendable} if there is a unique $\hat{w}\in\mathcal{L}_{n+1}(X)$ that ends with $w$. \end{definition} Boshernitzan~\cite{Bos} showed that if $(X,\sigma)$ is minimal and there exists $k\in\mathbb{N}$ such that $$ \liminf_{n\to\infty}P_X(n)-kn=-\infty, $$ then the number of ergodic probability measures on $(X,\sigma)$ is finite. In his proof, he makes use of a counting lemma and we use an infinite version of this lemma to study minimal shifts of linear growth: \begin{lemma}[Infinite version of Boshernitzan's Lemma] \label{lemma-boshernitzan} Let $(X,\sigma)$ be a shift for which there exists $k\in\mathbb{N}$ such that \begin{equation}\label{eq1} \liminf_{n\to\infty}P_X(n)-kn=-\infty. \end{equation} Then there are at most $k-1$ distinct elements of $(X_R,\sigma)$ which are nonuniquely left extendable. \end{lemma} \begin{proof} We first claim that for infinitely many $n$, the number of words of length $n$ that are nonuniquely left extendable is at most $k-1$. If not, let $L_n$ be the number of words of length $n$ that do not extend uniquely to their left. 
Then by assumption there exists $N\in\mathbb{N}$ such that for all $n\geq N$ we have $L_n\geq k$. However, $$ P_X(n+1)\geq P_X(n)+L_n, $$ and so $P_X(n)\geq P_X(N)+k\cdot(n-N)$ for all $n\geq N$. This contradicts~\eqref{eq1}, and the claim follows. We use this to show that there are at most $k-1$ elements in $X_R$ which are nonuniquely left extendable. If not, there exist distinct elements $x_1,\dots,x_k\in X_R$ which are all nonuniquely left extendable. Choose $M\in\mathbb{N}$ such that for any $1\leq i<j\leq k$, there exists $0\leq m<M$ such that $x_i(m)\neq x_j(m)$. By the first claim, there exists $n>M$ such that there are at most $k-1$ words of length $n$ that are nonuniquely left extendable. For all $1\leq i\leq k$, the word $$ \left(x_i(0),x_i(1),\dots,x_i(n-2),x_i(n-1)\right) $$ is a word of length $n$ that is nonuniquely left extendable and these words are pairwise distinct since $n>M$, leading to a contradiction. Thus the number of elements of $(X_R,\sigma)$ that are nonuniquely left extendable is at most $k-1$. \end{proof} \begin{notation}\label{NLE-def} We write $\mathbb{N}LE_0\subseteq X_R$ for the collection of nonuniquely left extendable points in $X_R$. For $m\in\mathbb{N}$, we write $\mathbb{N}LE_m:=\sigma^m(\mathbb{N}LE_0)$ for the collection of elements of $X_R$ whose preimage under $m$ iterates of $\sigma$ contains more than one point. \end{notation} \begin{lemma}\label{lemma-extension} If $y\in X_R\setminus\bigcup_{m=0}^{\infty}\mathbb{N}LE_m$, then there is a unique $z\in X$ for which $y=z_R$. \end{lemma} \begin{proof} If not, there exist distinct $z_1, z_2\in X$ with $y=(z_1)_R=(z_2)_R$. Thus there exists $i\in\mathbb{N}$ such that $z_1(-i)\neq z_2(-i)$. Set $i_0$ to be the minimal such $i$. Then $\sigma^{-i_0+1}y=(\sigma^{-i_0+1}z_1)_R=(\sigma^{-i_0+1}z_2)_R$, but $(\sigma^{-i_0}z_1)_R\neq(\sigma^{-i_0}z_2)_R$. Thus $\sigma^{-i_0+1}y\in \mathbb{N}LE_0$, which implies that $y\in \mathbb{N}LE_{i_0-1}$, a contradiction. 
\end{proof} \begin{lemma}\label{lemma-finite-extension} If $(X,\sigma)$ is a shift, $\varphi\in\mathcal{A}ut(X)$, and $y\in \mathbb{N}LE_0$, then there exists $m\geq 0$ such that $\varphi(y)\in \mathbb{N}LE_m$. \end{lemma} \begin{proof} If not, then $\varphi(y)\in X_R\setminus\bigcup_{m=0}^{\infty}\mathbb{N}LE_m$ and so Lemma~\ref{lemma-extension} implies that there is a unique $z\in X$ such that $\varphi(y)=z_R$. Since $\varphi$ is an automorphism, it follows that $\varphi^{-1}(z)$ is the only solution to the equation $y=x_R$, a contradiction of $y\in \mathbb{N}LE_0$. \end{proof} We use this to complete the characterization of the automorphism group for minimal aperiodic shifts with linear growth: \begin{proof}[Proof of Theorem~\ref{theorem:minimal}] Assume $(X,\sigma)$ is an aperiodic minimal shift such that there exists $k\in\mathbb{N}$ with $\liminf_{n\to\infty}P_X(n)/n<k$. Fix $y\in \mathbb{N}LE_0$ and let $\varphi\in\mathcal{A}ut(X)$. By Lemma~\ref{lemma-finite-extension}, there exists $m\in\mathbb{N}$ such that $\varphi(y)\in \mathbb{N}LE_m$. Let $m_{\varphi}\geq 0$ be the smallest non-negative integer for which $\varphi(y)\in \mathbb{N}LE_{m_{\varphi}}$. Then there exists $z_{\varphi}\in \mathbb{N}LE_0$ such that $\sigma^{m_{\varphi}}(z_{\varphi})=\varphi(y)$. Now suppose $\varphi_1, \varphi_2\in\mathcal{A}ut(X)$ and $z_{\varphi_1}=z_{\varphi_2}$. We claim that $\varphi_1$ and $\varphi_2$ project to the same element in $\mathcal{A}ut(X)/\langle\sigma\rangle$. Without loss, suppose $m_{\varphi_1}\leq m_{\varphi_2}$. Then $$ \varphi_2(y)=\sigma^{m_{\varphi_2}}(z_{\varphi_2})=\sigma^{(m_{\varphi_2}-m_{\varphi_1})}\circ\sigma^{m_{\varphi_1}}(z_{\varphi_1})=\sigma^{(m_{\varphi_2}-m_{\varphi_1})}\circ\varphi_1(y). $$ By minimality, every word of every length occurs syndetically in every element of $(X,\sigma)$. It follows that all words occur syndetically in every element of $(X_R,\sigma)$, and in particular, all words occur syndetically in $y$. 
Both $\varphi_2$ and $\sigma^{(m_{\varphi_2}-m_{\varphi_1})}\circ\varphi_1$ are sliding block codes. Since $\varphi_2(y)=\sigma^{(m_{\varphi_2}-m_{\varphi_1})}\circ\varphi_1(y)$, it follows that $\varphi_2$ and $\sigma^{(m_{\varphi_2}-m_{\varphi_1})}\circ\varphi_1$ have the same image on every word, meaning that they define the same block code. In other words, $\varphi_1$ and $\varphi_2$ project to the same element in $\mathcal{A}ut(X)/\langle\sigma\rangle$, proving the claim. Since Lemma~\ref{lemma-boshernitzan} implies that $|\mathbb{N}LE_0|\leq k-1$, there can be at most $k-1$ distinct elements of $(X_R,\sigma)$ that arise as $z_{\varphi}$ for $\varphi\in\mathcal{A}ut(X)$. Therefore, there are at most $k-1$ distinct elements of $\mathcal{A}ut(X)/\langle\sigma\rangle$. \end{proof} This can be used to characterize the automorphism groups for particular systems. We note the simplest case of a Sturmian shift for later use (see~\cite[Example 4.1]{Olli}): \begin{corollary} \label{cor:olli} If $(X,\sigma)$ is a Sturmian shift, then $\mathcal{A}ut(X)=\langle\sigma\rangle$. \end{corollary} \begin{proof} For a Sturmian shift, $(X,\sigma)$ is minimal, aperiodic, and $P_X(n)=n+1$ for all $n\in\mathbb{N}$. Applying Theorem~\ref{theorem:minimal} with $k=2$, we have that $\left|\mathcal{A}ut(X)/\langle\sigma\rangle\right|=1$. \end{proof} More generally: \begin{corollary} If $(X,\sigma)$ is aperiodic, minimal and there exists $k\in\mathbb{N}$ such that $$ \liminf_{n\to\infty}P_X(n)-kn=-\infty, $$ then $\mathcal{A}ut(X)$ is the semi-direct product of a finite group and $\mathbb{Z}$. \end{corollary} \begin{proof} By Theorem~\ref{theorem:minimal}, $\mathcal{A}ut(X)/\langle\sigma\rangle$ is finite. Since $\langle\sigma\rangle$ has infinite order and is contained in the center of $\mathcal{A}ut(X)$, it follows from the classification of virtually cyclic groups (see~\cite{SW}) that $\mathcal{A}ut(X)$ is the semi-direct product of a finite group and $\mathbb{Z}$. 
\end{proof} \section{Examples} \label{sec:examples} \subsection{Automorphism group with large polynomial growth} \label{sec:large-poly-growth} Proposition~\ref{prop:polynomial} shows that if $(X,\sigma)$ is a shift satisfying $$ \limsup_{n\to\infty}\frac{P_X(n)}{n}<k $$ then $\mathcal{A}ut(X)$ is locally a group of polynomial growth, with polynomial growth rate at most $k-1$. The following Proposition shows that this estimate of the polynomial growth rate of $\mathcal{A}ut(X)$ is optimal. \begin{proposition} Let $k\in\mathbb{N}$ be fixed and let $\mathcal{A}=\{0,1\}\times\{1,\dots,k\}$. There is a shift $X\subseteq\mathcal{A}^{\mathbb{Z}}$ with a dense set of aperiodic points such that $P_X(n)=kn+k$ and $\mathcal{A}ut(X)\cong\mathbb{Z}^k$. \end{proposition} \begin{proof} Recall that a Sturmian shift is an aperiodic, minimal shift of $\{0,1\}^{\mathbb{Z}}$ whose complexity function satisfies $P_X(n)=n+1$ for all $n$. There are uncountably many Sturmian shifts and any particular Sturmian shift only factors onto countably many other Sturmian shifts (since the factor map must be a sliding block code, of which there are only countably many). Therefore there exist $k$ Sturmian shifts $X_1, X_2,\dots,X_k$ such that there exists a sliding block code taking $X_i$ to $X_j$ if and only if $i=j$. We identify $X_i$ in a natural way with a shift of $\mathcal{A}^{\mathbb{Z}}$ by writing the elements of $X_i$ with the letters $(0,i)$ and $(1,i)$ and will abuse notation by also referring to this shift as $X_i$. Let $X:=X_1\cup\cdots\cup X_k$ (which is clearly shift invariant, and is closed because the minimum distance between a point in $X_i$ and $X_j$ is $1$ whenever $i\neq j$). Let $\varphi\in\mathcal{A}ut(X)$. As $\varphi$ is given by a sliding block code, $\varphi$ must preserve the sets $X_1,\dots,X_k$. Therefore $\mathcal{A}ut(X)\cong\mathcal{A}ut(X_1)\times\cdots\times\mathcal{A}ut(X_k)$. 
By Corollary~\ref{cor:olli} we have $\mathcal{A}ut(X_i)=\langle\sigma\rangle\cong\mathbb{Z}$ for $i=1,\dots,k$. So $\mathcal{A}ut(X)\cong\mathbb{Z}^k$. \end{proof} \subsection{Quickly growing transitive shifts with trivial automorphism group} \label{sec:no-complexity-threshold} Next we describe a general process which takes a minimal shift of arbitrary growth rate and produces a transitive shift with essentially the same growth, but whose automorphism group consists only of powers of the shift. This shows that there is no ``complexity threshold'' above which the automorphism group of a transitive shift must be nontrivial. \begin{lemma}\label{lemma:dense-orbit} If $(X,\sigma)$ is a transitive shift with precisely one dense orbit, then $\mathcal{A}ut(X)=\langle\sigma\rangle$. \end{lemma} \begin{proof} Suppose that there exists $x_0\in X$ such that $$ \{y\in X\colon y\text{ has a dense orbit}\}=\mathcal{O}(x_0). $$ If $\varphi\in\mathcal{A}ut(X)$, then $\varphi(x_0)$ has a dense orbit and so there exists $k\in\mathbb{Z}$ such that $\varphi(x_0)=\sigma^k(x_0)$. It follows that $\varphi$ and $\sigma^k$ agree on the (dense) orbit of $x_0$. Since both functions are continuous, they agree everywhere. \end{proof} \begin{example} Let $\mathcal{A}=\{0,1,2,\dots,n-1\}$ and let $X\subseteq\mathcal{A}^{\mathbb{Z}}$ be a minimal shift. Let $\tilde{\mathcal{A}}=\mathcal{A}\cup\{n\}$, where we add the symbol $n$ to the alphabet and $n\notin\mathcal{A}$. Fix $x_0\in X$ and define $\tilde{x}_0\in\tilde{\mathcal{A}}^{\mathbb{Z}}$ by: $$ \tilde{x}_0(i)=\begin{cases} x_0(i) & \text{ if }i\neq 0; \\ n & \text{ if }i=0. \end{cases} $$ Let $\tilde{X}\subseteq\tilde{\mathcal{A}}^{\mathbb{Z}}$ be the orbit closure of $\tilde{x}_0$. Then $\tilde{X}=X\cup\mathcal{O}(\tilde{x}_0)$, $(\tilde{X},\sigma)$ is transitive, $P_{\tilde{X}}(m)=P_X(m)+m$ for all $m\in\mathbb{N}$, and $\tilde{X}$ has precisely one dense orbit. By Lemma~\ref{lemma:dense-orbit}, $\mathcal{A}ut(\tilde{X})=\langle\sigma\rangle$. 
\end{example} \subsection{$\mathcal{A}ut(X)$ and $\mathcal{A}ut(X)/\mathcal{A}ut(X)\cap[\sigma]$ are not always finitely generated} \label{sec:not-global} Theorem~\ref{thm:main} shows that every finitely generated subgroup of $\mathcal{A}ut(X)$ is virtually $\mathbb{Z}^d$. When $X$ has a dense set of aperiodic points, Theorem~\ref{th:finitely-generated} shows that $\mathcal{A}ut(X)/\mathcal{A}ut(X)\cap[\sigma]$ is finite. In this section we show that the result of Theorem~\ref{th:finitely-generated} cannot be extended to the general case, and the words ``every finitely generated subgroup'' cannot be removed from the statement of Theorem~\ref{thm:main}. We begin with an example to set up our construction. \begin{example} Let $\mathcal{A}=\{0,1\}$ and for $n\in\mathbb{N}$, let $x_n\in\mathcal{A}^{\mathbb{Z}}$ be the periodic point $$ x_n(i)=\begin{cases} 1 & \text{ if }i\equiv0\text{ (mod $2^n$)}; \\ 0 & \text{ otherwise}. \end{cases} $$ Let $X$ be the closure of the set $\{x_n\colon n\in\mathbb{N}\}$ under $\sigma$. If we define $$ x_{\infty}(i)=\begin{cases} 1 & \text{ if }i=0; \\ 0 & \text{ otherwise}, \end{cases} $$ and ${\bf 0}$ to be the $\mathcal{A}$-coloring of all zeros, then we have $$ X=\{{\bf 0}\}\cup\mathcal{O}(x_{\infty})\cup\bigcup_{n=1}^{\infty}\mathcal{O}(x_n). $$ Suppose $R\in\mathbb{N}$ is fixed and $\varphi\in\mathcal{A}ut_R(X)$. Since $\varphi$ preserves the period of periodic points, $\varphi({\bf 0})={\bf 0}$. In particular, the block code $\varphi$ takes the block consisting of all zeros to $0$. It follows that there exists $k\in[-R,R]$ such that $\varphi(x_{\infty})=\sigma^k(x_{\infty})$. For any $m>2R+1$, the blocks of length $2R+1$ occurring in $x_m$ are identical to those appearing in $x_{\infty}$ and so $\varphi(x_m)=\sigma^k(x_m)$ for all such $m$. Now let $\varphi_1,\dots,\varphi_n\in\mathcal{A}ut(X)$ and find $R\in\mathbb{N}$ such that $\varphi_1,\dots,\varphi_n,\varphi_1^{-1},\dots,\varphi_n^{-1}\in\mathcal{A}ut_R(X)$. 
For $1\leq i\leq n$, let $k_i\in[-R,R]$ be such that for all $m>2R+1$ we have $\varphi_i(x_m)=\sigma^{k_i}(x_m)$. Then for any $N\in\mathbb{N}$, any $e_1,\dots,e_N\in\{1,\dots,n\}$, any $\epsilon_1,\dots,\epsilon_N\in\{-1,1\}$, and any $m>2R+1$, we have $$ \left(\varphi_{e_1}^{\epsilon_1}\circ\varphi_{e_2}^{\epsilon_2}\circ\cdots\circ\varphi_{e_N}^{\epsilon_N}\right)(x_m)=\sigma^{(\epsilon_1\cdot k_{e_1}+\epsilon_2\cdot k_{e_2}+\cdots+\epsilon_N\cdot k_{e_N})}(x_m). $$ Then if $\varphi\in\mathcal{A}ut(X)$ is the automorphism that acts like $\sigma$ on $\mathcal{O}(x_{R+1})$ and acts trivially on $X\setminus\mathcal{O}(x_{R+1})$ (this map is continuous because $x_{R+1}$ is isolated), then $\varphi\notin\langle\varphi_1,\dots,\varphi_n\rangle$. Therefore $\langle\varphi_1,\dots,\varphi_n\rangle\neq\mathcal{A}ut(X)$. Since $\varphi_1,\dots,\varphi_n\in\mathcal{A}ut(X)$ were arbitrary, it follows that $\mathcal{A}ut(X)$ is not finitely generated. On the other hand $$ P_X(n)=n+2^{\lfloor\log_2(n)\rfloor+1}-1<3n $$ for all $n$, so $P_X(n)$ grows linearly. We also remark that $\mathcal{A}ut(X)=\mathcal{A}ut(X)\cap[\sigma]$ for this shift. \end{example} \begin{proposition}\label{ex:infinitely-generated} There exists a shift $(X,\sigma)$ of linear growth that has a dense set of periodic points and is such that none of the groups $\mathcal{A}ut(X)$, $\mathcal{A}ut(X)\cap[\sigma]$, and $\mathcal{A}ut(X)/\mathcal{A}ut(X)\cap[\sigma]$ are finitely generated. \end{proposition} \begin{proof} Let $X_1$ be the shift of $\{0,1\}^{\mathbb{Z}}$ constructed in the previous example. Let $X_2$ be the same shift, constructed over the alphabet $\{2,3\}$ (by identifying $0$ with $2$ and $1$ with $3$). Let $X=X_1\cup X_2$ and observe that $d(X_1,X_2)=1$. Since $\mathcal{A}ut(X_i)\cap[\sigma]=\mathcal{A}ut(X_i)$ for $i=1,2$, we have $\mathcal{A}ut(X)\cap[\sigma]\cong\mathcal{A}ut(X_1)\times\mathcal{A}ut(X_2)$. Therefore $\mathcal{A}ut(X)\cap[\sigma]$ is not finitely generated. 
On the other hand, $$ P_X(n)=P_{X_1}(n)+P_{X_2}(n)=2\cdot P_{X_1}(n)<6n $$ so $X$ is a shift of linear growth (and has a dense set of periodic points). We claim that $\mathcal{A}ut(X)/\mathcal{A}ut(X)\cap[\sigma]$ is not finitely generated. Define $\delta\in\mathcal{A}ut(X)$ to be the range $0$ involution that exchanges $0$ with $2$ and $1$ with $3$. For each $m\in\mathbb{N}$ let $\delta_m\in\mathcal{A}ut(X)$ be the involution which exchanges the (unique) orbit of period $2^m$ in $X_1$ with the (unique) orbit of period $2^m$ in $X_2$ by exchanging $0$ with $2$ and $1$ with $3$ in these orbits only (and fixing the remainder of $X$). For $i\in\mathbb{N}$ let $\tilde{\delta}_i$ be the projection of $\delta_i$ to $\mathcal{A}ut(X)/\mathcal{A}ut(X)\cap[\sigma]$ and let $\tilde{\delta}$ be the projection of $\delta$. These involutions commute pairwise and the set $\{\tilde{\delta}_i\colon i\in\mathbb{N}\}\cup\{\tilde{\delta}\}$ is clearly independent. Now let ${\bf x}\in X_1$ be the point ${\bf x}(i)=1$ if and only if $i=0$, and let ${\bf y}\in X_2$ be the point ${\bf y}(i)=3$ if and only if $i=0$. Let $\varphi\in\mathcal{A}ut(X)$ be fixed and observe that either $\varphi({\bf x})\in\mathcal{O}({\bf x})$ or $\varphi({\bf x})\in\mathcal{O}({\bf y})$. In the former case define $\epsilon:=0$ and in the latter case define $\epsilon:=1$, so that $\varphi\circ\delta^{\epsilon}$ preserves the orbit of ${\bf x}$ (hence also the orbit of ${\bf y}$). As $\varphi\circ\delta^{\epsilon}$ is given by a block code which carries the block of all $0$'s to $0$, there are at most finitely many $m$ such that $\varphi\circ\delta^{\epsilon}$ does not preserve the orbit of the (unique) periodic orbit of period $2^m$ in $X_1$. Let $m_1<\cdots<m_n$ be the set of $m$ for which it does not preserve the orbit. Then $$ \varphi\circ\delta^{\epsilon}\circ\delta_{m_1}\circ\cdots\circ\delta_{m_n}\in\mathcal{A}ut(X)\cap[\sigma]. 
$$ Therefore $\mathcal{A}ut(X)/\mathcal{A}ut(X)\cap[\sigma]$ is the group generated by $\tilde{\delta}, \tilde{\delta}_1, \tilde{\delta}_2, \tilde{\delta}_3,\dots$ This group is isomorphic to $\prod_{i=1}^{\infty}\mathbb{Z}_2$ so $\mathcal{A}ut(X)/\mathcal{A}ut(X)\cap[\sigma]$ is not finitely generated. Finally, as $\mathcal{A}ut(X)$ factors onto a group that it not finitely generated, it is not finitely generated either. \end{proof} \section{Automorphisms of periodic shifts} \label{sec:periodic2} We characterize which finite groups arise as automorphism groups of shifts. \begin{definition} For $n>1$ and $m\in\mathbb{N}$, let $$ \mathbb{Z}_n^m:=\underbrace{\mathbb{Z}_n\times\mathbb{Z}_n\times\cdots\times\mathbb{Z}_n}_{m\text{ times}} $$ where $\mathbb{Z}_n$ denotes $\mathbb{Z}/n\mathbb{Z}$. Let $S_m$ denote the symmetric group on $m$ letters and define a homomorphism $\psi\colon S_m\to\mathcal{A}ut(\mathbb{Z}_n^m)$ by $$ \left(\psi(\pi)\right)(i_1,\dots,i_m):=(i_{\pi(1)},\dots,i_{\pi(m)}). $$ Then the {\em generalized symmetric group} is defined as in~\cite{Osima} to be $$ S(n,m):=\mathbb{Z}_n^m\rtimes_{\psi} S_m. $$ Equivalently, $S(n,m)$ is the wreath product $\mathbb{Z}_n\wr S_m$. \end{definition} \begin{theorem} Suppose $G$ is a finite group. There exists a shift $(X,\sigma)$ for which $\mathcal{A}ut(X)\cong G$ if and only if there exist $s\in\mathbb{N}$, $n_1<n_2<\cdots<n_s$, and $m_1, m_2, \dots,m_s\in\mathbb{N}$ such that $$ G\cong S(n_1,m_1)\times S(n_2,m_2)\times\cdots\times S(n_s,m_s). $$ \end{theorem} \begin{proof} Suppose $(X,\sigma)$ is a shift for which $\mathcal{A}ut(X)$ is finite. Since $\sigma\in\mathcal{A}ut(X)$, there exists $k\in\mathbb{N}$ such that $\sigma^k(x)=x$ for all $x\in X$. That is, $X$ is comprised entirely of periodic points such that the minimal period of each point is a divisor of $k$. Since a shift can have only finitely many such points, $X$ is finite. 
Let $x_1,\dots,x_N\in X$ be representatives of the orbits in $X$, meaning that $\mathcal{O}(x_i)\cap\mathcal{O}(x_j)=\emptyset$ whenever $i\neq j$ and for all $x\in X$ there exist $i,k\in\mathbb{N}$ such that $x=\sigma^k(x_i)$. For $i=1, \ldots, N$, let $p_i$ be the minimal period of $x_i$ and, without loss, assume that $p_1\leq p_2\leq\cdots\leq p_N$. Define $n_1:=p_1$ and inductively define $n_2,n_3,\dots,n_s$ by $$ n_{i+1}:=\min\{p_j\colon p_j>n_i\}, $$ where $s$ is the number of steps before the construction terminates. Define $$ m_i:=\vert\{j\colon p_j=n_i\}\vert. $$ Let $\varphi\in\mathcal{A}ut(X)$. Then for $1\leq i\leq s$, $\varphi$ induces a permutation on the set of periodic points of minimal period $n_i$. More precisely, for fixed $1\leq i\leq s$, we can define $\pi_i^{\varphi}\in S_{m_i}$ to be the permutation that sends $j\in\{1,2,\dots,m_i\}$ to the unique integer $k\in\{1,2,\dots,m_i\}$ such that $\varphi(x_{m_1+\cdots+m_{i-1}+j})\in\mathcal{O}(x_{m_1+\cdots+m_{i-1}+k})$. For $1\leq j\leq m_i$, choose $k_j^i\in\mathbb{Z}_{n_i}$ such that $$ \varphi(x_{m_1+\cdots+m_{i-1}+j})=\sigma^{k_j^i}(x_{m_1+\cdots+m_{i-1}+\pi_i^{\varphi}(j)}). $$ Then the map $$ \varphi\mapsto(k_1^1,k_2^1,\dots,k_{m_1}^1,\pi_1^{\varphi},k_1^2,\dots,k_{m_2}^2,\pi_2^{\varphi},\dots,k_1^s,\dots,k_{m_s}^s,\pi_s^{\varphi}) $$ is a homomorphism from $\mathcal{A}ut(X)$ to $S(n_1,m_1)\times\cdots\times S(n_s,m_s)$. The kernel of this map is trivial. To check that it is surjective, if $\pi_1,\dots,\pi_s$ are permutations ($\pi_i\in S_{m_i}$ for all $i$), then define $$ \varphi_{\pi_1,\dots,\pi_s}(x_{m_1+\cdots+m_{i-1}+j})=x_{m_1+\cdots+m_{i-1}+\pi_i(j)} $$ and extend this to an automorphism of $(X,\sigma)$. Similarly, for $1\leq i\leq N$, define $$ \varphi_i(\sigma^k(x_j)):=\sigma^{k+\delta_{i,j}}(x_j), $$ where $\delta_{i,j}$ is the Kronecker delta. Note that each of these maps is given by a block code, where the range is the smallest $R$ such that $\sigma^R(x)=x$ for all $x\in X$. 
Taken together, this shows that the map $\phi$ is surjective and thus is an isomorphism. Conversely, suppose that $n_1<\cdots<n_s$ are given and $m_1,\dots,m_s\in\mathbb{N}$. For $1\leq i\leq s$ and $1\leq j\leq m_i$, define $$ x_{i,j}(k):=\begin{cases} j & \text{ if }k\equiv0\text{ (mod $n_i$)}; \\ 0 & \text{ otherwise}. \end{cases} $$ Let $$ X^{\prime}=\bigcup_{i=1}^s\bigcup_{j=1}^{n_i}x_{i,j} $$ and let $X$ be the closure of $X^{\prime}$ under $\sigma$. Then $X$ consists of periodic points, with precisely $m_i$ distinct orbits of minimal period $n_i$, for $1\leq i\leq s$. Thus \begin{equation*} \mathcal{A}ut(X)\cong S(n_1,m_1)\times\cdots\times S(n_s,m_s). \qedhere \end{equation*} \end{proof} \noindent {\bf Acknowledgment}: We thank Jim Davis for pointing us to reference~\cite{SW}. \end{document}
\begin{document} \title{Approximate $C^*$-ternary ring homomorphisms} \author{Mohammad Sal Moslehian} \address{Department of Mathematics, Ferdowsi University, P. O. Box 1159, Mashhad 91775, Iran.} \email{[email protected]} \subjclass[2000]{Primary 39B82; Secondary 39B52, 46L05.} \keywords{generalized Hyers--Ulam--Rassias stability, $C^*$-ternary ring, $C^*$-ternary homomorphism, Trif's functional equation} \begin{abstract} In this paper, we establish the generalized Hyers--Ulam--Rassias stability of $C^*$-ternary ring homomorphisms associated to the Trif functional equation \begin{eqnarray*} d \cdot C_{d-2}^{l-2} f(\frac{x_1+\cdots +x_d}{d})+ C_{d-2}^{l-1}\sum_{j=1}^d f(x_j) = l \cdot \sum_{1\leq j_1< \cdots < j_l\leq d} f(\frac{x_{j_1} + \cdots + x_{j_l}}{l}). \end{eqnarray*} \end{abstract} \maketitle \section {Introduction and preliminaries} A {\it ternary ring of operators} (TRO) is a closed subspace of the space $B({\mathcal H}, {\mathcal K})$ of bounded linear operators between Hilbert spaces ${\mathcal H}$ and ${\mathcal K}$ which is closed under the ternary product $[xyz] := xy^{\ast}z$. This concept was introduced by Hestenes \cite{HES}. The class of TRO's includes Hilbert $C^*$-modules via the ternary product $[xyz] := \langle x, y\rangle z$. It is remarkable that every TRO is isometrically isomorphic to a corner $p {\mathcal A} (1-p)$ of a $C^*$-algebra ${\mathcal A}$, where $p$ is a projection. A closely related structure to TRO's is the so-called $JC^*$-triple that is a norm closed subspace of $B({\mathcal H})$ being closed under the triple product $[xyz]=(xy^*z + zy^*x)/2$; cf. \cite{HAR}. It is also true that a commutative TRO, i.e. a TRO with the property $xy^*z=zy^*x$, is an associative $JC^*$-triple. 
Following \cite{ZET} a {\it C*-ternary ring} is defined to be a Banach space ${\mathcal A}$ with a ternary product $(x, y, z)\mapsto [xyz]$ from ${\mathcal A}$ into ${\mathcal A}$ which is linear in the outer variables, conjugate linear in the middle variable, and associative in the sense that $[xy[zts]]=[x[tzy]s]=[[xyz]ts]$, and satisfies $\|[xyz]\|\leq\|x\|\|y\|\|z\|$ and $\|[xxx]\|=\|x\|^{3}$. For instance, any TRO is a C*-ternary ring under the ternary product $[xyz]= xy^*z$. A linear mapping $\varphi$ between $C^*$-ternary rings is called a {\it homomorphism} if $\varphi([xyz])=[\varphi(x)\varphi(y)\varphi(z)]$ for all $x,y,z\in {\mathcal A}$. The stability problem of functional equations originated from a question of Ulam \cite{ULA}, posed in 1940, concerning the stability of group homomorphisms. In the next year, Hyers \cite{HYE} gave a partial affirmative answer to the question of Ulam in the context of Banach spaces. In 1978, Th. M. Rassias \cite{RAS1} extended the theorem of Hyers by considering the unbounded Cauchy difference $\|f(x+y)-f(x)-f(y)\|\leq \varepsilon(\|x\|^ p+\|y\|^ p)$, where $\varepsilon>0$ and $ p\in [0,1)$ are constants. The result of Th. M. Rassias has provided a lot of influence in the development of what we now call {\it Hyers--Ulam--Rassias stability} of functional equations. In 1994, a generalization of Rassias' result, the so-called generalized Hyers--Ulam--Rassias stability, was obtained by G\u avruta \cite{GAV} by following the same approach as in \cite{RAS1}. During the last decades several stability problems of functional equations have been investigated in the spirit of Hyers--Ulam--Rassias-G\u avruta. See \cite{CZE, H-I-R, JUN, RAS2, MOS1} and references therein for more detailed information on stability of functional equations. As far as the author knows, \cite{BOU} is the first paper dealing with stability of (ring) homomorphisms. 
Another related result is that of Johnson \cite{JOH} in which he introduced the notion of almost algebra $*$-homomorphism between two Banach $*$-algebras. In fact, so many interesting results on the stability of homomorphisms have been obtained by many mathematicians; see \cite{RAS3} for a comprehensive account on the subject. In \cite{B-M} the stability of homomorphisms between $J^*$-algebras associated to the Cauchy equation $f(x+y)=f(x)+f(y)$ was investigated. Some results on stability ternary homomorphisms may be found at \cite{A-M, M-S}. Trif \cite{TRI} proved the generalized stability for the so-called Trif functional equation \begin{eqnarray*} d \cdot C_{d-2}^{l-2} f(\frac{x_1+\cdots +x_d}{d})+ C_{d-2}^{l-1}\sum_{j=1}^d f(x_j) = l \cdot \sum_{1\leq j_1< \cdots < j_l\leq d} f(\frac{x_{j_1} + \cdots + x_{j_l}}{l}), \end{eqnarray*} deriving from an inequality of Popoviciu \cite{POP} for convex functions (here, $C^k_r$ denotes $\frac{r!}{k!(r-k)!}$). Hou and Park \cite{P-H} applied the result of Trif to study $*$-homomorphisms between unital $C^*$-algebras. Further, Park investigated the stability of Poisson $JC^*$-algebra homomorphisms associated with Trif's equation (see \cite{PAR1}. In this paper, using some strategies from \cite{B-M, L-S, P-H, PAR1, TRI}, we establish the generalized Hyers--Ulam--Rassias stability of $C^*$-ternary homomorphisms associated to the Trif functional equation. If a $C^*$-ternary ring $({\mathcal A}, [\;])$ has an identity, i.e. an element $e$ such that $x = [xee] = [eex]$ for all $x\in {\mathcal A}$, then it is easy to verify that $x\odot y := [xey]$ and $x^*:= [exe]$ make ${\mathcal A}$ into a unital $C^*$-algebra (due to the fact that $\|x\odot x^*\odot x\| = \|x\|^3$). Conversely, if $(A, \odot)$ is a (unital) $C^*$-algebra, then $[xyz] := x\odot y^*\odot z$ makes ${\mathcal A}$ into a $C^*$-ternary ring (with the unit $e$ such that $x\odot y = [xey]$) (see \cite{MOS2}). 
Thus our approach may be applied to investigate of stability of homomorphisms between unital $C^*$-algebras. Throughout this paper, ${\mathcal A}$ and ${\mathcal B}$ denote $C^*$-ternary rings. In addition, let $q=\frac{l(d-1)}{d-l}$ and $r = -\frac{l}{d-l}$ for positive integers $l, d$ with $2\leq l\leq d-1$. By an {\it approximate $C^*$-ternary ring homomorphism associated to the Trif equation} we mean a mapping $f: {\mathcal A}\to {\mathcal B}$ for which there exists a certain control function $\varphi: {\mathcal A}^{d+3}\to [0, \infty)$ such that if \begin{eqnarray*} D_\mu f(x_1, \cdots, x_d, u, v, w)&=&\|d \cdot C_{d-2}^{l-2} f(\frac{\mu x_1+\cdots + \mu x_d}{d} + \frac{[uvw]}{d \cdot C_{d-2}^{l-2}}) + C_{d-2}^{l-1} \sum_{j=1}^d \mu f(x_j) \\ &&- l \cdot \sum_{1\leq j_1< \cdots < j_l\leq d} \mu f(\frac{x_{j_1} + \cdots + x_{j_l}}{l}) - [f(u)f(v)f(w)]\|. \end{eqnarray*} then \begin{eqnarray}\label{trifapp} D_\mu f(x_1, \cdots, x_d, u, v, w)\leq\varphi(x_1, \cdots, x_d, u, v, w), \end{eqnarray} for all scalars $\mu$ in a subset ${\mathbb E}$ of ${\mathbb C}$ and all $x_1, \cdots, x_d, u, v, w\in {\mathcal A}$. It is not hard to see that a function $T : X \to Y$ between linear spaces satisfies Trif's equation if and only if there is an additive mapping $S : X \to Y$ such that $T(x) = S(x) + T(0)$ for all $x \in X$. In fact, $S(x) := (1/2)(T(x) - T(-x))$; see \cite{TRI}. \section{Main Results} In this section, we are going to establish the generalized Hyers--Ulam--Rassias stability of homomorphisms in $C^*$-ternary rings associated with the Trif functional equation. We start our work with investigating the case in which an approximate $C^*$-ternary ring homomorphism associated to the Trif equation is an exact homomorphism. 
\begin{proposition} Let $T:{\mathcal A} \to {\mathcal B}$ be an approximate $C^*$-ternary ring homomorphism associated to the Trif equation with ${\mathbb E}={\mathbb C}$ and a control function $\varphi$ satisfying \begin{eqnarray*} \lim_{n\to\infty}q^{-n}\varphi(q^nx_1, \cdots, q^nx_d, q^n u, q^nv, q^nw)=0, \end{eqnarray*} for all $x_1, \cdots, x_d, u, v, w\in {\mathcal A}$. Suppose that $T(qx)=qT(x)$ for all $x\in {\mathcal A}$. Then $T$ is a $C^*$-ternary homomorphism. \end{proposition} \begin{proof} $T(0)=0$, because $T(0)=qT(0)$ and $q>1$. We have \begin{eqnarray*} D_1 T(x_1, \cdots, x_d, 0, 0, 0)&=& q^{-n}D_1 T(q^nx_1, \cdots, q^nx_d, 0, 0, 0)\\ &\leq& q^{-n}\varphi(q^nx_1, \cdots, q^nx_d, 0, 0, 0). \end{eqnarray*} Taking the limit as $n\to\infty$ we conclude that $T$ satisfies Trif's equation. Hence $T$ is additive. It follows from \begin{eqnarray*} D_\mu T(q^nx, \cdots, q^nx, 0, 0, 0) &=& q^n\|d \cdot C_{d-2}^{l-2} (T(\mu x) -\mu T(x))\| \leq \varphi(q^nx, \cdots, q^nx, 0, 0, 0), \end{eqnarray*} that $T$ is homogeneous. Set $x_1=\cdots=x_d=0$ and replace $u, v, w$ by $q^nu, q^nv, q^nw$, respectively, in (\ref{trifapp}). Since $T$ is homogeneous, we have \begin{eqnarray*} \|T([uvw])-[T(u)T(v)T(w)]\|&=& q^{-3n}\|T([q^nu q^nv q^nw])- [T(q^nu)T(q^nv)T(q^nw)]\| \\ &\leq& q^{-n}\varphi(0, \cdots, 0, q^nu, q^nv, q^nw), \end{eqnarray*} for all $u, v, w\in {\mathcal A}$. The right hand side tends to zero as $n\to\infty$. Hence $T([uvw])=[T(u)T(v)T(w)]$ for all $u, v, w\in {\mathcal A}$. 
\end{proof} \begin{theorem}\label{main} Let $f:{\mathcal A} \to {\mathcal B}$ be an approximate $C^*$-ternary ring homomorphism associated to the Trif equation with ${\mathbb E}={\mathbb T}$ and a control function $\varphi :{\mathcal A}^{d+3} \to [0, \infty)$ satisfying \begin{eqnarray}\label{phi} \widetilde{\varphi}(x_1, \cdots, x_d, u, v, w):=\sum_{j=0}^{\infty} q^{-j} \varphi(q^jx_1, \cdots, q^jx_d, q^ju, q^jv, q^jw) < \infty , \end{eqnarray} for all $x_1, \cdots, x_d, u, v, w\in{\mathcal A}$. If $f(0)= 0$, then there exists a unique $C^*$-ternary ring homomorphism $T:{\mathcal A} \to {\mathcal B}$ such that \begin{eqnarray*} \|f(x) - T(x)\|\leq \frac{1}{l \cdot C_{d-1}^{l-1}} \widetilde{\varphi}(qx, rx, \cdots, rx, 0, 0, 0), \end{eqnarray*} for all $x\in{\mathcal A}$. \end{theorem} \begin{proof} Set $u=v=w=0, \mu =1$ and replace $x_1, \cdots ,x_d$ by $qx, rx, \cdots, rx$ in (\ref{trifapp}). Then \begin{eqnarray*} \|C_{d-2}^{l-1}f(qx)-l \cdot C_{d-1}^{l-1} f(x)\|\leq \varphi(qx, rx, \cdots, rx, 0, 0, 0) \quad (x \in {\mathcal A}). \end{eqnarray*} One can use induction to show that \begin{eqnarray}\label{approx} &&\|q^{-n}f(q^nx)-q^{-m}f(q^mx)\|\nonumber\\ &\leq& \frac{1}{l \cdot C_{d-1}^{l-1}}\sum_{j=m}^{n-1}q^{-j} \varphi\big(q^j(qx), q^j(rx), \cdots, q^j(rx), 0, 0, 0\big), \end{eqnarray} for all nonnegative integers $m<n$ and all $x \in {\mathcal A}$. Hence the sequence $\{q^{-n}f(q^nx)\}_{n\in {\mathbb N}}$ is Cauchy for all $x\in {\mathcal A}$. Therefore we can define the mapping $T:{\mathcal A} \to {\mathcal B}$ by \begin{eqnarray}\label{lim} T(x) := \lim_{n\to\infty}\frac{1}{q^n} f(q^nx)\quad (x\in{\mathcal A}). 
\end{eqnarray} Since \begin{eqnarray*} D_1T(x_1, \cdots ,x_d, 0, 0, 0) &=& \lim_{n\to\infty} q^{-n}D_1f(q^nx_1,\cdots, q^nx_d, 0, 0, 0)\\ &\leq& \lim_{n\to\infty} q^{-n}\varphi(q^nx_1,\cdots, q^nx_d, 0, 0, 0)\\ &=& 0, \end{eqnarray*} we conclude that $T$ satisfies the Trif equation and so it is additive (note that (\ref{lim}) implies that $T(0)=0$). It follows from (\ref{lim}) and (\ref{approx}) with $m=0$ that \begin{eqnarray*} \|f(x)- T(x)\| \leq \frac{1}{l \cdot C_{d-1}^{l-1}} \widetilde{\varphi}(qx, rx, \cdots, rx, 0, 0, 0), \end{eqnarray*} for all $x\in {\mathcal A}$. We use the strategy of \cite{TRI} to show the uniqueness of $T$. Let $T'$ be another additive mapping fulfilling \begin{eqnarray*} \|f(x)- T'(x)\| \leq \frac{1}{l \cdot C_{d-1}^{l-1}} \widetilde{\varphi}(qx, rx, \cdots, rx, 0, 0, 0), \end{eqnarray*} for all $x\in {\mathcal A}$. We have \begin{eqnarray*} \|T(x)- T'(x)\|&=&q^{-n}\|T(q^nx)-T'(q^nx)\|\\ &\leq& q^{-n}\|T(q^nx)-f(q^nx)\|+ q^{-n}\|f(q^nx)-T'(q^nx)\|\\ &\leq& \frac{2q^{-n}}{l \cdot C_{d-1}^{l-1}}\widetilde{\varphi}\big(q^n(qx), q^n(rx), \cdots, q^n(rx), 0, 0, 0\big)\\ &\leq& \frac{2}{l \cdot C_{d-1}^{l-1}}\sum_{j=n}^\infty q^{-j} \varphi\big(q^j(qx),q^j(rx), \cdots, q^j(rx), 0, 0, 0\big), \end{eqnarray*} for all $x\in{\mathcal A}$. Since the right hand side tends to zero as $n\to\infty$, we deduce that $T(x)=T'(x)$ for all $x\in{\mathcal A}$. Let $\mu\in{\mathbb T}^1$. Setting $x_1= \cdots = x_d = x$ and $u=v=w=0$ in (\ref{trifapp}) we get \begin{eqnarray*} \| d \cdot C_{d-2}^{l-2} \big(f(\mu x) -\mu f(x)\big)\| \leq \varphi(x, \cdots, x, 0, 0, 0), \end{eqnarray*} for all $x\in{\mathcal A}$. So that \begin{eqnarray*} q^{-n} \| d \cdot C_{d-2}^{l-2} \big(f(\mu q^n x) -\mu f(q^n x)\big)\| \leq q^{-n} \varphi(q^nx, \cdots, q^nx, 0, 0, 0), \end{eqnarray*} for all $x\in{\mathcal A}$. 
Since the right hand side tends to zero as $n\to\infty$, we have \begin{eqnarray*} \lim_{n \to \infty}q^{-n}\|f(\mu q^n x) -\mu f(q^n x)\| = 0, \end{eqnarray*} for all $\mu\in{\mathbb T}^1$ and all $x\in{\mathcal A}$. Hence \begin{eqnarray*} T(\mu x) = \lim_{n\to \infty}\frac{f(q^n \mu x)}{q^n}= \lim_{n\to \infty}\frac{\mu f(q^nx)}{q^n} = \mu T(x), \end{eqnarray*} for all $\mu\in{\mathbb T}^1$ and all $x\in{\mathcal A}$. Obviously, $T(0x)=0=0T(x)$. Next, let $\lambda \in {\mathbb C} \;\;(\lambda \neq 0)$, and let $M$ be a natural number greater than $|\lambda|$. By an easily geometric argument, one can conclude that there exist two numbers $\mu_1, \mu_2 \in {\mathbb T}$ such that $2\frac{\lambda}{M}=\mu_1+\mu_2$. By the additivity of $T$ we get $T\big(\frac{1}{2}x\big)=\frac{1}{2}T(x)$ for all $ x\in {\mathcal A}$. Therefore \begin{eqnarray*} T(\lambda x)& = & T\big(\frac{M}{2}\cdot 2 \cdot \frac{\lambda}{M}x\big)=MT\big(\frac{1}{2}\cdot 2\cdot \frac{\lambda}{M}x\big) =\frac{M}{2}T\big(2\cdot \frac{\lambda}{M}x\big)\\ & = & \frac{M}{2}T(\mu_1x+\mu_2x) =\frac{M}{2}\big(T(\mu_1x)+T(\mu_2x)\big) \\ & = & \frac{M}{2}(\mu_1+\mu_2)T(x) =\frac{M}{2}\cdot 2\cdot \frac{\lambda}{M}\\ &=&\lambda T(x), \end{eqnarray*} for all $x \in {\mathcal A}$, so that $T$ is a ${\mathbb C}$-linear mapping. Set $\mu =1$ and $x_1=\cdots=x_d=0$, and replace $u, v, w$ by $q^nu, q^nv, q^nw$, respectively, in (\ref{trifapp}) to get \begin{eqnarray*} \frac{1}{q^{3n}}\big\|d \cdot C_{d-2}^{l-2} f\big(\frac{q^{3n}}{d \cdot C_{d-2}^{l-2}}[uvw]\big)-\big[f(q^nu)f(q^nv)f(q^nw)\big]\big\|\\ \leq q^{-3n}\varphi(0, \cdots, 0, q^nu, q^nv, q^nw), \end{eqnarray*} for all $u, v, w\in {\mathcal A}$. 
Then by applying the continuity of the ternary product $(x,y,z)\mapsto [xyz]$ we deduce \begin{eqnarray*} T([uvw])&=& d \cdot C_{d-2}^{l-2} T\big(\frac{1}{d \cdot C_{d-2}^{l-2}}[uvw]\big)\\ &=&\lim_{n\to\infty}\frac{d \cdot C_{d-2}^{l-2}}{q^{3n}} f\big(\frac{q^{3n}}{d \cdot C_{d-2}^{l-2}}[uvw]\big)\\ &=&\lim_{n\to\infty}\big[\frac{f(q^nu)}{q^n}\frac{f(q^nv)}{q^n}\frac{f(q^nw)}{q^n}\big]\\ &=& [T(u)T(v)T(w)], \end{eqnarray*} for all $u, v, w\in {\mathcal A}$. Thus $T$ is a $C^*$-ternary homomorphism. \end{proof} \begin{example} Let $S:{\mathcal A} \to {\mathcal A}$ be a (bounded) $C^*$-ternary homomorphism, and let $f:{\mathcal A} \to {\mathcal A}$ be defined by $$f(x)=\left \{\begin{array}{cc}S(x) \;\;\;\;\;\; \|x\|<1\\ 0 \;\;\;\;\;\;\;\;\;\;\;\; \|x\|\geq 1 \end{array} \right .$$ and $$\varphi(x_1, \cdots, x_d, u, v, w) := \delta, $$ where $\delta := d \cdot C_{d-2}^{l-2} + d \cdot C_{d-2}^{l-1} + l \cdot C_d^l + 1$. Then \begin{eqnarray*} \widetilde{\varphi}(x_1, \cdots, x_d, u, v, w)&=&\sum_{n=0}^\infty q^{-n}\cdot \delta = \frac{\delta q}{q-1}, \end{eqnarray*} and \begin{eqnarray*} D_\mu f(x_1, \cdots, x_d, u, v, w)\leq \varphi (x_1, \cdots, x_d, u, v, w), \end{eqnarray*} for all $\mu\in {\mathbb T}^1$ and all $x_1, \cdots, x_d, u, v, w\in {\mathcal A}$. Note also that $f$ is not linear. It follows from Theorem \ref{main} that there is a unique $C^*$-ternary ring homomorphism $T: {\mathcal A} \to {\mathcal A}$ such that \begin{eqnarray*} \|f(x)-T(x)\|\leq \frac{1}{l \cdot C_{d-1}^{l-1}}\, \widetilde{\varphi}(qx, rx, \cdots, rx, 0, 0, 0) \qquad (x\in {\mathcal A}). \end{eqnarray*} Further, $T(0)=\lim_{n\to\infty}\frac{f(0)}{q^n}=0$ and for $x\neq 0$ we have \begin{eqnarray*} T(x)=\lim_{n\to\infty}\frac{f(q^nx)}{q^n} =\lim_{n\to\infty}\frac{0}{q^n}=0, \end{eqnarray*} since for sufficiently large $n, \|q^nx\|\geq 1$. Thus $T$ is identically zero. 
\end{example} \begin{corollary} Let $f:{\mathcal A} \to {\mathcal B}$ be a mapping with $f(0)= 0$ and there exist constants $\varepsilon \geq 0$ and $p\in[0, 1)$ such that \begin{eqnarray*} D_\mu f(x_1, \cdots, x_d, u, v, w)\leq \varepsilon (\sum_{j=1}^d \|x_j\|^p + \|u\|^p + \|v\|^p + \|w\|^p), \end{eqnarray*} for all $\mu\in{\mathbb T}^1$ and all $x_1, \cdots, x_d, u, v, w\in{\mathcal A}$. Then there exists a unique $C^*$-ternary ring homomorphism $T:{\mathcal A} \to {\mathcal B}$ such that \begin{eqnarray*} \|f(x) - T(x)\|\leq \frac{q^{1-p}(q^p+(d-1)r^p)\varepsilon }{l \cdot C_{d-1}^{l-1}(q^{1-p}-1)}\|x\|^p, \end{eqnarray*} for all $x\in{\mathcal A}$. \end{corollary} \begin{proof} Define $\varphi(x_1, \cdots, x_d, u, v, w) = \varepsilon (\sum_{j=1}^d \|x_j\|^p + \|u\|^p + \|v\|^p + \|w\|^p)$, and apply Theorem 2.2. \end{proof} The following corollary can be applied in the case that our ternary algebra is linearly generated by its `idempotents', i.e. elements $u$ with $u^3 = u$. \begin{proposition} Let ${\mathcal A}$ be linearly spanned by a set $S\subseteq {\mathcal A}$ and let $f:{\mathcal A} \to {\mathcal B}$ be a mapping satisfying $f(q^{2n}[s_1s_2z]) = [f(q^ns_1)f(q^ns_2)f(z)]$ for all sufficiently large positive integers $n$, and all $s_1,s_2\in S, z\in{\mathcal A}$. Suppose that there exists a control function $\varphi :{\mathcal A}^{d} \to [0, \infty)$ satisfying \begin{eqnarray*} \widetilde{\varphi}(x_1, \cdots, x_d):=\sum_{j=0}^{\infty} q^{-j} \varphi(q^jx_1, \cdots, q^jx_d) < \infty \quad (x_1, \cdots, x_d \in{\mathcal A}). 
\end{eqnarray*} If $f(0)=0$ and \begin{eqnarray*} \|d \cdot C_{d-2}^{l-2} f(\frac{\mu x_1+\cdots + \mu x_d}{d}) + C_{d-2}^{l-1} \sum_{j=1}^d \mu f(x_j)\\ - l \cdot \sum_{1\leq j_1< \cdots < j_l\leq d} \mu f(\frac{x_{j_1} + \cdots + x_{j_l}}{l})\|\leq \varphi(x_1, \cdots, x_d), \end{eqnarray*} for all $\mu \in{\mathbb T}^1$ and all $x_1, \cdots, x_d \in {\mathcal A}$, then there exists a unique $C^*$-ternary ring homomorphism $T:{\mathcal A} \to {\mathcal B}$ such that \begin{eqnarray*} \|f(x) - T(x)\|\leq \frac{1}{l \cdot C_{d-1}^{l-1}}\, \widetilde{\varphi}(qx, rx, \cdots, rx), \end{eqnarray*} for all $x\in{\mathcal A}$. \end{proposition} \begin{proof} Applying the same argument as in the proof of Theorem 2.2, there exists a unique linear mapping $T:{\mathcal A} \to {\mathcal B}$ given by \begin{eqnarray*} T(x) := \lim_{n\to\infty}\frac{1}{q^n} f(q^nx) \quad (x\in{\mathcal A}) \end{eqnarray*} such that \begin{eqnarray*} \|f(x) - T(x)\|\leq \frac{1}{l \cdot C_{d-1}^{l-1}} \widetilde{\varphi}(qx, rx, \cdots, rx), \end{eqnarray*} for all $x\in{\mathcal A}$. We have \begin{eqnarray*} T([s_1s_2z]) &=& \lim_{n\to\infty}\frac{1}{q^{2n}} f([(q^ns_1)(q^ns_2)z])\\ &=& \lim_{n\to\infty}\big[\frac{f(q^ns_1)}{q^n}\frac{f(q^ns_2)}{q^n}f(z)\big]\\ &=& [T(s_1)T(s_2)f(z)]. \end{eqnarray*} By the linearity of $T$ we have $T([xyz]) = [T(x)T(y)f(z)]$ for all $x, y, z\in {\mathcal A}$. Therefore $q^nT([xyz])= T([xy(q^nz)]) = [T(x)T(y)f(q^nz)]$, and so \begin{eqnarray*} T[xyz])= \lim_{n\to\infty}\frac{1}{q^n}[T(x)T(y)f(q^nz)]=\big[T(x)T(y)\lim_{n\to\infty}\frac{f(q^nz)}{q^n}\big ]= [T(x)T(y)T(z)], \end{eqnarray*} for all $x,y,z\in{\mathcal A}$. \end{proof} \begin{theorem} Suppose that $f:{\mathcal A} \to {\mathcal B}$ is an approximate $C^*$-ternary ring homomorphism associated to the Trif equation with ${\mathbb E}=\{1, {\bf i}\}$ and a control function $\varphi: A^{d+3}\to [0, \infty)$ fulfilling (\ref{phi}). 
If $f(0)=0$ and for each fixed $x\in {\mathcal A}$ the mapping $t\mapsto f(tx)$ is continuous on ${\mathbb R}$, then there exists a unique $C^*$-ternary homomorphism $T:{\mathcal A} \to {\mathcal B}$ such that \begin{eqnarray*} \|f(x)-T(x)\|\leq \widetilde{\varphi}(qx, rx, \cdots, rx, 0, 0, 0), \end{eqnarray*} for all $x\in{\mathcal A}$. \end{theorem} \begin{proof} Put $u=v=w=0$ and $\mu=1$ in (\ref{trifapp}). Using the same argument as in the proof of Theorem \ref{main} we deduce that there exists a unique additive mapping $T:{\mathcal A} \to {\mathcal B}$ given by \begin{eqnarray*} T(x)=\lim_{n\to\infty}\frac{f(q^nx)}{q^n} \quad (x\in {\mathcal A}). \end{eqnarray*} By the same reasoning as in the proof of the main theorem of \cite{RAS1}, the mapping $T$ is ${\mathbb R}$-linear. Putting $x_1= \cdots = x_d = x$, $\mu={\bf i}$ and $u=v=w=0$ in (\ref{trifapp}) we get \begin{eqnarray*} \|d \cdot C_{d-2}^{l-2} (f({\bf i} x) -{\bf i} f(x))\| \leq \varphi(x, \cdots, x, 0, 0, 0) \quad (x\in {\mathcal A}). \end{eqnarray*} Hence \begin{eqnarray*} q^{-n}\|f(q^n{\bf i}x)-{\bf i}f(q^nx)\|\leq q^{-n}\varphi(q^nx, \cdots, q^nx, 0, 0, 0) \quad (x\in {\mathcal A}). \end{eqnarray*} The right hand side tends to zero as $n\to\infty$, hence \begin{eqnarray*} T({\bf i}x)=\lim_{n\to\infty}\frac{f(q^n{\bf i}x)}{q^n}=\lim_{n\to\infty}\frac{{\bf i}f(q^nx)}{q^n}={\bf i}T(x) \quad (x\in {\mathcal A}). \end{eqnarray*} For every $\lambda\in {\mathbb C}$ we can write $\lambda=\alpha_1+{\bf i}\alpha_2$ in which $\alpha_1,\alpha_2\in{\mathbb R}$. Therefore \begin{eqnarray*} T(\lambda x)&=&T(\alpha_1x+{\bf i}\alpha_2x)=\alpha_1T(x)+\alpha_2T({\bf i}x)\\ &=&\alpha_1T(X)+{\bf i}\alpha_2T(x)=(\alpha_1+{\bf i}\alpha_2)T(x)\\ &=&\lambda T(x), \end{eqnarray*} for all $x\in {\mathcal A}$. Thus $T$ is ${\mathbb C}$-linear. \end{proof} \end{document}
\begin{document} \title{Orthogonal measurements are {\it almost} \section{Introduction} The discovery and analysis of the quantum speedup within algorithms where entanglement is totally absent \cite{info-no-ent} has raised considerable interest to alternative measures of quantum correlations beyond entanglement. In contrast to the paradigm of a tensor product structure as the root of classicality of correlations, a new paradigm based on the ignorance produced by measurement has crystallized into several measures of quantum correlations, of which perhaps the most widely used is the quantum discord \cite{zurek,vedral}. In a sense, it captures the fact that unless measurements on party $B$ leave unaffected party $A$ in a bipartite states $\varrho_{AB}$, we cannot really speak of such state as being purely classically correlated. An intense recent research activity is based on quantum discord as a quantifier for quantum correlations for two-qubit states. Yet it is typically used in a simplified form, where only orthogonal measurements are considered (see e.g. \cite{zurek,info-no-ent,maziero,luo2008,mazhar2010,mazzola2010,guo}). There is now a raising concern about the possibility that more general measurements might modify the value of quantum discord, thus weakening the conclusions of some recent works. The work by Hamieh et al. \cite{zaraket} took a first step showing the sufficiency of projective measurements (i.e. rank 1 POVM's) for states of two qubits. This of course does not demonstrate that two orthogonal projectors are enough. In spite of the great reduction of the complexity of the problem \cite{zaraket}, the optimal projective measurement of a qubit can have between 2 and 4 elements \cite{dariano} (the case of 2 elements corresponds to orthogonal measurements) and the question of how many elements the optimal POVM has is still open. In fact in the work by Hamieh et al. \cite{zaraket} only a very particular state is studied. 
Hence, the matter remains unsettled and using orthogonal measurements seems to be an unnecessary restriction. In this Letter, we show that orthogonal measurements are sufficient to obtain the discord of rank 2 states of two qubits, while for rank 3 and 4 they give a pretty tight upper bound. Moreover, \fer{given the relationship} between quantum discord and entanglement of formation \fer{ for pure tripartite states} \cite{winter}, we give a formula for discord based purely on the eigenvectors and eigenvalues of the original state, valid exactly for rank 2. We also show that the entanglement of formation of a $2\otimes N$ rank 2 state has a tight upper bound given by optimal decompositions of 2 elements. Finally, we give an alternative formula for quantum discord of two qubits states of any rank based on its Bloch vectors. \section{Quantum discord} Two classically equivalent formulas for the mutual information in a bipartite state, related by Bayes rule are $\mathcal{I}(A:B)=H(A)+H(B)-H(A,B)$ and $\mathcal{J}(A:B)=H(A)-H(A|B)$, where $H(.)$ is the Shannon entropy and $H(A|B)$ is the conditional Shannon entropy of $A$ given $B$. Their quantum counterparts, however, differ substantially \cite{zurek}, the former being known as the quantum mutual information: \begin{equation} \mathcal{I}(\varrho)=S(\varrho_A)+S(\varrho_B)-S(\varrho), \end{equation} where $S(.)$ is the von Neumann entropy and $\varrho_{A(B)}$ are the reduced states after tracing out party $B(A)$. It is precisely in $\mathcal{J}(A:B)$ where measurements come into scene, since the conditional entropy of {\it A given B} is the one given by measurement outcomes on party B. Though a general measurement has to be associated to a generic POVM, for the sake of simplicity the community has used the restricted set of perfect (von Neumann, or orthogonal) measurements, i.e. 
: \begin{equation} {\cal{J}}(\varrho)_{\{\Pi_j^B\}}=S(\varrho_A)-S(A|\{\Pi_j^B\}) \end{equation} with the conditional entropy defined as $S(A|\{\Pi_j^B\})=\sum_ip_iS(\varrho_{A|\Pi_i^B})$, $p_i={\mbox Tr}_{AB}(\Pi_i^B\varrho)$ and where $\varrho_{A|\Pi_i^B}= \Pi_i^B\varrho\Pi_i^B/{p_i} $ is the density matrix after a complete projective measurement $(\{\Pi_j^B\})$ has been performed on B. Notice that $\Pi_j^B$ are orthogonal projectors. Quantum discord is thus defined as the difference \begin{equation} \label{eqdisc} \delta_{A:B}(\varrho)=\min_{\{\Pi_i^B\}}\left[S(\varrho_B)-S(\varrho)+S(A|\{\Pi_i^B\})\right], \end{equation} minimized over all possible orthogonal measurements. However, more general measurements should be used to exhaust the minimization problem, as already stated in the seminal papers \cite{zurek,vedral}. Hence the following generalization is required \begin{equation} \varrho_{A|\Pi_j^B}\to\varrho_{A|E_j^B}=\mbox{Tr}_B(E_j^B\varrho_{AB}/p_j) \end{equation} where the elements of the POVM $E_j^B$ fulfill $\sum_j E_j^B={\mathbb 1}_B$ \begin{equation} \label{eqdiscPOVM} \delta_{A:B}(\varrho)=\min_{\{E_i^B\}}\left[S(\varrho_B)-S(\varrho)+S(A|\{E_i^B\})\right]. \end{equation} Based on the convexity properties of the conditional entropy $S(A|\{E_i^B\})$ Hamieh et al. \cite{zaraket} (see also \cite{datta}) show that POVM's which optimize discord are extremal or indecomposable, i.e. they cannot be obtained by mixing other POVM's; further, it has been shown \cite{dariano} that extremal POVM's for qubits are of rank 1 and can have between 2 and 4 elements. 
\section{Unified picture through purification} In \cite{winter} it was shown that \fer{given a mixed state} $\varrho_{AB}$ \fer{and its purification $|\psi_{ABC}\rangle$} through an ancilla qudit C, \fer{the following relation between the conditional entropy when B is measured, and the entanglement of formation $E_F$ of the subsystem AC holds:} \begin{equation} \min_{\{E_i^B\}}S(A|\{E_i^B\})= E_F(\varrho_{AC}) \end{equation} between the conditional entropy when B is measured, and the entanglement of formation $E_F$ of the subsystem AC. The minimization of the $AB$ conditional entropy over POVM measurements on B is thus equivalent to minimization of $E_F$ in $AC$ over all ensemble decompositions. Hence the number of elements giving the optimal ensemble decomposition of $E_F(\varrho_{AC})$ coincides with the number of elements of the POVM which minimizes $S(A|\{E_i^B\})$ \cite{winter}.\\ \textbf{Theorem 1}. Given a bipartite mixed state of two qubits $\rho_{AB}$ of rank 2, the optimal measurement giving the quantum discord is a 2 element POVM. The elements of such POVM are orthogonal projectors. \textbf{Proof}. Consider a rank 2 state of two qubits with spectral decomposition $\rho_{AB}=\sum_{i=1}^{R=2} \alpha_i|\psi_i\rangle\langle\psi_i|$. It\fer{s purification} by an ancilla qubit C \fer{has} the form $|\Psi_{ABC}\rangle=\sum_{i=1}^{2} \sqrt{\alpha_i}|\psi_i\rangle|i\rangle_C$, where $|i\rangle_C$ forms an orthonormal basis in the Hilbert space of the ancilla qubit. We can also Schmidt decompose this state as $|\Psi_{ABC}\rangle=\sum_{i=1}^{m} \sqrt{\beta_i}|i\rangle_B|\phi_i\rangle_{AC}$, where $m=\min(d_B,d_Ad_C)=d_B=2$ with $d_B$ is the dimension of the Hilbert space of party B and so forth. So the partition AC has the form $\rho_{AC}=\sum_{i=1}^{m=2} \beta_i|\phi_i\rangle\langle\phi_i|$ and hence is of rank 2. 
Wootters \cite{wootters} showed that the entanglement of formation of this two-qubit mixed state is obtained from an optimal decomposition made up of {\it as many elements as its rank}, which in this case is 2, which in turn means that the POVM in B that realizes such decomposition has 2 elements. Being optimal POVM's of rank 1, the 2 elements of such POVM are necessarily orthogonal \cite{dariano}. This can be seen by noticing that a rank 1 POVM of 2 elements $E_1=\alpha_1\ket{\phi_1}\bra{\phi_1}$ and $E_2=\alpha_2\ket{\phi_2}\bra{\phi_2}$ has to fulfill positivity $E_i>0$ and normalization $E_1+E_2=\mathbb{1}$, which necessarily leads to orthogonality of its elements (this is easy to show when the elements $E_i$ are written in Bloch form \cite{dariano}). QED \\ \textbf{Corollary}. The quantum discord of a rank 2 state of two qubits is given by \begin{equation} \delta_{A:B}(\varrho_{AB})=S(\varrho_B)-S(\varrho_{AB})+\mathcal{E}(C(\varrho_{AC})), \end{equation} with \begin{equation} \varrho_{AC}=\mbox{tr}_B\left(\sum_{i,j=1}^2\sqrt{\lambda_i\lambda_j}|\psi_i\rangle\langle\psi_j|\otimes|i\rangle_C \langle j|\right), \end{equation} where $\{\lambda_i,|\psi_i\rangle\}$ is the spectral decomposition of $\varrho_{AB}$, and $|i_C\rangle$ is any orthonormal basis in $\mathcal{H}_C$. The function $\mathcal{E}$ is given by \begin{equation} \mathcal{E}(C)=h(\frac{1+\sqrt{1-C^2}}{2}), \end{equation} where \begin{equation} h(x)=-x\log_2x-(1-x)\log_2(1-x), \end{equation} and where $C(\rho)$ is the concurrence of $\rho$ \cite{wootters}, given by $\max(0,l_1-l_2-l_3-l_4)$, with $l_i$ the eigenvalues of the hermitian matrix $R(\varrho_{AC})$, where $R(\rho)=\sqrt{\sqrt{\rho}\tilde{\rho}\sqrt{\rho}}$ and $\tilde{\rho}=(\sigma_y\otimes\sigma_y)\rho^*(\sigma_y\otimes\sigma_y)$.\\ Hence, to obtain the discord, diagonalize $\varrho_{AB}$ and use its eigenvalues and eigenvectors to construct $\varrho_{AC}$. 
Obtain the concurrence of $\varrho_{AC}$, substitute it in $\mathcal{E}(C)$ and obtain the optimal conditional entropy between A and B. This result was implicit in ref. \cite{chinos}.\\ \section{Quantum discord of rank 3 and 4 states} Orthogonal measurements do not give the optimal discord for higher rank states, as first found in \cite{chinos2} through a counterexample based on maximally discordant mixed states (MDMS) \cite{MDMS}. Their study was limited to 3-element POVM's, leading to a deviation of $\sim 2\times10^{-5}$ with respect to orthogonal projectors. However we will give evidence that the \fer{set of} states where 3 and 4-element POVM's are needed is indeed \fer{small} and the improvement in discord using Eq. (\ref{eqdiscPOVM}) is \fer{tiny}. In the case of states $\varrho_{AB}$ with rank higher than 2, a purification would yield a qubit-qudit system in AC, whose optimal decomposition (for $E_F$) is not known. Therefore no analytical tool can help us discriminate how many elements build the optimal POVM for the quantum discord of states with rank higher than 2. However, it is known that for qubits the optimal measurements are given by rank 1 measurements with 2, 3 and 4 outcomes. This knowledge is based on two facts: i) the conditional entropy $S(A|\{E_i^B\})$ is a concave function over the convex set of POVM's \cite{datta,zaraket}, hence only extremal POVM's will minimize it, ii) extremal POVM's of qubits are rank 1 and have 2, 3 or 4 elements \cite{dariano}, i.e. $E_j^B=\alpha_j\ket{\phi_j}\bra{\phi_j}$ ($j=2,\dots,N$, $N\leq4$), with $\alpha_j$ real and nonzero and $\ket{\phi_j}$ are pure states (nonorthogonal unless $N=2$). Even considering at most 4-element POVM's, the numerical analysis of this problem is challenging, as detailed in the following. 
We start parametrizing the N elements of a POVM as: \begin{eqnarray} \tilde{E}_1&=&\alpha_1\ket{0}\bra{0}\nonumber\\ \tilde{E}_j&=&\alpha_jU(\theta_j,\phi_j)\ket{0}\bra{0}U^\dagger(\theta_j,\phi_j)\ \ \ (1<j\le N),\nonumber \end{eqnarray} where $U(\theta,\phi)$ is a qubit rotation, plus a final global rotation $U(\Omega,\Phi)$ acting on all elements; i.e. $E_i=U(\Omega,\Phi)\tilde{E}_iU^\dagger(\Omega,\Phi)$. The completeness relation $\sum_i E_i={\mathbb 1}$ solves the coefficients in terms of the angles: $\alpha_i=\alpha_i(\{\theta_j,\phi_j\})$. This means running 6(8) loops in angles\footnote{\fer{Though for the case of $3$-element POVM's the number of loops can be reduced to 5 by using geometrical arguments.}}, for 3(4)-element POVM's, for each state, in order to solve the minimization problem in the discord definition (\ref{eqdiscPOVM}). We must note that orthogonal measurements are \fer{a limit case} in the definition of $3$-element POVM's; in the same way $4$ element POVM's do not include the case of $3$ elements. The numerical evaluation of discord is very sensitive to identification of the minimizing POVM and a proper scan of all possible POVM's requires small step sizes for the angles $\{\theta_j,\phi_j\}$. To give an idea on the sensitivity of the minimization on the step size in the angles, we show in fig.~\ref{fig1} the values of discord for 2, 3 and 4-element POVM's ($\delta_2,\delta_3,\delta_4$) against the step size for \fer{three} states. In \fer{all} cases it is surprising the oscillatory nature of discord even for rather refined samplings (small angles). This demonstrates the importance to scan all POVM's over different step sizes to gather the minimum, best approximating Eq.~(\ref{eqdiscPOVM}). We notice that a refinement until angular step sizes $\sim 0.02\pi$ is feasible only for 2 and 3 elements POVM's. 
In any case, a good level of accuracy is obtained if the lowest value of discord obtained \fer{among} different angular precisions is retained. \fer{The insets of fig.~\ref{fig1} show the minimum value of discord obtained inside a box of angular precisions $\Delta\theta=\Delta\phi\in[\rm{w},0.25\pi]$.} \fer{In fig.~\ref{fig1}a we show the state with highest deviation we have found (highest point in fig.~\ref{fig3}) in a scan of $10^5$ random states of rank 3 and 4, with $\delta_2-\delta_{3(4)}\sim10^{-3}$. This deviation is high above the typical deviation we have found of around $10^{-6}$.} In fig.~\ref{fig1}\fer{b} we show a MDMS of rank 3, separable, but with maximum discord versus classical correlations \cite{MDMS}: $\varrho_{\mbox{\tiny MDMS}}=(1-\epsilon)(m\ket{00}\bra{00}+(1-m)\ket{11}\bra{11})+\epsilon\ket{\Psi^-}\bra{\Psi^-}$ with $\ket{\Psi^-}=(\ket{01}-\ket{10})/\sqrt{2}$ the usual Bell state and values $\epsilon\fer{=0.2349602}$, $m=0.11$. In this case, POVM's with more than 2 elements are needed. This is actually a rather singular event, as we will see in fig.~\ref{fig3}. Indeed in fig.~\ref{fig1}\fer{b} we show that an improvement of \fer{$\sim8\times10^{-6}$} is provided by 3,4-element POVM's ($\delta_{3(4)}$), as compared to orthogonal measurements ($\delta_2$). The most common situation is represented in figure \fer{~\ref{fig1}c}, for a generic state obtained by a random density matrix of rank 3. \begin{figure} \caption{(color online) Quantum discord minimized by 2 ($\delta_2$, black), 3($\delta_3$, red) and 4($\delta_4$, orange) elements POVM's with the given step size $\Delta\theta=\Delta\phi$ in the angles of the POVM elements $E_i$. The state\fer{s are} \label{fig1} \end{figure} \begin{figure} \caption{\fer{(color online) Deviation of quantum discord $\delta_2-\delta_{3(4)} \label{fig3} \end{figure} Next we aim to establish the abundance of states for which 3 or 4 elements POVM's provide the improvement in discord found in fig.~\ref{fig1}. 
We then \fer{start} with a scan of Hilbert space where \fer{$10^5$} random density matrices of rank 3 and 4 have been generated according to the Haar measure. We plot in fig.~\ref{fig3} the deviations \fer{$\delta_{2}-\delta_{3(4)}$ (only when positive)} versus the result for orthogonal measurements ($\delta_2$). \fer{In this figure }we have sampled the angles from steps $\sim 0.3\pi$ until a lower limit of {\fer{w$=0.03\pi$ ($0.18\pi$)}} for 3 (4)-elements POVM's respectively. We observe that the optimal discord is given by orthogonal POVM's, \fer{except for a $0.63\%$($0.001\%$) of states which have a typical deviation of order $10^{-6}$ (see Table~\ref{tab.1}). In order to discriminate the dependence of such abundance and degree of deviations on the scan size ($N$), or the precision in the angles ($\Delta\theta=\Delta\phi\in[$w$,0.25\pi]$), we present in Table~\ref{tab.1} the results obtained for different sample characteristics $N,$w. We find that for a given angular precision (for instance up to w$=0.03\pi$ in Table~\ref{tab.1}) the abundance of deviant states ($p$) is not sensitive to the sample size (the cases $N=3\times10^4$ and $10^5$ can be compared in the table). On the other hand discriminating states with $\delta_2>\delta_3$ in which these values are close, requires a quite refined angular precision (w), therefore leading to a highly dependent abundance (p) (see strong variation of $p$ to find states with $\delta_2>\delta_3$ for $N=10^5$ in the table). Due to computational limitations such a deep numerical study was only possible for 3-el. POVM's while for 4-el. POVM's we were only able to reach precisions up to w$=0.18\pi$, for which we have found only one deviating state. At the best precision reached we see that the typical (average) deviation $\delta_2-\delta_3$ is of the order $10^{-6}$ with a standard deviation of order $10^{-5}$. 
} \begin{table}\fer{ \caption{Probability ($p$) to find states with $\Delta\equiv\delta_2-\delta_{3(4)}>0$ in a sample of $N$ states whose discord is calculated with angular precision up to w. We show the average deviation $\langle\Delta\rangle$ and its standard deviation $\sigma(\Delta)$. The standard deviation is reduced by an order of magnitude if we remove the state a) of fig~\ref{fig1}. We do not show the average deviation for $\delta_2>\delta_4$ nor its standard deviation since only one state was found in the full scan.} \label{tab.1} \begin{center} \begin{tabular}{|c|c|c|c|c|} \cline{1-1} $\delta_2>\delta_3$\\ \hline $N$& w$/\pi$ & $p$ & $\langle\Delta\rangle$ & $\sigma(\Delta)$\\ \hline $3\cdot 10^4$& $0.03$ & $6.6\cdot10^{-3}$ & $1.6\cdot10^{-6}$ & $2.6\cdot10^{-6}$\\ \hline $10^5$& $0.05$ & $5\cdot10^{-4}$ & $2\cdot10^{-5}$ & $1.2\cdot10^{-4}$\\ \hline $10^5$& $0.03$ & $6.3\cdot10^{-3}$ & $3\cdot10^{-6}$ & $3.7\cdot10^{-5}$\\ \hline $10^5$& $0.025$ & $8\cdot10^{-3}$ & $2.8\cdot10^{-6}$ & $3.4\cdot10^{-5}$\\ \hline $10^5$& $0.02$ & $1.4\cdot10^{-2}$ & $2.1\cdot10^{-6}$ & $2.6\cdot10^{-5}$\\ \hline $\delta_2>\delta_4$\\ \hline $3\cdot10^4$ & $0.2$ & $0$ & -- & --\\ \hline $10^5$ & $0.2$ & $10^{-5}$& -- & --\\ \hline $10^5$ & $0.18$ & $10^{-5}$& -- & --\\ \hline \end{tabular} \end{center}} \end{table} The set of states for which we find improvements with $\delta_{3(4)}$ is rather small. An interesting question is whether these states lie in the neighborhood of rank 3 MDMS \cite{MDMS,chinos2} or are distributed everywhere in the $\{\mathcal{J},\mathcal{I}\}$ diagram (figure 1 in the same reference). \fer{We first stress that the state in fig.~\ref{fig1}a is nowhere near the MDMS border (it has $\{\mathcal{J}\simeq0.17,\mathcal{I}\simeq0.1809\}$).} \fer{Moreover} we can gain some insight about the smallness of \fer{the MDMS} neighborhood by investigating the state of rank 3 in fig.~\ref{fig1}\fer{b} \cite{MDMS} whereby we perturbate it with a Bell state. 
That is we study the state $\varrho=(1-\lambda)\varrho_{\mbox{\tiny MDMS}}+\lambda\ket{\Phi^+}\bra{\Phi^+}$ with $\ket{\Phi^+}=(\ket{00}+\ket{11})/\sqrt{2}$. The effect of this perturbation is to move the state away from the border in the $\{\mathcal{J},\mathcal{I}\}$ diagram. We find that already for \fer{$\lambda\simeq0.002$} we reach the transition where $\delta_2=\delta_3=\delta_4$, meaning that, at least in the neighborhood of the MDMS border, 3,4-element POVM's are needed in a very tiny region. Summarizing our numerical analysis for randomly generated states, we have found that orthogonal measurements are $almost$ enough \fer{in the following sense}. The improvements $\delta_{3(4)}<\delta_2$ do occur but \fer{they represent small corrections (a maximum deviation $10^{-3}$ was found for only one state in a sample of $10^5$ while other deviations were up to $10^{-5}$, fig.~\ref{fig3}) and} they can be numerically \fer{appreciated} only for a tiny set of states \fer{(with the mentioned minimization up to precision w$=0.02\pi$ these states appear with probability $\sim10^{-2}$ for $\delta_3$, while for $\delta_4$ they appear with probability $\sim10^{-5}$ at w$=0.18\pi$).} Given this numerical evidence we give the following upper bound: \section{Observation} The entanglement of formation of a $2\otimes N$ rank 2 bipartite state $\varrho_{BC}$ has a {\it tight upper bound} given by an optimal decomposition of two elements: \begin{equation} E_F(\varrho_{BC})\leq\min_{\{p_k,|\phi_{BC}^k\rangle\}}\sum_{k=1}^{\bf 2}p_kE(|\phi_{BC}^k\rangle), \end{equation} with $E(|\phi_{BC}^k\rangle)=S(\mbox{tr}_B(\ket{\phi_{BC}^k}\bra{\phi_{BC}^k}))$. \fer{The deviation from the equality is on average of order $10^{-6}$}.\\ To show this, take the $2\otimes N$, rank 2, state in spectral form $\varrho_{BC}=\sum_{k=1}^2\lambda_k|\psi_{BC}^k\rangle\langle\psi_{BC}^k|$. 
It can be purified by an ancilla qubit as $|\psi_{ABC}\rangle=\sum_{k=1}^2\sqrt{\lambda_k}|e_A^k\rangle|\psi_{BC}^k\rangle$, with $|e_A^k\rangle$ any orthonormal basis in $\mathcal{H}_A$. Again according to \cite{winter}: \begin{equation} \label{eq8} E_F(\varrho_{BC})=\min_{\{E_j^A\}}S(\{E_j^A\}|B)\leq\min_{\{\Pi_j^A\}}S(\{\Pi_j^A\}|B), \end{equation} where restriction to orthogonal measurements 'spoils' the minimization. Given the numerical evidence we have provided, the improvement of doing full minimization is \fer{on average} at the level of \fer{$10^{-6}$ (see Table~\ref{tab.1})}. The conditional entropy for a given orthonormal measurement $\Pi_k^A=|\xi^k\rangle\langle\xi^k|$ is given by \begin{equation} \label{eq9} S(\{\Pi_k^A\}|B)=\sum_{k=1}^2 p_k S(\rho_B^k)=\sum_{k=1}^2 p_k E(\rho_{BC}^k) \end{equation} with $p_k=\mbox{tr}(\Pi_k^A\varrho_{ABC})$ and $\rho_B^k=\mbox{tr}_{AC}(\Pi_k^A\varrho_{ABC})/p_k$. In the last equality we have used the fact that $\rho_{BC}^k$ is pure. This can be seen by writing explicitly \begin{eqnarray} \rho_{BC}^k&=&\mbox{tr}_A(\Pi_k^A\varrho_{ABC})=\nonumber\\ &=&\sum_{i,j=1}^2e_i(k)e_j^*(k)\sqrt{\lambda_i\lambda_j}|\psi_{BC}^i\rangle\langle\psi_{BC}^j|=\nonumber\\ &=&|\phi_{BC}^i\rangle\langle\phi_{BC}^i| \end{eqnarray} with $|\phi_{BC}^i\rangle=\sum_{i=1}^2e_i(k)\sqrt{\lambda_i}|\psi_{BC}^i\rangle$, and $e_i(k)=\langle\xi^k|e_i\rangle$. So, finally, $S(\rho_B^k)=E(|\phi_{BC}^k\rangle)$. QED \\ We stress the fact that instead of minimization over ensemble decompositions with a number of elements ranging from $R$ to $R^2$ ($R$ is the rank of the state), as shown to be sufficient by Uhlmann \cite{uhlmann}, we can safely restrict to decompositions with 2 elements if we are not interested in states which are rare to find and have deviations which are probably quite small, as we have seen when perturbing a MDMS extremal state. 
\section{Bloch formula for quantum discord} We finish by giving an alternative formula for the computation of quantum discord of generic two-qubit states of any rank. Writing the POVM elements in Bloch form \begin{equation} \label{POVMbloch} E_i^A=\alpha_i({\mathbb{1}}_A+\vec{n}_i\cdot\vec{\sigma}_A), \end{equation} with the positivity and normalization (completeness) conditions \begin{equation} \alpha_i>0\ ,\ \sum_i\alpha_i=1\ ,\ \sum_i\alpha_i\vec{n}_i=\vec{0}\ ,\ (|\vec{n}_i|=1), \end{equation} and the density matrix $\varrho_{AB}$ (coming from purification of $\varrho_{BC}$ plus tracing subsystem C) given also in Bloch form \begin{equation} \label{rhoBloch} \varrho_{AB}=\frac{1}{4}\left(\mathbb{1}_{AB}+\vec{a}\cdot\vec{\sigma}_A+\vec{b}\cdot\vec{\sigma}_B+\sum_ic_i\sigma_A^i\otimes\sigma_B^i \right) \end{equation} \begin{widetext} \begin{eqnarray} \label{alternDisc1} \delta_{A:B}(\varrho_{AB})&=&S_B-S_{AB}+\min_{\{\alpha_i,\vec{n}_i\}}\sum_{i=1}^m\alpha_i(1+\vec{n}_i\cdot\vec{b})\sum_\pm H(\lambda_i^\pm(\vec{a},\vec{b},\vec{cn}_i))\\ \label{alternDisc2} \lambda_i^\pm(\vec{a},\vec{b},\vec{cn}_i)&=&\frac{1}{2}\left(1\pm\left|\frac{\vec{a}+\vec{cn}_i}{1+\vec{b}\cdot\vec{n}_i}\right|\right)\ ,\ \vec{cn}_i=\{c_xn_{i,x},c_yn_{i,y},c_zn_{i,z}\}\\ \label{alternDisc3} S_B&=&\sum_\pm H(\frac{1}{2}(1\pm|\vec{b}|))\\ \label{alternDisc4} S_{AB}&=&S(\varrho_{AB}) \end{eqnarray} \end{widetext} (notice that when we write $\vec{a}\cdot\vec{\sigma}_A$, we mean $\vec{a}\cdot\vec{\sigma}_A\otimes\mathbb{1}_B$), we enunciate the following theorem:\\ \textbf{Proposition}. The discord $\delta_{A:B}(\varrho_{AB})$ with $\varrho_{AB}$ of {\it any rank}, written as in eq. 
(\ref{rhoBloch}), is given by eqs.(\ref{alternDisc1}-\ref{alternDisc4}) and $m(=2,3,4)$ is the number of elements of the extremal POVM.\\ The minimization is restricted by the conditions for {\it extremality} \cite{dariano} for each number of POVM elements (m): \begin{eqnarray} \label{extr2} &\mbox{m=2:}&\mbox{ all extremal, from normalization they follow:}\nonumber\\ && \ \alpha_i=\frac{1}{2}\ ,\ \vec{n}_1=-\vec{n}_2\equiv\vec{n}\mbox{ (i.e. orthogonal)}\\ \label{extr3} &\mbox{m=3:}&\mbox{ all extremal, from normalization they follow:}\nonumber\\ && \ \alpha_3=1-\alpha_1-\alpha_2\ ,\ \vec{n}_3=-\frac{1}{\alpha_3}(\vec{n}_1+\vec{n}_2)\\ \label{extr4} &\mbox{m=4:}&\ \ \vec{n}_i\mbox{ not in the same plane;}\nonumber\\ && \ \mbox{normalization yields }\sum_{i=1}^4\alpha_i\vec{n}_i=\vec{0} \end{eqnarray} \textbf{Proof}. Simple algebra yields the probabilities and outcomes of each measurement: \begin{eqnarray} p_i&\equiv&\mbox{tr}\left(E_i^B\varrho_{AB}\right)=\alpha_i(1+\vec{n}_i\cdot\vec{b})\\ \rho_A^i&\equiv&\mbox{tr}_{B}\left(E_i^B\varrho_{AB}\right)/p_i=\nonumber\\ &=&\frac{1}{2}\left(\mathbb{1}_A+\frac{(\vec{a}+\vec{cn}_i)\cdot\vec{\sigma}_A}{1+\vec{b}\cdot\vec{n}_i}\right) \end{eqnarray} whose entropy can be calculated from the eigenvalues of $\rho_A^i$ ($\lambda_i^\pm(\vec{a},\vec{b},\vec{cn}_i)$) as defined in eq. (\ref{alternDisc2}). The condition that extremality for $m=4$ implies that the POVM elements cannot lie in the same plane was derived in \cite{dariano}, as well as the normalization conditions.\\ In summary, we have proven that the quantum discord of rank 2 two-qubit mixed states is obtained {\it using only orthogonal projectors} as measurements. Strong numerical evidence has been given to conjecture that they are almost sufficient for higher ranks, except for states which appear with probability \fer{$\sim10^{-2}$} and have negligible deviations \fer{on average }of order \fer{$10^{-6}$ in a sample of $10^5$ states (w$=0.02\pi$ for $\delta_3$). 
We discussed the importance of the states sample size and of the precision in scanning all possible measurements, showing the need of using a minimization procedure over different angular step sizes. Two} example\fer{s} w\fer{ere} given \fer{in figs.~\ref{fig1}a and \ref{fig1}b,} \fer{both of rank 3}, showing that 3 and 4 element POVM's give a better quantum discord, though with a very small improvement \fer{(a maximum of order $10^{-3}$)}. Based on the connection \cite{winter} between conditional entropy and entanglement of formation we have given a related tight upper bound, namely that the entanglement of formation of a $2\otimes N$ system of rank 2 is obtained by 2 element decompositions with very high probability and precision. Finally, an alternative quantum discord formula for generic mixed states of two qubits was given in terms of the Bloch vectors of the state, where minimization is performed over the Bloch form of POVM's elements. \fer{As a side remark, we note that Gaussian discord in continuous variable systems is also an example where projective, but not orthogonal measurements are optimal \cite{Paris,Adesso}.} \acknowledgments Funding from FISICOS (FIS2007-60327), Govern Balear (AAEE0113/09) and CoQuSys (200450E566) projects, JaeDoc (CSIC) and Juan de la Cierva program are acknowledged. \fer{We also thank Matthias Lang for useful comments. Numerical calculations have been performed using the IFISC and GRID-CSIC cluster facilities (Ref. 200450E494).} \end{document}
\begin{document} \title{Global existence and exponential growth for a viscoelastic wave equation with dynamic boundary conditions} \author{St\'{e}phane Gerbi\thanks{ Laboratoire de Math\'ematiques, Universit\'e de Savoie et CNRS, UMR-5128, 73376 Le Bourget du Lac, France, E-mail : \url{[email protected]}, } ~and Belkacem Said-Houari\thanks{ Division of Mathematical and Computer Sciences and Engineering, King Abdullah University of Science and Technology (KAUST), Thuwal, KSA, E-mail: \url{[email protected]}}} \date{} \maketitle \begin{abstract} The goal of this work is to study a model of the wave equation with dynamic boundary conditions and a viscoelastic term. First, applying the Faedo-Galerkin method combined with the fixed point theorem, we show the existence and uniqueness of a local in time solution. Second, we show that under some restrictions on the initial data, the solution continues to exist globally in time. On the other hand, if the interior source dominates the boundary damping, then the solution is unbounded and grows as an exponential function. In addition, in the absence of the strong damping, then the solution ceases to exist and blows up in finite time. \end{abstract} {\bf Keywords:} Damped viscoelastic wave equations, global solutions, exponential growth, blow up in finite time, dynamic boundary conditions. \section{Introduction} We consider the following problem \begin{equation} \left\{ \begin{array}{ll} u_{tt}-\Delta u-\alpha \Delta u_{t}+\displaystyle\int_{0}^{t}g(t-s)\Delta u(s)ds=|u|^{p-2}u, & x\in \Omega ,\ t>0 \, , \\ u(x,t)=0, & x\in \Gamma _{0},\ t>0 \, , \\ u_{tt}(x,t)=-\left[ \displaystyle\frac{\partial u}{\partial \nu }(x,t)- \displaystyle\int_{0}^{t}g(t-s)\frac{\partial u}{\partial \nu }(x,s)ds+\frac{ \alpha \partial u_{t}}{\partial \nu }(x,t)+h\left( u_{t}\right) \right] & x\in \Gamma _{1},\ t>0\, , \\ u(x,0)=u_{0}(x),\qquad u_{t}(x,0)=u_{1}(x) & x\in \Omega \, , \end{array} \right. 
\label{ondes} \end{equation} where $u=u(x,t)\,,\,t\geq 0\,,\,x\in \Omega \,,\,\Delta $ denotes the Laplacian operator with res\-pect to the $x$ variable, $\Omega $ is a regular and bounded domain of $\mathbb{R}^{N}\,,\,(N\geq 1)$, $\partial \Omega ~=~\Gamma _{0}~\cup ~\Gamma _{1}$, $mes(\Gamma _{0})>0,$ $\Gamma_{0}\cap \Gamma _{1}=\varnothing $ and $\partial/\partial \nu $ denotes the unit outer normal derivative, $\alpha$ is a positive constant, $p>2, \,h \mbox{ and } g$ are functions whose properties will be discussed in the next section, $u_{0}\,,\,u_{1}$ are given functions. Nowadays, the wave equation with dynamic boundary conditions is used in a wide field of applications. See \cite{MuerKu_2011} for some applications. Problems similar to (\ref{ondes}) arise (for example) in the modeling of longitudinal vibrations in a homogeneous bar in which there are viscous effects. The term $\Delta u_t$ indicates that the stress is proportional not only to the strain, but also to the strain rate, see \cite{CSh_76} for more details. From the mathematical point of view, these problems do not neglect acceleration terms on the boundary. Such types of boundary conditions are usually called \textit{dynamic boundary conditions}. They are not only important from the theoretical point of view but also arise in several physical applications. For instance in one space dimension and for $g=0$, problem (\ref{ondes}) can model the dynamic evolution of a viscoelastic rod that is fixed at one end and has a tip mass attached to its free end. The dynamic boundary conditions represent Newton's law for the attached mass (see \cite {BST64,AKS96, CM98} for more details). In two space dimensions, as shown in \cite{G06} and in the references therein, these boundary conditions arise when we consider the transverse motion of a flexible membrane $\Omega $ whose boundary may be affected by the vibrations only in a region. 
Also some dynamic boundary conditions as in problem (\ref{ondes}) appear when we assume that $\Omega $ is an exterior domain of $\mathbb{R}^{3} $ in which homogeneous fluid is at rest except for sound waves. Each point of the boundary is subjected to small normal displacements into the obstacle: this type of dynamic boundary conditions are known as acoustic boundary conditions, see \cite{B76} for more details. Littman and Markus \cite{LM88} considered a system which describe an elastic beam, linked at its free end to a rigid body. The whole system is governed by the Euler-Bernoulli Partial Differential Equations with dynamic boundary conditions. They used the classical semigroup methods to establish existence and uniqueness results while the asymptotic stabilization of the structure is achieved by the use of feedback boundary damping. In \cite{GV94} the author introduced the model \begin{equation} u_{tt}-u_{xx}-u_{txx}=0,\qquad x\in (0,L),\,t>0, \label{Vand_1} \end{equation} which describes the damped longitudinal vibrations of a homogeneous flexible horizontal rod of length $L$ when the end $x=0$ is rigidly fixed while the other end $x=L$ is free to move with an attached load. Thus she considered Dirichlet boundary condition at $x=0$ and dynamic boundary conditions at $ x=L\,$, namely \begin{equation} u_{tt}(L,t)=-\left[ u_{x}+u_{tx}\right] (L,t),\qquad t>0 \ . \label{Vand_2} \end{equation} By rewriting the whole system within the framework of the abstract theories of the so-called $B$-evolution theory, the existence of a unique solution in the strong sense has been shown. An exponential decay result was also proved in \cite{GV96} for a problem related to (\ref{Vand_1})-(\ref{Vand_2}), which describe the weakly damped vibrations of an extensible beam. See \cite{GV96} for more details. 
Subsequently, Zang and Hu \cite{ZH07}, considered the problem \begin{equation*} u_{tt}-p\left( u_{x}\right) _{xt}-q\left( u_{x}\right) _{x}=0,\qquad x\in \left(0,1\right) ,\,t>0 \end{equation*} with \begin{equation*} u\left( 0,t\right) =0,\qquad p\left( u_{x}\right) _{t}+q\left( u_{x}\right) \left( 1,t\right) +ku_{tt}\left( 1,t\right) =0,\, t\geq 0. \end{equation*} By using the Nakao inequality, and under appropriate conditions on $p$ and $ q $, they established both exponential and polynomial decay rates for the energy depending on the form of the terms $p$ and $q$. Recently, the present authors have considered, in \cite{GS08} and \cite{GS082}, problem (\ref{ondes}) with $g=0$ and a nonlinear boundary damping of the form $h\left(u_{t}\right)=\left\vert u_{t}\right\vert^{m-2}u_{t}$. A local existence result was obtained by combining the Faedo-Galerkin method with the contraction mapping theorem. Concerning the asymptotic behavior, the authors showed that the solution of such problem is unbounded and grows up exponentially when time goes to infinity provided that the initial data are large enough and the damping term is nonlinear. The blow up result was shown when the damping is linear (i.e. $m=2$). Also, we proved in \cite{GS082} that under some restrictions on the exponents $m$ and $p$, we can always find initial data for which the solution is global in time and decays exponentially to zero. These results had been recently generalized for a wide range of nonlinearities in the equation and in the boundary term: the authors proved the local existence and uniqueness by a sophisticated application of the non linear semigroup theory, see \cite{GRS2012}. In the absence of the strong damping $\alpha \Delta u_t$ and for Dirichlet boundary conditions on the whole boundary $\partial\Omega$, the question of blow up in finite time of problem (\ref{ondes}) has been investigated by many authors. 
Messaoudi \cite{Mess03} showed that if the initial energy is negative and if the relaxation function $g$ satisfies the following assumption \begin{equation} \label{Messaoudi_condition} \int_{0}^{\infty }g(s)ds<\frac{(p/2)-1}{(p/2)-1+(1/2p)} \ , \end{equation} then the solutions blow up in finite time. In fact this last condition has been assumed by other researchers. See for instance \cite{Mess_Kafi_2007,Mess_Kafi_2008,MS2010,Mes01,SaZh2010,YWL2009}. The main goal of this paper is to prove the local existence and to study the asymptotic behavior of the solution of problem (\ref{ondes}). One of the main questions is to show a blow-up result of the solution. This question is a difficult open problem, since in the presence of the strong damping term, i.e. when $\alpha\neq 0$, the problem has a parabolic structure, which means that the solution gains more regularity. However, in this paper, we give a partial answer to this question and show that for $\alpha\neq 0$ and for large initial data, the solution is unbounded and grows exponentially as $t$ goes to infinity. While for the case $\alpha=0$, the solution has been shown to blow up in finite time. The main contribution of this paper in this blow up result is the following: the exponential growth and blow-up results hold without making the assumption (\ref{Messaoudi_condition}). In fact the only requirement is that the exponent $p$ has to be large enough which is a condition much weaker than condition (\ref{Messaoudi_condition}). Moreover, unlike in the works of Messaoudi and coworkers, we do not assume any polynomial structure on the damping term $h(u_t)$, to obtain an exponential growth of the solution or a blow up in finite time. This paper is organized as follows: firstly, applying the Faedo-Galerkin method combined with the fixed point theorem, we show, in Section \ref{local_existence_section}, the existence and uniqueness of a local in time solution. 
Secondly, under the smallness assumption on the initial data, we show, in Section \ref{Global_existence_section}, that the solution continues to exist globally in time. On the other hand, in Section \ref{Exponential_growth_section}, we prove that under some restrictions on the initial data and if the interior source dominates the boundary damping then the $L^p$-norm of the solution grows as an exponential function. Lastly, in Section \ref{blow_up_section}, we investigate the case when $\alpha=0$ and we prove that the solution ceases to exist and blows up in finite time. \section{Preliminary and local existence} \label{local_existence_section} In this section, we introduce some notations used throughout this paper. We also prove a local existence result of the solution of problem (\ref {ondes}). We denote \begin{equation*} H_{\Gamma_{0}}^{1}(\Omega) =\left\{u \in H^1(\Omega) /\ u_{\Gamma_{0}} = 0\right\} . \end{equation*} By $( .,.) $ we denote the scalar product in $L^{2}( \Omega)$ i.e. $(u,v)(t) = \int_{\Omega} u(x,t) v(x,t) dx$. Also we mean by $\Vert .\Vert_{q}$ the $L^{q}(\Omega) $ norm for $1 \leq q \leq \infty$, and by $ \Vert .\Vert_{q,\Gamma_{1}}$ the $L^{q}(\Gamma_{1}) $ norm. Let $T>0$ be a real number and $X$ a Banach space endowed with norm $\Vert.\Vert _{X}$. $L^{p}(0,T;X),\ 1~\leq p~<\infty $ denotes the space of functions $f$ which are $L^{p}$ over $\left( 0,T\right) $ with values in $X$ , which are measurable and $\Vert f\Vert _{X}\in L^{p}\left( 0,T\right) $. This space is a Banach space endowed with the norm \begin{equation*} \Vert f\Vert _{L^{p}\left( 0,T;X\right) }=\left( \int_{0}^{T}\Vert f\Vert _{X}^{p}dt\right) ^{1/p}\quad . \end{equation*} $L^{\infty }\left( 0,T;X\right) $ denotes the space of functions $f:\left] 0,T\right[ \rightarrow X$ which are measurable and $\Vert f\Vert _{X}\in L^{\infty }\left( 0,T\right) $. 
This space is a Banach space endowed with the norm: \begin{equation*} \Vert f\Vert _{L^{\infty }(0,T;X)}=\mbox{ess}\sup_{0<t<T}\Vert f\Vert _{X}\quad . \end{equation*} We recall that if $X$ and $Y$ are two Banach spaces such that $ X\hookrightarrow Y$ (continuous embedding), then \begin{equation*} L^{p}\left( 0,T;X\right) \hookrightarrow L^{p}\left( 0,T;Y\right) ,\ 1\leq p\leq \infty . \end{equation*} We will also use the embedding (see \cite[Theorem 5.8]{A75}): \begin{equation*} H_{\Gamma _{0}}^{1}(\Omega )\hookrightarrow L^{p}(\Omega),\;2\leq p\leq \bar{p}\quad \mbox{where }\quad \bar{p}=\left\{ \begin{array}{ll} \dfrac{2N}{N-2} & \mbox{ if }N\geq 3, \\ +\infty & \mbox{ if }N=1,2 \ , \end{array} \right. \end{equation*} and also \begin{equation*} H_{\Gamma _{0}}^{1}(\Omega )\hookrightarrow L^{q}(\Gamma _{1}),\;2\leq q\leq \bar{q}\quad \mbox{where }\quad \bar{q}=\left\{ \begin{array}{ll} \dfrac{2(N-1)}{N-2} & \mbox{ if }N\geq 3, \\ +\infty & \mbox{ if }N=1,2. \end{array} \right. \end{equation*} For $2 \leq m \leq \bar{q}$, let us denote $V=H_{\Gamma _{0}}^{1}(\Omega )\cap L^{m}(\Gamma _{1})$. We assume that the relaxation function $g$ is of class $C^{1}$ on $\mathbb{R}$ and satisfies: \begin{equation} \forall \, s \, \in \mathbb{R} \,,\, g\left( s\right) \geq 0,\mbox{ and } \left(1-\displaystyle\int_{0}^{\infty }g\left(s\right) ds \right)=l>0 \ .\label{hypothesis_g} \end{equation} Moreover, we suppose that: \begin{equation} \forall \, s\geq 0 \,,\, g^{\prime}(s) \leq 0. \label{hypothesis_g_2} \end{equation} The hypotheses on the function $h$ are the following: \begin{description} \item[(H1)] $h$ is continuous and strongly monotone, i.e. 
for $2 \leq m \leq \bar{q}$, there exists a constant $m_{0}>0$ such that \begin{equation} \left( h(s)-h(v)\right) (s-v)\geq m_{0}|s-v|^{m} \, , \label{Assumption_h_1} \end{equation} \item[(H2)] there exist two positive constants $c_{m}$ and $C_{m}$ such that \begin{equation} c_{m}|s|^{m}\leq h(s)s\leq C_{m}|s|^{m},\qquad \forall s\in \mathbb{R} \ . \label{Assumption_h} \end{equation} \end{description} For a function $u \in C\Bigl(\lbrack \ 0,T],H_{\Gamma _{0}}^{1}(\Omega )\Bigl)$, let us introduce the following notation: \begin{equation*} \left( g\diamond u\right) \left( t\right) =\int_{0}^{t}g\left( t-s\right) \left\Vert \nabla u\left( s\right) -\nabla u\left( t\right) \right\Vert _{2}^{2}ds. \end{equation*} Thus, when $u \in C\Bigl(\lbrack \ 0,T],H_{\Gamma _{0}}^{1}(\Omega )\Bigl)\cap C^{1}\Bigl(\lbrack \ 0,T],L^{2}(\Omega )\Bigl)$ such that $u_{t} \in L^{2}\Bigl(0,T;H_{\Gamma _{0}}^{1}(\Omega )\Bigl)$, we have: \begin{eqnarray} \frac{d}{dt}\left( g\diamond u\right) \left( t\right) &=&\int_{0}^{t}g^{\prime }\left( t-s\right) \left\Vert \nabla u\left( s\right) -\nabla u\left( t\right) \right\Vert _{2}^{2}ds \notag \\ &&+\frac{d}{dt}\left( \left\Vert \nabla u\left( t\right) \right\Vert _{2}^{2}\right) \int_{0}^{t}g\left( s\right) ds-2\int_{\Omega }\int_{0}^{t}g\left( t-s\right) \nabla u\left( s\right) \nabla u_{t}\left( t\right) dsdx \notag \\ &=&\left( g^{\prime }\diamond u\right) \left( t\right) -2\int_{\Omega }\int_{0}^{t}g\left( t-s\right) \nabla u\left( s\right) \nabla u_{t}\left( t\right) dsdx \label{Integral_relation} \\ &&+\frac{d}{dt}\left\{ \left\Vert \nabla u\left( t\right) \right\Vert _{2}^{2}\int_{0}^{t}g\left( s\right) ds\right\} -g\left( t\right) \left\Vert \nabla u\left( t\right) \right\Vert _{2}^{2}. 
\notag \end{eqnarray} This last identity implies: \begin{eqnarray} \int_{\Omega }\int_{0}^{t}g\left( t-s\right) \nabla u\left( s\right) \nabla u_{t}\left( t\right) dsdx &=&\frac{1}{2}\left( g^{\prime }\diamond u\right) \left( t\right) +\frac{1}{2}\frac{d}{dt}\left\{ \left\Vert \nabla u\left( t\right) \right\Vert _{2}^{2}\int_{0}^{t}g\left( s\right) ds\right\} \notag \\ &&-\frac{1}{2}g\left( t\right) \left\Vert \nabla u\left( t\right) \right\Vert _{2}^{2}-\frac{1}{2}\frac{d}{dt}\left( g\diamond u\right) \left( t\right) . \label{Nonlinear_term_viscoelastic} \end{eqnarray} For $u \in C\Bigl(\lbrack \ 0,T],H_{\Gamma _{0}}^{1}(\Omega )\Bigl)\cap C^{1}\Bigl(\lbrack \ 0,T],L^{2}(\Omega )\Bigl)$ such that $u_{t} \in L^{2}\Bigl(0,T;H_{\Gamma _{0}}^{1}(\Omega )\Bigl)$, let us define the modified energy functional $E$ by: \begin{eqnarray} E\left( t,u,u_{t}\right) =E\left( t\right) &=&\dfrac{1}{2}\Vert u_{t}\left(t\right) \Vert _{2}^{2}+\dfrac{1}{2}\Vert u_{t}\left( t\right) \Vert _{2,\Gamma _{1}}^{2}+\dfrac{1}{2}\left( 1-\displaystyle\int_{0}^{t}g\left( s\right) ds\right) \left\Vert \nabla u\left( t\right) \right\Vert _{2}^{2} \notag\\ & ~&+\dfrac{1}{2}\left( g\diamond u\right) \left( t\right) -\dfrac{1}{p}\left\Vert u\left( t\right) \right\Vert _{p}^{p}.\label{Energy_visco_elastic} \end{eqnarray} The following local existence result of the solution of problem (\ref{ondes}) is closely related to the one we have proved for a slightly different problem in \cite[Theorem 2.1]{GS08}, where no memory term was present. Let us state it: \begin{theorem} \label{existence} Assume that (\ref{hypothesis_g}), (\ref{hypothesis_g_2}) and (\ref{Assumption_h_1}) hold. Let $2\leq p\leq \bar{q}$ and $\max\left( 2,\frac{\bar{q}}{\bar{q}+1-p} \right) \leq m \leq \bar{q}$. 
Then given $ u_{0}\in H_{\Gamma _{0}}^{1}(\Omega )$ and $u_{1}\in L^{2}(\Omega )$, there exists $T>0$ and a unique solution $u$ of the problem (\ref{ondes}) on $ (0,T) $ such that \begin{eqnarray*} u &\in &C\Bigl(\lbrack \ 0,T],H_{\Gamma _{0}}^{1}(\Omega )\Bigl)\cap C^{1} \Bigl(\lbrack \ 0,T],L^{2}(\Omega )\Bigl), \\ u_{t} &\in &L^{2}\Bigl(0,T;H_{\Gamma _{0}}^{1}(\Omega )\Bigl)\cap L^{m}\left( \left( 0,T\right) \times \Gamma _{1}\right) . \end{eqnarray*} \end{theorem} Let us mention that Theorem \ref{existence} also holds for $\alpha=0$. The proof of Theorem \ref{existence} can be done along the same line as in \cite[Theorem 2.1]{GS08}. The main idea of the proof is based on the combination of the Faedo-Galerkin approximations and the contraction mapping theorem. However, for the convenience of the reader we give only the outline of the proof here. For $u\in C\bigl(\lbrack 0,T],H_{\Gamma _{0}}^{1}(\Omega )\bigl)\,\cap \,C^{1}\bigl(\lbrack 0,T],L^{2}(\Omega )\bigl)$ given, let us consider the following problem: \begin{equation} \left\{ \begin{array}{ll} v_{tt}-\Delta v-\alpha \Delta v_{t}+\displaystyle\int_{0}^{t}g(t-s)\Delta v(s)ds=|u|^{p-2}u, & x\in \Omega ,\ t>0 \,, \\ v(x,t)=0, & x\in \Gamma _{0},\ t>0\,, \\ v_{tt}(x,t)=-\left[ \displaystyle\frac{\partial v}{\partial \nu }(x,t)- \displaystyle\int_{0}^{t}g(t-s)\frac{\partial v}{\partial \nu }(x,s)ds+\frac{ \alpha \partial v_{t}}{\partial \nu }(x,t)+h\left( v_{t}\right) \right] & x\in \Gamma _{1},\ t>0 \,, \\ v(x,0)=u_{0}(x),\;v_{t}(x,0)=u_{1}(x) & x\in \Omega . \end{array} \right. 
\label{ondes_u} \end{equation} \begin{definition} \label{generalised} A function $v(x,t)$ such that \begin{eqnarray*} v &\in &L^{\infty }\left( 0,T;H_{\Gamma _{0}}^{1}(\Omega )\right) \ , \\ v_{t} &\in &L^{2}\left( 0,T;H_{\Gamma _{0}}^{1}(\Omega )\right) \cap L^{m}\left( (0,T)\times \Gamma _{1}\right) \ , \\ v_{t} &\in &L^{\infty }\left( 0,T;H_{\Gamma _{0}}^{1}(\Omega )\right) \cap L^{\infty }\left( 0,T;L^{2}(\Gamma _{1})\right) \ , \\ v_{tt} &\in &L^{\infty }\left( 0,T;L^{2}(\Omega )\right) \cap L^{\infty }\left( 0,T;L^{2}(\Gamma _{1})\right) \ , \\ v(x,0) &=&u_{0}(x)\,, \\ v_{t}(x,0) &=&u_{1}(x)\,, \end{eqnarray*} is a generalized solution to the problem (\ref{ondes_u}) if for any function $w \in H_{\Gamma _{0}}^{1}(\Omega )\cap L^{m}(\Gamma _{1})$ and $ \varphi \in C^{1}(0,T)$ with $\varphi (T)=0$, we have the following identity: \begin{equation*} \begin{array}{lll} \displaystyle\int_{0}^{T}(|u|^{p-2}u,w)(t)\,\varphi (t)\,dt & =\displaystyle \int_{0}^{T}\Bigg[(v_{tt},w)(t)+(\nabla v,\nabla w)(t)-\int_{0}^{t}g(t-s)(\nabla v\left( s\right) ,\nabla w\left( t\right) )ds & \\ & +\alpha (\nabla v_{t},\nabla w)(t)\Bigg]\varphi (t)dt+\displaystyle \int_{0}^{T}\varphi (t)\left( \int_{\Gamma _{1}}\left( v_{tt}(t)+h\left( v_{t}\right) \right) w\,d\Gamma \right) dt. & \end{array} \end{equation*} \end{definition} \begin{lemma} \label{existence_f} Let $2\leq p\leq \bar{q}$ and $2 \leq m \leq \bar{q}$. Let $u_{0}\in H^{2}(\Omega )\cap V,\,u_{1}\in H^{2}(\Omega )$, then for any $T>0,$ there exists a unique generalized solution (in the sense of Definition \ref{generalised}), $ v(x,t)$ of problem (\ref{ondes_u}). \end{lemma} The proof of Lemma \ref{existence_f} is essentially based on the Faedo-Galerkin approximations combined with the compactness method and can be done along the same line as in \cite[Lemma 2.2]{GS08}; we omit the details. In the following lemma we state a local existence result of problem (\ref {ondes_u}). 
\begin{lemma} \label{existence_u} Let $2\leq p\leq \bar{q}$ and $\max\left( 2, \frac{\bar{q}}{\bar{q}+1-p} \right) \leq m \leq \bar{q}$. Then given $u_{0}~\in ~H_{\Gamma _{0}}^{1}(\Omega )\,,\,u_{1}\in L^{2}(\Omega )$ there exists $T>0$ and a unique solution $v$ of the problem (\ref{ondes_u}) on $(0,T)$ such that \begin{eqnarray*} v &\in &C\Bigl(\lbrack 0,T],H_{\Gamma _{0}}^{1}(\Omega )\Bigl)\,\cap \,C^{1} \Bigl(\lbrack 0,T],L^{2}(\Omega )\Bigl), \\ v_{t} &\in &L^{2}\Bigl(0,T;H_{\Gamma _{0}}^{1}(\Omega )\Bigl)\,\cap L^{m}\left( \left( 0,T\right) \times \Gamma _{1}\right) \end{eqnarray*} and satisfies the energy inequality: \begin{eqnarray*} &&\frac{1}{2}\left[ \Vert u_{t}\left( t\right) \Vert _{2}^{2}+\Vert u_{t}\left( t\right) \Vert _{2,\Gamma _{1}}^{2}+\left( 1-\displaystyle \int_{0}^{t}g\left( s\right) ds\right) \left\Vert \nabla u\left( t\right) \right\Vert _{2}^{2} +\left( g\diamond u\right) \left( t\right) \right] _{s}^{t} \\ &&+\alpha \displaystyle\int_{s}^{t}\Vert \nabla v_{t}(\tau )\Vert _{2}^{2}d\tau +\displaystyle\int_{s}^{t}\int_{\Gamma _{1}}h(v_{t}(\sigma,\tau ))d\sigma d\tau \\ &\leq &\displaystyle\int_{s}^{t}\displaystyle\int_{\Omega }|u(\tau )|^{p-2}u(\tau )v_{t}(\tau )d\tau dx \end{eqnarray*} for $0\leq s\leq t\leq T$. \end{lemma} \begin{proof} We first approximate $u\in C([0,T],H_{\Gamma _{0}}^{1}(\Omega ))\cap C^{1}\left( [0,T],L^{2}(\Omega )\right) $ endowed with the standard norm $\Vert u\Vert =\displaystyle\max_{t\in \lbrack 0,T]}\Vert u_{t}(t)\Vert _{2}+\Vert u(t)\Vert _{H^{1}(\Omega )}$, by a sequence $(u^{k})_{k\in \mathbb{N}}\subset C^{\infty }([0,T]\times \overline{ \Omega })$ by a standard convolution arguments (see \cite{B83}). Next, we approximate the initial data $u_{1}\in L^{2}(\Omega )$ by a sequence $ (u_{1}^{k})_{k\in \mathbb{N}}\subset C_{0}^{\infty }(\Omega )$. 
Finally, since the space $H^{2}(\Omega )\cap V\cap H_{\Gamma _{0}}^{1}(\Omega )$ is dense in $H_{\Gamma _{0}}^{1}(\Omega )$ for the $H^{1}$ norm, we approximate $u_{0}\in H_{\Gamma _{0}}^{1}(\Omega )$ by a sequence $(u_{0}^{k})_{k\in \mathbb{N}}\subset H^{2}(\Omega )\cap V\cap H_{\Gamma _{0}}^{1}(\Omega )$. We consider now the set of the following problems: \begin{equation} \left\{ \begin{array}{ll} v_{tt}^{k}-\Delta v^{k}-\alpha \Delta v_{t}^{k}+\displaystyle \int_{0}^{t}g(t-s)\Delta v^{k}(s)ds=|u^{k}|^{p-2}u^{k}, & x\in \Omega ,\ t>0 , \\ v^{k}(x,t)=0, & x\in \Gamma _{0},\ t>0 , \\ v_{tt}^{k}(x,t)=-\left[ \displaystyle\frac{\partial v^{k}}{\partial \nu } (x,t)-\displaystyle\int_{0}^{t}g(t-s)\frac{\partial v^{k}}{\partial \nu } (x,s)ds+\frac{\alpha \partial v_{t}^{k}}{\partial \nu }(x,t)+h\left( v_{t}^{k}\right) \right] , & x\in \Gamma _{1},\ t>0 , \\ v^{k}(x,0)=u_{0}^{k},\;v_{t}^{k}(x,0)=u_{1}^{k}, & x\in \Omega . \end{array} \right. \label{approx_k} \end{equation} Since all the hypotheses of Lemma \ref{existence_f} are verified, we can find a sequence of unique solutions $\left( v^{k}\right) _{k\in \mathbb{N}}$ of the problem (\ref{approx_k}). Our goal now is to show that $ (v^{k},v_{t}^{k})_{k\in \mathbb{N}}$ is a Cauchy sequence in the space \begin{equation*} \begin{array}{lll} Y_{T} & =\Bigl\{ & (v,v_{t})/v\in C\left( \left[ 0,T\right] ,H_{\Gamma _{0}}^{1}(\Omega )\right) \cap C^{1}\left( \left[ 0,T\right] ,L^{2}(\Omega )\right) , \\ & & v_{t}\in L^{2}\left( 0,T;H_{\Gamma _{0}}^{1}(\Omega )\right) \cap L^{m}\left( \left( 0,T\right) \times \Gamma _{1}\right) \Bigl\} \end{array} \end{equation*} endowed with the norm \begin{equation} \label{norm} \Vert (v,v_{t})\Vert _{Y_{T}}^{2}=\max_{0\leq t\leq T}\Bigl[\Vert v_{t}\Vert _{2}^{2}+l\Vert \nabla v\Vert _{2}^{2}\Bigl]+\Vert v_{t}\Vert _{L^{m}\left( \left( 0,T\right) \times \Gamma _{1}\right) }^{2}+\int_{0}^{T}\Vert \nabla v_{t}(s)\Vert _{2}^{2}\;ds\;. 
\end{equation} For this purpose, we set $U=u^{k}-u^{k^{\prime }},\ V=v^{k}-v^{k^{\prime }}$ . It is straightforward to see that $V$ satisfies: \begin{equation*} \left\{ \begin{array}{ll} V_{tt}-\Delta V-\alpha \Delta V_{t}+\displaystyle\int_{0}^{t}g(t-s)\Delta V(s)ds=|u^{k}|^{p-2}u^{k}-|u^{k^{\prime }}|^{p-2}u^{k^{\prime }} & x\in \Omega ,\ t>0 \,, \\ V(x,t)=0 & x\in \Gamma _{0},\ t>0 \,, \\ V_{tt}(x,t)=-\left[\displaystyle\frac{\partial V}{\partial \nu }(x,t)- \displaystyle\int_{0}^{t}g(t-s)\frac{\partial V}{\partial \nu }(x,s)ds+\frac{ \alpha \partial V_{t}}{\partial \nu }(x,t)+h(v_{t}^{k})-h(v_{t}^{k^{\prime }})\right] & x\in \Gamma _{1},\ t>0 \,, \\ V(x,0)=u_{0}^{k}-u_{0}^{k^{\prime }},\;V_{t}(x,0)=u_{1}^{k}-u_{1}^{k^{\prime }} & x\in \Omega . \end{array} \right. \end{equation*} We multiply the above differential equations by $V_{t}$, we integrate over $ (0,t)\times \Omega $, we use integration by parts and the identity (\ref {Integral_relation}) to obtain: \begin{equation} \left. \begin{array}{ll} & \begin{array}{l} \dfrac{1}{2}\left( \Vert V_{t}\left( t\right) \Vert _{2}^{2}+\Vert V_{t}\left( t\right) \Vert _{2,\Gamma _{1}}^{2}+\left( 1-\displaystyle \int_{0}^{t}g\left( r\right) dr\right) \left\Vert \nabla V\left( t\right) \right\Vert _{2}^{2}\right) \\ +\alpha \displaystyle\int_{0}^{t}\Vert \nabla V_{t}\Vert _{2}^{2}ds\vspace{ 0.2cm}+\int_{0}^{t}\int_{\Gamma _{1}}\left( h(v_{t}^{k}(x,\tau ))-h(v_{t}^{k^{\prime }}(x,\tau ))\right) \left( v_{t}^{k}(x,\tau )-v_{t}^{k^{\prime }}(x,\tau )\right) d\Gamma d\tau \end{array} \\ & -\dfrac{1}{2}\displaystyle\int_{0}^{t}\left( g^{\prime }\diamond V\right) \left( s\right) ds+\dfrac{1}{2}\displaystyle\int_{0}^{t}g\left( s\right) \left\Vert \nabla V\left( s\right) \right\Vert _{2}^{2}ds \\ = & \displaystyle\frac{1}{2}\left( \Vert V_{t}(0)\Vert _{2}^{2}+\Vert \nabla V(0)\Vert _{2}^{2}+\Vert V_{t}(0)\Vert _{2,\Gamma _{1}}^{2}\right) \vspace{ 0.2cm} \\ & +\displaystyle\int_{0}^{t}\displaystyle\int\limits_{\Omega }\left( 
|u^{k}|^{p-2}u^{k}-|u^{k^{\prime }}|^{p-2}u^{k^{\prime }}\right) \left( v_{t}^{k}-v_{t}^{k^{\prime }}\right) \,dxds,\quad \forall t\in \left( 0,T\right) . \end{array} \right. \label{Main_estimate_existence} \end{equation} Consequently, the above inequality together with (\ref{hypothesis_g}), (\ref {hypothesis_g_2}) and (\ref{Assumption_h_1}) gives \begin{equation} \left. \begin{array}{ll} & \dfrac{1}{2}\left( \Vert V_{t}\left( t\right) \Vert _{2}^{2}+\Vert V_{t}\left( t\right) \Vert _{2,\Gamma _{1}}^{2}+l\left\Vert \nabla V\left( t\right) \right\Vert _{2}^{2}\right) +\alpha \displaystyle\int_{0}^{t}\Vert \nabla V_{t}\Vert _{2}^{2}ds +m_{0}\int_{0}^{t}\Vert V_{t}\Vert _{m,\Gamma _{1}}^{m}ds \\ \leq & \displaystyle\frac{1}{2}\left( \Vert V_{t}(0)\Vert _{2}^{2}+\Vert \nabla V(0)\Vert _{2}^{2}+\Vert V_{t}(0)\Vert _{2,\Gamma _{1}}^{2}\right) \\ & +\displaystyle\int_{0}^{t}\displaystyle\int\limits_{\Omega }\left( |u^{k}|^{p-2}u^{k}-|u^{k^{\prime }}|^{p-2}u^{k^{\prime }}\right) \left( v_{t}^{k}-v_{t}^{k^{\prime }}\right) \,dxds,\quad \forall t\in \left( 0,T\right) . \end{array} \right. \label{Main_inequality_Cauchy} \end{equation} Following the same method as in \cite{GS08}, we deduce that there exists $C$ depending only on $\Omega \mbox{ and }p$ such that: \begin{equation*} \Vert V\Vert _{Y_{T}}\leq C\left( \Vert V_{t}(0)\Vert _{2}^{2}+\Vert \nabla V(0)\Vert _{2}^{2}+\Vert V_{t}(0)\Vert _{2,\Gamma _{1}}^{2}\right) +CT\Vert U\Vert _{Y_{T}}. \end{equation*} Since $(u_{0}^{k})_{k\in \mathbb{N}}$ is a converging sequence in $H_{\Gamma _{0}}^{1}\left( \Omega \right) $, $(u_{1}^{k})_{k\in \mathbb{N}}$ is a converging sequence in $L^{2}\left( \Omega \right) $ and $\left( u^{k}\right) _{k\in \mathbb{N}}$ is a converging sequence in $C\left( \left[ 0,T\right] ,H_{\Gamma _{0}}^{1}(\Omega )\right) \cap C^{1}\left( \left[ 0,T \right] ,L^{2}(\Omega )\right) $ (so in $Y_{T}$ also), we conclude that $ (v^{k},v_{t}^{k})_{k\in \mathbb{N}}$ is a Cauchy sequence in $Y_{T}$. 
Thus $ (v^{k},v_{t}^{k})$ converges to a limit $(v,v_{t})\in Y_{T}$. Now by the same procedure used by Georgiev and Todorova in \cite{GT94}, we prove that this limit is a weak solution of the problem (\ref{ondes_u}). This completes the proof of the Lemma \ref{existence_u}. \end{proof} \begin{proof}[Proof of Theorem \ref{existence}] In order to prove Theorem \ref{existence}, we use the contraction mapping theorem.\newline Indeed, for $T>0,$ let us define the convex closed subset of $Y_{T}$: \begin{equation*} X_{T}=\left\{ (v,v_{t})\in Y_{T}\mbox{ such that }v(0)=u_{0},v_{t}(0)=u_{1} \right\} . \end{equation*} Let us denote: \begin{equation*} B_{R}\left( X_{T}\right) =\left\{ v\in X_{T};\Vert v\Vert _{Y_{T}}\leq R\right\} , \end{equation*} the ball of radius $R$ in $X_{T}$. Then, Lemma \ref{existence_u} implies that for any $u\in X_{T}$, we may define $v=\Phi \left( u\right) $ the unique solution of (\ref{ondes_u}) corresponding to $u$. Our goal now is to show that for a suitable $T>0$, $\Phi $ is a contractive map satisfying $ \Phi \left( B_{R}(X_{T})\right) \subset B_{R}(X_{T})$. \newline Let $u\in B_{R}(X_{T})$ and $v=\Phi \left( u\right) $. Then for all $t\in \lbrack 0,T]$ we have as in (\ref{Main_estimate_existence}): \begin{equation} \begin{array}{l} \Vert v_{t}\Vert _{2}^{2}+l\Vert \nabla v\Vert _{2}^{2}+\Vert v_{t}\Vert _{2,\Gamma _{1}}^{2}+2\alpha \displaystyle\int\limits_{0}^{t}\Vert \nabla v_{t}\Vert _{2}^{2}\,ds+c\int_{0}^{t}\Vert v_{t}\Vert _{m,\Gamma _{1}}^{m}ds \\ \leq \Vert u_{1}\Vert _{2}^{2}+\Vert \nabla u_{0}\Vert _{2}^{2}+\Vert u_{1}\Vert _{2,\Gamma _{1}}^{2}+2\displaystyle\int\limits_{0}^{t} \displaystyle\int\limits_{\Omega }|u\left( \tau \right) |^{p-2}u\left( \tau \right) v_{t}\left( \tau \right) \,dx\,d\tau . 
\end{array} \quad \label{schauder} \end{equation} Using H\"{o}lder's inequality, we can control the last term in the right hand side of the inequality (\ref{schauder}) as follows: \begin{equation*} \displaystyle\int\limits_{0}^{t}\displaystyle\int\limits_{\Omega }|u\left( \tau \right) |^{p-2}u\left( \tau \right) v_{t}\left( \tau \right) dxd\tau \leq \displaystyle\int\limits_{0}^{t}\Vert u\left( \tau \right) \Vert _{2N/\left( N-2\right) }^{p-1}\Vert v_{t}\left( \tau \right) \Vert _{{2N}/{ \bigl(3N-Np+2(p-1)\bigl)}}d\tau \end{equation*} Since $\displaystyle p\leq \frac{2N}{N-2}$, we have: $$\displaystyle\frac{2N}{\bigl(3N-Np+2(p-1)\bigl)}\leq \frac{2N}{N-2}\quad .$$ Thus, by Young's and Sobolev's inequalities, we get for all $\delta >0$ there exists $C(\delta )>0$, such that for all $t\in \left( 0,T\right) $ \begin{equation*} \displaystyle\int\limits_{0}^{t}\displaystyle\int\limits_{\Omega }|u\left( \tau \right) |^{p-2}u\left( \tau \right) v_{t}\left( \tau \right) dxd\tau \leq C(\delta )tR^{2(p-1)}+\delta \displaystyle\int\limits_{0}^{t}\Vert \nabla v_{t}\left( \tau \right) \Vert _{2}^{2}d\tau . \end{equation*} Inserting the last estimate in the inequality (\ref{schauder}) and choosing $ \delta $ small enough such that: \begin{equation*} \Vert v\Vert _{Y_{T}}^{2}\leq \frac{1}{2}R^{2}+CTR^{2(p-1)}. \end{equation*} Thus, for $T$ sufficiently small, we have $\Vert v\Vert _{Y_{T}}\leq R$. This shows that $v\in B_{R}\left( X_{T}\right) $. To prove that $\Phi $ is a contraction, we have to follow the same steps (up to minor changes) as in \cite{GS08}. We omit the details. Thus the proof of Theorem \ref{existence} is finished. \end{proof} \begin{remark} Let us say that the hypothesis on $m$, $\max\left( 2, \frac{\bar{q}}{\bar{q}+1-p} \right) \leq m \leq \bar{q}$, is made to pass to the limit in the nonlinear term, by the same way we have used in \cite[Equation (2.28)]{GS08}. 
\end{remark} \section{Global existence}\label{Global_existence_section} In this section, we show that, under some restrictions on the initial data, the local solution of problem (\ref{ondes}) can be continued in time and the lifespan of the solution will be $[0,\infty )$. \begin{definition} \label{Tmax} Let $2\leq p\leq \bar{q}$, $\max\left( 2, \frac{\bar{q}}{\bar{q}+1-p} \right) \leq m \leq \bar{q}$, $u_{0}\in H_{\Gamma_{0}}^{1}(\Omega) $ and $u_{1}\in L^{2}(\Omega) $. We denote by $u$ the solution of (\ref{ondes}). We define: $$ T_{max} = \sup\Bigl\{ T > 0 \,,\, u = u(t) \ \mbox{ exists on } \ [0,T]\Bigr\} \ . $$ Since the solution $u \in Y_T$ (the solution is ``regular enough''), from the definition of the norm given by (\ref{norm}), let us recall that if $T_{max} < \infty$, then $$ \lim_{\underset {t < T_{max}} {t \rightarrow T_{max}}} \Vert \nabla u(t) \Vert_2 + \Vert u_t(t) \Vert_2 = + \infty. $$ If $T_{max} < \infty$, we say that the solution of (\ref{ondes}) blows up and that $T_{max}$ is the blow up time.\\ If $T_{max} = \infty$, we say that the solution of (\ref{ondes}) is global. \end{definition} In order to study the blow up phenomenon or the global existence of the solution of (\ref{ondes}), for all $0\leq t < T_{max}$, we define: \begin{eqnarray} I(t) &=&I(u(t))=\left( 1-\displaystyle\int_{0}^{t}g\left( s\right) ds\right) \left\Vert \nabla u\left( t\right) \right\Vert _{2}^{2}+\left( g\diamond u\right) \left( t\right) -\Vert u\Vert _{p}^{p}, \label{Energy_I} \\ J(t) &=&J(u(t))=\frac{1}{2}\left( 1-\displaystyle\int_{0}^{t}g\left( s\right) ds\right) \left\Vert \nabla u\left( t\right) \right\Vert _{2}^{2}+ \dfrac{1}{2}\left( g\diamond u\right) \left( t\right) -\frac{1}{p}\Vert u\Vert _{p}^{p}. \label{Energy_J} \end{eqnarray} Thus the energy functional defined in (\ref{Energy_visco_elastic}) can be rewritten as \begin{equation} E(u(t))=E(t)=J(t)+\frac{1}{2}\Vert u_{t}\Vert _{2}^{2}+\frac{1}{2}\Vert u_{t}\Vert _{2,\Gamma _{1}}^{2}. 
\label{Energy_E} \end{equation} As in \cite{GS06,V99}, we denote by $B$ the best constant in the Poincar\'{e}-Sobolev embedding $H_{\Gamma_{0}}^{1}(\Omega) \hookrightarrow L^{p}(\Omega)$ defined by: \begin{equation}\label{sobolev} B^{-1} = \inf\left\{\Vert \nabla u \Vert_2 : u \in H_{\Gamma_{0}}^{1}(\Omega), \Vert u\Vert_p = 1 \right\}. \end{equation} For $u_{0}~\in~H_{\Gamma _{0}}^{1}(\Omega )\,,\,u_{1}\in L^{2}(\Omega )$, we define: \begin{equation*} E(0) =\dfrac{1}{2}\Vert u_{1}\Vert_{2}^{2}+\dfrac{1}{2}\Vert u_{1}\Vert_{2,\Gamma _{1}}^{2}+ \dfrac{1}{2} \left\Vert \nabla u_{0} \right\Vert _{2}^{2} -\dfrac{1}{p}\left\Vert u_{0}\right\Vert _{p}^{p}. \end{equation*} The first goal is to prove that the above energy $E\left( t\right) $ defined in (\ref{Energy_visco_elastic}) is a non-increasing function along the trajectories. More precisely, we have the following result: \begin{lemma} \label{Lemma_dissp_energy_visco} Let $2\leq p\leq \bar{q}$, $\max\left( 2, \frac{\bar{q}}{\bar{q}+1-p} \right) \leq m \leq \bar{q}$, and $u$ be the solution of (\ref{ondes}). Then, for all $t>0,$ we have \begin{eqnarray} \frac{dE\left( t\right) }{dt} &=&\frac{1}{2}\left( g^{\prime }\diamond u\right) \left( t\right) -\frac{1}{2}g\left( t\right) \left\Vert \nabla u\left( t\right) \right\Vert _{2}^{2}-\alpha \left\Vert \nabla u_{t}\right\Vert _{2}^{2}-\int_{\Gamma _{1}}h\left( u_{t}\right) u_{t}d\Gamma \notag \\ &\leq &\frac{1}{2}\left( g^{\prime }\diamond u\right) \left( t\right) -\alpha \left\Vert \nabla u_{t}\right\Vert _{2}^{2}-\int_{\Gamma _{1}}h\left( u_{t}\right) u_{t}d\Gamma ,\qquad \forall t\in \lbrack 0,T_{max}). \label{dissp_enrgy_visco} \end{eqnarray} \end{lemma} \begin{proof}Multiplying the first equation in (\ref{ondes}) by $u_{t}$, integrating over $\Omega $, using integration by parts we get: \begin{equation} \left. 
\begin{array}{l} \dfrac{d}{dt}\left\{ \dfrac{1}{2}\Vert u_{t}\Vert _{2}^{2}+\dfrac{1}{2}\Vert u_{t}\Vert _{2,\Gamma _{1}}^{2}+\dfrac{1}{2}\Vert \nabla u\Vert _{2}^{2}- \dfrac{1}{p}\left\Vert u\right\Vert _{p}^{p}\right\} \\ -\displaystyle\int_{\Omega }\displaystyle\int_{0}^{t}g\left( t-s\right) \nabla u\left( s\right) \nabla u_{t}\left( t\right) dsdx \\ =-\alpha \left\Vert \nabla u_{t}\right\Vert _{2}^{2}-\displaystyle \int_{\Gamma _{1}}h\left( u_{t}\right) u_{t}d\Gamma . \end{array} \right. \label{d_dt_Energy_2} \end{equation} A simple use of the identity (\ref{Nonlinear_term_viscoelastic}) gives (\ref {dissp_enrgy_visco}). This completes the proof of Lemma \ref {Lemma_dissp_energy_visco}. \end{proof} \begin{lemma} \label{Stable_set_lemma}Let $2\leq p\leq \bar{q}$, $\max\left( 2, \frac{\bar{q}}{\bar{q}+1-p} \right) \leq m \leq \bar{q}$. Assume that (\ref {hypothesis_g}) and (\ref{hypothesis_g_2}) hold. Then given $u_{0}~\in ~H_{\Gamma _{0}}^{1}(\Omega )\,,\,u_{1}\in L^{2}(\Omega )$ satisfying \begin{equation} \left\{ \begin{array}{l} \beta =\dfrac{B^{p}}{l}\left( \dfrac{2p}{l\left( p-2\right) }E(0)^{\left( p-2\right) /2}\right)<1, \\ I\left( u_{0}\right) >0, \end{array} \right. \label{beta_condition} \end{equation} we have: \begin{equation*} I\left( u\left( t\right) \right) >0,\qquad \forall t\in \lbrack 0,T_{\max }). 
\end{equation*} \end{lemma} \begin{proof} Since $I\left( u_{0}\right) >0$, then by continuity, there exists $T^{\ast }<T_{\max }$, such that \begin{equation*} I\left( t\right) >0,\qquad \forall t\in \lbrack 0,T^{\ast }] \end{equation*} which implies that for all $t\in \lbrack 0,T^{\ast }],$ \begin{eqnarray} J\left( t\right) &=&\frac{1}{p}I\left( t\right) +\frac{p-2}{2p}\left\{ \left( 1-\displaystyle\int_{0}^{t}g\left( s\right) ds\right) \left\Vert \nabla u\left( t\right) \right\Vert _{2}^{2}+\left( g\diamond u\right) \left( t\right) \right\} \notag \\ &\geq &\frac{p-2}{2p}\left\{ \left( 1-\displaystyle\int_{0}^{t}g\left( s\right) ds\right) \left\Vert \nabla u\left( t\right) \right\Vert _{2}^{2}+\left( g\diamond u\right) \left( t\right) \right\} . \label{J_inequality} \end{eqnarray} By using (\ref{hypothesis_g}), (\ref{hypothesis_g_2}), (\ref{Energy_E}) and ( \ref{dissp_enrgy_visco}), we easily get, for all $t\in \lbrack 0,T^{\ast }]$ \begin{eqnarray} l\left\Vert \nabla u(t)\right\Vert _{2}^{2} &\leq &\frac{2p}{p-2}J\left( t\right) ,\ \notag \\ &\leq &\frac{2p}{p-2}E\left( t\right) \leq \frac{2p}{p-2}E\left( 0\right) . \label{E_0_J_inequality} \end{eqnarray} From the definition of the constant $B$ in (\ref{sobolev}), we first get: $$ \forall t\in \lbrack 0,T^{\ast }] \ , \ \left\Vert u(t)\right\Vert _{p}^{p} \leq B^{p}\Vert \nabla u(t)\Vert_{2}^{p} \ . $$ Since we have: $$ \forall t\in \lbrack 0,T^{\ast }] \ , \ B^{p}\Vert \nabla u(t)\Vert_{2}^{p}= \frac{B^{p}}{l}\Vert \nabla u(t)\Vert _{2}^{p-2}\left( l\Vert\nabla u(t)\Vert _{2}^{2}\right) , $$ by exploiting (\ref{E_0_J_inequality}) and (\ref{beta_condition}), we obtain, for all $t\in \lbrack 0,T^{\ast }]$: \begin{eqnarray*} \left\Vert u(t)\right\Vert _{p}^{p} &\leq &\beta l\left( \Vert \nabla u(t)\Vert _{2}^{2}\right) \\ &\leq &\beta \left( 1-\int_{0}^{t}g\left( s\right) ds\right) \Vert \nabla u(t)\Vert _{2}^{2} \\ &<&\left( 1-\int_{0}^{t}g\left( s\right) ds\right) \Vert \nabla u(t)\Vert _{2}^{2}. 
\end{eqnarray*} Therefore, by using (\ref{Energy_I}), we conclude that \begin{equation*} I\left( t\right) >0,\qquad \forall t\in \lbrack 0,T^{\ast }]. \end{equation*} Using the fact that $E$ is decreasing along the trajectory, we get: \begin{equation*} \forall \, 0\leq t < T_{max} \,,\, \dfrac{B^{p}}{l}\left( \dfrac{2p}{l\left(p-2\right) }E\left( t\right) \right) ^{\left( p-2\right) /2}\leq \beta <1 \ . \end{equation*} By repeating this procedure, $T^{\ast }$ is extended to $T_{max}.$ \end{proof} Now, we are able to state the global existence theorem. \begin{theorem} \label{Global_existence}Let $2\leq p\leq \bar{q}$, $\max\left( 2, \frac{\bar{q}}{\bar{q}+1-p} \right) \leq m \leq \bar{q}$. Assume that (\ref{hypothesis_g}) and (\ref{hypothesis_g_2}) hold. Let $u_{0}~\in ~H_{\Gamma _{0}}^{1}(\Omega )\,,\,u_{1}\in L^{2}(\Omega )$ satisfy (\ref{beta_condition}). Then the solution of (\ref{ondes}) is global and bounded. \end{theorem} \begin{proof} To prove Theorem \ref{Global_existence}, using the definition of $T_{max}$, we just have to check that \begin{equation*} \left\Vert \nabla u(t)\right\Vert_{2}^{2}+\left\Vert u_{t}(t)\right\Vert _{2}^{2} \end{equation*} is uniformly bounded in time. To achieve this, we use (\ref{Energy_J}), ( \ref{Energy_E}), (\ref{dissp_enrgy_visco}) and (\ref{E_0_J_inequality}) to get \begin{eqnarray} E\left( 0\right) &\geq &E\left( t\right) =J\left( t\right) +\frac{1}{2}\Vert u_{t}\Vert _{2}^{2}+\frac{1}{2}\Vert u_{t}\Vert _{2,\Gamma _{1}}^{2} \notag \\ &\geq &\frac{p-2}{2p}\left\Vert \nabla u(t)\right\Vert _{2}^{2}+\frac{1}{2} \left\Vert u_{t}(t)\right\Vert _{2}^{2}. 
\label{Inq_E_t_E_0} \end{eqnarray} Therefore, \begin{equation*} \left\Vert \nabla u(t)\right\Vert_{2}^{2}+\left\Vert u_{t}(t)\right\Vert _{2}^{2}\leq C E(0) \end{equation*} where $C$ is a positive constant, which depends only on $p.$ \end{proof} \section{Exponential growth for $\protect\alpha>0$} \label{Exponential_growth_section} In this section we will prove that when the initial data are large enough, the energy of the solution of problem (\ref{ondes}) defined by (\ref{Energy_visco_elastic}) grows exponentially and thus so the $L^p$ norm. In order to state and prove the exponential growth result, we introduce the following constants: \begin{equation} B_{1}=\frac{B}{l},\quad \alpha _{1}=B_{1}^{-p/(p-2)},\quad E_{1}=\left( \frac{1}{2}-\frac{1}{p}\right) \alpha_{1}^{2},\quad E_{2}=\left( \frac{l}{2}-\frac{1}{p}\right) \alpha _{1}^{2} \label{constant} \end{equation} Let us first mention that $E_{2} < E_{1}$. The following Lemma will play an essential role in the proof of the exponential growth result, and it is inspired by the work in \cite{CCLa_2007} where the authors proved a similar lemma for the wave equation. First, we define the function \begin{equation} \gamma \left( t\right) :=\left( 1-\displaystyle\int_{0}^{t}g\left( s\right) ds\right) \left\Vert \nabla u\left( t\right) \right\Vert _{2}^{2}+\left( g\diamond u\right) \left( t\right) . \label{Gama} \end{equation} Let us rewrite the energy functional $E$ defined by (\ref{Energy_visco_elastic}) as: \begin{equation}\label{Energy_visco_elastic2} E(t) = \dfrac{1}{2}\Vert u_{t}\left(t\right) \Vert _{2}^{2}+\dfrac{1}{2}\Vert u_{t}\left( t\right) \Vert_{2,\Gamma _{1}}^{2}+ \dfrac{1}{2}\gamma \left( t\right) - \dfrac{1}{p}\left\Vert u(t)\right\Vert_{p}^{p} \ . \end{equation} \begin{lemma} \label{Vitilaro_Lemma} Let $2\leq p\leq \bar{q}$, $\max\left( 2, \frac{\bar{q}}{\bar{q}+1-p} \right) \leq m \leq \bar{q}$. Let $u$ be the solution of (\ref{ondes}). 
Assume that \begin{equation} E\left( 0\right) <E_{1}\quad \text{ and }\quad \left\Vert \nabla u_{0}\right\Vert _{2}\geq \alpha _{1}. \label{Initial_data_assumptions} \end{equation} Then there exists a constant $\alpha _{2}>\alpha _{1}$ such that \begin{equation} \left( \gamma \left( t\right) \right) ^{1/2}\geq \alpha _{2},\qquad \forall t\in \lbrack 0,T_{\max }) \label{result_Vitillaro_1} \end{equation} and \begin{equation} \left\Vert u\left( t\right) \right\Vert _{p}\geq B_{1}\alpha _{2},\;\qquad \forall t\in \lbrack 0,T_{\max }). \label{result_Vitillaro_2} \end{equation} \end{lemma} \begin{proof} We first note that, by (\ref{Energy_visco_elastic2}), we have: \begin{eqnarray} E(t) &\geq &\dfrac{1}{2}\gamma \left( t\right) -\dfrac{1}{p}\left\Vert u\left( t\right) \right\Vert _{p}^{p} \notag \\ &\geq &\dfrac{1}{2}\gamma \left( t\right) -\frac{B_{1}^{p}}{p}\left( l\left\Vert \nabla u\left( t\right) \right\Vert _{2}\right) ^{p} \notag \\ &\geq &\dfrac{1}{2}\gamma \left( t\right) -\frac{B_{1}^{p}}{p}\left( \gamma \left( t\right) \right) ^{p/2} \label{F_gamma_1} \\ &=&\dfrac{1}{2}\alpha ^{2}-\frac{B_{1}^{p}}{p}\alpha ^{p}=F\left( \alpha \right) , \notag \end{eqnarray} where $\alpha =\left( \gamma \left( t\right) \right) ^{1/2}.$ It is easy to verify that $F$ is increasing for $0<\alpha <\alpha _{1},$ decreasing for $\alpha >\alpha _{1},$ $F\left( \alpha \right) \rightarrow -\infty $ as $ \alpha \rightarrow +\infty ,$ and \begin{equation*} F\left( \alpha _{1}\right) =E_{1}, \end{equation*} where $\alpha _{1}$ is given in (\ref{constant}). 
Therefore, since $ E(0)<E_{1},$\ there exists $\alpha _{2}>\alpha _{1}$ such that $\,\,F\left( \alpha _{2}\right) =$ $E\left( 0\right) .$\newline If we set $\alpha _{0}=\left( \gamma \left( 0\right) \right) ^{1/2},$ then by (\ref{F_gamma_1}) we have: \begin{equation*} F\left( \alpha _{0}\right) \leq E\left( 0\right) =F\left( \alpha _{2}\right) , \end{equation*} which implies that $\alpha _{0}\geq \alpha _{2}.$\newline Now to establish (\ref{result_Vitillaro_1}), we suppose by contradiction that: \begin{equation*} \left( \gamma \left( t_{0}\right) \right) ^{1/2}<\alpha _{2}, \end{equation*} for some $t_{0}>0$ and by the continuity of $\gamma \left( .\right) ,$ we may choose\ $t_{0}$ such that \begin{equation*} \left( \gamma \left( t_{0}\right) \right) ^{1/2}>\alpha _{1}. \end{equation*} Using again (\ref{F_gamma_1}) leads to: \begin{equation*} E\left( t_{0}\right) \geq F\left( \gamma \left( t_{0}\right) ^{1/2}\right)>F(\alpha _{2})=E\left( 0\right) . \end{equation*} But this is impossible since for all $t>0$, $E(t)\leq E\left( 0\right) $. Hence (\ref{result_Vitillaro_1}) is established. To prove (\ref{result_Vitillaro_2}), we use (\ref{Energy_visco_elastic2}) to get: \begin{equation*} \frac{1}{2}\gamma \left( t\right) \leq E\left( 0\right) +\dfrac{1}{p} \left\Vert u\left( t\right) \right\Vert _{p}^{p} \ . \end{equation*} Consequently, using (\ref{result_Vitillaro_1}) leads to: \begin{eqnarray*} \dfrac{1}{p}\left\Vert u\left( t\right) \right\Vert _{p}^{p} &\geq &\frac{1}{ 2}\gamma \left( t\right) -E\left( 0\right) \\ &\geq &\frac{1}{2}\alpha _{2}^{2}-E\left( 0\right). \end{eqnarray*} But we have: $$\frac{1}{2}\alpha _{2}^{2}-E\left( 0\right) = \frac{1}{2}\alpha _{2}^{2}-F\left( \alpha _{2}\right) =\frac{B_{1}^{p}}{p}\alpha_{2}^{p} \ .$$ Therefore (\ref{result_Vitillaro_2}) holds. This finishes the proof of Lemma \ref{Vitilaro_Lemma}. 
\end{proof} The exponential growth result reads as follows: \begin{theorem} \label{blow_up_viscoel} Suppose that (\ref{hypothesis_g}), (\ref{hypothesis_g_2}) and (\ref{Assumption_h}) hold. Assume that \begin{equation*} 2\leq m\quad \text{and}\quad \max \left( m,2/l\right) <p\leq \overline{p}. \end{equation*} Then, the solution of (\ref{ondes}) satisfying \begin{equation} E\left( 0\right) <E_{2}\ \mbox{ and } \ \left\Vert \nabla u_{0}\right\Vert _{2}\geq \alpha _{1}, \label{Initial_data_visco} \end{equation} grows exponentially in the $L^{p}$ norm. \end{theorem} \begin{remark} \label{Remark_Gerbi_Said} It is obvious that for $g=0$, we have $E_1=E_2$, and Theorem \ref{blow_up_viscoel} reduces to Theorem 3.1 in \cite{GS08}. \end{remark} \begin{remark} \label{Remark_integral_condition} In Theorem \ref{blow_up_viscoel}, the condition \begin{equation*} \int_0^\infty g(s)ds<\frac{(p/2)-1}{(p/2)-1+(1/2p)} \end{equation*} used in \cite {Mess_Kafi_2007,Mess_Kafi_2008,MS2010,Mes01,Mess03,SaZh2010,YWL2009} is unnecessary and our result holds without it. \end{remark} \begin{remark} \label{remarkc1} Let us denote $c_{1}=\left(l-\frac{2}{p}\right)-2E_{2}\left( B_{1}\alpha _{2}\right) ^{-p}.$ Since we have seen that $\alpha_{2} > \alpha_{1}$, using the definition of $E_{2}$, we easily get $c_{1}>0$. This constant will play an important role in the proof of Theorem \ref{blow_up_viscoel}. \end{remark} \begin{proof}[Proof of Theorem \protect\ref{blow_up_viscoel}] \label{subsection_proof_blow_up} We implement the so-called Georgiev-Todorova method (see \cite{GT94,Mes01} and also \cite{MS041}). So, we suppose that the solution exists for all time and we will prove exponential growth. For this purpose, we set: \begin{equation} \mathscr{H}\left( t\right) =E_{2}-E\left( t\right) . \label{function_H} \end{equation} Of course by (\ref{Initial_data_assumptions}) and (\ref{dissp_enrgy_visco}) and since $E_{2}<E_{1}$, we deduce that $\mathscr{H}$ is a non-decreasing function.
So, by using (\ref{Energy_visco_elastic2}) and, (\ref{function_H}) we get successively: \begin{equation*} 0 < \mathscr{H}\left( 0\right) \leq \mathscr{H}\left( t\right) \leq E_{2}-E\left( t\right) \leq E_{1}-\dfrac{1}{2}\gamma(t)+\dfrac{1}{p}\left\Vert u\left( t\right) \right\Vert _{p}^{p}. \end{equation*} On one hand as $F(\alpha_{1}) = E_{1}$ and $ \forall \ t > 0 \,,\ \gamma(t) \geq \alpha_{2}^{2} > \alpha_{1}^{2}$, we obtain: $$ E_{1}-\dfrac{1}{2}\gamma(t)< F\left( \alpha _{1}\right) -\frac{1}{2}\alpha _{1}^{2} $$ On the other hand, since $$F\left( \alpha _{1}\right) -\frac{1}{2}\alpha _{1}^{2}=-\frac{B_{1}^{p}}{p}\alpha _{1}^{p} \ , $$ we obtain the following inequality: \begin{equation} 0<\mathscr{H}\left( 0\right) \leq \mathscr{H}\left( t\right) \leq \dfrac{1}{p }\left\Vert u\left( t\right) \right\Vert _{p}^{p},\;\qquad \forall t\geq 0. \label{H_inequality} \end{equation} For $\varepsilon $ small to be chosen later, and inspired by the ideas of the authors in \cite{GS08}, we then define the auxiliary function: \begin{equation} \mathscr{L}\left( t\right) =\mathscr{H}\left( t\right) +\varepsilon \int_{\Omega }u_{t}udx+\varepsilon \int_{\Gamma _{1}}u_{t}ud\Gamma +\frac{ \varepsilon \alpha }{2}\left\Vert \nabla u\right\Vert _{2}^{2}. \label{defL} \end{equation} Let us remark that $\mathscr{L}$ is a small perturbation of the energy. By taking the time derivative of (\ref{defL}), using problem (\ref{ondes}), we obtain: \begin{eqnarray} \frac{d\mathscr{L}(t)}{dt} &=&\alpha \left\Vert \nabla u_{t}\right\Vert _{2}^{2}+\int_{\Gamma _{1}}h\left( u_{t}\right) u_{t}d\Gamma +\varepsilon \left\Vert u_{t}\right\Vert _{2}^{2}-\varepsilon \left\Vert \nabla u\right\Vert _{2}^{2} \notag \\ &&+\varepsilon \left\Vert u\right\Vert _{p}^{p}+\varepsilon \left\Vert u_{t}\right\Vert _{2,\Gamma _{1}}^{2}-\varepsilon \int_{\Gamma _{1}}h\left( u_{t}\right) u(x,t)d\sigma \notag \\ &&+\int_{\Omega }\nabla u\left( t\right) \int_{0}^{t}g\left( t-s\right) \nabla u\left( s\right) dsdx. 
\label{derivL2} \end{eqnarray} By making use of (\ref{Assumption_h}) and the following Young's inequality \begin{equation} XY\leq \frac{\lambda ^{\mu }X^{\mu }}{\mu }+\frac{\lambda ^{-\nu }Y^{\nu }}{\nu }, \label{Young_inequality} \end{equation} $X,\,Y\geq 0,\;\lambda >0,\;\mu ,\,\nu \in \mathbb{R^{+}}$ such that $ 1/\mu +1/\nu =1,$ then we get \begin{eqnarray} \int_{\Gamma _{1}}h\left( u_{t}\right) ud\Gamma &\leq &C_{m}\int_{\Gamma _{1}}\left\vert u_{t}\right\vert ^{m-2}u_{t}ud\Gamma \notag \\ &\leq &C_{m}\frac{\lambda ^{m}}{m}\left\Vert u\right\Vert _{m,\Gamma _{1}}^{m}+C_{m}\frac{m-1}{m}\lambda ^{-m/\left( m-1\right) }\left\Vert u_{t}\right\Vert _{m,\Gamma _{1}}^{m}. \label{Young_1} \end{eqnarray} Now, the term involving $g$ on the right-hand side of (\ref{derivL2}) can be written as \begin{eqnarray} \displaystyle\int_{\Omega }\nabla u\left( t,x\right) \displaystyle &&\hspace*{-1cm}\int_{0}^{t}g\left( t-s\right) \nabla u\left( s,x\right) dsdx =\Vert \nabla u\left( t\right) \Vert_{2}^{2}\left( \displaystyle\int_{0}^{t}g\left( s\right) ds\right) \label{Las_term_estimate} \\ &+&\displaystyle\int_{\Omega }\nabla u\left( t,x\right) \displaystyle\int_{0}^{t}g\left( t-s\right) \left( \nabla u\left( s,x\right) -\nabla u\left( t,x\right) \right) dsdx \notag . \end{eqnarray} On the other hand, by using H\"{o}lder's and Young's inequalities, we infer that for all $\mu >0,$ we get \begin{equation} \left. \begin{array}{l} \displaystyle\int_{\Omega }\nabla u\left( t,x\right) \displaystyle \int_{0}^{t}g\left( t-s\right) \left( \nabla u\left( s,x\right) -\nabla u\left( t,x\right) \right) dsdx \\ \leq \displaystyle\int_{0}^{t}g\left( t-s\right) \Vert \nabla u\left( t\right) \Vert _{2}\Vert \nabla u\left( s\right) -\nabla u\left( t\right) \Vert _{2} ds \\ \leq \mu \left( g\diamond u\right) \left( t\right) +\dfrac{1}{4\mu }\left( \displaystyle\int_{0}^{t}g\left( s\right) ds\right) \Vert \nabla u\left( t\right) \Vert _{2}^{2}. \end{array} \right. 
\label{mu_inequality} \end{equation} Inserting the estimates (\ref{Young_1}) and (\ref{Las_term_estimate}) into ( \ref{derivL2}), taking into account the inequality (\ref{mu_inequality}) and making use of (\ref{Assumption_h}), we obtain by choosing $\mu =1/2$ and multiplying by $l$ \begin{eqnarray} l\mathscr{L}^{\prime }\left( t\right) &\geq &\alpha l\left\Vert \nabla u_{t}\right\Vert _{2}^{2}+l\left( c_{m}-C_{m}\varepsilon \frac{m-1}{m} \lambda ^{-m/\left( m-1\right) }\right) \left\Vert u_{t}\right\Vert _{m,\Gamma _{1}}^{m}+\varepsilon l\left\Vert u_{t}\right\Vert _{2}^{2} \notag \\ &&+\varepsilon l\left\Vert u\right\Vert _{p}^{p}+\varepsilon l\left\Vert u_{t}\right\Vert _{2,\Gamma _{1}}^{2}-C_{m}\varepsilon l\frac{\lambda ^{m}}{m }\left\Vert u\right\Vert _{m,\Gamma _{1}}^{m} \label{dL_dt_2} \\ &&-\frac{\varepsilon l}{2}\left( g\diamond u\right) \left( t\right) -\varepsilon l\Vert \nabla u\left( t\right) \Vert _{2}^{2}. \notag \end{eqnarray} We want now to estimate the term involving $\left\Vert u\right\Vert _{m,\Gamma _{1}}^{m}$ in (\ref{dL_dt_2}). We proceed as in \cite{GS08}. Then, we have \begin{equation*} \left\Vert u\right\Vert _{m,\Gamma _{1}}\leq C\left\Vert u\right\Vert _{H^{s}(\Omega )}, \end{equation*} which holds for: \begin{equation*} m\geq 1\quad \mbox{ and }\quad 0<s<1,\quad s\geq \frac{N}{2}-\frac{N-1}{m}>0 , \end{equation*} where $C$ here and in the sequel denotes a generic positive constant which may change from line to line. Recalling the interpolation and Poincar\'{e}'s inequalities (see \cite{LM68}) \begin{eqnarray*} \left\Vert u\right\Vert _{H^{s}(\Omega )} &\leq &C\left\Vert u\right\Vert _{2}^{1-s}\left\Vert \nabla u\right\Vert _{2}^{s} ,\\ &\leq &C\left\Vert u\right\Vert _{p}^{1-s}\left\Vert \nabla u\right\Vert_{2}^{s}, \end{eqnarray*} we finally have the following inequality: \begin{equation} \left\Vert u\right\Vert _{m,\Gamma _{1}}\leq C\left\Vert u\right\Vert _{p}^{1-s}\left\Vert \nabla u\right\Vert _{2}^{s}. 
\label{u_m_estimate} \end{equation} If $s<2/m$, using again Young's inequality, we get: \begin{equation} \left\Vert u\right\Vert _{m,\Gamma _{1}}^{m}\leq C\left[ \left( \left\Vert u\right\Vert _{p}^{p}\right) ^{\frac{m\left( 1-s\right) \mu }{p}}+\left( \left\Vert \nabla u\right\Vert _{2}^{2}\right) ^{\frac{ms\theta }{2}}\right] \label{estiGamma1} \end{equation} for $1/\mu +1/\theta =1.$ Here we choose $\theta =2/ms,$ to get $\mu =2/\left( 2-ms\right) $. Therefore the previous inequality becomes: \begin{equation} \left\Vert u\right\Vert _{m,\Gamma _{1}}^{m}\leq C\left[ \left( \left\Vert u\right\Vert _{p}^{p}\right) ^{\frac{m\left( 1-s\right) 2}{\left( 2-ms\right) p}}+\left\Vert \nabla u\right\Vert _{2}^{2}\right] . \label{normGamma1} \end{equation} Now, choosing $s$ such that: \begin{equation*} 0<s\leq \frac{2\left( p-m\right) }{m\left( p-2\right) }, \end{equation*} we get: \begin{equation} \frac{2m\left( 1-s\right) }{\left( 2-ms\right) p}\leq 1. \label{choicesm} \end{equation} Once the inequality (\ref{choicesm}) is satisfied, we use the classical algebraic inequality: \begin{equation} z^{\nu }\leq \left( z+1\right) \leq \left( 1+\frac{1}{\omega }\right) \left( z+\omega \right) \;,\quad \forall z\geq 0\;,\quad 0<\nu \leq 1\;,\quad \omega \geq 0, \label{Algebraic_inequality} \end{equation} to obtain the following estimate: \begin{eqnarray} \left( \left\Vert u\right\Vert _{p}^{p}\right) ^{\frac{m\left( 1-s\right) 2}{ \left( 2-ms\right) p}} &\leq &d\left( \left\Vert u\right\Vert _{p}^{p}+ \mathscr{H}\left( 0\right) \right) \notag \label{normeLp} \\ &\leq &d\left( \left\Vert u\right\Vert _{p}^{p}+\mathscr{H}\left( t\right) \right) \;,\quad \forall t\geq 0 , \end{eqnarray} where we have set $d=1+1/\mathscr{H}(0)$. 
Inserting the estimate (\ref{normeLp}) into (\ref{estiGamma1}), we obtain the following important inequality: \begin{equation} \left\Vert u\right\Vert _{m,\Gamma _{1}}^{m}\leq C\left[ \left\Vert u\right\Vert _{p}^{p}+l\left\Vert \nabla u\right\Vert _{2}^{2}+\mathscr{H} \left( t\right) \right] . \label{L_gam_m-norm} \end{equation} Keeping in mind that $ l=1-\int_{0}^{\infty }g\left( s\right) ds$, in order to control the term $\left\Vert \nabla u\right\Vert _{2}^{2}$ in equation (\ref{dL_dt_2}), we prefer to use (as $\mathscr{H}(t)>0$) the following estimate: \begin{equation*} \left\Vert u\right\Vert _{m,\Gamma _{1}}^{m}\leq C\left[ \left\Vert u\right\Vert _{p}^{p}+l\left\Vert \nabla u\right\Vert _{2}^{2}+2\mathscr{H} \left( t\right) \right] , \end{equation*} which finally gives: \begin{eqnarray} \left\Vert u\right\Vert _{m,\Gamma _{1}}^{m} &\leq &C\left[ 2E_{2}+\left( 1+ \frac{2}{p}\right) \left\Vert u\right\Vert _{p}^{p}-\left\Vert u_{t}\right\Vert _{2}^{2}-\left\Vert u_{t}\right\Vert _{2,\Gamma _{1}}^{2}\right. \notag \\ &&\left. +\left( l-\Big(1-\int_{0}^{t}g\left( s\right) ds\Big)\right) \left\Vert \nabla u\right\Vert _{2}^{2}-\left( g\diamond u\right) \left( t\right) \right] . \label{umgama1} \end{eqnarray} Since $ 1-\int_{0}^{t}g\left( s\right) ds\geq l$, we obtain from above \begin{equation} \left\Vert u\right\Vert _{m,\Gamma _{1}}^{m}\leq C\left[ 2E_{2}+\left( 1+ \frac{2}{p}\right) \left\Vert u\right\Vert _{p}^{p}-\left\Vert u_{t}\right\Vert _{2}^{2}-\left\Vert u_{t}\right\Vert _{2,\Gamma _{1}}^{2}-\left( g\diamond u\right) \left( t\right) \right] .
\label{boundary_important_estimate} \end{equation} Now, inserting (\ref{boundary_important_estimate}) into (\ref{dL_dt_2}), then we infer that: \begin{eqnarray} l\mathscr{L}^{\prime }\left( t\right) &\geq &\alpha l\left\Vert \nabla u_{t}\right\Vert _{2}^{2}+l\left( c_{m}-C_{m}\varepsilon \frac{m-1}{m} \lambda ^{-m/\left( m-1\right) }\right) \left\Vert u_{t}\right\Vert _{m,\Gamma _{1}}^{m} \notag \\ &&+\varepsilon \left( l+lC_{m}\frac{\lambda ^{m}}{m}C\right) \left\Vert u_{t}\right\Vert _{2}^{2}+\varepsilon \left( l+lC_{m}\frac{\lambda ^{m}}{m} C\right) \left\Vert u_{t}\right\Vert _{2,\Gamma _{1}}^{2} \notag \\ &&+\varepsilon \left\{ l-C_{m}l\frac{\lambda ^{m}}{m}C\left( 1+\frac{2}{p} \right) \right\} \left\Vert u\right\Vert _{p}^{p} \label{dL_dt_4} \\ &&+\varepsilon \left( C_{m}l\frac{\lambda ^{m}}{m}C-\frac{l}{2}\right) \left( g\diamond u\right) \left( t\right) -\varepsilon l\Vert \nabla u\left( t\right) \Vert _{2}^{2}-2C_{m}\varepsilon l\frac{\lambda ^{m}}{m}CE_{2} \ . \notag \end{eqnarray} From (\ref{function_H}), we get \begin{eqnarray*} \mathscr{H}\left( t\right) &\leq &E_{2}-\dfrac{1}{2}\left( 1-\displaystyle \int_{0}^{t}g\left( s\right) ds\right) \left\Vert \nabla u\left( t\right) \right\Vert _{2}^{2} \\ &&-\dfrac{1}{2}\left( g\diamond u\right) \left( t\right) +\dfrac{1}{p} \left\Vert u\left( t\right) \right\Vert _{p}^{p} \\ &\leq &E_{2}-\frac{l}{2}\left\Vert \nabla u\left( t\right) \right\Vert _{2}^{2}-\dfrac{1}{2}\left( g\diamond u\right) \left( t\right) +\dfrac{1}{p} \left\Vert u\left( t\right) \right\Vert _{p}^{p}. \end{eqnarray*} This last inequality gives \begin{equation} -l\left\Vert \nabla u\left( t\right) \right\Vert _{2}^{2}\geq 2\left( \mathscr{H}\left( t\right) -E_{2}+\dfrac{1}{2}\left( g\diamond u\right) \left( t\right) -\dfrac{1}{p}\left\Vert u\left( t\right) \right\Vert _{p}^{p}\right) . 
\label{Gradient_inequality} \end{equation} Consequently, (\ref{dL_dt_4}) takes the form \begin{eqnarray} l\mathscr{L}^{\prime }\left( t\right) &\geq &\alpha l\left\Vert \nabla u_{t}\right\Vert _{2}^{2}+l\left( c_{m}-C_{m}\varepsilon \frac{m-1}{m} \lambda ^{-m/\left( m-1\right) }\right) \left\Vert u_{t}\right\Vert _{m,\Gamma _{1}}^{m} \notag \\ &&+\varepsilon \left( l+lC_{m}\frac{\lambda ^{m}}{m}C\right) \left\Vert u_{t}\right\Vert _{2}^{2}+\varepsilon \left( l+lC_{m}\frac{\lambda ^{m}}{m} C\right) \left\Vert u_{t}\right\Vert _{2,\Gamma _{1}}^{2} \notag \\ &&+\varepsilon \left\{ l-\frac{2}{p}-C_{m}l\frac{\lambda ^{m}}{m}C\left( 1+ \frac{2}{p}\right) \right\} \left\Vert u\right\Vert _{p}^{p}-2\varepsilon E_{2} \label{dL_dt_5} \\ &&+\varepsilon \left( C_{m}l\frac{\lambda ^{m}}{m}C-\frac{l}{2}+1\right) \left( g\diamond u\right) \left( t\right) +2\varepsilon \mathscr{H}\left( t\right) -2C_{m}\varepsilon l\frac{\lambda ^{m}}{m}CE_{2}. \notag \end{eqnarray} Now to estimate the terms involving $\left\Vert u\right\Vert _{p}^{p}$ and $E_{2}$ in (\ref{dL_dt_5}), we simply write: $$ \left( l-\frac{2}{p}\right) \left\Vert u\right\Vert_{p}^{p}-2E_{2} = \left( l-\frac{2}{p}\right) \left\Vert u\right\Vert _{p}^{p}-2E_{2} \frac{\Vert u\Vert_{p}^p} {\Vert u\Vert_{p}^p} \ .$$ Then by using (\ref{result_Vitillaro_2}), we get: \begin{equation*} \left( l-\frac{2}{p}\right) \left\Vert u\right\Vert _{p}^{p}-2E_{2}\geq c_{1}\left\Vert u\right\Vert _{p}^{p}, \end{equation*} where $c_{1} > 0$ is defined in Remark \ref{remarkc1}. 
Thus, (\ref{dL_dt_5}) becomes: \begin{eqnarray} l\mathscr{L}^{\prime }\left( t\right) &\geq &\alpha l\left\Vert \nabla u_{t}\right\Vert _{2}^{2}+l\left( c_{m}-C_{m}\varepsilon \frac{m-1}{m} \lambda ^{-m/\left( m-1\right) }\right) \left\Vert u_{t}\right\Vert _{m,\Gamma _{1}}^{m} \notag \\ &&+\varepsilon \left( l+lC_{m}\frac{\lambda ^{m}}{m}C\right) \left\Vert u_{t}\right\Vert _{2}^{2}+\varepsilon \left( l+lC_{m}\frac{\lambda ^{m}}{m} C\right) \left\Vert u_{t}\right\Vert _{2,\Gamma _{1}}^{2} \notag \\ &&+\varepsilon \left\{ c_{1}-C_{m}l\frac{\lambda ^{m}}{m}C\left( 1+\frac{2}{p }\right) -4C_{m}\varepsilon l\frac{\lambda ^{m}}{m}CE_{2}\left( B_{1}\alpha _{2}\right) ^{-p}\right\} \left\Vert u\right\Vert _{p}^{p} \label{dL_dt_6} \\ &&+\varepsilon \left( C_{m}l\frac{\lambda ^{m}}{m}C-\frac{l}{2}+1\right) \left( g\diamond u\right) \left( t\right) +2\varepsilon \left( \mathscr{H} \left( t\right) +C_{m}l\frac{\lambda ^{m}}{m}CE_{2}\right) . \notag \end{eqnarray} Notice that since $l < 1$, we first have : $$ \forall \, \lambda > 0\,,\, C_{m}l\dfrac{\lambda^{m}}{m}C-\dfrac{l}{2}+1>0 \ .$$ At this point, we pick $\lambda $ small enough such that: \begin{equation*} c_{1}-C_{m}l\dfrac{\lambda ^{m}}{m}C\left( 1+\dfrac{2}{p}\right) -4C_{m}\varepsilon l\dfrac{\lambda ^{m}}{m}CE_{2}\left( B_{1}\alpha _{2}\right) ^{-p}>0 \ . \end{equation*} Once $\lambda $ is fixed, we may choose $\varepsilon $ small enough such that \begin{equation*} \left\{ \begin{array}{l} c_{m}-C_{m}\varepsilon \dfrac{m-1}{m}\lambda ^{-m/\left( m-1\right) }>0, \\ \mathscr{L}\left( 0\right) >0. \end{array} \right. \end{equation*} Consequently, we end up with the estimate: \begin{equation} \mathscr{L}^{\prime }\left( t\right) \geq \eta _{1}\left( \left\Vert u_{t}\right\Vert _{2}^{2}+\left\Vert u_{t}\right\Vert _{2,\Gamma _{1}}^{2}+\left\Vert u\right\Vert _{p}^{p}+\mathscr{H}\left( t\right) +E_{2}\right) ,\quad \forall t\geq 0 \ . 
\label{First_main_inequality} \end{equation} Next, it is clear that, by Young's and Poincar\'{e}'s inequalities, we have: \begin{equation} \mathscr{L}\left( t\right) \leq \gamma \left[ \mathscr{H}\left( t\right) +\left\Vert u_{t}\right\Vert _{2}^{2}+\left\Vert u_{t}\right\Vert _{2,\Gamma _{1}}^{2}+\left\Vert \nabla u\right\Vert _{2}^{2}\right] \,\mbox{ for some } \gamma >0. \label{estiL1} \end{equation} Since $\mathscr{H}(t)>0$, for all $t\geq 0$ we have: \begin{equation} \frac{l}{2}\left\Vert \nabla u\right\Vert _{2}^{2}\leq \frac{1}{p}\left\Vert u\right\Vert _{p}^{p}+E_{2}. \label{Dradient_main_estimate} \end{equation} Thus, the inequality (\ref{estiL1}) becomes: \begin{equation} \mathscr{L}\left( t\right) \leq \zeta \left[ \mathscr{H}(t)+\left\Vert u_{t}\right\Vert _{2}^{2}+\left\Vert u_{t}\right\Vert _{2,\Gamma _{1}}^{2}+\left\Vert u\right\Vert _{p}^{p}+E_{2}\right] \,\, \mbox{ for some }\zeta >0. \label{estiL1bis} \end{equation} From the two inequalities (\ref{First_main_inequality}) and (\ref{estiL1bis}), we finally obtain the differential inequality: \begin{equation} \frac{d\mathscr{L}\left( t\right) }{dt}\geq \mu \mathscr{L}\left( t\right) \,\,\mbox{ for some }\mu >0. \label{diffineq} \end{equation} An integration of the previous differential inequality (\ref{diffineq}) between $0$ and $t$ gives the following estimate for the function $\mathscr{L}$: \begin{equation} \mathscr{L}\left( t\right) \geq \mathscr{L}\left( 0\right) e^{\mu t}. \label{estiL2} \end{equation} On the other hand, from the definition of the function $\mathscr{L}$, from inequality (\ref{H_inequality}) and for small values of the parameter $\varepsilon$, it follows that: \begin{equation} \mathscr{L}\left( t\right) \leq \frac{1}{p}\left\Vert u\right\Vert _{p}^{p}. \label{estiLp2} \end{equation} From the two inequalities (\ref{estiL2}) and (\ref{estiLp2}) we conclude the exponential growth of the solution in the $L^{p}$-norm.
\end{proof} \section{Blow-up in finite time for $\protect\alpha=0$} \label{blow_up_section} In this section, we prove that in the absence of the strong damping $-\Delta u_{t}$ (i.e. $\alpha =0$), the solution of problem (\ref{ondes}) blows up in finite time; that is, there exists $0<T^{\ast}<\infty $ such that $\left\Vert u(t)\right\Vert_{p}\rightarrow \infty $ as $t\rightarrow T^{\ast }$. The blow-up result reads as follows: \begin{theorem} \label{blow_up_} Suppose that (\ref{hypothesis_g}), (\ref{hypothesis_g_2}) and (\ref{Assumption_h}) hold. Assume that \begin{equation*} 2<m\quad \text{and}\quad \max \left( m,2/l\right) <p\leq \overline{p}. \end{equation*} Then, the solution of (\ref{ondes}) satisfying \begin{equation} E\left( 0\right) <E_{2},\qquad \left\Vert \nabla u_{0}\right\Vert _{2}\geq \alpha _{1}, \end{equation} blows up in finite time. That is, $\left\Vert u\left( t\right) \right\Vert _{p}\rightarrow \infty $ as $t\rightarrow T^{\ast }$ for some $0<T^{\ast }<\infty $. \end{theorem} \begin{remark} The requirement $m>2$ in Theorem \ref{blow_up_} is technical but it seems necessary in our proof. The case $m=2$ cannot be handled with the method we use here. But the same result can be shown for $m=2$ by using the concavity method. See \cite{GS082} for more details. \end{remark} \begin{proof}[Proof of Theorem \ref{blow_up_}] To prove Theorem \ref{blow_up_}, we suppose that the solution exists for all time and we reach a contradiction.
Following the idea introduced in \cite{GT94} and developed in \cite{MS041} and \cite{V99}, we will define a function $\hat{L}$ which is a perturbation of the total energy of the system and which will satisfy the differential inequality \begin{equation} \frac{d\hat{L}\left( t\right) }{dt}\geq \xi \hat{L}^{1+\nu }\left( t\right) \ , \label{Georgiev_inequality} \end{equation} where $\nu >0.$ Inequality (\ref{Georgiev_inequality}) leads to blow-up of the solution in a finite time $T^{\ast }\leq \hat{L}\left( 0\right) ^{-\nu }\xi ^{-1}\nu ^{-1}$, provided that $\hat{L}\left( 0\right) >0.$ To do so, we define the functional $\hat{L}$ as follows: \begin{equation} \hat{L}\left( t\right) =\mathscr{H}^{1-\sigma }(t)+\epsilon \int_{\Omega }u_{t}udx+\epsilon \int_{\Gamma _{1}}u_{t}ud\Gamma , \label{L_hat} \end{equation} where the functional $\mathscr{H}$ is defined in (\ref{function_H}), $\sigma $ satisfies \begin{equation} 0<\sigma \leq \min \left( \frac{p-m}{p\left( m-1\right) },\frac{p-2}{2p}, \frac{m-2}{2m},\hat{\sigma}\right) , \label{Segma} \end{equation} where $\hat{\sigma}$ is defined later in (\ref{sigma_hat}) and $\epsilon $ is a small positive constant to be chosen later.
Taking the time derivative of $ \hat{L}(t)$ and following the same steps as in the proof of Theorem \ref {blow_up_viscoel}, we get (instead of inequality (\ref{dL_dt_2})), for all $\lambda > 0$, \begin{eqnarray} l\hat{L}^{\prime }\left( t\right) &\geq &lc_{m}\left( 1-\sigma \right) \mathscr{H}^{-\sigma }\left( t\right) \left\Vert u_{t}\right\Vert _{m,\Gamma _{1}}^{m}-C_{m}\epsilon \frac{m-1}{m}\lambda ^{-m/\left( m-1\right) }\left\Vert u_{t}\right\Vert _{m,\Gamma _{1}}^{m}+\epsilon l\left\Vert u_{t}\right\Vert _{2}^{2} \notag \\ &&+\epsilon l\left\Vert u\right\Vert _{p}^{p}+\epsilon l\left\Vert u_{t}\right\Vert _{2,\Gamma _{1}}^{2}-C_{m}\epsilon l\frac{\lambda ^{m}}{m} \left\Vert u\right\Vert _{m,\Gamma _{1}}^{m} \label{dL_hat_dt_1} \\ &&-\frac{\epsilon l}{2}\left( g\diamond u\right) \left( t\right) -\epsilon l\Vert \nabla u\left( t\right) \Vert _{2}^{2}. \notag \end{eqnarray} Next, for large positive $M$, we select $\lambda ^{-m/\left( m-1\right) }=M \mathscr{H}^{-\sigma }\left( t\right) $. Then the estimate (\ref{dL_hat_dt_1}) takes the form: \begin{eqnarray} l\hat{L}^{\prime }\left( t\right) &\geq &\left( lc_{m}\left( 1-\sigma \right) -MC_{m}\epsilon \frac{m-1}{m}\right) \mathscr{H}^{-\sigma }\left( t\right) \left\Vert u_{t}\right\Vert _{m,\Gamma _{1}}^{m}+\epsilon l\left\Vert u_{t}\right\Vert _{2}^{2} \notag \\ &&+\epsilon l\left\Vert u\right\Vert _{p}^{p}+\epsilon l\left\Vert u_{t}\right\Vert _{2,\Gamma _{1}}^{2}-C_{m}\epsilon l\frac{M^{-\left( m-1\right) }}{m}\mathscr{H}^{\sigma \left( m-1\right) }\left( t\right) \left\Vert u\right\Vert _{m,\Gamma _{1}}^{m} \label{dL_hat_dt_2} \\ &&-\frac{\epsilon l}{2}\left( g\diamond u\right) \left( t\right) -\epsilon l\Vert \nabla u\left( t\right) \Vert _{2}^{2}.
\notag \end{eqnarray} Exploiting (\ref{H_inequality}) and (\ref{u_m_estimate}), we get: \begin{equation*} \mathscr{H}^{\sigma \left( m-1\right) }\left\Vert u\right\Vert _{m,\Gamma _{1}}^{m}\leq C\left\Vert u\right\Vert _{p}^{\left( 1-s\right) m+\sigma p\left( m-1\right) }\left\Vert \nabla u\right\Vert _{2}^{sm} \ . \end{equation*} Thus, as in Section \ref{Exponential_growth_section}, we have \begin{equation*} \left\Vert u\right\Vert _{p}^{\left( 1-s\right) m+\sigma p\left( m-1\right) }\left\Vert \nabla u\right\Vert _{2}^{sm}\leq C\left[ \left( \left\Vert u\right\Vert _{p}^{p}\right) ^{\left( \frac{m\left( 1-s\right) }{p}+\sigma \left( m-1\right) \right) \mu }+\left( \left\Vert \nabla u\right\Vert _{2}^{2}\right) ^{\frac{ms\theta }{2}}\right] . \end{equation*} Choosing $\mu ,\,\theta ,$ and $s$ exactly as in Section \ref{Exponential_growth_section} (with strict inequalities), we choose $\sigma $ satisfying: \begin{equation} \sigma \leq \frac{2-ms}{2\left( m-1\right) }\left( 1-\frac{2m\left( 1-s\right) }{\left( 2-ms\right) p}\right) =\hat{\sigma}. \label{sigma_hat} \end{equation} The hypotheses on $m$ and $p$ ensure that $0 < \sigma < 1$. Consequently, we get from above: \begin{equation} \mathscr{H}^{\sigma \left( m-1\right) }\left\Vert u\right\Vert _{m,\Gamma _{1}}^{m}\leq C\left[ \left( \left\Vert u\right\Vert _{p}^{p}\right) ^{\left( \frac{m\left( 1-s\right) }{p}+\sigma \left( m-1\right) \right) \mu }+\left\Vert \nabla u\right\Vert _{2}^{2}\right] .
\label{H_sigma_inequality} \end{equation} Since, \begin{equation*} \left( \frac{m\left( 1-s\right) }{p}+\sigma \left( m-1\right) \right) \frac{2 }{2-ms}\leq 1, \end{equation*} applying the algebraic inequality (\ref{Algebraic_inequality}), we get: \begin{eqnarray} \left( \left\Vert u\right\Vert _{p}^{p}\right) ^{\left( \frac{m\left( 1-s\right) }{p}+\sigma \left( m-1\right) \right) \frac{2}{2-ms}} &\leq &d\left( \left\Vert u\right\Vert _{p}^{p}+\mathscr{H}\left( 0\right) \right) \notag \\ &\leq &d\left( \left\Vert u\right\Vert _{p}^{p}+\mathscr{H}\left( t\right) \right) \;,\quad \forall t\geq 0 \ .\label{u_p_inequality_2} \end{eqnarray} Thus, (\ref{u_p_inequality_2}) together with (\ref{H_sigma_inequality}) leads to (see (\ref{boundary_important_estimate})): \begin{eqnarray} \mathscr{H}^{\sigma \left( m-1\right) }\left\Vert u\right\Vert _{m,\Gamma _{1}}^{m} &\leq &Cd\left[ \left\Vert u\right\Vert _{p}^{p}+l\left\Vert \nabla u\right\Vert _{2}^{2}+\mathscr{H}\left( t\right) \right] \notag \\ &\leq &Cd\left[ 2E_{2}+\left( 1+\frac{2}{p}\right) \left\Vert u\right\Vert _{p}^{p}-\left\Vert u_{t}\right\Vert _{2}^{2}-\left\Vert u_{t}\right\Vert _{2,\Gamma _{1}}^{2}-\left( g\diamond u\right) \left( t\right) \right] . 
\label{H_sigma_m_inequality} \end{eqnarray} Inserting (\ref{H_sigma_m_inequality}) into (\ref{dL_hat_dt_2}) and using ( \ref{Gradient_inequality}), we obtain: \begin{eqnarray} l\hat{L}^{\prime}(t)&\geq &\left( lc_{m}\left( 1-\sigma \right) -MC_{m}\epsilon \frac{m-1}{m}\right) \mathscr{H}^{-\sigma }\left( t\right) \left\Vert u_{t}\right\Vert _{m,\Gamma _{1}}^{m} \notag \\ &+~\epsilon& l\left( 1+C_{m}\epsilon \frac{M^{-\left( m-1\right) }}{m} Cd\right) \left\{ \left\Vert u_{t}\right\Vert _{2}^{2}+\left\Vert u_{t}\right\Vert _{2,\Gamma _{1}}^{2}\right\} +2\epsilon \mathscr{H}\left( t\right) -2\epsilon E_{2} \label{dL_hat_dt_3} \\ &+~\epsilon&\left\{ l-\frac{2}{p}-C_{m}l\frac{M^{-\left( m-1\right) }}{m} Cd\left( 1+\frac{2}{p}\right) \right\} \left\Vert u\right\Vert _{p}^{p}+C_{m}\epsilon l\frac{M^{-\left( m-1\right) }}{m}Cd\left( g\diamond u\right) \left( t\right) \notag\\ &-2~\epsilon& C_{m} l\frac{M^{-\left( m-1\right) }}{m}CdE_{2}+\epsilon \left( 1-\frac{l}{2}\right) \left( g\diamond u\right) \left( t\right) . 
\notag \end{eqnarray} Writing again $E_{2} = E_{2} {\Vert u\Vert_{p}^p}/{\Vert u\Vert_{p}^p}$ and using again (\ref{result_Vitillaro_2}), we deduce that: \begin{eqnarray*} l\hat{L}^{\prime }\left( t\right)& \geq& \left( lc_{m}\left( 1-\sigma \right) -MC_{m}\epsilon \frac{m-1}{m}\right) \mathscr{H}^{-\sigma }\left( t\right) \left\Vert u_{t}\right\Vert _{m,\Gamma _{1}}^{m} \\ &+~\epsilon& l\left( 1+C_{m}\epsilon \frac{M^{-\left( m-1\right) }}{m} Cd\right) \left\{ \left\Vert u_{t}\right\Vert _{2}^{2}+\left\Vert u_{t}\right\Vert _{2,\Gamma _{1}}^{2}\right\} +2\epsilon \mathscr{H}\left( t\right) +2C_{m}\epsilon l\frac{M^{-\left( m-1\right) }}{m}CdE_{2} \\ &+~\epsilon& \left\{ l-\frac{2}{p}-2E_{2}\left( B_{1}\alpha _{2}\right) ^{-p}-C_{m}l\frac{M^{-\left( m-1\right) }}{m}Cd\left( 1+\frac{2}{p}\right) -4C_{m}l\frac{M^{-\left( m-1\right) }}{m}CdE_{2}\left( B_{1}\alpha _{2}\right) ^{-p}\right\} \left\Vert u\right\Vert _{p}^{p} \\ &+~\epsilon&C_{m} l\frac{M^{-\left( m-1\right) }}{m}Cd\left( g\diamond u\right) \left( t\right) . \end{eqnarray*} Thus, using the definition of $c_{1}$ in Remark \ref{remarkc1}, we get: \begin{eqnarray*} l\hat{L}^{\prime }\left( t\right) &\geq &\left( lc_{m}\left( 1-\sigma \right) -MC_{m}\epsilon \frac{m-1}{m}\right) \mathscr{H}^{-\sigma }\left( t\right) \left\Vert u_{t}\right\Vert _{m,\Gamma _{1}}^{m} \\ &+~\epsilon& l\left( 1+C_{m}\epsilon \frac{M^{-\left( m-1\right) }}{m} Cd\right) \left\{ \left\Vert u_{t}\right\Vert _{2}^{2}+\left\Vert u_{t}\right\Vert _{2,\Gamma _{1}}^{2}\right\} +2\epsilon \mathscr{H}\left( t\right) +2C_{m}\epsilon l\frac{M^{-\left( m-1\right) }}{m}CdE_{2} \\ &+~\epsilon& \left\{c_{1}-C_{m}l\frac{M^{-\left( m-1\right) }}{m}Cd\left( 1+\frac{2}{p}\right) -4C_{m}l\frac{M^{-\left( m-1\right) }}{m}CdE_{2}\left( B_{1}\alpha _{2}\right) ^{-p}\right\} \left\Vert u\right\Vert _{p}^{p} \\ &+~\epsilon&C_{m}l\frac{M^{-\left( m-1\right) }}{m}Cd\left( g\diamond u\right) \left( t\right) . 
\end{eqnarray*} Since $c_{1} > 0$, we choose $M$ large enough such that: \begin{equation*} c_{1}-C_{m}l\frac{M^{-\left( m-1\right) }}{m}Cd\left( 1+\frac{2}{p}\right) -4C_{m}l\frac{M^{-\left( m-1\right) }}{m}CdE_{2}\left( B_{1}\alpha _{2}\right) ^{-p}>0. \end{equation*} Once $M$ is fixed, we pick $\epsilon $ small enough such that \begin{equation*} lc_{m}\left( 1-\sigma \right) -MC_{m}\epsilon \frac{m-1}{m}>0 \end{equation*} and $\hat{L}\left( 0\right) >0$. This leads to \begin{equation} \hat{L}^{\prime }\left( t\right) \geq \hat{\eta}\left( \left\Vert u_{t}\right\Vert _{2}^{2}+\left\Vert u_{t}\right\Vert _{2,\Gamma _{1}}^{2}+ \mathscr{H}\left( t\right) +\left\Vert u\right\Vert _{p}^{p}+E_{2}\right) \label{L_hat_prime} \end{equation} for some $\hat{\eta}>0.$ On the other hand, it is clear from the definition (\ref{L_hat}) that we have: \begin{equation} \hat{L}^{\frac{1}{1-\sigma }}\left( t\right) \leq C\left( \epsilon ,\sigma \right) \left[ \mathscr{H}\left( t\right) +\left( \int_{\Omega }u_{t}\,udx\right) ^{\frac{1}{1-\sigma }}+\left( \int_{\Gamma _{1}}u_{t}ud\Gamma \right) ^{\frac{1}{1-\sigma }}\right] \ . \label{L_second_estimate} \end{equation} By the Cauchy-Schwarz inequality and H\"{o}lder's inequality, we have: \begin{eqnarray*} \int_{\Omega }u_{t}udx &\leq &\left( \int_{\Omega }u_{t}^{2}dx\right) ^{ \frac{1}{2}}\left( \int_{\Omega }u^{2}dx\right) ^{\frac{1}{2}} \\ &\leq &C\left( \int_{\Omega }u_{t}^{2}dx\right) ^{\frac{1}{2}}\left( \int_{\Omega }\left\vert u\right\vert ^{p}dx\right) ^{\frac{1}{p}}, \end{eqnarray*} where $C$ is the positive constant which comes from the embedding $ L^{p}\left( \Omega \right) \hookrightarrow L^{2}\left( \Omega \right) $.
This inequality implies that there exists a positive constant $C_{1}>0$ such that: \begin{equation*} \left( \int_{\Omega }u_{t}udx\right) ^{\frac{1}{1-\sigma }}\leq C_{1}\left[ \left( \int_{\Omega }\left\vert u\right\vert ^{p}dx\right) ^{\frac{1}{ (1-\sigma )p}}\left( \int_{\Omega }u_{t}^{2}dx\right) ^{\frac{1}{2(1-\sigma ) }}\right] . \end{equation*} Applying Young's inequality to the right hand-side of the preceding inequality, there exists a positive constant also denoted $C>0$ such that: \begin{equation} \left( \int_{\Omega }u_{t}udx\right) ^{\frac{1}{1-\sigma }}\leq C\left[ \left( \int_{\Omega }\left\vert u\right\vert ^{p}dx\right) ^{\frac{\tau }{ (1-\sigma )p}}+\left( \int_{\Omega }u_{t}^{2}dx\right) ^{\frac{\theta }{ 2(1-\sigma )}}\right] , \label{Young_main} \end{equation} for $1/\tau +1/\theta =1$. We take $\theta =2(1-\sigma )$, hence $\tau =2\left( 1-\sigma \right) /\left( 1-2\sigma \right) $, to get: \begin{equation*} \left( \int_{\Omega }u_{t}udx\right) ^{\frac{1}{1-\sigma }}\leq C\left[ \left( \int_{\Omega }\left\vert u\right\vert ^{p}dx\right) ^{\frac{2}{ (1-2\sigma )p}}+\int_{\Omega }u_{t}^{2}dx\right] \ . \end{equation*} Using the algebraic inequality (\ref {Algebraic_inequality}) with $z=\left\Vert u\right\Vert _{p}^{p}$, $ \displaystyle d=1+1/\mathscr{H}(0)$, $\omega =\mathscr{H}(0)$ and $\nu = \displaystyle\frac{2}{p\left( 1-2\sigma \right) }$ (the condition (\ref {Segma}) on $\sigma $ ensuring that $0<\nu \leq 1$) we get: \begin{equation*} z^{\nu }\leq d\left( z+\mathscr{H}(0)\right) \leq d\left( z+\mathscr{H} (t)\right) . \end{equation*} Therefore, there exists a positive constant denoted $C_{2}$ such that \ for all $t\geq 0$, \begin{equation} \left( \int_{\Omega }u_{t}udx\right) ^{\frac{1}{1-\sigma }} \leq C_{2}\left[ \mathscr{H}\left( t\right) +\left\Vert u\left( t\right) \right\Vert _{p}^{p}+\left\Vert u_{t}\left( t\right) \right\Vert _{2}^{2} \right] . 
\label{estimate_first_term} \end{equation} Following the same method as above, we can show that there exists $C_{3}>0$ such that: \begin{equation*} \left( \int_{\Gamma _{1}}u_{t}ud\Gamma \right) ^{\frac{1}{1-\sigma }}\leq C_{3}\left[ \mathscr{H}\left( t\right) +\left\Vert u\left( t\right) \right\Vert _{m,\Gamma _{1}}^{m}+\left\Vert u_{t}\left( t\right) \right\Vert _{2,\Gamma _{1}}^{2}\right] . \end{equation*} Applying the inequality (\ref{L_gam_m-norm}), we get: \begin{equation*} \left( \int_{\Gamma _{1}}u_{t}ud\Gamma \right) ^{\frac{1}{1-\sigma }}\leq C_{4}\left[ \mathscr{H}\left( t\right) +\left\Vert u\left( t\right) \right\Vert _{p}^{p}+l\left\Vert \nabla u\left( t\right) \right\Vert _{2}^{2}+\left\Vert u_{t}\left( t\right) \right\Vert _{2,\Gamma _{1}}^{2} \right] . \end{equation*} Furthermore, inequality (\ref{Dradient_main_estimate}) leads to: \begin{equation} \left( \int_{\Gamma _{1}}u_{t}ud\Gamma \right) ^{\frac{1}{1-\sigma }}\leq C_{5}\left[ \mathscr{H}\left( t\right) +\left\Vert u\left( t\right) \right\Vert _{p}^{p}+\left\Vert u_{t}\left( t\right) \right\Vert _{2,\Gamma _{1}}^{2}+E_{2}\right] . \label{estimate_second_term} \end{equation} Collecting (\ref{L_second_estimate}), (\ref{estimate_first_term}) and (\ref {estimate_second_term}), we obtain: \begin{equation} \hat{L}^{\frac{1}{1-\sigma }}\left( t\right) \leq \hat{\eta}_{1}\left\{ \left\Vert u_{t}\left( t\right) \right\Vert _{2}^{2}+\left\Vert u_{t}\right\Vert _{2,\Gamma _{1}}^{2}+\mathscr{H}(t)+\left\Vert u\left( t\right) \right\Vert _{p}^{p}+E_{2}\right\} ,\qquad \forall t\geq 0, \label{L_hat_last} \end{equation} for some $\hat{\eta}_{1}>0.$ Combining (\ref{L_hat_prime}) and (\ref{L_hat_last}), then, there exists a positive constant $\xi >0$, as small as $\epsilon $, such that for all $ t\geq 0$, \begin{equation} \hat{L}^{\prime }(t)\geq \xi \hat{L}^{\frac{1}{1-\sigma }}(t). \label{L_1-sigma_2} \end{equation} Thus, inequality (\ref{Georgiev_inequality}) holds. 
Therefore, $\hat{L}(t)$ blows up in a finite time $T^{\ast }$. On the other hand, from the definition of the function $\hat{L}(t)$ and using inequality (\ref{H_inequality}), for small values of the parameter $\varepsilon$, it follows that: \begin{equation} \hat{L}(t)\leq \kappa \left(\left\Vert u\left( t\right) \right\Vert_{p}^{p}\right)^{1-\sigma} \ , \label{inq_L_norm} \end{equation} where $\kappa $ is a positive constant. Consequently, from the inequality ( \ref{inq_L_norm}) we conclude that the norm $\left\Vert u\left( t\right) \right\Vert _{p}$ of the solution $u$, blows up in the finite time $T^{\ast } $, which implies the desired result. This completes the proof of Theorem \ref{blow_up_}. \end{proof} \end{document}
\begin{document} \title{1GHz clocked electrically driven source of single entangled telecom photon pairs} \author{G. Shooter} \affiliation{Toshiba Research Europe Limited, Cambridge Research Laboratory, 208 Cambridge Science Park, Milton Road, Cambridge, CB4 0GZ, United Kingdom} \affiliation{Cavendish Laboratory, University of Cambridge, J.J. Thomson Avenue, Cambridge, CB3 0HE, United Kingdom} \author{Z. Xiang} \affiliation{Toshiba Research Europe Limited, Cambridge Research Laboratory, 208 Cambridge Science Park, Milton Road, Cambridge, CB4 0GZ, United Kingdom} \affiliation{Cavendish Laboratory, University of Cambridge, J.J. Thomson Avenue, Cambridge, CB3 0HE, United Kingdom} \author{J.R.A. M\"uller} \affiliation{Toshiba Research Europe Limited, Cambridge Research Laboratory, 208 Cambridge Science Park, Milton Road, Cambridge, CB4 0GZ, United Kingdom} \affiliation{Department of Physics and Astronomy, University of Sheffield, Hounsfield Road, Sheffield S3 7RH, UK} \author{J. Skiba-Szymanska} \affiliation{Toshiba Research Europe Limited, Cambridge Research Laboratory, 208 Cambridge Science Park, Milton Road, Cambridge, CB4 0GZ, United Kingdom} \author{J. Huwer} \affiliation{Toshiba Research Europe Limited, Cambridge Research Laboratory, 208 Cambridge Science Park, Milton Road, Cambridge, CB4 0GZ, United Kingdom} \author{J. Griffiths} \affiliation{Cavendish Laboratory, University of Cambridge, J.J. Thomson Avenue, Cambridge, CB3 0HE, United Kingdom} \author{T. Mitchell} \affiliation{Cavendish Laboratory, University of Cambridge, J.J. Thomson Avenue, Cambridge, CB3 0HE, United Kingdom} \author{M. Anderson} \affiliation{Toshiba Research Europe Limited, Cambridge Research Laboratory, 208 Cambridge Science Park, Milton Road, Cambridge, CB4 0GZ, United Kingdom} \affiliation{Cavendish Laboratory, University of Cambridge, J.J. Thomson Avenue, Cambridge, CB3 0HE, United Kingdom} \author{T. 
M\"uller} \affiliation{Toshiba Research Europe Limited, Cambridge Research Laboratory, 208 Cambridge Science Park, Milton Road, Cambridge, CB4 0GZ, United Kingdom} \author{A.B. Krysa} \affiliation{EPSRC National Epitaxy Facility, Department of Electronic \& Electrical Engineering, The University of Sheffield, 3 Solly Street, Sheffield, S1 4DE } \author{R. M. Stevenson} \affiliation{Toshiba Research Europe Limited, Cambridge Research Laboratory, 208 Cambridge Science Park, Milton Road, Cambridge, CB4 0GZ, United Kingdom} \author{J. Heffernan} \affiliation{EPSRC National Epitaxy Facility, Department of Electronic \& Electrical Engineering, The University of Sheffield, 3 Solly Street, Sheffield, S1 4DE } \author{D. A. Ritchie} \affiliation{Cavendish Laboratory, University of Cambridge, J.J. Thomson Avenue, Cambridge, CB3 0HE, United Kingdom} \author{A. J. Shields} \affiliation{Toshiba Research Europe Limited, Cambridge Research Laboratory, 208 Cambridge Science Park, Milton Road, Cambridge, CB4 0GZ, United Kingdom} \date{23/11/20} \begin{abstract} Quantum networks are essential for realising distributed quantum computation and quantum communication. Entangled photons are a key resource, with applications such as quantum key distribution, quantum relays, and quantum repeaters. All components integrated in a quantum network must be synchronised and therefore comply with a certain clock frequency. In quantum key distribution, the most mature technology, clock rates have reached and exceeded 1GHz. Here we show the first electrically pulsed sub-Poissonian entangled photon source compatible with existing fiber networks operating at this clock rate. The entangled LED is based on InAs/InP quantum dots emitting in the main telecom window, with a multi-photon probability of less than 10\% per emission cycle and a maximum entanglement fidelity of 89\%. 
We use this device to demonstrate GHz clocked distribution of entangled qubits over an installed fiber network between two points 4.6km apart. \end{abstract} \maketitle © 2020 Optica Publishing Group. Users may use, reuse, and build upon \href{https://www.osapublishing.org/oe/fulltext.cfm?uri=oe-28-24-36838&id=442793}{\textcolor{blue}{the article}}, or use the article for text or data mining, so long as such uses are for non-commercial purposes and appropriate attribution is maintained. All other rights are reserved. \section*{Introduction} For implementation of various kinds of advanced quantum network schemes \cite{Ekert.1991,Briegel.1998,Jacobs.2002,Riedmatten.2004}, entanglement must be distributed between nodes \cite{Komar.2014,sun.2016,valivarthi.2016,Wengerowsky.2019}. The most widely used sources are currently based on spontaneous non-linear processes \cite{Kwiat.1995,Li.2005}, though the efficiency of these sources is intrinsically limited if multi-photon emission is to be minimised. This limit does not apply to entangled photon sources with sub-Poissonian statistics, such as semiconductor quantum dots (QD)s \cite{Benson.2000,Michler.2000}, with the prospect of deterministic entangled pair generation \cite{liu.2019}. For an entangled photon source to be embedded in a quantum network, it must further conform to the basic requirements of operating clock rate and wavelength. State-of-the art quantum key distribution (QKD) systems operate at clock frequencies of 1GHz and above \cite{Yuan.2008,boaron.2018}, with photons in the telecom C-band most suitable for distribution over standard optical fibers. Epitaxially grown semiconductor QDs can be readily incorporated into PIN diode structures, enabling the fabrication of light sources using standard semiconductor processing techniques \cite{Benson.2000,Michler.2000b}. 
As QDs embedded within diodes can be electrically excited \cite{Yuan.2002}, it is possible to create entangled photon sources that can be conveniently operated similar to other standard light sources, such as telecom laser diodes. InAs/InP QDs emit in the lowest-loss silica fiber window \cite{SkibaSzymanska.2017}, which makes them prime candidates for transmission over standard fiber networks. Entangled LED (ELED) telecom C-band sources have been demonstrated with DC excitation \cite{Muller.2018}. Pulsed single \cite{Michler.2000b,Santori.2001,Hargart.2013} and entangled \cite{Benson.2000,Yuan.2002,Stevenson.2006,zhang.2015,Varnava.2016,Muller.2020} photon sources based on semiconductor QDs have been developed but are either at short wavelengths, and therefore incompatible with existing fiber networks, or only operate at repetition rates too slow for current quantum network applications. Entanglement distribution experiments over installed networks have used low repetition rates \cite{sun.2016,valivarthi.2016,Wengerowsky.2019} while GHz clock rates, necessary for synchronisation with high clock rate QKD systems, have only been demonstrated with nonlinear sources over long fiber in a laboratory \cite{Honjo.2007,Inagaki.2013}. In this work, we show the distribution of entangled qubits from a 1GHz driven sub-Poissonian source over an installed standard telecom network. \section*{GHz clocked single photon source} The fabrication of ELED devices used in this work was developed to be simple, with only two etch steps and two metal depositions. An image of a device is shown in Fig.\ref{fig:DEVICE}(a). This design allows for fast electrical operation at GHz frequencies, with dimensions close to the limit imposed by the size of a bond ball as can be seen in Fig. \ref{fig:DEVICE}(a). The ELED shows good electrical performance as a diode, with the resistance reaching 50$\Omega$ beyond the turn-on voltage. 
The wafer structure, described in the Supplemental Material, is designed for QD emission in the telecom C-band. The emission spectrum shown in Fig.\ref{fig:DEVICE}(b) comes from a QD that is located within a 5$\mu$m connected pillar, as can be seen in Fig.\ref{fig:DEVICE}(a), and so can be readily relocated. The device was mounted onto the centre of a radio-frequency compatible FR4 packaging with conductive paint before wire bonding to a Au layer on one end of 50$\Omega$ impedance matched tracks, ending with a low-profile micro-coaxial connector as illustrated in the inset of Fig.\ref{fig:DEVICE}(b). \begin{figure} \caption{A quantum dot LED for 1GHz pulsed electrical excitation. (a) A false colour scanning electron microscope image of a device; an oval mesa with connected pillars, electrically isolated from the surrounding wafer. (b) An electroluminescence (EL) spectrum of a quantum dot showing the exciton (X) and biexciton (XX) emission lines. The inset shows an illustration of the radio frequency compatible packaging; the ELED is wire bonded to impedance matched tracks (p-type and n-type bond are shown in yellow and purple respectively) which finish at a low-profile micro-coaxial connector.} \label{fig:DEVICE} \end{figure} In QDs, single photons are emitted via the radiative recombination of confined electron-hole (e-h) pairs \cite{Imamoglu.1994}. Entangled photon pairs are emitted via the biexciton cascade \cite{Benson.2000} where a QD initialised in the doubly excited biexciton (XX) state decays to the singly excited exciton (X) state via emission of the first photon. This state subsequently decays via emission of a second photon, leaving the QD in the ground state. Due to conservation of angular momentum, the two emitted photons are maximally entangled in their polarization. To assess the sub-Poissonian photon emission from the ELED, we measure the second order autocorrelation function (g$^{(2)}$) of X photons as shown in Fig.\ref{fig:g2}(a). 
This measurement requires isolation of the X spectral line as in Fig.\ref{fig:DEVICE}(b). Since the QD emits at telecom wavelengths, a compact spectral wavelength filtering unit can be used that is based on an optical add-drop multiplexer as shown in Fig.\ref{fig:g2}(b) (FWHM $<$0.25nm, 1.31dB loss), a common component in classical telecommunication technology. \begin{figure} \caption{Measurement of 1 GHz pulsed single photon emission. (a) Experimental setup for measuring the second order autocorrelation (g$^{(2)} \label{fig:g2} \end{figure} The ELED was electrically driven with $\sim$130ps FWHM pulses with a high of 1.5V and a low of 0.3V; the arrival times of photons at each detector were recorded with respect to the 1GHz clock of these pulses. Photon correlations acquired over 90 minutes can be seen in Fig.\ref{fig:g2}(c). The 1ns squares along the bottom-left to top-right diagonal contain coincidences of photons emitted within the same excitation cycle. These are strongly reduced compared to the rest of the grid, corresponding to an excellent suppression of multi-photon emission within the same excitation cycle. Coincidences in the 1ns squares adjacent to the bottom-left to top-right diagonal are also reduced, as the cascade is not fully reinitialised each cycle by this electrical driving condition. Coincidences in Fig.\ref{fig:g2}(c) are also suppressed in a grid pattern with spacing of 1ns. This pattern occurs due to the electrical excitation pulse at the start of each 1ns cycle, when the QD is reinitialised. During this reinitialisation period, the population in the X level is depleted due to excitation to higher energy levels such as the XX. The 1ns squares containing coincidences of photons emitted in different excitation cycles appear to have an almost flat distribution. This is due to the long natural lifetime of the X state of 1.9ns and the dynamics involved in populating the X state via decay from the XX state, which has a lifetime of 0.5ns. 
Photon coincidences in each 1ns square were then normalised using coincidences in cycles with completely uncorrelated detection events. Fig.\ref{fig:g2}(d) shows that the g$^{(2)}$ for X photons emitted in the same 1ns cycle is 0.097$\pm$0.002 without application of any temporal post-selection. This is far below the classical limit, proving strongly sub-Poissonian emission. The g$^{(2)}$(0) is limited by the non-resonant excitation scheme used here, likely due to interactions with the charge environment. \section*{GHz clocked entanglement} As entangled photons are critical for quantum network applications, we now show the generation of 1GHz clocked entangled photon pairs from our ELED. The device was driven similarly to before, using pulses with a high of 1.5V and a low of 0.5V. The XX and X photons were separated with a spectral filter and each were detected with a polarization analyser comprising of an electronic polarisation controller (EPC) and a polarising beam splitter (PBS) followed by superconducting nanowire single photon detectors (SNSPD)s as in Fig.\ref{fig:entResults}(a). When detecting photons in a polarization basis PQ, P polarized XX photons were measured at detector 1 in Fig.\ref{fig:entResults}(a), with X photons of P and Q polarizations measured at detectors 2 and 3 respectively. As only one output of the XX PBS was sent to a detector, only half of all possible photon coincidences were recorded. Photon arrival times for each detector with respect to the 1GHz clock signal divided by 64 were recorded with a time correlated single photon counter (TCSPC), with photon correlations and entanglement fidelity evaluated in postprocessing. \begin{figure} \caption{Measurement of entanglement. (a) The experimental setup with a free-space spectral filter separating XX and X photons. 
Electronic polarization controllers (EPC)s followed by polarizing beam splitters (PBS)s set the detection polarization basis, with time correlated single photon counters (TCSPC)s recording photon arrivals at superconducting nanowire single photon detectors. (b) Photon correlations of horizontally (H) and vertically (V) polarized biexciton (XX) and exciton (X) photons, with co- and cross-polarized detection (left and right panels respectively) as a function of XX and X delay with respect to the 1GHz clock (72ps time bins). (c) The fidelity to the maximally entangled Bell $\phi^{+} \label{fig:entResults} \end{figure} Photon pair correlations in the horizontal-vertical (HV) basis covering three consecutive excitation cycles can be seen in Fig.\ref{fig:entResults}(b). Within a 1ns cycle both co (HH) and cross (HV) -polarized photon correlations are suppressed for time bins corresponding to the arrival of an X photon before a XX photon, due to the cascaded emission of the two photons of a pair. For time bins corresponding to the arrival of a XX photon before an X photon, only correlations for co-polarized photons are observed, as expected for the maximally entangled Bell $\phi^{+}$ state. Importantly, correlations do not extend beyond each 1ns excitation cycle. The cascade can be seen to decay to the uncorrelated level within 1 excitation cycle (Supplemental Material, Fig.\ref*{fig:coincs}), verifying a clean reinitialisation of photon pair emission at a 1GHz rate. The resulting fidelity to the maximally entangled Bell $\phi^{+}$ state, calculated as explained in the Supplemental Material, is shown in Fig.\ref{fig:entResults}(c). For most of the grid in Fig.\ref{fig:entResults}(c), the entanglement fidelity is $\sim$0.25, corresponding to completely random polarization correlations. 
For XX and X photons from the same 1ns cycle, the entanglement fidelity rises above the classical limit of 0.5 for X photons arriving after XX photons, before decaying with oscillations due to the fine structure splitting of the QD of 6.0$\mu$eV \cite{stevenson.2008}. These oscillations are caused by a quantum beat in the superposition bases, as can be seen in the Supplemental Material, Fig.\ref*{fig:corrLab}. Each vertical column of time bins in Fig.\ref{fig:entResults}(c) contains photon coincidences with the same relative XX-X time delay, but different arrival times within the 1ns emission cycles. One can see that the entanglement fidelity drops for time bins at the start and end of the 1ns cycles due to reinitialisation of the emission ($\sim$130ps). Therefore, to give an idea of the highest possible value, XX and X photon arrival times were additionally gated to 0.864ns around the center of 1ns cycles (shown as a white dashed box in Fig.\ref{fig:entResults}(c)). The average of each column is plotted in Fig.\ref{fig:entResults}(d), where one can again observe the time dependent oscillation of the fidelity due to the finite fine structure splitting of the QD. The resulting maximum fidelity to the Bell $\phi^{+}$ state is 0.89$\pm$0.02 with comparable correlation contrasts in the three principal polarisation bases (see the Supplemental Material for further information). However, this value corresponds to a bin size of 72ps, which is not compatible with post-selection free detection schemes. Detectors used in state-of-the-art QKD systems operating at 1GHz clock rates have typical detection gate widths of $<$170ps \cite{Yuan.2007,Yuan.2008}. To assess the performance of the pulsed ELED with these non-research grade detectors, we position a single 168ps integration window to give maximum entanglement fidelity, shown as a black dashed box in Fig.\ref{fig:entResults}(c). 
This results in a fidelity of 0.86$\pm$0.03, in the regime compatible with error correction in quantum key distribution applications \cite{Chau.2002}. The drop in fidelity when increasing the time window size is within the errors, showing that the QD FSS is not limiting the fidelity achievable with typical gated detectors. Analysing the X autocorrelation data from Fig.\ref{fig:g2} in a similar fashion gives a g$^{(2)}$ of 0.04$\pm$0.01. However, only 3.4$\%$ of the detected photon pairs originating from the same excitation cycle arrive within this 168ps time window. For future compatibility of deterministic GHz clocked entangled photon pair sources with gated detectors for post-selection-free operation, high source efficiencies are crucial. In addition, XX and X lifetimes similar to the detector gate width are necessary to increase the number of photon pairs arriving within the active gate window of the detectors. This could be achieved via Purcell enhancement, which reduces XX and X lifetimes, for example with micropillar designs \cite{Dousse.2010} or circular bragg gratings \cite{liu.2019,Wang.2019}. Given the GHz clock rate, an overall efficiency of the optical system including detectors of approximately 0.6\% (see the Supplemental Material), and average XX and X photon rates at each detector of 52000 and 83000 counts per second, we estimate an intrinsic efficiency of around 2\% for the ELED to generate a photon per excitation pulse. Efficiencies are currently low for non-resonantly excited telecommunication wavelength QDs, which are still undergoing significant development and are not as well established as short-wavelength InAs/GaAs dots. Telecommunication wavelength QDs are larger than their short-wavelength counterparts, making them more susceptible to fluctuations in the surrounding charge environment. This typically results in the presence of multiple charged states with radiative and non-radiative decay paths. 
Techniques to enhance emission from the neutral XX and X states rather than charged complexes may increase the photon pair efficiency for ELEDs in the future \cite{young.2007}. Perhaps counter-intuitively, truncating the cascade by reinitialising the QD at a high clock rate does not intrinsically limit the photon generation rates; we have recently shown that photon generation rates can surpass those achievable with DC driving for some pulsed regimes \cite{Muller.2020}. \section*{Entanglement distribution} To demonstrate network compatibility of the pulsed entangled photon pair source we distributed entanglement over 4.6km between the Toshiba Cambridge Research Laboratory (CRL) and the Physics Department of the University of Cambridge as shown in Fig.\ref{fig:entDist}, using installed network fiber. The source was operated at CRL where X photons were detected, and XX photons were sent to a deployed detection system over 15km of installed fiber with 6dB loss at 1550nm. \begin{figure} \caption{Experimental setup for distribution of 1GHz clocked entangled photon pairs across the city of Cambridge. The entangled photon pairs were generated from an ELED at the Toshiba Cambridge Research Laboratory, with the X photons detected locally using detectors 1 and 2 (SNSPDs) and the XX photons detected at the Physics Department of the University of Cambridge with detectors 3 and 4 (APDs) in a deployed detection system. The detection polarization basis was set by EPCs followed by PBSs, with TCSPCs recording photon arrivals at detectors. When measuring photons in an arbitrary polarization basis PQ, detectors 1 and 3 measured P-polarized X and XX photons respectively and detectors 2 and 4 measured Q-polarized X and XX photons respectively. A polarization recovery system compensated for drifts occurring over the installed fiber. 
Classical communication for remote control of system components was multiplexed with the reference clock signal from a small form-factor pluggable transceiver (SFP) over a second fiber. } \label{fig:entDist} \end{figure} The electrical 1GHz clock signal used to drive the ELED was down-sampled to 15.6MHz and converted to an optical signal at 1570nm and multiplexed with 1Gbit/s classical communication data traffic at 1310nm. The communication channel was required for remote control of the detection system and data acquisition, both classical signals were transmitted over a separate installed fiber. At the other end, both classical signals were demultiplexed, and the clock signal was converted back to an electrical signal to be used as the synchronisation reference in the deployed detection system. In both locations, photon arrival times in two detector channels were recorded with respect to the reference clock with TCSPCs similar to the previously discussed measurements in a laboratory. Photon arrival times were measured in the three principal detection bases in sets of 7 minutes. Polarization drifts occurring over the network fiber due to changing environmental conditions were compensated for before each measurement using a similar stabilisation system as in \cite{Xiang.2020}. Photon correlations were evaluated in postprocessing. \begin{figure} \caption{Distribution of entangled photon pairs over 15km of installed fiber. (a) The fidelity to the maximally entangled Bell $\phi^{+} \label{fig:distResult} \end{figure} Entangled photon pairs were distributed between East and West Cambridge for 14 consecutive hours of operation. Fig.\ref{fig:distResult} (a) and (b) shows results plotted in a similar way to Fig.\ref{fig:entResults} but for distribution of entanglement rather than a measurement in a laboratory. 
The maximum fidelity to the Bell $\phi^{+}$ state, analysed on a 72ps grid with the reinitialisation period discarded as for the laboratory measurement, is 0.79$\pm$0.01. Using the timing characteristics of GHz clocked detectors as indicated in Fig.\ref{fig:distResult}(a), the maximum entanglement fidelity is 0.76$\pm$0.01. The 10\% reduction in the fidelity when transmitting XX photons over the installed fiber is attributed to an increased ratio of background events to XX photon signal at the deployed detectors from $<$2\% to $>$10\%. We further observe a larger drop in polarization correlation contrast for measured superposition bases (diagonal/antidiagonal and right-/left-hand circular, see the Supplemental Material). This most likely results from a larger uncertainty in calibrating the detection bases at the deployed detection system (see the Supplemental Material) which is again caused by a drop in the signal-to-background ratio rather than the performance of the ELED itself. Fig.\ref{fig:distResult}(c) shows the evolution of the maximum entanglement fidelity for sets of 2 hours of data. It remains around 0.79 for the entire 14 hour experiment, demonstrating the excellent stability of the 1GHz clocked ELED as a source for distributed entangled photon pairs across a real-world fiber network. \section*{Conclusion} We have shown an electrically driven 1GHz clocked telecom ELED with strong single photon characteristic, resulting in a two-photon probability of less than 10\% without any temporal post-selection. Using the ELED as a source of 1GHz clocked entangled photons yields a maximum entanglement fidelity of 89\% in a 72ps post selection window. In addition, the device is suitable for operation using standard actively gated GHz clocked detector modules as are used in current QKD systems, with no additional software-based post-selection. 
However, for real-world applications in quantum communication relying on high entangled photon pair rates, an enhancement of the source brightness is required and the number of photons arriving within the active gate window of such detectors must be significantly increased via the reduction of XX and X lifetimes. Operation of the device in the lowest-loss telecom window enabled us for the first time to demonstrate the distribution of 1GHz clocked entangled qubits from a sub-Poissonian source on a city scale. The achieved entanglement fidelity of 79\% proves reliability for electrically pulsed semiconductor quantum light sources connected to installed fiber networks. Pulsed operation with a GHz clock frequency opens up the possibility for seamless integration with other quantum network hardware such as QKD systems and efficient time multiplexing with classical communication signals. A further developed device design with integration of nano-photonic structures \cite{Bockler.2008,Dousse.2010,liu.2019,Wang.2019} to combine high collection efficiency and Purcell enhancement has the potential to provide a viable future workhorse for quantum communication systems, with intrinsic security and no fundamental efficiency limitation as is the case for sources based on weak coherent laser pulses, spontaneous parametric down-conversion or four-wave mixing. \section*{Growth} The semiconductor wafer was grown by metal organic vapor phase epitaxy on an InP substrate. A bottom distributed Bragg reflector (DBR) comprising 20 layer pairs, each formed of 112nm of (Al$_{0.30}$Ga$_{0.70}$)$_{0.48}$In$_{0.52}$As and 123nm of InP were grown, with the top 3 repeats doped with 2x10$^{18}$cm$^{-3}$ of Si. InAs droplet QDs as in \cite{SkibaSzymanska.2017} were grown in a cavity on a 3$\lambda$/4 intrinsic InP layer followed by a 5$\lambda$/4 InP layer. 
The top nominal 150nm of the cavity was grown with Zn doping of 2x10$^{18}$cm$^{-3}$ to improve high speed electrical injection at low temperatures. 3 DBR pairs with Zn doping of 2x10$^{18}$cm$^{-3}$ and an InP capping layer completed the p-i-n diode structure. \section*{Fabrication} The device in Fig.\ref{fig:DEVICE}(a) was fabricated in 4 steps. To contact the p-layer, CrAu was thermally evaporated onto the wafer surface. The p-type contact was just large enough to fit a bond ball, 80x110$\mu$m. The mesa and the isolated area were each etched using inductively coupled plasma (ICP) with Cl$_{2}$ based process chemistry. 150nm of AuGeNi was evaporated onto the isolated area to contact the n-layer before annealing at 420$^{\circ}$C. \section*{Characterisation} The device was cooled to 30K in a He vapour cryostat, with an xyz piezo nano-positioning stage enabling navigation around the device. A fibre-based confocal microscope system with NA 0.68 collected light emitted from the device. QD electroluminescence (EL) spectra were measured by sending this light via a fiber to a spectrometer with an InGaAs array. The fine structure splitting of a QD was measured as in \cite{SkibaSzymanska.2017}, by polarization dependent spectroscopy using a quarter wave plate and linear polarizer in front of the spectrometer. This measurement identified XX and X lines, such as for the QD with the EL spectrum in Fig.\ref{fig:DEVICE}(b) which had a fine structure splitting of (6.0$\pm$0.3)$\mu$eV. \section*{Measurement of entanglement fidelity} XX and X photons emitted via the biexciton cascade are co-polarized in the horizontal/vertical basis due to conservation of angular momentum \cite{Benson.2000}. The degree of correlation in a polarization basis PQ is calculated from co-polarized, c$_{PP}$, and cross-polarized, c$_{PQ}$, photon correlations by \[ C_{PQ}=\frac{c_{PP}-c_{PQ}}{c_{PP}+c_{PQ}}. 
\] The entanglement fidelity to the maximally entangled Bell $\phi^{+}$ state \[ f=\frac{1+C_{HV}+C_{DA}-C_{RL}}{4}, \] is obtained \cite{Ward.2014} from measurements in the horizontal/vertical (HV), diagonal/antidiagonal (DA), and right- and left-hand circularly polarized (RL) detection bases. Correlations in these three principal detection bases are shown for the measurement in the laboratory in Fig.\ref*{fig:corrLab} and for the entanglement distribution measurement in Fig.\ref*{fig:corrDist}. In the superposition bases, DA and RL, there is an oscillation due to the 6.0$\mu$eV fine structure splitting of the QD. The degrees of correlation in the HV, DA, and RL bases, analysed on a 72ps grid with the reinitialisation period discarded as shown in Fig.\ref{fig:entResults}(d) and Fig.\ref{fig:distResult}(b), are C$_{HV}$= 0.87$\pm$0.05, C$_{DA}$= 0.83$\pm$0.05, and C$_{RL}$= -0.84$\pm$0.04 respectively for the laboratory measurement. Similarly, for the entanglement distribution measurement the degrees of correlation are C$_{HV}$= 0.79$\pm$0.02, C$_{DA}$= 0.68$\pm$0.02, and C$_{RL}$= -0.69$\pm$0.02. \begin{figure} \caption{Normalised correlations of biexciton (XX) and exciton (X) photons as a function of XX and X delay with respect to the 1GHz clock (72ps time bins) for the laboratory measurement. The acquisition time per basis was 30 minutes. Correlations in the horizontal/vertical (HV) basis with (a) co- and (b) cross-polarised photons. Correlations in the diagonal/antidiagonal (DA) basis with (c) co- and (d) cross-polarised photons. Correlations in the right-/left-hand circular (RL) basis with (e) co- and (f) cross-polarised photons. Time bins with no photon coincidences are black.} \label{fig:corrLab} \end{figure} \begin{figure} \caption{Normalised correlations of biexciton (XX) and exciton (X) photons as a function of XX and X delay with respect to the 1GHz clock (72ps time bins) for the entanglement distribution measurement over 15km of installed fiber. 
Correlations in the horizontal/vertical (HV) basis with (a) co- and (b) cross-polarised photons. Correlations in the diagonal/antidiagonal (DA) basis with (c) co- and (d) cross-polarised photons. Correlations in the right-/left-hand circular (RL) basis with (e) co- and (f) cross-polarised photons. The total acquisition time per basis was 196 minutes. In contrast to the laboratory measurement, photon coincidences for equivalent polarisation sets were combined for better statistics, such as HH and VV for (a), as all 4 polarizing beam splitter outputs were detected rather than just 3.} \label{fig:corrDist} \end{figure} A free-space spectral filter was used to separate XX and X photons in the entanglement measurements in Fig.\ref{fig:entResults}, Fig.\ref{fig:entDist}, and Fig.\ref{fig:distResult}, followed by electronic polarization controllers and polarizing beam splitters (PBS)s before the detectors. \section*{Photon-pair coincidence rates} An acquisition time of 56.25 minutes was used for the measurement of entanglement in a laboratory shown in Fig.\ref{fig:entResults}. To show the time dependence of photon-pair emission, co- and cross-polarized photon-pair coincidences in the 3 principal detection bases were summed. Fig.\ref*{fig:coincs} shows this sum with the same time bins as in Fig.\ref{fig:entResults}(b). Although the X lifetime is longer than 1ns, it can be seen that the cascade decays to the uncorrelated level within 1 excitation cycle due to the reinitialisation provided by electrical driving \cite{Muller.2020}. \begin{figure} \caption{Total photon-pair coincidences in the 3 principal detection bases over 3 consecutive emission cycles as a function of biexciton (XX) and exciton (X) photon delays with respect to the 1GHz clock (72ps time bins).} \label{fig:coincs} \end{figure} 52818 photon-pairs coincidences with both photons originating from the same excitation cycle were measured. 
However, only one output of the PBS for XX photons in Fig.\ref{fig:entResults}(a) was used for this measurement; if both outputs were used the number of coincidences would be doubled. The overall efficiency of the optical system is approximated to be 0.6\% (22.45dB loss). The loss for coupling photons emitted by the QD into single mode fiber was $\sim$15.2dB. The free space grating setup had a loss of 2.6dB. Typical EPC and PBS losses were 0.8dB and 0.4dB respectively. Equivalent photon loss due to finite detection efficiency of SNSPDs around 50\% was 3dB. The experimental setup had 3 fiber-to-fiber connections for XX and X photons, with losses of 0.15dB per connection. \section*{Entanglement distribution photon detection} At CRL, the overall timing jitter for detection was 70ps including the superconducting nanowire single photon detectors (SNSPD)s (Single Quantum). In the deployed system, the overall timing jitter for detection was around 75ps including the avalanche photodiodes (APD)s. The combined X photon rate at the SNSPDs, detectors 1 and 2 in Fig.\ref{fig:entDist}, was around 228 000 counts per second and the combined XX photon rate at the APDs, detectors 3 and 4 in Fig.\ref{fig:entDist}, was around 15 000 counts per second. To calibrate the detection basis, the QD emission was replaced by a polarization reference matched to the eigenbasis of emitted photon pairs (not shown in Fig.\ref{fig:entDist}). EPC voltages were then varied to minimise the output signal from one output mode of the PBS at a detector, aligning the detection basis to the reference. \end{document}
\begin{document} \pdfrender{StrokeColor=black,TextRenderingMode=2,LineWidth=.01pt} \title{Primitive ideals and Jacobson's structure spaces of noncommutative semigroups} \author{Amartya Goswami} \address{Department of Mathematics and Applied Mathematics\\ University of Johannesburg\\ P.O. Box 524, Auckland Park 2006\\ South Africa} \address{National Institute for Theoretical and Computational Sciences (NITheCS)\\ South Africa} \email{[email protected]} \begin{abstract} The purpose of this note is to introduce primitive ideals of noncommutative semigroups and study some topological aspects of the corresponding structure spaces. \end{abstract} \makeatletter \@namedef{subjclassname@2020}{ \textup{2020} Mathematics Subject Classification} \makeatother \subjclass[2020]{20M12; 20M10; 16W22} \keywords{semigroup; primitive ideal; Jacobson topology} \maketitle \section*{Introduction} Since the introduction of primitive rings in \cite{J45}, primitive ideals have shown their immense importance in understanding structural aspects of rings and modules \cite{J56, R88}, Lie algebras \cite{KPP12}, enveloping algebras \cite{D96,J83}, PI-algebras \cite{J75}, quantum groups \cite{J95}, skew polynomial rings \cite{I79}, and others. In \cite{J451}, Jacobson introduced a hull-kernel topology (also known as the Jacobson topology) on the set of primitive ideals of a noncommutative ring, and obtained representations of biregular rings. This Jacobson topology also turns out to play a key role in the representation of finite-dimensional Lie algebras (see \cite{D96}). Compared to the above algebraic structures, after magmas (also known as groupoids), semigroups are the most basic ones. A detailed study of the algebraic theory of semigroups can be found in one of the earliest textbooks \cite{CP61} and \cite{CP67} (see also \cite{G01, H92, H95}), whereas specific studies of prime, semiprime, and maximal ideals of semigroups are done in \cite{A53, A81, PK92, S69}.
Furthermore, various notions of radicals of semigroups have been studied in \cite{A75, G69, S76}. Readers may consider \cite{AJ84} for a survey on the ideal theory of semigroups. The next question is that of imposing topologies on various types of ideals of semigroups. To this end, a hull-kernel topology on maximal ideals of (commutative) semigroups has been considered in \cite{A62}, whereas the same on minimal prime ideals has been done in \cite{K63}. Using the notion of $x$-ideals introduced in \cite{A62}, a study of a general notion of structure spaces for semigroups has been carried out in \cite{H66}; however, the assumption of commutativity restricts that study to only certain types of ideals of semigroups, and hence it leaves no scope for primitive ideals. To the best of the author's knowledge, primitive ideals of semigroups have never been considered. The aim of this paper is to introduce primitive ideals of (noncommutative) semigroups and to endow the set of primitive ideals with the Jacobson topology in order to study some of their topological aspects. In order to have the notion of primitive ideals of semigroups, we furthermore need a notion of a module over a noncommutative semigroup, which in general has also not been studied much. We hope that the notion of primitive ideals introduced here will in the future shed some light on the structural aspects of noncommutative semigroups. \section{Primitive ideals} A \emph{semigroup} is a tuple $(S, \cdot)$ such that the binary operation $\cdot$ on the set $S$ is associative. For all $a, b\in S$, we shall write $ab$ to mean $a\cdot b$. Throughout this work, all semigroups are assumed to be noncommutative.
If a semigroup $S$ has an identity, we denote it by $1$ satisfying the property: $s1=s=1s$ for all $s\in S.$ If $A$ and $B$ are subsets of $S$, then by the \emph{set product} $AB$ of $A$ and $B$ we shall mean $AB=\{ab\mid a\in A, b\in B\}.$ If $A=\{a\}$ we write $AB$ as $aB$, and similarly for $B=\{b\}.$ Thus $$AB=\cup\{ Ab\mid b\in B\}=\cup \{aB\mid a\in A\}.$$ A \emph{left} (\emph{right}) \emph{ideal} of a semigroup $S$ is a nonempty subset $\mathfrak{a}$ of $S$ such that $S\!\mathfrak{a}\subseteq \mathfrak{a}$ ($\mathfrak{a} \,S\subseteq \mathfrak{a}$). A \emph{two-sided ideal} or simply an \emph{ideal} is a subset which is both a left and a right ideal of $S$. In this work the word ``ideal'' without modifiers will always mean a two-sided ideal. If $X$ is a nonempty subset of a semigroup $S$, then the ideal $\langle X\rangle$ \emph{generated by} $X$ is the intersection of all ideals containing $X$. Therefore, $$\langle X\rangle =X\cup XS\cup SX\cup XSX.$$ We say an ideal $\mathfrak{a}$ is of \emph{finite character} if the generating set $X$ of $\mathfrak{a}$ is equal to the set-theoretic union of all the ideals generated by finite subsets of $X$. We assume all our ideals are of finite character. To define primitive ideals of a semigroup $S,$ we require the notion of a module over $S$, which we introduce now. A (\emph{left}) \emph{$S$-module} is an abelian group $(M,+,0)$ endowed with a map $S\times M\to M$ (denoted by $(s,m)\mapsto sm$) satisfying the identities: \begin{enumerate}[\upshape (i)] \itemsep -.2em \item $s(m+m')=sm+sm';$ \item $(ss')m=s(s'm);$ \item $s0=0,$ \end{enumerate} for all $s,s'\in S$ and for all $m, m'\in M$. Henceforth the term ``$S$-module'' without modifier will always mean left $S$-module.
If $M$, $M'$ are $S$-modules, then an \emph{$S$-module homomorphism} from $M$ into $M'$ is a group homomorphism $f\colon M\to M'$ such that $f(sm)=sf(m)$ for all $s\in S$ and for all $m\in M.$ A subset $N$ of $M$ is called an $S$-\emph{submodule} of the module $M$ if \begin{enumerate}[\upshape (i)] \itemsep -.2em \item $(N,+)$ is a subgroup of $(M,+);$ \item for all $s\in S$ and for all $n\in N$, $sn\in N.$ \end{enumerate} If $\mathfrak{a}$ is an ideal of $S$, then the additive subgroup $\mathfrak{a}M$ of $M$ generated by the elements of the form $\{am \mid a \in \mathfrak{a},m \in M\}$ is an $S$-submodule. An $S$-module $M$ is called \emph{simple} (or \emph{irreducible}) if \begin{enumerate}[\upshape (i)] \itemsep -.2em \item $S\!M=\left\{\sum s_im_i \mid s_i\in S, m_i\in M\right\}\neq 0.$ \item There is no proper $S$-submodule of $M$ other than $0$. \end{enumerate} A (\emph{left}) \emph{annihilator} of an $S$-module $M$ is $\mathrm{Ann}_S(M)=\{ s\in S\mid sm=0\;\;\text{for all}\;\; m\in M\}.$ When $M=\{m\},$ we write $ \mathrm{Ann}_S(\{m\})$ as $ \mathrm{Ann}_S(m)$. \begin{lemma} An annihilator $\mathrm{Ann}_S(M)$ is an ideal of $S$. \end{lemma} \begin{proof} For all $s\in S$ and for all $x\in \mathrm{Ann}_S(M)$ we have $(sx)m=s(xm)=s0=0.$ \end{proof} A nonempty proper ideal $\mathfrak{p}$ is said to be \emph{primitive} if $\mathfrak{p}=\mathrm{Ann}_S(M)$ for some simple $S$-module $M$. We denote the set of primitive ideals of a semigroup $S$ by $\mathrm{Prim}(S)$. A nonempty proper ideal $\mathfrak{q}$ of a semigroup $S$ is said to be \emph{prime} if for any two ideals $\mathfrak{a}$, $\mathfrak{b}$ of $S$ and $\mathfrak{a}\mathfrak{b}\subseteq \mathfrak{q}$ implies $\mathfrak{a}\subseteq \mathfrak{q}$ or $\mathfrak{b}\subseteq \mathfrak{q}$. 
As it has been remarked in \cite{BM58}, it does not matter whether the product $\mathfrak{a}\mathfrak{b}$ of ideals $\mathfrak{a}$ and $\mathfrak{b}$ is defined to be the set of all finite sums $\sum i_{\alpha} j_{\alpha}$ (where $i_{\alpha}\in \mathfrak{a}$, $j_{\alpha}\in \mathfrak{b}$), or the smallest ideal of the semigroup $S$ containing all products $i_{\alpha} j_{\alpha}$, or merely the set of all these products. For rings, in \cite{B56}, the second of these definitions has been used and in \cite{A54} the third. The proof of the following result is easy to verify. \begin{lemma}\label{dint} If $\mathfrak{a}$ and $\mathfrak{b}$ are any two ideals of a semigroup, then $\mathfrak{a}\mathfrak{b}\subseteq \mathfrak{a}\cap \mathfrak{b}.$ \end{lemma} The following proposition gives an alternative formulation of prime ideals of semigroups. For a proof, see \cite[Lemma 2.2]{PK92}. \begin{proposition}\label{alpri} Suppose $S$ is a semigroup. Then the following conditions are equivalent: \begin{enumerate}[\upshape (i)] \itemsep -.2em \item $\mathfrak{q}$ is a prime ideal of $S$. \item $aSb \subseteq \mathfrak{q}$ implies $a\in \mathfrak{q}$ or $b\in\mathfrak{q}$\; for all $a, b \in S.$ \end{enumerate} \end{proposition} Primitive ideals and prime ideals of a semigroup are related as follows. \begin{proposition}\label{prtpr} Every primitive ideal of a semigroup is a prime ideal. \end{proposition} \begin{proof} Suppose $\mathfrak{p}$ is a primitive ideal and $\mathfrak{p}=\mathrm{Ann}_S(M)$ for some simple $S$-module $M$. Let $a, b \notin\mathrm{Ann}_S(M)$. Then $am\neq 0$ and $bm'\neq 0$ for some $m, m'\in M.$ Since $M$ is simple, there exists an $s\in S$ such that $s(bm')=m$. Then $$(asb)m'=a(s(bm'))=am\neq 0,$$ and hence $asb \notin \mathrm{Ann}_S(M)$. Therefore, $\mathrm{Ann}_S(M)$ is a prime ideal by Lemma \ref{alpri}. 
\end{proof} In the next section we talk about the Jacobson topology on the set of primitive ideals of a semigroup and discuss some of the topological properties of the corresponding structure spaces. \section{Jacobson topology} We shall introduce the Jacobson topology on $\mathrm{Prim}(S)$ by defining a closure operator for the subsets of $\mathrm{Prim}(S)$. Once we have a closure operator, closed sets are defined as sets which are invariant under this closure operator. Suppose $X$ is a subset of $\mathrm{Prim}(S)$. Set $\mathcal{D}_X=\bigcap_{\mathfrak{q}\in X}\mathfrak{q}.$ We define the closure of the set $X$ as \begin{equation}\label{clop} \mathcal{C}(X)=\left\{ \mathfrak{p}\in \mathrm{Prim}(S) \mid \mathfrak{p}\supseteq \mathcal{D}_X \right\}. \end{equation} If $X=\{x\}$, we will write $\mathcal{C}(\{x\})$ as $\mathcal{C}(x)$. We wish to verify that the closure operation defined in (\ref{clop}) satisfies Kuratowski's closure conditions and that is done in the following \begin{proposition}\label{ztp} The sets $\{\mathcal{C}(X)\}_{X\subseteq \mathrm{Prim}(S)}$ satisfy the following conditions: \begin{enumerate}[\upshape (i)] \itemsep -.2em \item\label{clee} $\mathcal{C}(\emptyset)=\emptyset$, \item\label{clxx} $\mathcal{C}(X)\supseteq X$, \item\label{clclx} $\mathcal{C}(\mathcal{C}(X))=\mathcal{C}(X),$ \item\label{clxy} $ \mathcal{C}(X\cup Y)=\mathcal{C}(X)\cup \mathcal{C}(Y).$ \end{enumerate} \end{proposition} \begin{proof} The proofs of (\ref{clee})-(\ref{clclx}) are straightforward, whereas for (\ref{clxy}), it is easy to see that $ \mathcal{C}(X\cup Y)\supseteq\mathcal{C}(X)\cup \mathcal{C}(Y).$ To obtain the other inclusion, let $\mathfrak{p}\in \mathcal{C}(X\cup Y).$ Then $$\mathfrak{p}\supseteq \mathcal{D}_{X\cup Y}=\mathcal{D}_X \cap \mathcal{D}_Y.$$ Since $\mathcal{D}_X$ and $\mathcal{D}_Y$ are ideals of $S$, by Lemma \ref{dint}, it follows that $$\mathcal{D}_X\mathcal{D}_Y\subseteq \mathcal{D}_X \cap \mathcal{D}_Y\subseteq \mathfrak{p}.$$ Since by
Proposition \ref{prtpr}, $\mathfrak{p}$ is prime, either $\mathcal{D}_X\subseteq \mathfrak{p}$ or $\mathcal{D}_Y\subseteq \mathfrak{p}$. This means either $\mathfrak{p}\in \mathcal{C}(X)$ or $\mathfrak{p}\in \mathcal{C}(Y)$. Thus $ \mathcal{C}(X\cup Y)\subseteq\mathcal{C}(X)\cup \mathcal{C}(Y).$ \end{proof} The set $\mathrm{Prim} (S)$ of primitive ideals of a semigroup $S$, topologized by the closure operator defined in (\ref{clop}) (the Jacobson topology), is called the \emph{structure space} of the semigroup $S$. It is evident from (\ref{clop}) that if $\mathfrak{p}\neq \mathfrak{p}'$ for any two $\mathfrak{p}, \mathfrak{p}'\in \mathrm{Prim}(S)$, then $\mathcal{C}(\mathfrak{p})\neq \mathcal{C}(\mathfrak{p}').$ Thus \begin{proposition}\label{t0a} Every structure space $\mathrm{Prim}(S)$ is a $T_0$-space. \end{proposition} \begin{theorem}\label{csb} If $S$ is a semigroup with identity, then the structure space $\mathrm{Prim}(S)$ is compact. \end{theorem} \begin{proof} Suppose $\{K_{ \lambda}\}_{\lambda \in \Lambda}$ is a family of closed sets of $\mathrm{Prim}(S)$ with $\bigcap_{\lambda\in \Lambda}K_{ \lambda}=\emptyset.$ Set $$\mathfrak{a}=\left\langle \bigcup_{\lambda \in \Lambda} \mathcal{D}_{K_{\lambda}}\right\rangle.$$ If $\mathfrak{a}\neq S,$ then we must have a maximal ideal $\mathfrak{m}$ of $S$ such that $\mathfrak{a}\subseteq \mathfrak{m}.$ Moreover, $$\mathcal{D}_{K_{\lambda}}\subseteq \mathfrak{a}\subseteq \mathfrak{m},$$ for all $\lambda \in \Lambda.$ Therefore $\mathfrak{m}\in \mathcal{C}(K_{\lambda})=K_{\lambda}$ for all $\lambda \in \Lambda$, contradicting our assumption.
Hence $\mathfrak{a}=S,$ and the identity $1\in \mathfrak{a}.$ Since $\mathfrak{a}$ is of finite character we must have a finite subset $\{\lambda_{\scriptscriptstyle 1}, \ldots, \lambda_{\scriptscriptstyle n}\}$ of $\Lambda$ such that $1\in \bigcup_{i=1}^n \mathcal{D}_{K_{\lambda_i}}.$ This implies $\bigcap_{\lambda_i}K_{\lambda_i}=\emptyset,$ which establishes the finite intersection property. \end{proof} Recall that a nonempty closed subset $K$ of a topological space $X$ is \emph{irreducible} if $K\neq K_{\scriptscriptstyle 1}\cup K_{\scriptscriptstyle 2}$ for any two proper closed subsets $K_{\scriptscriptstyle 1}, K_{\scriptscriptstyle 2}$ of $K$. A maximal irreducible subset of a topological space $X$ is called an \emph{irreducible component} of $X.$ A point $x$ in a closed subset $K$ is called a \emph{generic point} of $K$ if $K = \mathcal{C}(x).$ \begin{lemma}\label{lemprime} The only irreducible closed subsets of a structure space $\mathrm{Prim}(S)$ are of the form: $\{\mathcal{C}(\mathfrak{p})\}_{\mathfrak{p}\in \mathrm{Prim}(S)}$. \end{lemma} \begin{proof} Since $\{\mathfrak{p}\}$ is irreducible, so is $\mathcal{C}(\mathfrak{p}).$ Suppose $\mathcal{C}(\{\mathfrak{a}\})$ is an irreducible closed subset of $\mathrm{Prim}(S)$ and $\mathfrak{a}\notin \mathrm{Prim}(S).$ This implies there exist ideals $\mathfrak{b}$ and $\mathfrak{c}$ of $S$ such that $\mathfrak{b}\nsubseteq \mathfrak{a}$ and $\mathfrak{c}\nsubseteq \mathfrak{a}$, but $\mathfrak{b}\mathfrak{c}\subseteq \mathfrak{a}$. Then $$\mathcal{C}(\langle \mathfrak{a}, \mathfrak{b}\rangle)\cup \mathcal{C}(\langle \mathfrak{a},\mathfrak{c}\rangle)=\mathcal{C}(\langle \mathfrak{a}, \mathfrak{b}\mathfrak{c}\rangle)=\mathcal{C}(\mathfrak{a}).$$ But $\mathcal{C}(\langle \mathfrak{a}, \mathfrak{b}\rangle)\neq \mathcal{C}(\mathfrak{a})$ and $\mathcal{C}(\langle \mathfrak{a}, \mathfrak{c}\rangle)\neq \mathcal{C}(\mathfrak{a}),$ and hence $\mathcal{C}(\mathfrak{a})$ is not irreducible. 
\end{proof} \begin{proposition} Every irreducible closed subset of $\mathrm{Prim}(S)$ has a unique generic point. \end{proposition} \begin{proof} The existence of generic point follows from Lemma \ref{lemprime}, and the uniqueness of such a point follows from Proposition \ref{t0a}. \end{proof} The irreducible components of a structure space can be characterised in terms of minimal primitive ideals, and we have that in the following \begin{proposition}\label{thmirre} The irreducible components of a structure space $\mathrm{Prim}(S)$ are the closed sets $\mathcal{C}(\mathfrak{p})$, where $\mathfrak{p}$ is a minimal primitive ideal of $S$. \end{proposition} \begin{proof} If $\mathfrak{p}$ is a minimal primitive ideal, then by Lemma \ref{lemprime}, $\mathcal{C}(\mathfrak{p})$ is irreducible. If $\mathcal{C}(\mathfrak{p})$ is not a maximal irreducible subset of $\mathrm{Prim}(S)$, then there exists a maximal irreducible subset $\mathcal{C}(\mathfrak{p}')$ with $\mathfrak{p}'\in \mathrm{Prim}(S)$ such that $\mathcal{C}(\mathfrak{p})\subsetneq \mathcal{C}(\mathfrak{p}')$. This implies that $\mathfrak{p}\in \mathcal{C}(\mathfrak{p}')$ and hence $\mathfrak{p}'\subsetneq \mathfrak{p}$, contradicting the minimality property of $\mathfrak{p}$. \end{proof} Recall that a semigroup is called \emph{Noetherian} if it satisfies the ascending chain condition, whereas a topological space $X$ is called \emph{Noetherian} if the descending chain condition holds for closed subsets of $X.$ A relation between these two notions is shown in the following \begin{proposition}\label{fwn} If a semigroup $S$ is Noetherian, then $\mathrm{Prim}(S)$ is a Noetherian space. \end{proposition} \begin{proof} It suffices to show that a collection of closed sets in $\mathrm{Prim}(S)$ satisfy descending chain condition. Let $\mathcal{C}(\mathfrak{a}_{\scriptscriptstyle 1})\supseteq \mathcal{C}(\mathfrak{a}_{\scriptscriptstyle 2})\supseteq \cdots$ be a descending chain of closed sets in $\mathrm{Prim}(S)$. 
Then, $\mathfrak{a}_{\scriptscriptstyle 1}\subseteq \mathfrak{a}_{\scriptscriptstyle 2}\subseteq \cdots$ is an ascending chain of ideals in $S.$ Since $S$ is Noetherian, the chain stabilizes at some $n \in \mathds{N}.$ Hence, $\mathcal{C}(\mathfrak{a}_{\scriptscriptstyle n}) = \mathcal{C}(\mathfrak{a}_{\scriptscriptstyle n+k})$ for any $k.$ Thus $\mathrm{Prim}(S)$ is Noetherian. \end{proof} \begin{corollary} The set of minimal primitive ideals in a Noetherian semigroup is finite. \end{corollary} \begin{proof} By Proposition \ref{fwn}, $\mathrm{Prim}(S)$ is Noetherian, thus $\mathrm{Prim}(S)$ has finitely many irreducible components. By Proposition \ref{thmirre}, every irreducible closed subset of $\mathrm{Prim}(S)$ is of the form $\mathcal{C}(\mathfrak{p}),$ where $\mathfrak{p}$ is a minimal primitive ideal. Thus $\mathcal{C}(\mathfrak{p})$ is an irreducible component if and only if $\mathfrak{p}$ is minimal primitive. Hence, $S$ has only finitely many minimal primitive ideals. \end{proof} \begin{proposition}\label{conmap} Suppose $\phi\colon S\to T$ is a semigroup homomorphism and define the map $\phi_*\colon \mathrm{Prim}(T)\to \mathrm{Prim}(S)$ by $\phi_*(\mathfrak{p})=\phi\inv(\mathfrak{p})$, where $\mathfrak{p}\in\mathrm{Prim}(T).$ Then $\phi_*$ is a continuous map. \end{proposition} \begin{proof} To show $\phi_*$ is continuous, we first show that $\phi\inv(\mathfrak{p})\in \mathrm{Prim}(S),$ whenever $\mathfrak{p}\in \mathrm{Prim}(T)$. Note that $\phi\inv(\mathfrak{p})$ is an ideal of $S$ and a union of $\mathrm{ker}\phi$-classes (see \cite[Proposition 3.4]{G01}). Suppose $\mathfrak{p}=\mathrm{Ann}_{T}(M)$ for some simple $T$-module $M$. Then by the ``change of rings'' property of modules, $\phi\inv(\mathfrak{p})$ is the annihilator of the simple $S$-module $M$ obtained by defining $sm:=\phi(s)m$. Therefore $\phi\inv(\mathfrak{p})\in \mathrm{Prim}(S)$.
Now consider a closed subset $\mathcal{C}(\mathfrak{a})$ of $\mathrm{Prim}(S).$ Then for any $\mathfrak{q}\in \mathrm{Prim}(T),$ we have: \begin{align*} \mathfrak{q}\in \phi_*\inv (\mathcal{C}(\mathfrak{a}))\Leftrightarrow \phi\inv(\mathfrak{q})\in \mathcal{C}(\mathfrak{a})\Leftrightarrow \mathfrak{a}\subseteq \phi\inv(\mathfrak{q})\Leftrightarrow \mathfrak{q}\in\mathcal{C}(\langle \phi(\mathfrak{a})\rangle), \end{align*} and this proves the desired continuity of $\phi_*$. \end{proof} \end{document}
\begin{document} \title{Universal Transformability between Hamiltonians and Hidden Adiabaticity} \title{Hamiltonian Transformability, Fast Adiabatic Dynamics and Hidden Adiabaticity} \author{Lian-Ao Wu$^{1,2}$\footnote{Author to whom any correspondence should be addressed. Email address: [email protected] }, Dvira Segal $^{3,4}$ } \affiliation{$^{1}$Department of Theoretical Physics and History of Science, The Basque Country University (EHU/UPV), PO Box 644, 48080 Bilbao, Spain \\ $^{2}$Ikerbasque, Basque Foundation for Science, 48011 Bilbao\\ $^{3}$Chemical Physics Theory Group, Department of Chemistry, and Centre for Quantum Information and Quantum Control, University of Toronto, 80 Saint George St., Toronto, Ontario, Canada M5S 3H6 \\ $^{4}$Department of Physics, University of Toronto, Toronto, Ontario, Canada M5S 1A7} \date{\today} \begin{abstract} We prove the existence of a unitary transformation that enables two \red{arbitrarily given} Hamiltonians in the same Hilbert space to be transformed into one another. The result is straightforward yet, for example, it lays the foundation for implementing or mimicking dynamics with the most controllable Hamiltonian. As a promising application, this existence theorem allows for a rapidly evolving realization of adiabatic quantum computation by transforming a Hamiltonian where dynamics is in the adiabatic regime into a rapidly evolving one. \red{We illustrate the theorem with examples.} \end{abstract} \pacs{03.65.-w, 42.50.Lc, 42.50.Dv} \title{Universal Transformability between Hamiltonians and Hidden Adiabaticity} {\em Introduction.--} Understanding quantum dynamics and control is essential to modern quantum technologies such as adiabatic quantum computation~\cite{AQC,Childs}. A quantum dynamical process is driven by its corresponding Hamiltonian, where the Hamiltonian represents a physical realization.
For instance, spin dynamics can be driven by the Zeeman Hamiltonian, which is physically realized by applying magnetic fields~\cite{Messiah}. Different realized dynamics, for example fast vs. adiabatically controlled passage~\cite{Rice1,Rice2, Berry09}, may seem remote from one another. However, in this paper we show that they \red{can} well be intimately related. For example, a physical realization of adiabatic quantum computation (AQC) suffers from its slowness, with the resultant destructive effects of decoherence and the occurrence of quantum phase transitions during dynamics~\cite{Adolfo,Torrontegui13,Jing13,JW2}. Here, we \red{prove rigorously} that different dynamics, described by two Hamiltonians defined on the same Hilbert space, can always be transformed into one another. As a consequence, the physical outcome of AQC can be made equivalent to the outcome of a dynamical process that can be extremely fast. This relationship between different dynamics is based on a straightforward but profound proposition described below, implying, for example, that an adiabatic process may be physically realized with a fast Hamiltonian. Similarly, it implies a hidden adiabaticity amongst rapid dynamics. {\em The Transformability proposition.--} Given any two Hamiltonians, $\hat H$ and $\hat h$ in the same Hilbert space, which can be time-independent or time-dependent, the corresponding Schr\"odinger equations are \begin{equation}\label{e1} i\partial_t\hat U=\hat H(t)\hat U, \end{equation} and \begin{equation}\label{e2} i\partial_t\hat u=\hat h(t)\hat u, \end{equation} where $\hat U$ and $\hat u$ are propagators of $\hat H(t)$ and $\hat h(t)$, respectively. Proposition: {\em Two Hamiltonians $\hat H$ and $\hat h$ can always be transformed into one another.
} Mathematically, this claim can be expressed as: For given $\hat H$ and $\hat h$, there exists at least one unitary operator $\hat S$ such that \begin{equation}\label{e3} \hat h=\hat S^{\dagger} \hat H \hat S-i\hat S^{\dagger}\dot{\hat S}, \end{equation} and \begin{equation}\label{e4} \hat H=\hat S \hat h \hat S^{\dagger}-i\hat S\dot{\hat S}^{\dagger}, \end{equation} where the overdot indicates a time derivative. Proof: The operator $\hat S$ enables the transformation $\hat U=\hat S\hat u$. Substituting it into the Schr\"odinger equation~(\ref{e1}), we obtain Eq.~(\ref{e2}) with the {\em effective} Hamiltonian $\hat h=\hat S^{\dagger} \hat H \hat S-i\hat S^{\dagger}\dot{\hat S}$. Similarly, if we begin with the Schr\"odinger equation (\ref{e2}) we transform it to Eq. (\ref{e1}) by identifying its Hamiltonian with $\hat H=\hat S \hat h \hat S^{\dagger}-i\hat S\dot{\hat S}^{\dagger}$. Because the solutions $\hat u$ and $\hat U$ of the Schr\"odinger equations~(\ref{e1}) and (\ref{e2}) always exist, so does the product $\hat U\hat u^{\dagger}$. \red{By setting} $\hat S=\hat U\hat u^{\dagger}$, we can reproduce the Hamiltonians~(\ref{e3}) and (\ref{e4}) and therefore formally prove the \red{universal} existence of the \red{unitary} transformation $\hat S$. \red{In other words, there is {\em always} a unitary transformation that enables two arbitrarily given Hamiltonians in the same Hilbert space to be transformed into one another.} We term this property transformability, and the two Hamiltonians $\hat H$ and $\hat h$ are {\em transformable}. The special case of the proposition with $\hat h$ being time-independent was proven a quarter-century ago in Ref.~\cite{PLA93}. {\em Rapid Adiabatic Quantum Computation.--} Adiabatic quantum computation is one of the most promising candidates to realize quantum computing~\cite{Lidar}.
The approach is based on the adiabatic theorem: The solution to a computational problem of interest is encoded in the ground state of a potentially complicated Hamiltonian. To approach the solution, one prepares a system with a simpler Hamiltonian and initializes it at its ground state. By evolving the Hamiltonian sufficiently slowly towards the desired (complex) one, the adiabatic theorem guarantees that the system follows the instantaneous ground state, finally realizing the target ground state. Evidently, the slowness of AQC could be the main impediment to its utility for quantum algorithms. The universal transformability property suggests that a slow AQC process $\hat u$ can be mapped onto a fast quantum process $\hat U$---that is more controllable, and suffers reduced decoherence during processing. Note that as a convention, we use lower (upper) cases to denote the slow (fast) dynamics throughout the paper. Consequently, AQC can be physically realized by a fast process. The eigenstate $|E(T)\rangle$ of the problem Hamiltonian at time $T$ is given by implementing the adiabatic process \begin{equation} |E(T)\rangle \sim \hat u|E(0)\rangle=\hat S^{\dagger} (T) \hat U (T) |E(0)\rangle. \label{eq:adia} \end{equation} The second equality suggests physically implementing $|E(T)\rangle$ by the following circuit: the first gate $\hat U (T)$ is governed by $\hat H$. The transformation $\hat S^{\dagger} (T)$ acts on the output. {\em Adiabatic algorithms and their fast counterparts.--} Consider now the proposition in the context of a realistic AQC, i.e. an ensemble of qubits described by a family of slowly-varying Hamiltonians, \begin{equation}\label{e10} \hat h=\Gamma(t) \sum_{i}\hat X_{i}+\hat h_P(\{\hat Z_i\}). \end{equation} Here, $\Gamma(t)$ is large at $t=0$, and slowly evolves towards zero at $t=T$. The Hamiltonian $\hat h_P(\{\hat Z_i\})$ contains the $\hat Z_i$ component of the $i$-th qubit.
The solution of a {\em hard} problem is encoded within $\hat h_P$. For example, Grover's search problem~\cite{Lidar} is realized with \begin{equation}\label{e11} \hat h_P({\hat Z_i})=\hat I-|B\rangle \langle B|, \end{equation} where $|B\rangle$ is the {\em marked} state, and $|B\rangle \langle B|$ is a function of $\hat Z_i$. In the D-Wave system the Hamiltonian (\ref{e10}) is given by \begin{equation}\label{e12} \hat h_P({\hat Z_i})=\sum_{i} h_{i}\hat Z_{i}+\sum_{ij} J_{ij}\hat Z_i \hat Z_j, \end{equation} with the parameters $h_i$ and $J_{ij}$. Applying a fast magnetic field, we can enable the corresponding fast-varying Hamiltonian \begin{equation}\label{e13} \hat H=\gamma(t)\sum_{i}\hat X_{i}+\hat h_P(\{e^{-i\phi_{i}(t)\hat X_{i}}\hat Z_{i}e^{i\phi_{i}(t)\hat X_{i}}\}), \end{equation} where $\gamma(t)=\Gamma(t)+\dot{\phi}$ is a fast-varying function. Here, the transformation matrix is given by $\hat S(t)=\prod_i e^{-i\phi_i(t)\hat X_i }$. Thus, instead of evolving the system slowly under $\hat h$, the two gates $\hat U(t)$ and $\hat S(t)$ should be realized, Eq. (\ref{eq:adia}), allowing for a fast implementation. {\em Built-in adiabaticity.--} The discussion above focuses on implementing fast dynamics to achieve the adiabatic result, i.e. to replace slow dynamics by fast dynamics. Here we show that the opposite is also the case. That is, fast dynamics can be shown to have a ``hidden adiabaticity''. As an example, consider a qubit under external fields, with the NMR-type Hamiltonian \begin{equation}\label{e5} \hat H=\frac{\omega_0(t)}{2}\hat Z+g\left[\hat X\cos\phi(t)+\hat Y\sin\phi(t)\right]. \end{equation} Here, $\hat X$, $\hat Y$ and $\hat Z$ are the Pauli operators, $\omega_0(t)$ and $\phi(t)$ potentially depend on time and are allowed to be fast-varying.
A unitary transformation $\hat S=\exp\left[i\frac{\theta(t)-\phi(t)}{2}\hat Z\right]$ brings $\hat H$ into $\hat h$, \begin{equation}\label{e6} \hat h=\frac{\omega_0(t)+\dot{\theta}-\dot{\phi}}{2}\hat Z+g\left[\hat X\cos\theta(t)+\hat Y\sin\theta(t)\right]. \end{equation} We assume that $g$ is a constant and that the newly introduced time-dependent parameter $\theta(t)$, as well as $\frac{\omega_0(t)+\dot{\theta}-\dot{\phi}}{2}$ are controlled such that they vary slowly. The transformation $\hat S$ thus brings the system into the adiabatic domain. In other words, a system driven by fast-varying $\hat H$ has built-in hidden adiabaticity characterized by $\hat h$. In the particular case where $\phi(t)=\omega t$ and $\omega_0$, $\omega$ are constants, we can easily obtain the solution, that is, the time evolution operator corresponding to $\hat H$, \begin{equation}\label{e7} \hat U=\exp\left(-i\frac{\omega \hat Z}{2} t\right)\exp\left(-i\frac{2g\hat X-\Omega \hat Z}{2}t\right), \end{equation} where $\Omega=\omega-\omega_0$. We can control parameters and realize the function $\theta(t)=\Omega t$, resulting in \begin{equation}\label{e8} \hat u=\exp\left(-i\frac{\Omega \hat Z}{2}t\right)\exp\left(-i\frac{2g\hat X-\Omega \hat Z}{2}t\right). \end{equation} The instantaneous eigenstates of $\hat h=\exp\left(-i\frac{\Omega \hat Z}{2}t\right)\hat X\exp\left(i\frac{\Omega \hat Z}{2}t\right)$ are \begin{equation}\label{e9} |E_{\pm}(t)\rangle=\exp\left(-i\frac{\Omega \hat Z}{2} t \right)|\pm\rangle. \end{equation} These states are proportional to the wave function $\hat u|\pm\rangle$ ($\hat X|\pm\rangle=\pm|\pm\rangle$) as stated by the adiabatic theorem for the adiabatic regime $g\gg \Omega$.
Therefore, in order to physically realize $|E_{\pm}(T)\rangle$, say at $T=\pi/2\Omega$ when $\hat h(T)=g\hat Y$, one needs to implement two gates: $\hat U(T)$, then $\hat S^\dagger(T)= \exp(i\frac{\pi \omega_0}{4\Omega} \hat Z)$. We now come to a simple but nontrivial corollary following immediately from the Transformability proposition. {\em The transformability corollary at different times.--} Let $\hat H$ (the fast Hamiltonian) be a function of the normalized or scaling time $\tau=t/T$, where $T$ is a characteristic time of the dynamical system. Eq.~(\ref{e1}) can then be rewritten as \begin{equation}\label{e14} i\partial_\tau\hat U(\tau)=T\hat H(\tau)\hat U(\tau). \end{equation} Likewise, \begin{equation}\label{e15} i\partial_\tau\hat u(\tau)=T'\hat h(\tau)\hat u(\tau), \end{equation} where $\tau=t'/T'$ and the latter describes a slower process so that $T<T'$; here $t'$ ($T'$) is the real time (characteristic time) of the Schr\"odinger equation (\ref{e15}). The scaling times of the two equations may be identical or different. Here we set the same scaling time $\tau$ with the constraint $t'/T'=t/T$. As \red{proved} in the transformability proposition, mathematically there is at least one unitary operator $\hat S$ such that \begin{equation}\label{e16} T'\hat h=\hat S^{\dagger} T\hat H \hat S-i\hat S^{\dagger}\partial_\tau{\hat S}, \end{equation} and \begin{equation}\label{e17} T\hat H=\hat S T'\hat h \hat S^{\dagger}-i\hat S\partial_\tau{\hat S}^{\dagger}, \end{equation} for a given scaling time $\tau$. The simplest non-trivial example is $\hat S=1$, such that $\hat H=\frac {T'}{T}\hat h$ and $\hat U(\tau)=\hat u(\tau)$. The latter equality, rewritten as $\hat U(t)=\hat u(\frac{T'}{T}t)$ with $T<T'$, is an exact proof that the runtime of an adiabatic quantum process can be reduced $\frac {T'}{T}$ times -- an exact trade-off between energy and time. 
\red{Specifically, Eq.~(\ref{e7}) can be rewritten as \begin{equation}\label{e18} \hat U(\tau)=\hat u(\tau)=\exp(-i\pi \hat Z\tau)\exp\left(-i(T g\hat X-\pi \hat Z)\tau\right), \end{equation}} \red{where $\tau=t/T=t'/T'$, $gT=g'T'$ and we have set $\omega_0=0$. $\hat U(\tau)$ ($\hat u(\tau)$) may denote a fast (adiabatic) evolution if $T'$ is in the adiabatic regime while $T$ is not.} This result suggests a strategy for experimentally implementing an expedited adiabatic process: simply enhancing the strength of the driving Hamiltonian to its strongest possible value. \red{In general, the universal existence of $\hat S$ and the equality \begin{equation}\label{e199} \hat u(\frac {t'}{T'})=\hat S^{\dagger}(\frac {t}{T}) \hat U(\frac {t}{T}) \end{equation} manifest that an adiabatic quantum algorithm can always be mimicked by at most two fast gates, where $T' \gg T$ is in the adiabatic regime.} {\em Conclusion.--}Two \red{arbitrarily given} Hamiltonians within the same Hilbert space can always be transformed to each other via a unitary transformation. This seemingly \red{simple but rigorous theorem} is powerful: It allows one to implement a slowly varying evolution within a fast protocol, which is less susceptible to errors. We exemplified this result on a qubit system and on problems in the context of quantum adiabatic computing. The transformability of open quantum system Hamiltonians is left for future work. \acknowledgments L.A. Wu acknowledges grant support from the Basque Government (Grant No. IT986-16), the Spanish MICINN (Grant No. FIS2015-67161-P). D. S. acknowledges the Canada Research Chairs Program. We thank Professor P. Brumer for very helpful discussions. \end{document}
\begin{document} \title[Extender sets and measures of maximal entropy for subshifts] {Extender sets and measures of maximal entropy for subshifts} \date{} \author{Felipe García-Ramos} \address{Felipe García-Ramos\\ CONACyT \& Physics Institute of the Universidad Aut\'{o}noma de San Luis Potos\'{\i}\\ Av. Manuel Nava \#6, Zona Universitaria, C.P. 78290 \\ San Luis Potosí, S.L.P.\\ Mexico} \email{[email protected]} \author{Ronnie Pavlov} \address{Ronnie Pavlov\\ Department of Mathematics\\ University of Denver \\ 2390 S. York St. \\ Denver, CO 80208 \\ USA} \email{[email protected]} \thanks{The second author gratefully acknowledges the support of NSF grant DMS-1500685.} \keywords{Symbolic dynamics, measure of maximal entropy, extender set, synchronized subshift} \subjclass[2010]{37B10, 37B40, 37D35} \begin{abstract} For countable amenable finitely generated torsion-free $\mathbb{G}$, we prove inequalities relating $\mu(v)$ and $\mu(w)$ for any measure of maximal entropy $\mu$ on a $G$-subshift and any words $v, w$ where the extender set of $v$ is contained in the extender set of $w$. Our main results are two generalizations of the main result of \cite{M}; the first applies to all such $v,w$ when $\mathbb{G} = \mathbb{Z}$, and the second to $v,w$ with the same shape for any $\mathbb{G}$. As a consequence of our results we give new and simpler proofs of several facts about synchronizing subshifts (including the main result from \cite{Th}) and we answer a question of Climenhaga. \end{abstract} \maketitle \section{Introduction} \label{intro} In this paper, we prove several results about measures of maximal entropy on symbolic dynamical systems (subshifts). Measures of maximal entropy are natural measures, defined via the classical Kolmogorov-Sinai entropy, which also connect to problems in statistical physics, such as existence of phase transitions. 
Our dynamical systems are subshifts, which consist of a compact $X \subseteq \mathcal{A}^{\mathbb{G}}$ (for some finite alphabet $\mathcal{A}$ and a countable amenable finitely generated torsion-free group $\mathbb{G}$) and dynamics given by the $\mathbb{G}$-action of translation/shift maps $\{\sigma_{g}\}_{g \in\mathbb{G}}$ (under which $X$ must be invariant). Subshifts are useful both as discrete models for the behavior of dynamical systems on more general spaces, and as an interesting class of dynamical systems in their own right, with applications in physics and information theory. Our main results show that when a word $v$ (i.e. an element of $\mathcal{A}^F$ for some finite $F \subset \mathbb{G}$) is replaceable by another word $w$ in $X$ (meaning that $\forall x \in X$, when any occurrence of $v$ is replaced by $w$, the resulting point is still in $X$), there is a simple inequality relating $\mu(v)$ and $\mu(w)$ for every measure of maximal entropy $\mu$. (As usual, the measure of a finite word is understood to mean the measure of its cylinder set; see Section~\ref{defs} for details.) A formal statement of our hypothesis uses extender sets (\cite{KM}, \cite{OP}); the condition ``$v$ is replaceable by $w$'' is equivalent to the containment $E_{X}(v) \subseteq E_{X}(w)$, where $E_X(u)$ denotes the extender set of a word $u$. For $\mathbb{Z}$-subshifts specifically, it is possible to talk about replacing $v$ by $w$ (and thereby the containment $E_X(v) \subseteq E_X(w)$) even if their lengths $|v|$ and $|w|$ are different, and our first results treat this case. \begin{Htheorem} Let $X$ be a $\mathbb{Z}$-subshift with positive topological entropy, $\mu$ a measure of maximal entropy of $X$ , and $w,v\in L(X)$. If $E_{X}(v)\subseteq E_{X}(w)$, then \begin{equation*} \mu(v)\leq\mu(w)e^{h_{top}(X)(|w|-|v|)}. 
\end{equation*} \end{Htheorem} \begin{Mcorollary} Let $X$ be a $\mathbb{Z}$-subshift with positive topological entropy, $\mu$ a measure of maximal entropy of $X$ , and $w,v\in L(X).$ If $E_{X}(v)=E_{X}(w)$, then for every measure of maximal entropy of $X$, \begin{equation*} \mu(v)=\mu(w)e^{h_{top}(X)(|w|-|v|)}. \end{equation*} \end{Mcorollary} In the class of synchronized subshifts (see Section 3.1 for the definition), $E_{X}(v)=E_{X}(w)$ holds for many pairs of words of different lengths, in which case Corollary~\ref{maincor} gives significant restrictions on the measures of maximal entropy. In Section~\ref{apps}, we use Corollary~\ref{maincor} to obtain results about synchronized subshifts. These applications include a new proof of uniqueness of measures of maximal entropy under the hypothesis of entropy minimality (see Theorem~\ref{synchunique}), which was previously shown in \cite{Th} via the much more difficult machinery of countable-state Markov shifts, and the following result which verifies a conjecture of Climenhaga (\cite{Cl}). (Here, $X_S$ represents a so-called $S$-gap subshift; see Definition~\ref{Sgap}.) \begin{Lcorollary}Let $S\subseteq\mathbb{N}$ satisfy $\gcd(S+1)=1$, let $\mu$ be the unique MME on $X_{S}$, and let $\lambda = e^{h_{top}(X_{S})}$. Then $\displaystyle\lim_{n\rightarrow\infty}\frac{\left\vert L_{n}(X_{S})\right\vert }{\lambda^n}$ exists and is equal to $\displaystyle \frac{\mu(1)\lambda}{(\lambda-1)^{2}}$ when $S$ is infinite and $\displaystyle \frac{\mu(1)\lambda (1 - \lambda^{-(\max S) - 1})^2}{(\lambda-1)^{2}}$ when $S$ is finite. \end{Lcorollary} In fact, we prove that this limit exists for all synchronized subshifts where the unique measure of maximal entropy is mixing. Our second main result applies to countable amenable finitely generated torsion-free $\mathbb{G}$, but only to $v,w$ which have the same shape. 
This is unavoidable in a sense, since in general, for $F \neq F'$, there will be no natural way to compare the configurations with shapes $F^c$ and $F'^c$ in extender sets of words $v \in A^F$ and $w \in A^{F'}$ respectively. \begin{Gtheorem} Let $X$ be a $\mathbb{G-}$subshift, $\mu$ a measure of maximal entropy of $X$ , $F \Subset \mathbb{G}$, and $w,v\in\mathcal{A}^{F}$. If $E(v)\subseteq E(w)$ then \begin{equation*} \mu(v)\leq\mu(w). \end{equation*} \end{Gtheorem} As a direct consequence of this theorem we recover the following result due to Meyerovitch. \begin{theorem}[Theorem 3.1, \textrm{\protect\cite{M}}] \label{tomthm} If $X$ is a $\mathbb{Z}^{d}$-subshift and $v,w\in\mathcal{A} ^{F}$ satisfy $E_{X}(v)=E_{X}(w)$, then for every measure of maximal entropy $\mu$ on $X$, $\mu(v)=\mu(w)$. \end{theorem} \begin{remark} In fact the theorem from \cite{M} is more general; it treats equilibrium states for a class of potentials $\phi$ with a property called $d$-summable variation, and the statement here for measures of maximal entropy corresponds to the $\phi=0$ case only. \end{remark} Due to our weaker hypothesis, $E_X(v) \subseteq E_X(w)$, our proof techniques are different from those used in \cite{M}. In particular, the case of different length $v,w$ treated in Theorem~\ref{hardcase} requires some subtle arguments about the ways in which $v,w$ can overlap themselves and each other. Much as Corollary~\ref{maincor} was applicable to the class of synchronized subshifts, Theorem~\ref{Gtheorem} has new natural applications to the class of hereditary subshifts (introduced in \cite{KL1}), where there exist many pairs of words satisfying $E_{X}(v)\subsetneq E_{X}(w)$; see Section~\ref{hered} for details. 
Section~\ref{defs} contains definitions and results needed throughout our proofs, Section~\ref{zsec} contains our results for $\mathbb{Z}$-subshifts (including various applications in Section~\ref{apps}), and Section~\ref {gsec} contains our results for $\mathbb{G}$-subshifts. \section*{acknowledgments} We would like to thank the anonymous referee for their useful comments and suggestions. \section{General definitions and preliminaries} \label{defs} We will use $\mathbb{G}$ to refer to a countable discrete group. We write $F \Subset \mathbb{G}$ to mean that $F$ is a finite subset of $\mathbb{G}$, and unless otherwise stated, $F$ always refers to such an object. A sequence $\{F_{n}\}_{n\in\mathbb{N}}$ with $F_{n} \Subset \mathbb{G}$ is said to be \textbf{Følner} if for every $K\Subset\mathbb{G}$, we have that $ |(K \cdot F_{n}) \Delta F_{n}|/|F_{n}|\rightarrow 0$. \ We say that $\mathbb{G}$ is \textbf{amenable} if it admits a Følner sequence. In particular, $\mathbb{Z }$ is an amenable group, since any sequence $\{F_{n}\} = [a_n, b_n] \cap \mathbb{Z}$ with $b_n - a_n \rightarrow \infty$ is F\o lner. Let $\mathcal{A}$ be any finite set (usually known as the alphabet). We call $\mathcal{A}^{\mathbb{G}}$ the \textbf{full $\mathcal{A}$-shift on $\mathbb{G }$}, and endow it with the product topology (using the discrete topology on $ \mathcal{A}$). For $x\in\mathcal{A}^{\mathbb{G}},$ we use $x_{i}$ to represent the $i$th coordinate of $x$, and $x_{F}$ to represent the restriction of $x$ to any $F\Subset\mathbb{G}$. For any $g \in\mathbb{G}$, we use $\sigma_{g}$ to denote the left translation by $g$ on $\mathcal{A}^{\mathbb{G}}$, also called the \textbf{ shift by $g$}; note that each $\sigma_{g}$ is an automorphism. We say $ X \subseteq \mathcal{A}^{\mathbb{G}}$ is a $\mathbb{G}$-\textbf{subshift} if it is closed and $\sigma_{g}(X) = X$ for all $g\in\mathbb{G}$; when $\mathbb{G=Z }$ we simply call it a subshift. 
For $F \Subset\mathbb{G}$, we call an element of $\mathcal{A}^{F}$ a \textbf{ word with shape $F$}. For $w$ a word with shape $F$ and $x$ either a point of $\mathcal{A}^{ \mathbb{G}}$ or a word with shape $F^{\prime}\supset F$, we say that $w$ is a \textbf{subword of $x$} if $x_{g + F} = w$ for some $g \in\mathbb{G}$. For any $F$, the \textbf{$F$-language of $X$} is the set $L_{F}(X)\subseteq \mathcal{A}^{F}=\{x_{F}\ :\ x\in X\}$ of words with shape $F$ that appear as subwords of points of $X.$ When $\mathbb{G} = \mathbb{Z}$, we use $L_{n}(X)$ to refer to $L_{\{0, \ldots, n-1\}}(X)$ for $n \in\mathbb{N}$. We define \begin{align*} L(X) & :=\bigcup_{F\Subset\mathbb{G}}L_{F}(X)\text{ if }\mathbb{G\neq Z} \text{ and} \\ L(X) & :=\bigcup_{n \in\mathbb{N}}L_{n}(X)\text{ if }\mathbb{G=Z}\text{.} \end{align*} For any $\mathbb{G}$-subshift $X$ and $w\in L_{F}(X)$, we define the \textbf{ cylinder set of $w$} as \begin{equation*} \left[ w\right] :=\left\{ x\in X:x_{F}=w\right\} \text{.} \end{equation*} Whenever we refer to an interval in $\mathbb{Z}$, it means the intersection of that interval with $\mathbb{Z}$. So, for instance, if $x\in\mathcal{A}^{ \mathbb{Z}}$ and $i < j$, $x_{\left[ i,j\right] }$ represents the subword of $x$ that starts in position $i$ and ends in position $j$. Unless otherwise stated, a word $w\in\mathcal{A}^{n}$ is taken to have shape $[0, n)$. Every word $w \in L(\mathcal{A}^{\mathbb{Z}})$ is in some $\mathcal{A}^{n}$ by definition; we refer to this $n$ as the \textbf{length} of $w$ and denote it by $|w|$. For any amenable $\mathbb{G}$ with Følner sequence $\left\{ F_{n}\right\} _{n\in\mathbb{N}}$ and any $\mathbb{G}$-subshift $X$, we define the \textbf{ topological entropy of $X$} as \begin{equation*} h_{top}(X)=\lim_{n\rightarrow\infty}\frac{1}{\left\vert F_{n} \right\vert }\log\left\vert L_{F_{n}}(X)\right\vert \end{equation*} (this definition is in fact independent of the Følner sequence used.) 
For any $w\in L(X)$, we define the \textbf{extender set of $w$} as \begin{equation*} E_{X}(w):=\{x|_{F^{c}}\ :\ x\in\lbrack w]\}. \end{equation*} \begin{example} For any $\mathbb{G}$, if $X$ is the full shift on two symbols, $\left\{ 0,1\right\}^\mathbb{G}$, then for any $F$, all words in $\{0,1\}^F$ have the same extender set, namely $\{0,1\}^{F^c}$. \end{example} \begin{example} Take $\mathbb{G} = \mathbb{Z}^2$ and $X$ the hard-square shift on $\{0,1\}$ in which adjacent $1$s are forbidden horizontally and vertically. Then if we take $F = \{(0,0)\}$, we see that $E(0)$ is the set of all configurations on $\mathbb{Z}^2 \setminus F$ which are legal, i.e. which contain no adjacent $1$s. Similarly, $E(1)$ is the set of all legal configurations on $\mathbb{Z}^2 \setminus F$ which also contain $0$s at $(0, \pm 1)$ and $(\pm 1, 0)$. In particular, we note that here $E(1) \subsetneq E(0)$. \end{example} In the specific case $\mathbb{G}=\mathbb{Z}$ and $w\in L_{n}(X)$, we may identify $E_{X}(w)$ with the set of sequences which are concatenations of the left side and the right side, i.e. $\{(x_{(-\infty,0)}x_{[n,\infty)})\ :\ x\in\lbrack w]\}$, and in this way can relate extender sets even for $v,w$ with different lengths. All extender sets in $\mathbb{Z}$ will be interpreted in this way. \begin{example} If $X$ is the golden mean $\mathbb{Z-}$subshift on $\{0,1\}$ where adjacent $1$s are prohibited, then $E(000)$ is the set of all legal configurations on $\mathbb{Z} \setminus \{0, 1, 2\}$, which is identified with the set of all $\{0,1\}$ sequences $x$ which have no adjacent $1$s, with the exception that $x_{0} = x_{1} = 1$ is allowed. This is because $000$ may be preceded by a one-sided sequence ending with $1$ and followed by a one-sided sequence beginning with $1$, and after the identification with $\{0,1\}^{\mathbb{Z}}$, those $1$s could become adjacent. 
Similarly, $E(01)$ is identified with the set of all $x$ on $\mathbb{Z}$ which have no adjacent $1$s and satisfy $x_{0} = 0$, and $E(1)$ is identified with the set of all $x$ on $\mathbb{Z}$ which have no adjacent $1$s and satisfy $x_{0} = x_{1} = 0$. Therefore, even though they have different lengths, we can say here that $E(1) \subsetneq E(01) \subsetneq E(000)=E(0)$. \end{example} The next few definitions concern measures. Every measure in this work is assumed to be a Borel probability measure $\mu$ on a $\mathbb{G-}$subshift $X $ which is invariant under all shifts $\sigma_{g}$. By a generalization of the Bogolyubov-Krylov theorem, every $\mathbb{G-}$subshift $X$ has at least one such measure. For any such $\mu$ and any $w\in L(X)$, we will use $\mu(w) $ to denote $\mu(\left[ w\right] ).$ For any Følner sequence $\{F_{n}\}$, we define the \textbf{entropy} of any such $\mu$ as \begin{equation*} h_{\mu}(X):=\lim_{n\rightarrow\infty}\frac{1}{\left\vert F_{n}\right\vert } \sum\nolimits_{w\in\mathcal{A}^{F_{n}}}-\mu(w)\log\mu(w). \end{equation*} Again, this limit does not depend on the choice of Følner sequence (see \cite {KL2} for proofs of this property and of other basic properties of entropy of amenable group actions). It is always the case that $h_{\mu}(X)\leq h(X)$, and so a measure $\mu$ is called a \textbf{measure of maximal entropy} (or \textbf{MME}) if $ h_{\mu}(X)=h_{top}(X).$ For amenable $\mathbb{G}$, every $\mathbb{G}-$ subshift has at least one measure of maximal entropy \cite{Mi}. We briefly summarize some classical results from ergodic theory. A measure $ \mu$ is \textbf{ergodic} if every set which is invariant under all $\sigma_{g}$ has measure $0$ or $1$. In fact, every measure $\mu$ can be written as a generalized convex combination (really an integral) of ergodic measures; this is known as the \textbf{ergodic decomposition} (e.g. see Section 8.7 of \cite{Gl}). 
The entropy map $ \mu\mapsto h_{\mu}$ is linear and so the ergodic decomposition extends to measures of maximal entropy as well; every MME can be written as a generalized convex combination of ergodic MMEs. \begin{theorem}[Pointwise ergodic theorem \protect\cite{Li}] \label{ergthm} For any ergodic measure $\mu$ on a $\mathbb{G}-$subshift $X$, there exists a Følner sequence $\left\{ F_{n}\right\} $ such that for every $ f\in L^{1}(\mu)$, \begin{equation*} \mu\left( \left\{ x:\lim_{n\rightarrow\infty}\frac{1}{\left\vert F_{n}\right\vert }\sum_{g\in F_{n}}f(\sigma_{g}x)=\int f\ d\mu\right\} \right) =1. \end{equation*} \end{theorem} \begin{theorem}[Shannon-Macmillan-Breiman theorem for amenable groups \protect\cite{We}] \label{SMBthm} For any ergodic measure $\mu$ on a $\mathbb{G}-$subshift $X$, there exists a Følner sequence $\left\{ F_{n}\right\} $ such that \begin{equation*} \mu\left( \left\{ x:\lim_{n\rightarrow\infty}-\frac{1}{\left\vert F_{n}\right\vert } \log \mu(x_{F_{n}})=h_{\mu}(X)\right\} \right) =1. \end{equation*} \end{theorem} The classical pointwise ergodic and Shannon-Macmillan-Breiman theorems were originally stated for $\mathbb{G=Z}$ and the Følner sequence $[0,n]$. We only need Theorem~\ref{SMBthm} for the following corollary (when $\mathbb{G=Z}$ this is essentially what is known as Katok's entropy formula; see \cite{Ka}). \begin{corollary} \label{SMBcor} Let $\mu$ be an ergodic measure of maximal entropy on a $ \mathbb{G}$-subshift $X$. There exists a Følner sequence $\{F_{n}\}$ such that for every $S_{n} \subseteq L_{F_{n}}(X)$ such that $\mu(S_{n}) \rightarrow1$, then \begin{equation*} \lim_{n \rightarrow\infty} \frac{1}{|F_{n}|} \log|S_{n}| = h_{top}(X). \end{equation*} \end{corollary} \begin{proof} Take $X$, $\mu$ as in the theorem, $\{F_{n}\}$ a Følner sequence that satisfies the Shannon-Macmillan-Breiman theorem, and $S_{n}$ as in the theorem. Fix any $ \epsilon> 0$. 
By the definition of topological entropy, \begin{equation*} \limsup_{n \rightarrow\infty} \frac{1}{|F_{n}|} \log|S_{n}| \leq\lim_{n \rightarrow\infty} \frac{1}{|F_{n}|} \log|L_{F_{n}}(X)| = h_{top}(X). \end{equation*} For every $n$, define \begin{equation*} T_{n} = \{w \in\mathcal{A}^{F_{n}} \ : \ \mu(w) < e^{-|F_{n}|(h_{top}(X) - \epsilon)} \}. \end{equation*} By the Shannon-Macmillan-Breiman theorem, $\mu\left( \bigcup_{N} \bigcap_{n = N}^{\infty} T_{n} \right) = 1$, and so $\mu(T_{n}) \rightarrow1$. Therefore, $\mu(S_{n} \cap T_{n}) \rightarrow1$, and by definition of $T_{n}$ , \begin{equation*} |S_{n} \cap T_{n}| \geq\mu(S_{n} \cap T_{n}) e^{|F_{n}| (h_{top}(X) - \epsilon)}. \end{equation*} Therefore, for sufficiently large $n$, $|S_{n}| \geq|S_{n} \cap T_{n}| \geq0.5 e^{|F_{n}| (h_{top}(X) - \epsilon)}$. Since $\epsilon> 0$ was arbitrary, the proof is complete. \end{proof} Finally, several of our main arguments rely on the following elementary combinatorial lemma, whose proof we leave to the reader. \begin{lemma} \label{counting} If $S$ is a finite set, $\{A_{s}\}$ is a collection of finite sets, $m = \min\{|A_{s}|\}$, and $M = \max_{a \in\bigcup A_{s}} |\{s \ | \ a \in A_{s}\}|$, then \begin{equation*} \left| \bigcup_{s \in S} A_{s} \right| \geq|S| \frac{m}{M}. \end{equation*} \end{lemma} \section{Results on $\mathbb{Z-}$Subshifts} \label{zsec} In this section we present the results for $\mathbb{G}=\mathbb{Z}$, and must begin with some standard definitions about $\mathbb{Z}-$subshifts. For words $v \in\mathcal{A}^{m}$ and $w \in\mathcal{A}^{n}$ with $m \leq n$, we say that $v$ is a \textbf{prefix} of $w$ if $w_{[0,m)} = v$, and $v$ is a \textbf{suffix} of $w$ if $w_{[n-m, n)} = v$. \subsection{Main result} We now need some technical definitions about replacing one or more occurrences of a word $v$ by a word $w$ inside a larger word $u$, which are key to most of our arguments in this section. 
First, for any $v\in L( \mathcal{A}^{\mathbb{Z}}),$ we define the function $O_{v} :L(\mathcal{A}^{ \mathbb{Z}}) \rightarrow \mathcal{P}(\mathbb{N})$ which sends any word $u$ to the set of locations where $v$ occurs as a subword in $u$, i.e. \begin{equation*} O_{v}(u):=\left\{ i\in\mathbb{N}:\sigma_{i}(u)\in\left[ v\right] \right\} . \end{equation*} For any $w \in L(\mathcal{A}^{\mathbb{Z}})$, we may then define the function $R_{u}^{v\rightarrow w}: O_{v}(u) \rightarrow L(\mathcal{A}^{\mathbb{Z}})$ which replaces the occurrence of $v$ within $u$ at some position in $O_{v}(u) $ by the word $w$. Formally, $R_{u}^{v\rightarrow w}(i)$ is the word $ u^{\prime }$ of length $|u| - |v| + |w|$ defined by $u^{\prime}_{[0,i)} = u_{[0,i)}$, $u^{\prime}_{[i,i+|w|)} = w$, and $u^{ \prime}_{[i+|w|,|u|-|v|+|w|)} = u_{[i+|v|,|u|)}$. Our arguments in fact require replacing many occurrences of $v$ by $ w$ within a word $u$, at which point some technical obstructions appear. For instance, if several occurrences of $v$ overlap in $u$, then replacing one by $w$ may destroy the other. The following defines conditions on $v$ and $w$ which obviate these and other problems which would otherwise appear in our counting arguments. \begin{definition} \label{respect} For $v,w \in L(\mathcal{A}^{\mathbb{Z}})$, we say that $v$ \textbf{respects the transition to} $w$ if, for any $u\in L(\mathcal{A}^{ \mathbb{Z}})$ and any $i\in O_{v}(u)$, \begin{align*} \mathrm{(i) } \ & j+|w|-|v| \in O_{v}(R_{u}^{v\rightarrow w}(i))\text{ for any }j\in O_{v}(u)\text{ with }i < j, \\ \mathrm{(ii) } \ & j \in O_{v}(R_{u}^{v\rightarrow w}(i))\text{ for any } j\in O_{v}(u)\text{ with }i>j, \\ \mathrm{(iii) } \ & j \in O_{w}(R_{u}^{v\rightarrow w}(i))\text{ for any } j\in O_{w}(u)\text{ with }i>j, \\ \mathrm{(iv) } \ & j + |w| - |v| > i \text{ for any }j\in O_{v}(u)\text{ with }i < j. 
\end{align*} \end{definition} Informally, $v$ respects the transition to $w$ if, whenever a single occurrence of $v$ is replaced by $w$ in a word $u$, all other occurrences of $v$ in $u$ are unchanged, all occurrences of $w$ in $u$ to the left of the replacement are unchanged, and all occurrences of $v$ in $u$ which were to the right of the replaced occurrence remain on that side of the replaced occurrence. When $v$ respects the transition to $w$, we are able to meaningfully define replacement of a set of occurrences of $v$ by $w$, even when those occurrences of $v$ overlap, as long as we move from left to right. For any $ u,v,w \in L(\mathcal{A}^{\mathbb{Z}})$, we define a function $R_{u}^{v\rightarrow w}: \mathcal{P}(O_{v}(u)) \rightarrow L(\mathcal{A}^{\mathbb{Z}})$ as follows. For any $S:=\left\{ s_{1} ,...,s_{n}\right\} \subseteq O_{v}(u)$ (where we always assume $s_1 < s_2 < \ldots < s_n$), we define sequential replacements $ \left\{ u^{m}\right\} _{m=1}^{n+1}$ by\ 1) $u=u^{1}.$ 2) $u^{m+1}=R_{u^{m}}^{v\rightarrow w}(s_{m}+(m-1)(|w|-|v|)).$ Finally, we define $R_{u}^{v\rightarrow w}(S)$ to be $u^{n+1}$. We first need some simple facts about $R_{u}^{v\rightarrow w}$ which are consequences of Definition~\ref{respect}. \begin{lemma} \label{wsurvive} For any $u,v,w \in L(\mathcal{A}^{\mathbb{Z}})$ where $v$ respects the transition to $w$ and any $S=\left\{ s_{1},...,s_{n}\right\} \subseteq O_{v}(u)$, all replacements of $v$ by $w$ persist throughout, i.e. $\{s_{1},s_{2} +(|w|-|v|),s_{3}+2(|w|-|v|),\ldots,s_{n}+(n-1)(|w|-|v|)\}\subseteq O_{w} (R_{u}^{v\rightarrow w}(S))$. \end{lemma} \begin{proof} Choose any $v,w,u,S$ as in the lemma, and any $s_{i} \in S$. Using the terminology above, clearly $s_{i} + (i-1)(|w| - |v|) \in O_{w}(u^{(i+1)})$. By property (iv) of a respected transition, $s_{1} < s_{2} + |w| - |v| < \ldots< s_{n} + (n-1)(|w| - |v|)$. 
Then, since $s_{i} + (i-1)(|w| - |v|) < s_{j} + (j-1)(|w| - |v|)$ for $j > i$, by property (iii) of respected transition, $s_{i} + (i-1)(|w| - |v|) \in O_{w}(u^{(j+1)})$ for all $j > i$, and so $s_{i} + (i-1)(|w| - |v|) \in O_{w}(R_{u}^{v \rightarrow w}(S))$. Since $i$ was arbitrary, this completes the proof. \end{proof} \begin{lemma} \label{vsurvive} For any $u,v,w \in L(\mathcal{A}^{\mathbb{Z}})$ where $v$ respects the transition to $w$ and any $S=\left\{ s_{1},...,s_{n}\right\} \subseteq O_{v}(u)$, any occurrence of $v$ not explicitly replaced in the construction of $ R_{u}^{v\rightarrow w}$ also persists, i.e. if $m\in O_{v}(u)\setminus S$ and $s_{i}<m<s_{i+1}$, then $m+i(|w|-|v|)\in O_{v}(R_{u}^{v\rightarrow w}(S)) $. \end{lemma} \begin{proof} Choose any $v,w,u,S$ as in the lemma, and any $m \in O_{v}(u) \cap(s_{i}, s_{i+1})$ for some $i$. Using property (i) of a respected transition, a simple induction implies that $m + j(|w| - |v|) \in O_{v}(u^{(j+1)})$ for all $j \leq i$. By property (iv) of a respected transition, $m + i(|w| - |v|) < s_{i+1} + i(|w| - |v|) < \ldots< s_{n} + (n-1)(|w| - |v|)$. Therefore, using property (ii) of a respected transition allows a simple induction which implies that $m + i(|w| - |v|) \in O_{v}(u^{(j+1)})$ for all $j > i$, and so $m + i(|w| - |v|) \in O_{v}(R_{u}^{v \rightarrow w}(S))$. \end{proof} We may now prove injectivity of $R_{u}^{v\rightarrow w}$ under some additional hypotheses, which is key for our main proofs. \begin{lemma} \label{injective} Let $v,w\in L(\mathcal{A}^{\mathbb{Z}})$ such that $v$ respects the transition to $w$, $v$ is not a suffix of $w$, and $w$ is not a prefix of $v$. For any $ u\in L(\mathcal{A}^{\mathbb{Z}})$ and $m$, $R_{u}^{v\rightarrow w}$ is injective on the set of $m$ -element subsets of $O_{v}(u)$. 
\end{lemma} \begin{proof} Assume that $v,w,u$ are as in the lemma, and choose $S=\left\{ s_{1} ,...,s_{m}\right\} \neq S^{\prime}=\left\{ s_{1}^{\prime},...,s_{m}^{\prime }\right\} \subseteq O_{v}(u)$ with $|S|=|S^{\prime}|=m$. We first treat the case where $|v| \geq|w|$, and recall that $w$ is not a prefix of $v$. Since $S \neq S^{\prime}$, we can choose $i$ maximal so that $ s_{j} = s^{\prime}_{j}$ for $j < i$. Then $s_{i} \neq s^{\prime}_{i}$; we assume without loss of generality that $s_{i} < s^{\prime}_{i}$. Since $ s_{i} \in S$, we know that $s_{i} \in O_{v}(u)$. Since $s^{\prime}_{i-1} = s_{i-1} < s_{i} < s^{\prime}_{i}$, by Lemma~\ref{vsurvive} $s_{i} + (i-1)(|w| - |v|) \in O_{v}(R_{u}^{v \rightarrow w}(S^{\prime}))$. Also, by Lemma~\ref{wsurvive}, $s_{i} + (i-1)(|w| - |v|) \in O_{w}(R_{u}^{v \rightarrow w}(S))$. Since $w$ is not a prefix of $v$, this means that $ R_{u}^{v \rightarrow w}(S) \neq R_{u}^{v \rightarrow w}(S^{\prime})$, completing the proof of injectivity in this case. We now treat the case where $|v| \leq|w|$, and recall that $v$ is not a suffix of $w$. Since $S \neq S^{\prime}$, we can choose $i$ maximal so that $ s_{m - j} = s^{\prime}_{m - j}$ for $j < i$. Then $s_{m - i} \neq s^{\prime}_{m - i} $; we assume without loss of generality that $s_{m - i} < s^{\prime}_{m - i}$. Since $s^{\prime}_{m - i} \in S^{\prime}$, we know that $s^{\prime}_{m - i} \in O_{v}(u)$. Since $s_{m-i} < s^{\prime}_{m-i} < s^{\prime}_{m-i+1} = s_{m-i+1}$, by Lemma~\ref{vsurvive} $s^{\prime}_{m-i} + (m-i)(|w| - |v|) \in O_{v}(R_{u}^{v \rightarrow w}(S))$. Also, by Lemma~\ref {wsurvive}, $s^{\prime }_{m-i} + (m-i-1)(|w| - |v|) \in O_{w}(R_{u}^{v \rightarrow w}(S^{\prime}))$. Since $v$ is not a suffix of $w$, this means that $R_{u}^{v \rightarrow w}(S) \neq R_{u}^{v \rightarrow w}(S^{\prime})$, completing the proof of injectivity in this case and in general. 
\end{proof} \begin{lemma} \label{preimage} Let $v,w\in L(\mathcal{A}^{\mathbb{Z}})$ such that $v$ respects the transition to $ w$, $v$ is not a suffix of $w$, and $w$ is not a prefix of $v$. Then for any $u^{\prime}\in L(\mathcal{A}^{\mathbb{Z}})$ and any $m \leq|O_{w}(u^{\prime})|$, \begin{equation*} |\{(u, S) \ : \ |S| = m, S \subseteq O_{v}(u), u^{\prime}= R_{u}^{v\rightarrow w}(S)\}| \leq{\binom{|O_{w}(u^{\prime})| }{m}}. \end{equation*} \end{lemma} \begin{proof} Assume that $v,w,u^{\prime}$ are as in the lemma, and denote the set above by $f(u^{\prime})$. For any $(u,S)\in f(u^{\prime})$ we define $g(S)=\{s_{1} ,s_{2}+|w|-|v|,\ldots,s_{m}+(m-1)(|w|-|v|)\}$; note that by Lemma~\ref {wsurvive}, $g(S)\subseteq O_{w}(u^{\prime})$. We claim that for any $S$, there is at most one $u$ for which $(u,S)\in f(u^{\prime})$. One can find this $u$ by simply reversing each of the replacements in the definition of $R_{u}^{v \rightarrow w}(S)$. Informally, the only such $u$ is $u = R_{u^{\prime}}^{v \leftarrow w}(g(S))$, where $ R_{u'}^{v\leftarrow w}$ is defined analogously to $R_{u}^{v\rightarrow w}$ with replacements of $w$ by $v$ made from right to left instead of $v$ by $w$ made from left to right. Finally, since $g(S)\subseteq O_{w}(u^{\prime})$, and since $g$ is clearly injective, there are less than or equal to ${\binom{|O_{w}(u^{\prime})|}{m}}$ choices for $S$ with $(u,S)\in f(u^{\prime})$ for some $u$, completing the proof. \end{proof} We may now prove the desired relation for $v$, $w$ with $E(v)\subseteq E(w)$ under additional assumptions on $v$ and $w$. \begin{proposition} \label{easycase} Let $X$ be a subshift, $\mu$ a measure of maximal entropy of $X$, and $v,w\in L(X).$ If $v$ respects the transition to $w$, $v$ is not a suffix of $w$, $w$ is not a prefix of $v$, and $E_{X}(v)\subseteq E_{X}(w)$ , then \begin{equation*} \mu(v)\leq\mu(w)e^{h_{top}(X)(|w|-|v|)}. \end{equation*} \end{proposition} \begin{proof} Let $\delta,\varepsilon\in\mathbb{Q}_{+}$. 
We may assume without loss of generality that $\mu$ is an ergodic MME, since proving the desired inequality for ergodic MMEs implies it for all MMEs by ergodic decomposition. For every $n\in\mathbb{Z}_{+},$ we define \begin{equation*} S_{n}:=\left\{ u\in L_{n}(X):\left\vert O_{v}(u)\right\vert \geq n(\mu(v)-\delta)\text{ and }\left\vert O_{w}(u)\right\vert \leq n(\mu (w)+\delta)\right\} . \end{equation*} By the pointwise ergodic theorem (applied to $\chi_{[v]}$ and $\chi_{[w]}$), $\mu(S_{n}) \rightarrow1$. Then, by Corollary~\ref{SMBcor}, there exists $N$ so that for $n>N$, \begin{equation} |S_{n}|>e^{n(h_{top}(X)-\delta)}. \label{Sbound} \end{equation} For each $u\in S_{n}$, we define \begin{equation*} A_{u}:=\left\{ R_{u}^{v\rightarrow w}(S):S\subseteq O_{v}(u)\text{ and } \left\vert S\right\vert =\varepsilon n\right\} \end{equation*} (without loss of generality we may assume $\varepsilon n$ is an integer by taking a sufficiently large $n$.) Since each word in $A_u$ is obtained by making $\varepsilon n$ replacements of $v$ by $w$ in a word of length $n$, all words in $A_u$ have length $m := n + \varepsilon n (|w| - |v|)$. Since $E_{X}(v)\subseteq E_{X}(w)$, we have that $A_{u}\subset L(X)$. Also, by Lemma~\ref{injective}, \begin{equation*} |A_{u}|={\binom{\left\vert O_{v}(u)\right\vert}{\left\vert S\right\vert} \geq {\binom{n(\mu(v)-\delta)}{\varepsilon n}} } \end{equation*} for every $u$. On the other hand, for every $u^{\prime}\in\bigcup_{u\in S_{n}}A_{u}$ we have that \begin{equation*} \left\vert O_{w}(u^{\prime})\right\vert \leq n(\mu(w)+\delta)+n\varepsilon (2|w|+1) \end{equation*} (here, we use the fact that any replacement of $v$ by $w$ can create no more than $2|w|$ \ new occurrences of $w$.) 
Therefore, by Lemma~\ref{preimage}, \begin{equation*} \left\vert \left\{ u\in S_{n}:u^{\prime}\in A_{u}\right\} \right\vert \leq{ \binom{n\left(\mu(w)+\delta+(2|w|+1)\varepsilon\right)}{\varepsilon n}.} \end{equation*} Then, by Lemma~\ref{counting}, we see that for $n>N$, \begin{multline} |L_{m}(X)|\geq\left\vert \bigcup_{u\in S_{n}}A_{u}\right\vert \geq |S_{n}|{ \binom{n(\mu(v)-\delta)}{\varepsilon n}}{\binom{n(\mu(w)+\delta +(2|w|+1)\varepsilon)}{\varepsilon n}}^{-1} \label{imagebound} \\ \geq e^{n(h_{top}(X)-\delta)}{\binom{n(\mu(v)-\delta)}{\varepsilon n}} { \binom{n(\mu(w)+\delta+(2|w|+1)\varepsilon)}{\varepsilon n}}^{-1}. \end{multline} For readability, we define $x=\mu(v)-\delta$ and $y=\mu(w)+\delta$. We recall that by Stirling's approximation, for $a > b > 0$, \[ \log\left( \begin{array} [c]{c} an\\ bn \end{array} \right) =an\log(an)-bn\log(bn)-n(a-b)\log(n(a-b))+o(n) .\] Therefore, if we take logarithms and divide by $n$ on both sides of (\ref{imagebound}) and let $n$ approach infinity, we obtain \begin{multline*} h_{top}(X)(1+\varepsilon(|w|-|v|))\geq h_{top}(X)-\delta+x\log x-(x-\varepsilon)\log(x-\varepsilon) \\ -(y+(2|w|+1)\varepsilon)\log(y+(2|w|+1)\varepsilon)+(y+2|w|\varepsilon )\log(y+2|w|\varepsilon). \end{multline*} We subtract $h_{top}(X)$ from both sides, let $\delta\rightarrow0$, and simplify to obtain \begin{multline*} h_{top}(X)\varepsilon(|w|-|v|)\geq\varepsilon\log\mu(v)+(\mu(v)-\varepsilon )\left( \log\frac{\mu(v)}{\mu(v)-\varepsilon}\right) \\ -\varepsilon\log(\mu(w)+(2|w|+1)\varepsilon)-(\mu(w)+2|w|\varepsilon )\log\left( \frac{\mu(w)+(2|w|+1)\varepsilon}{\mu(w)+2|w|\varepsilon}\right) . 
\end{multline*} We have that \begin{align*} & \lim_{\varepsilon\rightarrow0}\frac{\mu(v)-\varepsilon}{\varepsilon} \log \frac{\mu(v)}{\mu(v)-\varepsilon} \\ & =\lim_{\varepsilon\rightarrow0}\frac{\mu(v)}{\varepsilon}\log\frac{\mu (v) }{\mu(v)-\varepsilon} \\ & =1, \end{align*} and \begin{align*} & \lim_{\varepsilon\rightarrow0}-\frac{\mu(w)+2|w|\varepsilon}{\varepsilon } \log\left( \frac{\mu(w)+(2|w|+1)\varepsilon}{\mu(w)+2|w|\varepsilon}\right) \\ & =\lim_{\varepsilon\rightarrow0}-\frac{\mu(w)}{\varepsilon}\log\left( \frac{ \mu(w)+(2|w|+1)\varepsilon}{\mu(w)+2|w|\varepsilon}\right) \\ & =\lim_{t\rightarrow0}-\frac{1}{t}\log\left( \frac{1+(2|w|+1)t} {1+2|w|t} \right) \\ & =-1. \end{align*} This implies (by dividing the previous estimate by $\varepsilon$ and taking the limit as $\varepsilon\rightarrow0$) that \begin{equation*} h_{top}(X)(|w|-|v|)\geq\log\mu(v)-\log\mu(w). \end{equation*} Exponentiating both sides and solving for $\mu(v)$ completes the proof. \end{proof} Our strategy is now to show that for any pair $v, w$, the cylinder sets $[v]$ and $[w]$ may each be partitioned into cylinder sets of the form $[\alpha v \beta]$ and $[\alpha w \beta]$ where the additional hypotheses of Proposition~\ref{easycase} hold on corresponding pairs. For this, we make the additional assumption that $X$ has positive entropy to avoid some pathological examples (for instance, note that if $X = \{0^{\infty}\}$, then it is not even possible to satisfy the hypotheses of Proposition~\ref{easycase}!) \begin{definition} Let $X$ be a subshift and $v\neq w\in L(X)$. We define \begin{align*} X_{resp(v\rightarrow w)} & :=\{x\in\left[ v\right] :\\ \exists N,M & \in\mathbb{Z}_{+} \text{ s.t. } \alpha v\beta=x_{[-N,M)}\text{ respects the transition to }\alpha w\beta,\\ & \alpha v\beta\text{ is not a suffix of }\alpha w\beta\text{, and }\\ & \alpha w\beta\text{ is not a prefix of }\alpha v\beta\}.
\end{align*} \end{definition} \begin{proposition} \label{extend} \label{transition} Let $X$ be a subshift with positive topological entropy, $\mu$ an ergodic measure of maximal entropy of $X$, and $v\neq w\in L(X)$. There exists $G^{v,w}\subset X_{resp(v\rightarrow w)}$ such that $\mu(G^{v,w})=\mu(v)$. \end{proposition} \begin{proof} Define \begin{equation*} Q:=\left\{ \gamma\in L(X):\mu(\gamma)>0\right\} \end{equation*} and, for all $n \in\mathbb{N}$, define $Q_{n} := Q \cap A^{n}$. Recall that \begin{equation*} h_{\mu}(X)=\lim_{n\rightarrow\infty}\frac{1}{n}\sum\nolimits_{w\in A^{n}}-\mu(w)\log\mu(w). \end{equation*} The only positive terms of this sum are those corresponding to $w \in Q_{n}$, and it is a simple exercise to show that when $\sum_{i=1}^{t} \alpha_{i} = 1$, $\sum_{i=1}^{t} -\alpha_{i} \log\alpha_{i}$ has a maximum value of $\log t$. Therefore, \begin{equation*} h_{\mu}(X) \leq\liminf_{n \rightarrow\infty} \frac{1}{n} \log|Q_{n}|. \end{equation*} Since $h_{\mu}(X) > 0$, $\left\vert Q_{n}\right\vert $ grows exponentially. Therefore, there exists $n_{2}\in\mathbb{Z}_{+}$ such that for every $n\geq n_{2}$ we have that $\left\vert Q_{n}\right\vert $ $\geq2n.$ Let \begin{align*} N & :=\max\left\{ n_{2},\left\vert v\right\vert \right\} +1, \\ P & :=\left\{ x \in X \ : \ x_{(-\infty, 0)} \text{ periodic with period less than } |w| \right\} , \\ S & :=\left\{ x \in X \ : \ \forall\gamma\in Q, \gamma\text{ is a subword of } x_{[0, \infty)} \right\} ,\text{ and} \\ G^{v,w} & :=([v]\cap S) \setminus P. \end{align*} Since $\mu$ has positive entropy, it is not supported on points with period less than $|w|$, and so for each $i < |w|$, there exists a word $u_{i} \in L_{i+1}(X)$ with different first and last letters. Then the pointwise ergodic theorem (applied to $\chi_{[u_{1}]}, \ldots, \chi_{[u_{|w| - 1}]}$ with $F_n = [-n,0)$) implies that $ \mu(P) = 0$.
The pointwise ergodic theorem (applied to $\chi_{[\gamma]}$ for $\gamma\in Q$ with $F_n = [0,n]$) shows that $\mu(S)=1$, and so $\mu(G^{v,w})=\mu(v)$. Now we will prove that $G^{v,w}\subset X_{resp(v\rightarrow w)}$. Let $x\in G^{v,w}$. If for every $n$, $x_{(-n,0)}v$ is a suffix of $x_{(-n,0)}w$, then clearly $|w|\geq |v|$, and for any $i>0$, the $(i+|w|)$th letters from the end of $x_{(-\infty,0)}v$ and $x_{(-\infty,0)}w$ must be the same, i.e. $ x_{-i}=x_{-i-|w|+|v|}$. This would imply $x\in P$, which is not possible. We can therefore define $N^{\prime}\geq N$ to be minimal so that for $ \alpha^x= x_{[-N^{\prime}, 0)}$, $\alpha^x v$ is not a suffix of $\alpha^x w$. (Obviously if $|v| \geq|w|$, then $N^{\prime}= N$.) Since $x \in S$, we can define the minimal $M$ so that all $N^{\prime} $ -letter words of positive $\mu$-measure are subwords of $x_{\left[ -N^{\prime},M\right) }$; for brevity we write this as $Q_{N^{\prime}} \sqsubset x_{\left[ -N^{\prime},M\right) }$. Since $M$ is the first natural with $Q_{N^{\prime}}\sqsubset x_{\left[ -N^{\prime},M\right) }$, then \begin{equation*} \left\vert O_{x_{\left[ M-N^{\prime},M\right) }}(x_{\left[ -N^{\prime },M\right) })\right\vert =1, \end{equation*} i.e. the $N^{\prime}$-letter suffix of $x_{[-N^{\prime}, M)} = \alpha^x v \beta^x $ appears only at the end of $\alpha^x v \beta^x$. Since $N^{\prime}\geq N \geq n_{2}$, $\left\vert Q_{N^{\prime}}\right\vert $ $\geq 2N^{\prime}$, and so $M > 2N^{\prime} \geq N^{\prime} + |v|$, implying that the aforementioned $N^{\prime}$-letter suffix of $\alpha^x v \beta^x$ is also the $N^{\prime}$-letter suffix of $\alpha^x w \beta^x$. First, it is clear that $\alpha^x v \beta^x$ is not a suffix of $\alpha^x w \beta^x$, since $\alpha^x v$ was not a suffix of $\alpha^x w$ by definition of $\alpha^x$. Since the $N^{\prime}$-letter suffix of $\alpha^x w \beta^x$ appears only once within $\alpha^x v \beta^x$, we see that $\alpha^x w \beta^x$ cannot be a prefix of $ \alpha^x v \beta^x$ either.
It remains to show that $\alpha^x v\beta^x=x_{\left[ -N^{\prime},M\right) }$ respects the transition to $\alpha^x w\beta^x.$ Suppose that a word $u\in L(X)$ contains overlapping copies of $\alpha^x v \beta^x$, i.e. we have $i,j\in O_{\alpha^x v\beta^x}(u)$ with $j>i$. Since $\left\vert O_{x_{\left[ M-N^{\prime },M\right) }}(x_{\left[ -N^{\prime},M\right) })\right\vert =1$ we have that $ j > i + M$; otherwise the $N^{\prime}$-letter suffix of $\alpha^x v \beta^x= x_{[i, i+N^{\prime}+M)}$ would be a non-terminal subword of $ \alpha^x v \beta^x= x_{[j, j+N^{\prime}+M)}$. Then $j + |w| - |v| > i + M + |w| - |v| > i$, and so property (iv) is verified. Since $j > i + M$, the central $v$ within $x_{[i, i+N^{\prime}+M})$ is disjoint from $x_{[j, j+N^{\prime }+M})$, and so $j + |w| - |v| \in O_{v}(R_{u}^{v\rightarrow w}(i))$, verifying property (i). For property (ii), the same argument as above shows that when $i,j\in O_{\alpha^x v\beta^x}(u)$ with $i>j$, $i > j + M$. Again this means that the central $v$ within $x_{[i, i+N^{\prime}+M)}$ is disjoint from $ x_{[j, j+N^{\prime}+M)}$, and so $j \in O_{v}(R_{u}^{v\rightarrow w}(i))$ , verifying property (ii) and completing the proof. For property (iii), we simply note that the proof of (ii) is completely unchanged if we instead assumed $j \in O_{\alpha^x w\beta^x}(u)$, since the $ N^{\prime}$-letter suffixes of $\alpha^x w \beta^x$ and $\alpha^x v \beta^x$ are the same. \end{proof} \begin{remark}\label{cp} For $x\in G^{v,w}$ (as in Proposition~\ref{extend}) we denote by $\alpha^{x}$ and $\beta ^{x}$ the words $\alpha$ and $\beta$ constructed in the proof. \end{remark} \begin{lemma}\label{cl} For $x \neq y\in G^{v,w}$, it is not possible for either of $\alpha^x$, $\alpha^y$ to be a proper suffix of the other, and if $\alpha^x = \alpha^y$, then it is not possible for either of $\beta^x, \beta^y$ to be a proper prefix of the other. \end{lemma} \begin{proof} Let $x \neq y\in G^{v,w}$. 
We write $\alpha^x v \beta^x = x_{[-N'_x, M_x)}$ and $\alpha^y v \beta^y= y_{[-N'_y, M_y)}$. We recall that $\alpha^x= x_{[-N'_x, 0)}$ was chosen as the minimal $N'_x$ (above a certain $N_x$ dependent only on $v$ and $X$) so that $\alpha^x v$ is not a suffix of $\alpha^x w$, and that $\alpha^y = y_{[-N'_y, 0)}$ was defined similarly using minimal $N'_y$ above some $N_y$. If $\alpha^y$ were a proper suffix of $\alpha^x$, then $N'_y < N'_x$ and $\alpha^y = x_{[-N'_y, 0)}$. Since by construction $\alpha^y v$ is not a suffix of $\alpha^y w$, this would contradict the minimality of $\alpha^x$. A trivially similar argument shows that $\alpha^x$ is not a proper suffix of $\alpha^y$. Now, assume that $\alpha^x = \alpha^y$; we denote their common value by $\alpha$ and their common length by $N'$. Recall that $\beta^x = x_{[|v|, M_x)}$ was chosen using the minimal $M_x$ so that $\alpha^x v \beta^x$ contains all $N'_x$-letter words of positive $\mu$-measure, and that $\beta^y$ was defined similarly using minimal $M_y$ for $y$. If $\beta^y$ were a proper prefix of $\beta^x$, then $M_y < M_x$ and $\beta^y = x_{[|v|, M_y)}$. Since $\alpha v \beta^y$ contains all $N'$-letter words of positive $\mu$-measure, this would contradict the minimality of $\beta^x$. A trivially similar argument shows that $\beta^x$ is not a proper prefix of $\beta^y$. \end{proof} We may now prove the main result of this section. \begin{theorem} \label{hardcase} Consider any $X$ a subshift with positive entropy, $\mu$ a measure of maximal entropy of $X$, and $v,w\in L(X).$ If $E_{X}(v)\subseteq E_{X}(w)$ then \begin{equation*} \mu(v)\leq\mu(w)e^{h_{top}(X)(|w|-|v|)}. \end{equation*} \end{theorem} \begin{proof} Consider $X, \mu, v, w$ as in the theorem. We may prove the result for only ergodic $\mu$, since it then follows for all $\mu$ by ergodic decomposition. If $v=w$ the result is trivial, so we assume $v\neq w$. Let $G^{v,w}$ be as in the proof of Proposition~\ref{extend}. 
For any $x \in G^{v,w}$, by definition $\alpha^x v \beta^x \in L(X)$. Since $E_{X}(v)\subseteq E_{X} (w)$, we then know that $\alpha^x w \beta^x\in L(X)$ and $E_{X}(\alpha^x v \beta^x)\subseteq E_{X}(\alpha^x w \beta^x)$ for every $x\in G^{v,w}$. Now, using Proposition~\ref{easycase} we have that \begin{equation} \label{hardbound} \mu(\alpha^x v \beta^x)\leq\mu(\alpha^x w \beta^x)e^{h_{top}(X)(|\alpha^x w \beta^x|-|\alpha^x v \beta^x|)}=\mu(\alpha^x w \beta^x)e^{h_{top}(X)(|w|-|v|)}. \end{equation} For convenience, we adopt the notation $[\alpha^x v \beta^x] = [\alpha^x.v\beta^x]$ and $[\alpha^x w \beta^x] = [\alpha^x.w\beta^x]$ to emphasize the location of the words $\alpha^x v \beta^x$ and $\alpha^x w \beta^x$ within these cylinder sets. We now claim that if $\alpha^x v \beta^x \neq \alpha^y v \beta^y$ for $x,y \in G^{v,w}$, then $[\alpha^x v \beta^x]\cap [\alpha^y v \beta^y]=\emptyset$. To verify this, choose any $x,y$ for which $\alpha^x v \beta^x \neq \alpha^y v \beta^y$; then either $\alpha^x\neq\alpha^{y}$ or $\alpha^x = \alpha^y$ and $\beta^x\neq\beta^{y}$. If $\alpha^x \neq \alpha^y$, then by Lemma~\ref{cl}, neither of $\alpha^x$ or $\alpha^y$ can be a suffix of the other, which means that the cylinder sets $[\alpha^x .v \beta^x]$ and $[\alpha^{y} .v \beta^{y}]$ are disjoint. If instead $\alpha^x = \alpha^y$ and $\beta^x \neq \beta^y$, then again by Lemma~\ref{cl}, neither of $\beta^x$ or $\beta^y$ can be a prefix of the other, meaning that the cylinder sets $[\alpha^x .v \beta^x]$ and $[\alpha^{y} .v \beta^{y}]$ are again disjoint. This proves the claim. Let $K=\{\alpha^x v \beta^x \ : \ x\in G^{v,w}\}$. Since all $[\alpha^x .v \beta^x]$ are disjoint or equal, $\{[\alpha .v \beta]\}_{\alpha v \beta \in K}$ forms a partition of $G^{v,w}$. 
Furthermore we also obtain that the sets $\{[\alpha^x .w \beta^x]\}_{\alpha v \beta \in K}$ are disjoint, and so \begin{align*} \sum_{\alpha v \beta \in K}\mu(\alpha v \beta) & =\mu(G^{v,w})=\mu(v)\text{ and} \\ \sum_{\alpha v \beta \in K}\mu(\alpha w \beta) & \leq\mu(w). \end{align*} In fact one can show the final inequality is an equality but we will not use this. We may then sum (\ref{hardbound}) over $\alpha v \beta \in K$ yielding \begin{align*} \mu(v) & =\sum_{\alpha v \beta \in K}\mu(\alpha v \beta) \\ & \leq e^{h_{top}(X)(|w|-|v|)}\sum_{\alpha v \beta \in K} \mu(\alpha w \beta) \\ & \leq\mu(w)e^{h_{top}(X)(|w|-|v|)}, \end{align*} as desired. \end{proof} The following corollary is immediate. \begin{corollary} \label{maincor} Let $X$ be a $\mathbb{Z}$-subshift with positive entropy and $v,w\in L(X)$. If $E_{X}(v)=E_{X}(w)$, then for every measure of maximal entropy $\mu$ of $X$, \begin{equation*} \mu(v)=\mu(w)e^{h_{top}(X)(|w|-|v|)}. \end{equation*} \end{corollary} \subsection{Applications to synchronized subshifts} \label{apps} The class of synchronized subshifts provides many examples where $E_X(v) = E_X(w)$ is satisfied for many pairs $v,w$ of different lengths, allowing for the usage of Corollary~\ref{maincor}. \begin{definition} For a subshift $X$, we say that $v\in L(X)$ is \textbf{synchronizing} if for every $uv,vw\in L(X)$, it is true that $uvw\in L(X).$ A subshift $X$ is \textbf{synchronized} if $L(X)$ contains a synchronizing word. \end{definition} The following fact is immediate from the definition of synchronizing word. \begin{lemma} \label{synchlem} If $w$ is a synchronizing word for a subshift $X$, then for any $v \in L(X)$ which contains $w$ as both a prefix and suffix, $E_{X}(v) = E_{X}(w)$. \end{lemma} \begin{definition} A subshift $X$ is \textbf{entropy minimal} if every subshift strictly contained in $X$ has lower topological entropy. Equivalently, $X$ is entropy minimal if every MME on $X$ is fully supported.
\end{definition} The following result was first proved in \cite{Th}, but we may also derive it as a consequence of Corollary~\ref{maincor} with a completely different proof. \begin{theorem} \label{synchunique} Let $X$ be a synchronized subshift. If $X$ is entropy minimal then $X$ has a unique measure of maximal entropy. \end{theorem} \begin{proof} Let $\mu$ be an ergodic measure of maximal entropy of such an $X$. Let $w$ be a synchronizing word, $u\in L(X)$ and \[ R_{u}:=\left\{ x\in\left[ u\right] :\left\vert O_{w}(x_{\left( -\infty,0\right] })\right\vert \geq1\text{ and }\left\vert O_{w}(x_{\left( \left\vert u\right\vert ,\infty\right) })\right\vert \geq1\right\} . \] Since $X$ is entropy minimal, $\mu(w) > 0$, and so by the pointwise ergodic theorem (applied to $\chi_{[w]}$ with $F_{n} = [-n,0]$ or $(|u|, n]$), $\mu(R_{u}) = \mu(u)$. For every $x\in R_{u}$ we define minimal $n \geq|w|$ and $m \geq|w| + |u|$ so that $g_{u}(x):=x_{\left[ -n,m\right] }$ contains $w$ as both a prefix and a suffix. Then $\{[g_{u}(x)]\}$ forms a partition of $R_{u}$. By Lemma~\ref{synchlem}, $E_X(w) = E_X(wvw)$ for all $v$ s.t. $wvw \in L(X)$. Then by Corollary \ref{maincor} we have that \[ \mu(g_{u}(x))=\mu(w)e^{h_{top}(X)(\left\vert w\right\vert -\left\vert g_{u}(x)\right\vert )}. \] Since $g_{u}(R_{u})$ is countable we can write \[ \mu(u)=\mu(R_{u}) = \mu(w)\sum_{g_{u}(x)\in g_{u}(R_{u})}e^{h_{top} (X)(\left\vert w\right\vert -\left\vert g_{u}(x)\right\vert )}. \] This implies that \[ 1=\sum_{a\in\mathcal{A}}\mu(a)=\mu(w)\sum_{a\in\mathcal{A}}\sum_{g_{a}(x)\in g_{a}(R_{a})}e^{h_{top}(X)(\left\vert w\right\vert -\left\vert g_{a} (x)\right\vert )}.
\] We combine the two equations to yield \begin{align*} \mu(u) & =\frac{\sum\nolimits_{g_{u}(x)\in g_{u}(R_{u})}e^{h_{top} (X)(\left\vert w\right\vert -\left\vert g_{u}(x)\right\vert )}}{\sum _{a\in\mathcal{A}}\sum_{g_{a}(x)\in g_{a}(R_{a})}e^{h_{top}(X)(\left\vert w\right\vert -\left\vert g_{a}(x)\right\vert )}}\\ & =\frac{\sum\nolimits_{g_{u}(x)\in g_{u}(R_{u})}e^{-h_{top}(X)\left\vert g_{u}(x)\right\vert }}{\sum_{a\in\mathcal{A}}\sum\nolimits_{g_{a}(x)\in g_{a}(R_{a})}e^{-h_{top}(X)\left\vert g_{a}(x)\right\vert }}. \end{align*} Since the right-hand side is independent of the choice of the measure we conclude there can only be one ergodic measure of maximal entropy, which implies by ergodic decomposition that there is only one measure of maximal entropy. \end{proof} In \cite{CT}, one of the main tools used in proving uniqueness of the measure of maximal entropy for various subshifts was boundedness of the quantity $\frac{|L_{n}(X)|}{e^{nh_{top}(X)}}$. One application of our results is to show that this quantity in fact converges to a limit for a large class of synchronized shifts. \begin{definition} A measure $\mu$ on a subshift $X$ is \textbf{mixing} if, for all measurable $A,B$, \[ \lim_{n \rightarrow\infty} \mu(A \cap\sigma_{-n} B) = \mu(A) \mu(B). \] \end{definition} \begin{theorem} \label{limit}Let $X$ be a synchronized entropy minimal subshift such that the measure of maximal entropy is mixing. We have that \[ \lim_{n\rightarrow\infty}\frac{\left\vert L_{n}(X)\right\vert }{e^{nh_{top} (X)}}\text{ exists.} \] \end{theorem} \begin{proof} We denote $\lambda:=e^{h_{top}(X)}$ and define $\mu$ to be the unique measure of maximal entropy for $X$. Let $w\in L(X)$ be a synchronizing word and \[ R_{n}:=\left\{ u\in L_{n}(X):w\text{ is a prefix and a suffix of }u\right\} . \] Lemma~\ref{synchlem} and Corollary \ref{maincor} imply that for every $u\in R_{n}$, \[ \mu(u)=\mu(w)\lambda^{\left\vert w\right\vert -n}. 
\] This implies that \[ \sum_{u\in R_{n}}\mu(u)=\left\vert R_{n}\right\vert \mu(w)\lambda^{\left\vert w\right\vert -n}. \] On the other hand \[ \sum_{u\in R_{n}}\mu(u)=\mu(\left[ w\right] \cap\sigma_{\left\vert w\right\vert -n}\left[ w\right] ). \] Since the measure is mixing we obtain that \[ \lim_{n\rightarrow\infty}\mu(\left[ w\right] \cap\sigma_{\left\vert w\right\vert -n}\left[ w\right] )=\mu(\left[ w\right] )^{2}. \] Combining the three equalities above yields \[ \lim_{n\rightarrow\infty}\frac{\left\vert R_{n}\right\vert }{\lambda^{n} }=\frac{\mu(w)}{\lambda^{\left\vert w\right\vert }}. \] For all $n\in\mathbb{N}$, we define \begin{align*} P_{n} & :=\left\{ u\in L_{n+|w|}(X):w\text{ is a prefix of }u,|O_{w}(u)|=1\right\} \text{ and}\\ S_{n} & :=\left\{ u\in L_{n+|w|}(X):w\text{ is a suffix of }u,|O_{w}(u)|=1\right\} \end{align*} to be the sets of $(n+|w|)$-letter words in $L(X)$ containing $w$ exactly once as a prefix/suffix respectively. We also define \[ K_{n}:=\left\{ u\in L_{n}(X):|O_{w}(u)|=0\right\} \] to be the set of $n$-letter words in $L(X)$ not containing $w$. Then partitioning words in $L_{n}(X)\setminus K_{n}$ by the first and last appearance of $w$, recalling that $w$ is synchronizing, gives the formula \[ \left\vert L_{n}(X)\right\vert =\left\vert K_{n}\right\vert + \sum_{0\leq i < j\leq n}|S_{i}||R_{j-i} ||P_{n-j}|, \] thus \begin{equation} \label{sumproduct}\frac{\left\vert L_{n}(X)\right\vert }{\lambda^{n}} = \frac{\left\vert K_{n}\right\vert }{\lambda^{n}} + \sum_{0\leq i < j \leq n} \frac{|S_{i}|}{\lambda^{i}} \frac{|R_{j-i}|} {\lambda^{j-i}} \frac{|P_{n-j} |}{\lambda^{n-j}}. \end{equation} We now wish to take the limit as $n \rightarrow\infty$ of both sides of (\ref{sumproduct}). First, we note that since $X$ is entropy minimal, $h_{top}(X_{w}) < h_{top}(X)$, where $X_{w}$ is the subshift of points of $X$ not containing $w$. Therefore, \[ \limsup_{n\rightarrow\infty}\frac{1}{n}\log\left\vert K_{n}\right\vert < h_{top}(X).
\] Since all words in $P_{n}$ and $S_{n}$ are the concatenation of $w$ with a word in $K_{n}$, $|P_{n}|, |S_{n}| \leq|K_{n}|$, and so \[ \limsup_{n\rightarrow\infty}\frac{1}{n}\log\left\vert P_{n}\right\vert ,\limsup_{n\rightarrow\infty}\frac{1}{n}\log\left\vert S_{n}\right\vert < h_{top}(X), \] implying that the infinite series \[ \sum_{n=0}^{\infty}\frac{\left\vert P_{n}\right\vert }{\lambda^{n}} \text{ and } \sum_{n=0}^{\infty}\frac{\left\vert S_{n}\right\vert }{\lambda^{n}}\text{ converge.} \] We now take the limit of the right-hand side of (\ref{sumproduct}). \[ \lim_{n \rightarrow\infty} \frac{\left\vert K_{n}\right\vert }{\lambda^{n}} + \sum_{0\leq i < j \leq n} \frac{|S_{i}|} {\lambda^{i}} \frac{|R_{j-i} |}{\lambda^{j-i}} \frac{|P_{n-j}|}{\lambda^{n-j}} = \lim_{n \rightarrow\infty} \sum_{0 \leq k \leq n} \left( \frac{|R_{k}|}{\lambda^{k}} \left( \sum_{i = 0}^{n-k} \frac{|S_{i}|}{\lambda^{i}} \frac{|P_{n - k - i}|} {\lambda^{n-k-i} }\right) \right) . \] Since $\frac{|R_{k}|}{\lambda^{k}}$ converges to the limit $\frac{\mu (w)}{\lambda^{\left\vert w\right\vert }}$ and the series $\sum_{m = 0}^{\infty} \sum_{i = 0}^{m} \frac{|S_{i}|}{\lambda^{i}} \frac{|P_{m-i} |}{\lambda^{m-i}}$ converges, the above can be rewritten as \begin{multline*} \lim_{n \rightarrow\infty} \sum_{0 \leq k \leq n} \left( \frac{|R_{k} |}{\lambda^{k}} \left( \sum_{i = 0}^{n-k} \frac{|S_{i}|}{\lambda^{i}} \frac{|P_{n - k - i}|}{\lambda^{n-k-i}}\right) \right) = \frac{\mu (w)}{\lambda^{\left\vert w\right\vert }} \sum_{m = 0}^{\infty} \sum_{i = 0}^{m} \frac{|S_{i}|}{\lambda^{i}} \frac{|P_{m- i} |}{\lambda^{m-i}}\\ = \frac{\mu(w)}{\lambda^{\left\vert w\right\vert }} \sum_{n=0}^{\infty} \frac{\left\vert P_{n}\right\vert }{\lambda^{n}} \sum_{n=0}^{\infty} \frac{\left\vert S_{n}\right\vert }{\lambda^{n}}.
\end{multline*} Recalling (\ref{sumproduct}), we see that $\lim_{n \rightarrow\infty} \frac{|L_{n}(X)|}{\lambda^{n}}$ converges to this limit as well, completing the proof. \end{proof} We will be able to say even more about a class of synchronized subshifts called the $S$-gap subshifts. \begin{definition}\label{Sgap} Let $S\subseteq\mathbb{N} \cup \{0\}$. We define the $S-$gap subshift $X_{S}$ by the set of forbidden words $\{10^{n}1 \ : \ n \notin S\}$. Alternately, $X_{S}$ is the set of bi-infinite $\{0,1\}$ sequences where the gap between any two nearest $1$s has length in $S.$ \end{definition} It is immediate from the definition that $1$ is a synchronizing word for every $S-$gap subshift. Also, all $S$-gap subshifts are entropy minimal (see Theorem C, Remark 2.4 of \cite{CT2}), and as long as $\gcd(S+1)=1$, their unique measure of maximal entropy is mixing (in fact Bernoulli) by Theorem 1.6 of \cite{Cl2}. (This theorem guarantees that the unique MME is Bernoulli up to period $d$ given by the gcd of periodic orbit lengths, and it's clear that $S+1$ is contained in the set of periodic orbit lengths.) In this case Climenhaga \cite{Cl} conjectured that the limit $\lim _{n\rightarrow\infty}\frac{\left\vert L_{n}(X_{S})\right\vert }{e^{nh_{top} (X_{S})}}$ existed; we prove this and we give an explicit formula for the limit. \begin{corollary} \label{limit2}Let $S\subseteq\mathbb{N}$ satisfy $\gcd(S+1)=1$, let $\mu$ be the unique MME on $X_{S}$, and let $\lambda = e^{h_{top}(X_{S})}$. Then $\displaystyle\lim_{n\rightarrow\infty}\frac{\left\vert L_{n}(X_{S})\right\vert }{\lambda^n}$ exists and is equal to $\displaystyle \frac{\mu(1)\lambda}{(\lambda-1)^{2}}$ when $S$ is infinite and $\displaystyle \frac{\mu(1)\lambda (1 - \lambda^{-(\max S) - 1})^2}{(\lambda-1)^{2}}$ when $S$ is finite. \end{corollary} \begin{proof} Using the notation of the proof of Theorem~\ref{limit}, we define $w=1$ and write $\lambda=e^{h_{top}(X_{S})}$. 
If $S$ is infinite, it is easy to see that $\left\vert P_{i}\right\vert =\left\vert S_{i}\right\vert = 1$ for all $i$. As noted above, $X_{S}$ is entropy minimal and its unique measure of maximal entropy is mixing, and so the proof of Theorem~\ref{limit} implies that \[ \lim_{n\rightarrow\infty}\frac{\left\vert L_{n}(X_{S})\right\vert }{e^{nh_{top}(X_{S})}}=\frac{\mu(1)}{\lambda}\left( \sum_{i=0}^{\infty} \frac{1}{\lambda^{i}}\right) ^{2}=\frac{\mu(1)}{\lambda}\left( \frac {1}{1-\lambda^{-1}}\right) ^{2}=\frac{\mu(1)\lambda}{(\lambda-1)^{2}}. \] If instead $S$ is finite (say $M = \max S$), then the reader may check that $\left\vert P_{i}\right\vert$ and $\left\vert S_{i}\right\vert$ are both equal to $1$ for all $i \leq M$ and equal to $0$ for all $i > M$. Then, the proof of Theorem~\ref{limit} implies that \[ \lim_{n\rightarrow\infty}\frac{\left\vert L_{n}(X_{S})\right\vert }{e^{nh_{top}(X_{S})}}=\frac{\mu(1)}{\lambda}\left( \sum_{i=0}^{M} \frac{1}{\lambda^{i}}\right) ^{2}=\frac{\mu(1)}{\lambda}\left( \frac {1 - \lambda^{-M-1}}{1-\lambda^{-1}}\right) ^{2} = \frac{\mu(1)\lambda(1 - \lambda^{-M-1})^2}{(\lambda - 1)^2}, \] completing the proof. \end{proof} As noted in \cite{Cl}, a motivation for proving the existence of this limit is to fill a gap from \cite{spandl} for a folklore formula for the topological entropy of $X_{S}$. Two proofs of this formula are presented in \cite{Cl}, and Corollary \ref{maincor} yields yet another proof. \begin{corollary} Let $S\subseteq\mathbb{N} \cup \{0\}$ with $\gcd(S+1)=1$. Then $h_{top}(X_{S})=\log\lambda$, where $\lambda$ is the unique solution of \[ 1=\sum_{n\in S}\lambda^{-n-1}. \] \end{corollary} \begin{proof} For any $S$-gap shift $X_S$, we can write \[ \left[1\right] = \left(\bigsqcup_{n = 0}^{\infty} \left[ 10^{n}1\right]\right) \cup \{x \in X_S \ : \ x_0 = 1 \textrm{ and } \forall n > 0, x_n = 0\}. 
\] By shift-invariance, $\mu(10^{\infty})=0$, and so by Lemma~\ref{synchlem} and Corollary \ref{maincor}, \[ \mu(1)=\sum_{n\in S}\mu(10^{n}1)=\sum_{n\in S}\mu(1)e^{h_{top}(X_{S} )(-n-1)}\text{.} \] Dividing both sides by $\mu(1)$ completes the proof. \end{proof} We also prove that for every $S-$gap subshift, the unique measure of maximal entropy has highly constrained values, which are very similar to those of the Parry measure for shifts of finite type. \begin{theorem} \label{value}Let $X_{S}$ be an $S-$gap subshift and $\mu$ the measure of maximal entropy. Then $\mu(1)=\frac{1}{\sum_{n\in S}(n+1)e^{-h_{top}(X_{S})(n+1)}}$, and for every $w\in L(X_{S})$, there exists a polynomial $f_{w}$ with integer coefficients so that $\mu(w)=k_{w}+\mu(1)f_{w}(e^{-h_{top}(X_{S})})$ for some integer $k_{w}$. \end{theorem} \begin{proof} As noted above, $S$-gap shifts are synchronized and entropy minimal, and so have unique measures of maximal entropy. Denote by $\mu$ the unique measure of maximal entropy for some $S-$gap subshift $X_{S}$, and for readability we define \[ t=e^{-h_{top}(X)}. \] Since $X_S$ is entropy minimal, $\mu(1) > 0$, and so by the pointwise ergodic theorem (applied to $\chi_{[1]}$), $\mu$-a.e. point of $X_S$ contains infinitely many $1$s. Therefore, we can partition points of $X_S$ according to the closest $1$ symbols to the left and right of the origin, and represent $X_S$ (up to a null set) as the disjoint union $\bigcup_{n\in S} \bigcup_{i=0}^{n} \sigma_i \left[10^{n}1\right]$. Then by Lemma~\ref{synchlem} and Corollary \ref{maincor}, \begin{align*} 1 & =\sum_{n\in S}(n+1)\mu(10^{n}1)\\ & =\sum_{n\in S}(n+1)\mu(1)t^{n+1}\text{,} \end{align*} yielding the claimed formula for $\mu(1)$. Now we prove the general formula for $\mu(w)$, and will proceed by induction on the length $n$ of $w$. For the base case $n=1$, $\mu(0) = 1 - \mu(1)$, verifying the theorem. Now, assume that the theorem holds for every $n\leq N$ for some $N \geq1$. 
Let $w\in L_{N-1}(X_{S})$, and we will verify the theorem for $1w1$, $1w0$, $0w1$, and $0w0$. If $1w1 \notin L(X_{S})$, then \begin{align*} \mu(1w1) & =0,\\ \mu(1w0) & =\mu(1w) - \mu(1w1) = \mu(1w),\\ \mu(0w1) & =\mu(w1) - \mu(1w1) = \mu(w1), \text{ and}\\ \mu(0w0) & =1-\mu(1w1) - \mu(1w0) - \mu(0w1)= 1 - \mu(1w) - \mu(w1). \end{align*} The theorem now holds by the inductive hypothesis. If $1w1 \in L(X_{S})$, then as before $E_{X_{S}}(1w1)=E_{X_{S}}(1)$, implying \begin{align*} \mu(1w1) & = \mu(1) t^{1 + |w|},\\ \mu(1w0) & = \mu(1w) - \mu(1w1) = \mu(1w) - \mu(1) t^{1 + |w|},\\ \mu(0w1) & = \mu(w1) - \mu(1w1) = \mu(w1) - \mu(1) t^{1 + |w|}, \text{ and}\\ \mu(0w0) & = 1 - \mu(1w1) - \mu(1w0) - \mu(0w1) = 1 - \mu(1w) - \mu(w1) + \mu(1) t^{1 + |w|}, \end{align*} again implying the theorem by the inductive hypothesis and completing the proof. \end{proof} \section{$\mathbb{G}-$subshifts} \label{gsec} Throughout this section, $\mathbb{G}$ will denote a countable amenable group generated by a finite set $G=\left\{ g_{1},...,g_{d}\right\} $ which is torsion-free, i.e. $g^{n}=e$ if and only if $n=0$. \subsection{Main result} For any $N=(N_{1} ,...,N_{d})\in\mathbb{Z}_{+}^{d}$, we define $\mathbb{G}_{N}$ to be the subgroup generated by $\left\{ g_{1}^{N_{1}},...,g_{d}^{N_{d}}\right\} ,$ and use $\faktor{\mathbb{G}}{\mathbb{G}_{N}}$ to represent the collection $\left\{g\cdot\mathbb{G}_{N}:g\in\mathbb{G}\right\}$ of left cosets of $\mathbb{G}_N$. Clearly, $\left\vert \faktor{\mathbb{G}}{\mathbb{G}_{N}}\right\vert =N_{1}N_{2}\cdots N_{d}$. We again must begin with some relevant facts and definitions. The following structural lemma is elementary, and we leave the proof to the reader. 
\begin{lemma} \label{one} For any amenable $\mathbb{G}$ and $F\Subset\mathbb{G}$, there exists $N=(N_{1},...,N_{d})\in\mathbb{Z}_{+}^{d}$ such that for every nonidentity $g \in\mathbb{G}_{N}$, $g\cdot F\cap F=\varnothing.$ \end{lemma} As in the $\mathbb{Z}$ case, if $v,w\in L_{F}(\mathcal{A}^{\mathbb{G}})$ for some $F\Subset\mathbb{G}$, we define the function $O_{v}:L(\mathcal{A}^{ \mathbb{G}})\rightarrow \mathcal{P}(\mathbb{G})$ which sends a word to the set of locations where $v$ appears as a subword, i.e. \begin{equation*} O_{v}(u):=\left\{ g\in\mathbb{G}:\sigma_{g}(u) \in [v]\right\} . \end{equation*} We also define the function $R_{u}^{v\rightarrow w}:O_{v}(u)\rightarrow L( \mathcal{A}^{\mathbb{G}})$, where $R_{u}^{v\rightarrow w}(g)$ is the word you obtain by replacing the occurrence of $v$ at $g \cdot F$ within $u$ by $w $. We now again must define a way to replace many occurrences of $v$ by $w$ within a word $u$, but will do this via restricting the sets of locations where the replacements occur rather than the pairs $(v,w)$. $\ $We say $ S\subset\mathbb{G}$ is $F-$\textbf{sparse }if $g\cdot F\cap g^{\prime}\cdot F=\varnothing$ for every unequal pair $g,g^{\prime}\in S$. When $v,w \in L_{F}(X)$ and $S$ is $F-$sparse, we may simultaneously replace occurrences of $v$ by $w$ at locations $g \cdot F$, $g \in S$ by $w$ without any of the complications dealt with in the one-dimensional case, and we denote the resulting word by $R_{u}^{v\rightarrow w}(S)$. Formally, $ R_{u}^{v\rightarrow w}(S)$ is just the image of $u$ under the composition of $R_{u}^{v \rightarrow w}(s)$ over all $s \in S$. The following lemmas are much simpler versions of Lemmas \ref{injective} and \ref{preimage} for $F$-sparse sets. \begin{lemma} \label{Ginjective} For any $F$, $v,w\in L_{F}(X)$, and $F$-sparse set $T \subseteq O_{v}(u)$, $R_{u}^{v\rightarrow w}$ is injective on subsets of $T$. \end{lemma} \begin{proof} Fix $F, u, v, w, T$ as in the lemma. 
If $S \neq S' \subseteq T$, then either $S \setminus S'$ or $S' \setminus S$ is nonempty; assume without loss of generality that it is the former. Then, if $s \in S \setminus S'$, by definition $(R_{u}^{v\rightarrow w}(S))_{s \cdot F} = w$ and $(R_{u}^{v\rightarrow w}(S'))_{s \cdot F} = v$, and so $R_{u}^{v\rightarrow w}(S) \neq R_{u}^{v\rightarrow w}(S')$. \end{proof} \begin{lemma} \label{Gpreimage} For any $F$ and $v,w\in L_{F}(X)$, any $F$-sparse set $T \subseteq O_{v}(u)$, any $u^{\prime}$, and any $m \leq|T \cap O_{w}(u^{\prime })|$, \begin{equation*} |\{(u, S) \ : \ S \text{ is $F$-sparse}, |S| = m, S \subseteq T, u^{\prime}= R_{u}^{v\rightarrow w}(S)\}| \leq{\binom{|T \cap O_{w}(u^{\prime})|}{m}}. \end{equation*} \end{lemma} \begin{proof} Fix any such $F, u', v, w, T, m$ as in the lemma. Clearly, for any $S$, $S \subseteq O_w(R_{u}^{v\rightarrow w}(S))$, and so if $R_{u}^{v\rightarrow w}(S) = u'$, then $S \subseteq O_w(u')$. There are only ${\binom{|T \cap O_{w}(u^{\prime})|}{m}}$ choices for $S \subseteq T \cap O_w(u')$ with $|S| = m$, and an identical argument to that of Lemma~\ref{preimage} shows that for each such $S$, there is only one $u$ for which $R_{u}^{v\rightarrow w}(S) = u'$. \end{proof} Whenever $v,w\in L_{F}(X)$ and $E_{X}(v)\subseteq E_{X}(w)$, clearly $ R_{u}^{v\rightarrow w}(S)\in L(X)$ for any $F$-sparse set $S\subseteq O_{v}(u)$; this, along with the use of Lemma \ref{one}, will be the keys to the counting arguments used to prove our main result for $\mathbb{G}$-subshifts. \begin{theorem} \label{Gtheorem} Let $X$ be a $\mathbb{G-}$subshift, $\mu$ a measure of maximal entropy of $X$, $F\Subset\mathbb{G}$, and $v,w\in L_{F}(X).$ If $ E(v)\subseteq E(w)$ then \begin{equation*} \mu(v)\leq\mu(w). \end{equation*} \end{theorem} \begin{proof} Take $\mathbb{G}$, $X$, $\mu$, $F$, $v$, and $w$ as in the theorem, and suppose for a contradiction that $\mu(v) > \mu(w)$. Choose any $\delta \in \mathbb{Q}_{+}$ with $\delta< \frac{\mu(v) - \mu(w)}{5}$.
Let $F_n$ be a Følner sequence satisfying Theorem~\ref{SMBthm}. For every $n\in\mathbb{Z}_{+},$ we define \begin{equation*} S_{n}:=\left\{ u\in L_{F_{n}}(X):\left\vert O_{v}(u)\right\vert \geq\left\vert F_{n}\right\vert (\mu(v)-\delta)\text{ and }\left\vert O_{w}(u)\right\vert \leq\left\vert F_{n}\right\vert (\mu(w)+\delta)\right\} . \end{equation*} By the pointwise ergodic theorem (applied to $\chi_{[v]}$ and $\chi_{[w]}$), $\mu(S_{n})\rightarrow1$, and then by Corollary~\ref{SMBcor}, \begin{equation} \lim_{n\rightarrow\infty}\frac{\log|S_{n}|}{|F_{n}|}=h_{top}(X). \label{Snbound} \end{equation} Let $N\in\mathbb{Z}_{+}^{d}$ be a tuple obtained by Lemma \ref{one} that is minimal in the sense that if any of the coordinates is decreased then it will not satisfy the property of the lemma. We note that for every $u\in S_{n}$, $|O_{v}(u)|-|O_{w}(u)|>3\delta|F_{n}|$. Therefore, for every $u\in S_{n}$, there exists $h(u)\in \faktor{ \mathbb{G}}{\mathbb{G}_{N}}$ such that \begin{equation} \left\vert O_{v}(u)\cap h(u) \right\vert -\left\vert O_{w}(u)\cap h(u) \right\vert >\frac{3\delta}{M} |F_{n}|\text{,} \label{ineq} \end{equation} where $M=\left| \faktor{\mathbb{G}}{\mathbb{G}_{N}}\right| .$ For every $u\in S_{n}$, define $k_{n}(u)\in\mathbb{N}$ satisfying $\left\vert O_{v}(u)\cap h(u)\right\vert \in [ k_{n} (u)|F_{n}|\frac{\delta}{M},$ \newline $(k_{n}(u)+1)|F_{n}|\frac{\delta}{M}] $. Using $M=\left| \faktor{\mathbb{G}}{\mathbb{G}_{N}}\right| $ and the fact that $3\leq k_{n}(u)\leq\frac{M}{\delta}$, we may choose $S_{n}^{\prime }\subseteq S_{n}$ with $|S_{n}^{\prime}|\geq\frac{|S_{n}|}{M^{2}/\delta}$, $ h_{n} \in\faktor{\mathbb{G}}{\mathbb{G}_{N}}$ and $k_{n}\in\mathbb{N}$ such that for every $u\in S_{n}^{\prime}$ we have $h(u)=h_{n}$ and $k_{n}(u)=k_{n} $.
This implies that for every $u\in S_{n}^{\prime}$ \begin{align*} \left\vert O_{v}(u)\cap h_{n}(u)\right\vert & \geq(k_{n}+1)|F_{n}|\frac{ \delta}{M}\text{, and hence} \\ \left\vert O_{w}(u)\cap h_{n}(u)\right\vert & \leq(k_{n}-2)|F_{n}|\frac{ \delta}{M}\text{ (using (\ref{ineq})).} \end{align*} By the pigeonhole principle, we may pass to a sequence on which $h_{n} = h$ and $k_{n} = k$ are constant, and for the rest of the proof consider only $n$ in this sequence. Let $\varepsilon\in\mathbb{Q}_{+}$with $\varepsilon <\frac{ \delta}{|F \cdot F^{-1}|}$. For each $u\in S_{n}^{\prime}$, we define \begin{equation*} A_{u}:=\left\{ R_{u}^{v\rightarrow w}(S):S\subseteq O_{v}(u)\cap h \text{ and } \left\vert S\right\vert =\varepsilon\left\vert F_{n}\right\vert /M\right\} \end{equation*} (without loss of generality we may assume $\varepsilon\left\vert F_{n}\right\vert /M$ is an integer by taking a sufficiently large $n$) $.$ Since $E_{X}(v)\subseteq E_{X}(w)$, we have that $A_{u}\subset L(X).$ By Lemma~\ref{Ginjective}, \begin{equation*} |A_{u}| \geq{\binom{|O_{v}(u)\cap h|} {\varepsilon\left\vert F_{n}\right\vert /M}} \geq{\binom{\delta k |F_{n}|/M} {\varepsilon\left\vert F_{n}\right\vert /M}}. \end{equation*} On the other hand, for every $u^{\prime}\in\bigcup_{u\in S_{n}}A_{u}$, we have that \begin{equation*} \left\vert O_{w}(u^{\prime})\cap h\right\vert \leq\frac{|F_{n}|}{M}\left( (k_{n}-2)\delta+\varepsilon|F \cdot F^{-1}|\right) \leq\frac{\delta|F_{n}|}{M} (k_{n}-1). \end{equation*} (here, we use $\left\vert O_{w}(u)\cap h(u)\right\vert \leq(k_{n}-2)|F_{n}| \frac{\delta}{M}$ plus $\left\vert S\right\vert =\varepsilon \left\vert F_{n}\right\vert /M$ and the simple fact that a replacement of $v$ by $w$ in $u$ can create at most $|F \cdot F^{-1}|$ new occurrences of $w$.) 
Therefore, by Lemma~\ref{Gpreimage}, \begin{equation*} \left\vert \left\{ u\in S_{n}^{\prime}:u^{\prime}\in A_{u}\right\} \right\vert \leq{\binom{\delta(k_{n}-1)|F_{n}|/M}{\varepsilon\left\vert F_{n}\right\vert /M}}. \end{equation*} By combining the two inequalities, we see that \begin{equation} |L_{F_{n}}(X)|\geq\left\vert \bigcup_{u\in S_{n}^{\prime}}A_{u}\right\vert \geq|S_{n}^{\prime}|{\binom{\delta k_{n}|F_{n}|/M}{\varepsilon\left\vert F_{n}\right\vert /M}}{\binom{\delta(k_{n}-1)|F_{n}|/M}{\varepsilon\left\vert F_{n}\right\vert /M}}^{-1}. \end{equation} Now, we take logarithms of both sides, divide by $\left\vert F_{n}\right\vert $, and let $n$ approach infinity (along the earlier defined sequence). Then we use the definition of entropy, the inequality $ |S_{n}^{\prime}|\geq \frac{|S_{n}|}{M^{2}/\delta}$, (\ref{Snbound}), and Stirling's approximation to yield \begin{align*} h_{top}(X) & \geq h_{top}(X)+\frac{\varepsilon}{M}\bigg[\left( \frac{\delta k }{\varepsilon}\log\frac{\delta k}{\varepsilon}-\left( \frac{\delta k}{ \varepsilon}-1\right) \log\left( \frac{\delta k}{\varepsilon}-1\right) \right) \\ & -\left( \frac{\delta(k-1)}{\varepsilon}\log\frac{\delta(k-1)}{\varepsilon } -\left( \frac{\delta(k-1)}{\varepsilon}-1\right) \log\left( \frac { \delta(k-1)}{\varepsilon}-1\right) \right) \bigg]. \end{align*} Since the function $x \log x - (x - 1) \log(x-1)$ is strictly increasing for $x > 1$, the right-hand side of the above is strictly greater than $ h_{top}(X)$, a contradiction. Therefore, our original assumption does not hold and hence $\mu(v) \leq\mu(w)$. \end{proof} \subsection{Applications to hereditary subshifts} \label{hered} One class of $\mathbb{G-}$subshifts with many pairs of words satisfying $E_{X}(v)\subsetneq E_{X}(w)$, allowing for the use of Theorem~\ref{Gtheorem}, are the hereditary subshifts (introduced in \cite{KL1}).
A partial order $\leq$ on a finite set $\mathcal{A}$ induces a partial order on $\mathcal{A}^{n}$ and $\mathcal{A}^{\mathbb{G}}$ (coordinatewise) which will also be denoted by $\leq$. When $\mathcal{A=}\left\{ 0,1,\ldots,m\right\} $ we will always use the linear order $0 \leq1 \leq\ldots\leq m$. \begin{definition} Let $X\subseteq\mathcal{A}^{\mathbb{G}}$ be a subshift and $\leq$ a partial order on $\mathcal{A}$. We say $X$ is $\leq-$\textbf{hereditary (or simply hereditary)} if for every $x\in\mathcal{A}^{\mathbb{G}}$ for which there exists $y\in X$ with $x\leq y$, we have $x\in X.$ \end{definition} Examples of hereditary shifts include $\beta-$shifts \cite{Kw}, $\mathscr{B}-$free shifts (\cite{KLW}), spacing shifts (\cite{LZ}), multi-choice shifts (\cite{LMP}) and bounded density shifts (\cite{S}). Many of these examples have a unique measure of maximal entropy, but not every hereditary subshift has this property (see \cite{KLW}). This definition immediately implies that whenever $x \leq y$ for $x, y \in L(X)$, $E_{X}(y) \subseteq E_{X}(x)$, yielding the following corollary of Theorem~\ref{Gtheorem}. \begin{corollary} Let $X$ be a $\leq-$hereditary $\mathbb{G-}$subshift, $\mu$ a measure of maximal entropy, and $v,w\in L_{n}(X)$ for some $n \in\mathbb{N}.$ If $v\leq w$ then $\mu(w)\leq\mu(v).$ \end{corollary} In particular, if $\mathcal{A=}\left\{ 0,1,\ldots,m\right\}$, then $\mu(m)\leq\mu(m-1)\leq\cdots\leq\mu(1)\leq\mu(0)$. Having $v\leq w$ is sufficient but not necessary for $E(w)\subseteq E(v). $ In particular, for $\beta-$shifts and bounded density shifts, there are many other pairs (with different lengths) where this happens. This is due to an additional property satisfied by these hereditary shifts. \begin{definition} Let $X\subseteq\left\{ 0,1,...,m\right\} ^{\mathbb{Z}}$ be a hereditary $\mathbb{Z}$-subshift.
We say $X$ is \textbf{$i$-hereditary} if for every $u\in L_{n}(X)$ and $u^{\prime}$ obtained by inserting a $0$ somewhere in $u$, it is the case that $u^{\prime}\in L_{n+1}(X)$. \end{definition} In particular, $\beta-$shifts and bounded density shifts are $i$-hereditary, but not every spacing shift is $i$-hereditary. It is immediate that any $i$-hereditary shift satisfies $E_{X}(0^{j}) \subseteq E_{X}(0^{k})$ whenever $j \leq k$. We can get equality if we assume the additional property of specification. \begin{definition} A $\mathbb{Z}$-subshift $X$ has the \textbf{specification property (at distance } $N$)\textbf{\ }if for every $u,w\in L(X)$ there exists $v\in L_{N}(X)$ such that $uvw\in L(X).$ \end{definition} Clearly, if $X$ is hereditary and has the specification property at distance $N$, then $u0^{N}w$ and $u0^{N+1}w\in L(X)$ for all $u,w\in L(X)$, and so in this case $E_{X}(0^{N})=E_{X}(0^{N+1})$. We then have the following corollary of Theorem~\ref{hardcase}. \begin{corollary} \label{hereditary}Let $X\subseteq\left\{ 0,1,...,m\right\} ^{\mathbb{Z}}$ be an $i$-hereditary $\mathbb{Z-}$subshift. Then for every $n\in\mathbb{Z}_{+}$ \[ h_{top}(X)\geq\log\frac{\mu(0^{n})}{\mu(0^{n+1})}. \] Furthermore, if $X$ has the specification property at distance $N$, then \[ h_{top}(X)=\log\frac{\mu(0^{N})}{\mu(0^{N+1})}. \] \end{corollary} We note that if $X$ has the specification property at distance $N$, then it also has it at any larger distance. Therefore, the final formula can be rewritten as \begin{multline*} h_{top}(X)=\lim_{N\rightarrow\infty}\log\frac{\mu(0^{N})}{\mu(0^{N+1})} =\lim_{N\rightarrow\infty}-\log\mu(x(0)=0\ |\ x_{[-N,-1]}=0^{N})\\ =-\log\mu(x(0)=0\ |\ x_{(-\infty,-1]}=0^{\infty}), \end{multline*} recovering a formula (in fact a more general one for topological pressure of $\mathbb{Z}^{d}$ SFTs) proved under different hypotheses in \cite{MP}. \end{document}
\begin{document} \title{Evaluation of Tornheim's type of double series} \begin{abstract} We examine values of certain Tornheim's type of double series with odd weight. As a result, an affirmative answer to a conjecture about the parity theorem for the zeta function of the root system of the exceptional Lie algebra $G_2$, proposed by Komori, Matsumoto and Tsumura, is given. \end{abstract} \section{Introduction and main theorem} For integers $a,b,k_1,k_2,k_3\ge1$, let \[ \zeta_{a,b}(k_1,k_2,k_3):=\sum_{m,n>0} \frac{1}{m^{k_1}n^{k_2}(am+bn)^{k_3}}.\] This series, which converges absolutely and gives a real number, was first introduced by the second author \cite{Okamoto1} in the study of evaluations of special values of the zeta functions of root systems associated with $A_2, B_2$ and $G_2$. Since Tornheim \cite{Tornheim} first studied the value $\zeta_{1,1}(k_1,k_2,k_3)$, we call the value $\zeta_{a,b}(k_1,k_2,k_3)$ Tornheim's type of double series. The purpose of this paper is to express $\zeta_{a,b}(k_1,k_2,k_3)$ with $k_1+k_2+k_3$ odd as ${\mathbb Q}$-linear combinations of two products of certain zeta values. As a prototype, we have in mind the analogous story for the parity theorem for multiple zeta values \cite[Corollary 8]{IKZ} and \cite{Tsumura} and for Tornheim's series \cite[Theorem 2]{HWZ}. For example, the identity \[ \zeta_{1,1}(1,1,3)=4\zeta(5)-2\zeta(2)\zeta(3)\] is well-known. Similar studies have been done in many articles \cite{Nakamura,SubbaraoSitaramachandrarao,Tsumura1,Tsumura2,Tsumura4,ZhouCaiBradley} (see also \cite{Okamoto}). We will generalize the above expression to the value $\zeta_{a,b}(k_1,k_2,k_3)$ with $k_1+k_2+k_3$ odd. As a consequence, we give an affirmative answer to a conjecture about special values of the zeta function of the root system of $G_2$, which was proposed by Komori, Matsumoto and Tsumura \cite[Eq.~(7.1)]{KMT5}. We now state our main result. 
We use the Clausen-type functions defined for a positive integer $k\ge2$ and $x\in\mathbb R$ by \begin{equation}\label{eq1} \begin{aligned} C_k(x) &:= {\rm Re}\ Li_k(e^{2\pi ix}) = \sum_{m>0} \frac{\cos (2\pi mx)}{m^k},\\ S_k(x) &:= {\rm Im}\ Li_k(e^{2\pi ix}) = \sum_{m>0} \frac{\sin (2\pi mx)}{m^k}, \end{aligned} \end{equation} where $ Li_k(z)$ is the polylogarithm $\sum_{m>0} \frac{z^m}{m^k}$. Note that $C_k(x)$ equals the Riemann zeta value $\zeta(k):=\sum_{m>0}\frac{1}{m^k}$ when $x\in {\mathbb Z}$, and $S_k(x)$ is $0$ when $x\in \frac{1}{2}{\mathbb Z}$. \begin{theorem}\label{1_1} For positive integers $N,a,b,k,k_1,k_2,k_3$ with $N={\rm lcm}(a,b)$ and $k=k_1+k_2+k_3$ odd, the value $ \zeta_{a,b}(k_1,k_2,k_3)$ can be expressed as ${\mathbb Q}$-linear combinations of $\pi^{2n}C_{k-2n}(\frac{d}{N})$ and $\pi^{2n+1}S_{k-2n-1} (\frac{d}{N})$ for $0\le n \le \frac{k-3}{2}$ and $ d\in {\mathbb Z}/N{\mathbb Z}$. \end{theorem} Theorem \ref{1_1} will be proved in Section 4 by using the generating functions. This leads to a recipe for giving a formula for the ${\mathbb Q}$-linear combination in Theorem \ref{1_1}. More precisely, one can deduce an explicit formula from Corollary \ref{2_2} and Propositions \ref{2_3}, \ref{3_4} and \ref{3_5}, but it might be much complicated (we do not develop the explicit formulas in this paper). As an example of a simple identity, we have \begin{equation}\label{eq1_1} \zeta_{1,3}(1,1,3) = \frac{1}{81} \left(367\zeta(5)-19\pi^2\zeta(3)-27 \pi S_4(\tfrac13) -4 \pi^3 S_2(\tfrac13)\right). \end{equation} We apply Theorem \ref{1_1} to proving the conjecture suggested by Komori, Matsumoto and Tsumura \cite[Eq.~(7.1)]{KMT5}. This will be described in Section 5. 
It is worth mentioning that since the value $\zeta_{a,b}(k_1,k_2,k_3)$ can be expressed as ${\mathbb Q}$-linear combinations of double polylogarithms \begin{equation}\label{eq1_2} Li_{k_1,k_2}(z_1,z_2)=\sum_{0<m<n} \frac{z_1^m z_2^n}{m^{k_1}n^{k_2}}, \end{equation} Theorem \ref{1_1} might be proved by the parity theorem for double polylogarithms obtained by Panzer \cite{Erik} and Nakamura \cite{Nakamura}, which is illustrated in Remark \ref{4_1}. However, in this paper we do not use their result to prove Theorem \ref{1_1}, since we want to keep this paper self-contained. The contents of this paper are as follows. In Section 2, we give an integral representation of the generating function of the values $\zeta_{a,b}(k_1,k_2,k_3)$ for any integers $a,b\ge1$. In Section 3, the integral is computed. Section 4 gives a proof of Theorem \ref{1_1}. In Section 5, we recall the question \cite[Eq.~(7.1)]{KMT5} and give an affirmative answer to this. \section{Integral representation} In this section, we give an integral representation of the generating function of the values $\zeta_{a,b}(k_1,k_2,k_3)$ for any integers $a,b\ge1$. The integral representation of the value $\zeta_{a,b}(k_1,k_2,k_3)$ was first given by the second author \cite[Theorem 4.4]{Okamoto1}, following the method used by Zagier. We recall it briefly.
For an integer $k\ge0$, the Bernoulli polynomial $B_k(x)$ of order $k$ is defined by \[\sum_{k\ge0} B_k(x)\frac{t^k}{k!} = \frac{te^{xt}}{e^t-1}.\] The polynomial $B_k(x)$ admits the following expression (see \cite[Theorem 4.11]{AIK}): for $k\ge1$ and $x\in \mathbb R$ ($x\in \mathbb R-{\mathbb Z}$, if $k=1$) \[B_k(x-[x]) = \begin{cases}-2i \dfrac{k!}{(2\pi i)^k}\displaystyle\sum_{m>0} \dfrac{\sin(2\pi m x)}{m^k} & k\ge1:{\rm odd},\\ -2 \dfrac{k!}{(2\pi i)^k}\displaystyle\sum_{m>0} \dfrac{\cos(2\pi m x)}{m^k} & k\ge2:{\rm even},\end{cases}\] where $i=\sqrt{-1}$ and the summation $\displaystyle\sum_{m>0}$ is regarded as $\displaystyle\lim_{N\rightarrow \infty} \sum_{N>m>0}$ when $k=1$ (this ensures convergence). We define the modified (generalized) Clausen function for $k\ge1$ and $x\in \mathbb R$ ($x\in \mathbb R-{\mathbb Z}$, if $k=1$) by \[ Cl_k(x-[x]) = \begin{cases}-\dfrac{k!}{(2\pi i)^{k-1}}\displaystyle\sum_{m>0} \dfrac{\cos(2\pi m x)}{m^k} & k\ge1:{\rm odd},\\ -i \dfrac{k!}{(2\pi i)^{k-1}}\displaystyle\sum_{m>0} \dfrac{\sin(2\pi m x)}{m^k} & k\ge2:{\rm even}.\end{cases}\] With this, for $k\ge1$ and $x\in\mathbb R$ ($x\in \mathbb R-{\mathbb Z}$ if $k=1$), the polylogarithm $ Li_k(e^{2\pi i x}) $ can be written in the form \begin{equation}\label{eq2_1} Li_k(e^{2\pi i x}) =- \frac{(2\pi i)^{k-1}}{ k!}\left( Cl_k(x-[x]) + \pi i B_k(x-[x]) \right). \end{equation} We introduce formal generating functions. 
For $x\in \mathbb R-{\mathbb Z}$, let \[ \beta(x;t):=\sum_{k>0} \frac{B_k(x-[x])t^k}{k!} \quad {\rm and} \quad \gamma(x;t):=\sum_{k>0} \frac{Cl_k(x-[x])t^k}{k!}.\] \begin{proposition}\label{2_1} For integers $a,b\ge1$, we have \begin{align*} &\sum_{k_1,k_2,k_3>0} \zeta_{a,b}(k_1,k_2,k_3) t_1^{k_1}t_2^{k_2}t_3^{k_3}\\ &=- \frac{1}{4\pi i} \int_0^1 \big( \gamma(ax;2\pi it_1)\beta(bx;2\pi it_2) + \beta(ax;2\pi i t_1) \gamma(bx;2\pi it_2)\big) \beta(x;-2\pi it_3)dx \\ &+ \frac{1}{4\pi^2} \int_0^1 \big(\gamma(ax;2\pi it_1)\gamma(bx;2\pi it_2)-\pi^2 \beta(ax;2\pi it_1)\beta(bx;2\pi it_2)\big) \beta(x;-2\pi i t_3)dx . \end{align*} \end{proposition} \begin{proof} When $k_1,k_2,k_3\ge2$, it follows \begin{align*} \int_0^1 Li_{k_1}\big(e^{2\pi i ax}\big)Li_{k_2}\big(e^{2\pi i bx}\big)\overline{Li_{k_3}\big(e^{2\pi ix})}dx &=\int_0^1 \sum_{m,n,l>0}\frac{e^{2\pi i max}e^{2\pi inbx}e^{-2\pi ilx}}{m^{k_1}n^{k_2}l^{k_3}}dx\\ &=\sum_{m,n,l>0}\frac{1}{m^{k_1}n^{k_2}l^{k_3}}\int_0^1e^{2\pi ix (am+bn-l)}dx\\ &= \zeta_{a,b}(k_1,k_2,k_3), \end{align*} where $\overline{Li_{k_3}\big(e^{2\pi ix})}$ stands for complex conjugate of $Li_{k_3}\big(e^{2\pi ix})$. For $k_1,k_2,k_3\ge1$, the above equality is justified by replacing the integral $\displaystyle\int_0^1$ with \begin{equation}\label{eq11} \lim_{\varepsilon\rightarrow 0}\sum_{j=1}^{{\rm lcm}(a,b)} \int_{\frac{j-1}{{\rm lcm}(a,b)}+\varepsilon}^{\frac{j}{{\rm lcm}(a,b)}-\varepsilon}, \end{equation} where ${\rm lcm}(a,b)$ is the least common multiple of $a$ and $b$ (see \cite[Theorem 4.4]{Okamoto1} for the detail). Letting $ Li(x;t):=\displaystyle\sum_{k>0} Li_k(e^{2\pi i x})t^k$, we therefore obtain \begin{equation}\label{eq2_2} \sum_{k_1,k_2,k_3>0} \zeta_{a,b}(k_1,k_2,k_3) t_1^{k_1}t_2^{k_2}t_3^{k_3}= \int_0^1 Li(ax;t_1)Li(bx;t_2)\overline{Li(x;t_3)}dx. 
\end{equation} Furthermore, the generating function of $Li_k(e^{2\pi ix})$ with $x\in \mathbb R-{\mathbb Z}$ can be written in the form \begin{equation}\label{eq2_3} Li(x;t) = -\frac{1}{2\pi i }\left( \gamma(x;2\pi i t)+\pi i \beta(x;2\pi it)\right), \end{equation} and hence, the right-hand side of \eqref{eq2_2} is equal to \begin{equation}\label{eq2_4} \begin{aligned} &\frac{1}{(2\pi i )^3}\int_0^1 \big(\gamma(ax;2\pi i t_1)+\pi i \beta(ax;2\pi it_1)\big)\\ &\times \big(\gamma(bx;2\pi i t_2)+\pi i \beta(bx;2\pi it_2)\big)\big(\gamma(x;-2\pi i t_3)-\pi i \beta(x;-2\pi it_3)\big)dx. \end{aligned} \end{equation} We note that, similarly to \eqref{eq2_2}, one obtains the relation \begin{align*} \int_0^1 Li(ax;t_1)Li(bx;t_2)Li(x;-t_3)dx=0, \end{align*} and substituting \eqref{eq2_3} to the above identity, one has \begin{align*} & \int_0^1 \big(\gamma(ax;2\pi i t_1)+\pi i \beta(ax;2\pi it_1)\big)\big(\gamma(bx;2\pi i t_2)+\pi i \beta(bx;2\pi it_2)\big)\gamma(x;-2\pi i t_3)dx\\ &=- \pi i \int_0^1 \big(\gamma(ax;2\pi i t_1)+\pi i \beta(ax;2\pi it_1)\big)\big(\gamma(bx;2\pi i t_2)+\pi i \beta(bx;2\pi it_2)\big)\beta(x;-2\pi it_3)dx. \end{align*} With this, \eqref{eq2_4} is reduced to \begin{align*} -\frac{1}{(2\pi i )^2}\int_0^1 \big(\gamma(ax;2\pi i t_1)+\pi i \beta(ax;2\pi it_1)\big) \big(\gamma(bx;2\pi i t_2)+\pi i \beta(bx;2\pi it_2)\big)\beta(x;-2\pi it_3)dx, \end{align*} which completes the proof. \end{proof} The coefficient of $t^k$ in $\gamma(x;2\pi i t)$ (resp. $\beta(x;2\pi it)$) is a real-valued function, if $k$ is even, and a real-valued function times $i=\sqrt{-1}$, if $k$ is odd. Thus, comparing the coefficient of both sides, we have the following corollary. For simplicity, for integers $a,b\ge1$ we let \begin{equation}\label{eq111} F_{a,b}(t_1,t_2,t_3) := \int_0^1 \gamma(ax;t_1)\beta(bx;t_2) \beta(x;-t_3)dx, \end{equation} where the integral is defined formally by term-by-term integration and by \eqref{eq11}. 
\begin{corollary}\label{2_2} One has \begin{align*} &\sum_{\substack{k_1,k_2,k_3>0\\k_1+k_2+k_3:{\rm odd}}} \zeta_{a,b} (k_1,k_2,k_3)t_1^{k_1}t_2^{k_2}t_3^{k_3}\\ &=- \frac{1}{4\pi i} F_{a,b}(2\pi i t_1,2\pi it_2,2\pi it_3) - \frac{1}{4\pi i} F_{b,a}(2\pi i t_2,2\pi it_1,2\pi it_3). \end{align*} \end{corollary} Remark that, using the same method, one can give an integral expression of the generating function of the Riemann zeta values, which will be used later. \begin{proposition}\label{2_3} For integers $a,b\ge1$, we have \begin{equation}\label{eq2_5} \frac{1}{2\pi i} \int_0^1 \gamma(ax;2\pi it_1) \beta(bx;-2\pi it_2)dx=\sum_{\substack{r,s>0\\r+s:{\rm odd}}}\frac{{\rm gcd}(a,b)^{r+s}}{a^sb^r} \zeta(r+s) t_1^{r}t_2^{s}. \end{equation} \end{proposition} \begin{proof} For any integers $a,b\ge1$, it follows \[ \int_0^1 Li(ax;t_1)\overline{Li(bx;t_2)}dx=\sum_{r,s>0}\frac{{\rm gcd}(a,b)^{r+s}}{a^sb^r} \zeta(r+s) t_1^{r}t_2^{s}.\] By the relation $\displaystyle\int_0^1 Li(ax;t_1)Li(bx;-t_2)dx=0$ and \eqref{eq2_3}, the left-hand side of the above equation can be reduced to \begin{equation*} \frac{1}{2\pi i} \int_0^1 \big( \gamma(ax;2\pi it_1)+\pi i \beta(ax;2\pi it_1)\big) \beta(bx;-2\pi it_2)dx. \end{equation*} Comparing the coefficients of $t_1^rt_2^s$, we complete the proof. \end{proof} \section{Evaluation of integrals} In this section, we compute the integral $F_{a,b}(t_1,t_2,t_3)$. We denote the generating function of the Bernoulli polynomials by $\beta_0(x;t)$: \[\beta_0(x;t):= \frac{te^{xt}}{e^t-1}=\sum_{k\ge0} B_k(x)\frac{t^k}{k!}.\] For integers $b,c\ge1$, we set \begin{align*} \alpha_{b} (t_1,t_2)&:=\beta_0(0;t_1)\beta_0(0;-t_2)\frac{e^{bt_1-t_2}-1}{bt_1-t_2},\\ \widetilde{\alpha}_{b,c}(t_1,t_2) &:= -t_1e^{-ct_1} \beta_0(0;-t_2) \frac{e^{bt_1-t_2}-1}{bt_1-t_2}, \end{align*} which are elements in the formal power series ring ${\mathbb Q}[[t_1,t_2]]$. 
\begin{lemma}\label{3_1} For any integers $b,d\ge1$, we have \[ e^{-dt_1}\alpha_b(t_1,t_2) =\alpha_b (t_1,t_2)+ \sum_{c=1}^{d} \widetilde{\alpha}_{b,c} (t_1,t_2).\] \end{lemma} \begin{proof} By the relation $B_k(x)=B_k(x+1)-kx^{k-1}$ for $k\in {\mathbb Z}_{\ge0}$ (see \cite[Proposition 4.9 (2)]{AIK}), we have $\beta_0(x;t)=\beta_0(x+1;t)-te^{xt}$. Using this formula with $x=-d,-d+1,\ldots,1$ repeatedly, one gets \[ \beta_0(-d;t)=\beta_0(-d+1;t)-te^{-dt}=\cdots =\beta_0(0;t)-t\sum_{c=1}^d e^{-ct}.\] Hence, we obtain \begin{align*} e^{-dt_1}\alpha_b(t_1,t_2) &= \beta_0(-d;t_1)\beta_0(0;-t_2) \frac{e^{bt_1-t_2}-1}{bt_1-t_2}\\ &=\alpha_b(t_1,t_2)-t_1 \sum_{c=1}^d e^{-ct_1}\beta_0(0;-t_2) \frac{e^{bt_1-t_2}-1}{bt_1-t_2}\\ &=\alpha_b(t_1,t_2)+ \sum_{c=1}^d\widetilde{\alpha}_{b,c}(t_1,t_2), \end{align*} which completes the proof. \end{proof} \begin{remark}\label{3_2} Let us denote by $A_b(r,s)$ (resp.~$\widetilde{A}_{b,c}(r,s)$) the coefficient of $t_1^rt_2^s$ in $\alpha_b(t_1,t_2)$ (resp.~in $\widetilde{\alpha}_{b,c}(t_1,t_2)$). Then, we have \[A_b(r,s) = \sum_{\substack{p_1+q_1=r\\ p_2+q_2=s\\ p_1,p_2,q_1,q_2\ge0}} \frac{(-1)^{q_2+p_2} b^{p_1} B_{q_1}B_{q_2}}{p_1!p_2!q_1!q_2!(p_1+p_2+1)}\] and \[\widetilde{A}_{b,c}(r,s) = \sum_{\substack{p_1+q_1=r\\ p_2+q_2=s\\ p_1,p_2,q_2\ge0\\q_1\ge1}} \frac{(-1)^{q_1+q_2+p_2} c^{q_1-1} b^{p_1} B_{q_2}}{p_1!(q_1-1)!p_2!q_2!(p_1+p_2+1)},\] where $B_k=B_k(1)=(-1)^kB_k(0)$ is the $k$-th Bernoulli number. We note that since $\widetilde{\alpha}_{b,c}(t_1,t_2)\in t_1{\mathbb Q}[[t_1,t_2]]$, we have $\widetilde{A}_{b,c}(0,s)=0$ for any $s\in {\mathbb Z}_{\ge0}$. \end{remark} \begin{lemma}\label{3_3} Let $b,d$ be positive integers with $d \in \{0,1,\ldots,b-1\}$. Then, for $x\in (\frac{d}{b},\frac{d+1}{b})$, we have \[ \beta(bx;t_1)\beta(x;-t_2) = e^{-dt_1}\alpha_b(t_1,t_2) \beta_0(x;bt_1-t_2) - \beta(bx;t_1)-\beta(x;-t_2)-1,\] where we recall $\beta(x;t)=\displaystyle\sum_{k>0} \dfrac{B_k(x-[x])}{k!}t^k$. 
\end{lemma} \begin{proof} Since $bx-[bx]=bx-d$ when $x\in (\frac{d}{b},\frac{d+1}{b})$, one has \begin{align*} &\big( \beta(bx;t_1)+1\big)\big(\beta(x;-t_2)+1\big) = \frac{t_1e^{(bx-d)t_1}}{e^{t_1}-1} \frac{-t_2 e^{-xt_2}}{e^{-t_2}-1}\\ &=e^{-dt_1} \frac{t_1}{e^{t_1}-1} \frac{-t_2}{e^{-t_2}-1} e^{(bt_1-t_2)x}\\ &=e^{-dt_1} \beta_0(0;t_1)\beta_0(0;-t_2) \frac{e^{bt_1-t_2}-1}{bt_1-t_2} \frac{(bt_1-t_2)e^{(bt_1-t_2)x}}{e^{bt_1-t_2}-1}\\ &=e^{-dt_1}\alpha_b(t_1,t_2)\beta_0(x;bt_1-t_2), \end{align*} from which the statement follows. \end{proof} \begin{proposition}\label{3_4} For any integers $a,b\ge1$, we have \begin{equation}\label{eq3_1} \begin{aligned} F_{a,b}(t_1,t_2,t_3) &= \alpha_b(t_2,t_3) \int_0^1 \gamma(ax;t_1)\beta_0(x;bt_2-t_3)dx \\ &+\sum_{c=1}^{b-1} \widetilde{\alpha}_{b,c}(t_2,t_3) \int_{\frac{c}{b}}^1 \gamma(ax;t_1)\beta_0(x;bt_2-t_3)dx\\ &-\int_0^1 \gamma(ax;t_1)\big( \beta(bx;t_2)+\beta(x;-t_3)\big) dx. \end{aligned} \end{equation} \end{proposition} \begin{proof} Splitting the integral $\displaystyle\int_0^1=\displaystyle\sum_{d=0}^{b-1} \displaystyle\int_{\frac{d}{b}}^{\frac{d+1}{b}} $ in the definition of $F_{a,b}$ (see \eqref{eq111}) and then using Lemma \ref{3_3}, we have \begin{align*} F_{a,b}(t_1,t_2,t_3)&=\sum_{d=0}^{b-1} \int_{\frac{d}{b}}^{\frac{d+1}{b}} \gamma(ax;t_1)\beta(bx;t_2)\beta(x;-t_3)dx\\ &=\sum_{d=0}^{b-1} e^{-dt_2}\alpha_b(t_2,t_3) \int_{\frac{d}{b}}^{\frac{d+1}{b}} \gamma(ax;t_1) \beta_0(x;bt_2-t_3)dx\\ &-\sum_{d=0}^{b-1}\int_{\frac{d}{b}}^{\frac{d+1}{b}} \gamma(ax;t_1)\big( \beta(bx;t_2)+\beta(x;-t_3)+1\big)dx\\ &=\sum_{d=0}^{b-1} \left( \alpha_b(t_2,t_3) + \sum_{c=1}^d \widetilde{\alpha}_{b,c}(t_2,t_3) \right) \int_{\frac{d}{b}}^{\frac{d+1}{b}} \gamma(ax;t_1) \beta_0(x;bt_2-t_3)dx\\ &-\int_0^1 \gamma(ax;t_1)\big( \beta(bx;t_2)+\beta(x;-t_3)+1\big)dx, \end{align*} where for the last equality we have used Lemma \ref{3_1}. By the relation $\displaystyle\int_0^1 Li(ax;t)dx=0$, we have $\displaystyle\int_0^1 \gamma(ax;t_1)dx=0$. 
Hence, the statement follows from the interchange of the order of summation $\displaystyle\sum_{d=1}^{b-1} \sum_{c=1}^{d} =\sum_{c=1}^{b-1} \sum_{d=c}^{b-1}$. \end{proof} We now deal with the integral of the second term of the right-hand side of \eqref{eq3_1}. \begin{proposition}\label{3_5} For any integers $a,b\ge1$ and $c\in \{0,1,\ldots,b-1\}$, we have \begin{align*} &\frac{1}{2\pi i} \int_{\frac{c}{b}}^1 \gamma(ax;2\pi it_1) \beta_0(x;2\pi i(bt_2-t_3))dx\\ &=-i\sum_{\substack{s\ge1\\p,q\ge 0\\p+s:{\rm odd}}} \frac{(-1)^{s}(2\pi i)^{q-1}}{q!a^{s}} S_{p+s+1}(\tfrac{ac}{b}) B_q(\tfrac{c}{b}) t_1^{p+1}(bt_2-t_3)^{q+s-1}\\ &+\sum_{\substack{s\ge1\\p,q\ge 0\\p+s:{\rm even}}} \frac{(-1)^{s}(2\pi i)^{q-1}}{q!a^{s}}\left( \zeta(p+s+1)B_q-C_{p+s+1}(\tfrac{ac}{b} )B_q(\tfrac{c}{b}) \right) t_1^{p+1}(bt_2-t_3)^{q+s-1}, \end{align*} where $S_n(x)$ and $C_n(x)$ are defined in \eqref{eq1}. \end{proposition} \begin{proof} For an integer $s\ge1$, we let \[ \gamma_s(x;t) =\sum_{k\ge s} \frac{Cl_k(x-[x])}{k!}t^k.\] It is easily seen that for any integer $s\ge2$ we have \[ \frac{d}{dx} \gamma_s(ax;t) = at\gamma_{s-1}(ax;t) \quad \mbox{ and} \quad \frac{d}{dx} \beta_0(x;t) = t\beta_0(x;t).\] By repeated use of integration by parts and noting that $\gamma_1(x;t)=\gamma(x;t)$, we have \begin{align*} &\int_{\frac{c}{b}}^1 \gamma(ax;2\pi it_1) \beta_0(x;2\pi i(bt_2-t_3)) dx\\ &=\sum_{s\ge2}\frac{(-2\pi i (bt_2-t_3))^{s-2}}{(2\pi iat_1)^{s-1}} \left[ \gamma_s (ax;2\pi it_1) \beta_0(x;2\pi i (bt_2-t_3)) \right]_{\frac{c}{b}}^1\\ &=\sum_{\substack{s\ge2\\p\ge s\\q\ge0}} \frac{(-1)^s(2\pi i)^{p+q-1}}{p!q!a^{s-1}}\left[ Cl_p(ax-[ax]) B_q(x)\right]_{\frac{c}{b}}^1 t_1^{p-s+1}(bt_2-t_3)^{q+s-2}\\ &=\sum_{\substack{s\ge1\\p,q\ge 0}} \frac{(-1)^{s+1}(2\pi i)^{p+q+s}}{(p+s+1)!q!a^{s}}\left[ Cl_{p+s+1}(ax-[ax]) B_q(x)\right]_{\frac{c}{b}}^1 t_1^{p+1}(bt_2-t_3)^{q+s-1}.
\end{align*} By definition, for any $x\in {\mathbb Q}$ and $k\ge2$ we have \[ Cl_k(x-[x])= \begin{cases} -\dfrac{k!}{(2\pi i)^{k-1}} C_k(x) & k :{\rm odd}, \\ -i\dfrac{k!}{(2\pi i)^{k-1}} S_k(x) & k:{\rm even},\end{cases} \] and hence, the above last line is computed as follows: \begin{align*} &i\sum_{\substack{s\ge1\\p,q\ge 0\\p+s:{\rm odd}}} \frac{(-1)^{s}(2\pi i)^{q}}{q!a^{s}}\left( S_{p+s+1}(a)B_q(1)-S_{p+s+1}(\tfrac{ac}{b}) B_q(\tfrac{c}{b}) \right) t_1^{p+1}(bt_2-t_3)^{q+s-1}\\ &+\sum_{\substack{s\ge1\\p,q\ge 0\\p+s:{\rm even}}} \frac{(-1)^{s}(2\pi i)^{q}}{q!a^{s}}\left( C_{p+s+1}(a)B_q(1)-C_{p+s+1}(\tfrac{ac}{b} )B_q(\tfrac{c}{b}) \right) t_1^{p+1}(bt_2-t_3)^{q+s-1}, \end{align*} which completes the proof. \end{proof} \section{Proof of Theorem \ref{1_1}} We can now complete the proof of Theorem \ref{1_1} as follows. \begin{proof}[Proof of Theorem \ref{1_1}] We consider only the real part of the coefficient of $t_1^{k_1}t_2^{k_2}t_3^{k_3}$ in the generating function $\frac{1}{2\pi i}F_{a,b}(2\pi it_1,2\pi it_2,2\pi it_3)$ for positive integers $k,k_1,k_2,k_3$ with $k=k_1+k_2+k_3$ odd. By \eqref{eq3_1} with $t_j\rightarrow 2\pi i t_j$, we have \begin{equation}\label{eq4_1} \begin{aligned} &\frac{1}{2\pi i} F_{a,b}(2\pi it_1,2\pi it_2,2\pi it_3) \\ &= \alpha_b(2\pi it_2,2\pi it_3) \times \frac{1}{2\pi i}\int_0^1 \gamma(ax;2\pi it_1)\beta_0(x;-2\pi i(t_3-bt_2))dx \\ &+\sum_{c=1}^{b-1} \widetilde{\alpha}_{b,c}(2\pi it_2,2\pi it_3)\times \frac{1}{2\pi i} \int_{\frac{c}{b}}^1 \gamma(ax;2\pi it_1)\beta_0(x;2\pi i(bt_2-t_3))dx\\ &-\frac{1}{2\pi i}\int_0^1 \gamma(ax;2\pi it_1)\big( \beta(bx;-2\pi i(-t_2))+\beta(x;-2\pi it_3)\big) dx. \end{aligned} \end{equation} It follows from \eqref{eq2_5} that the real part of the coefficient of $t_1^{k_1}t_2^{k_2}t_3^{k_3}$ in the first and last term of the right-hand side of \eqref{eq4_1} can be expressed as ${\mathbb Q}$-linear combinations of $\pi^{2n}\zeta(k-2n)$ with $0\le n \le \frac{k-3}{2}$. 
For the second term, using Proposition \ref{3_5} (see also Remark \ref{3_2}), we have \begin{equation}\label{eq4_2} \begin{aligned} &\widetilde{\alpha}_{b,c}(2\pi it_2,2\pi it_3)\times \frac{1}{2\pi i} \int_{\frac{c}{b}}^1 \gamma(ax;2\pi it_1) \beta_0(x;2\pi i(bt_2-t_3))dx\\ &=-i\sum_{\substack{n_2\ge1\\n_3\ge0}}\sum_{\substack{s\ge1\\p,q\ge 0\\p+s:{\rm odd}}} \frac{(-1)^{s}\widetilde{A}_{b,c}(n_2,n_3)}{q!a^{s}}(2\pi i)^{n_2+n_3+q-1} S_{p+s+1}(\tfrac{ac}{b}) B_q(\tfrac{c}{b})t_1^{p+1}(bt_2-t_3)^{q+s-1}t_2^{n_2}t_3^{n_3}\\ &+\sum_{\substack{n_2\ge1\\n_3\ge0}} \sum_{\substack{s\ge1\\p,q\ge 0\\p+s:{\rm even}}} \frac{(-1)^{s}\widetilde{A}_{b,c}(n_2,n_3)}{q!a^{s}}(2\pi i)^{n_2+n_3+q-1} \left( \zeta(p+s+1)B_q-C_{p+s+1}(\tfrac{ac}{b} )B_q(\tfrac{c}{b}) \right)\\ &\times t_1^{p+1}(bt_2-t_3)^{q+s-1}t_2^{n_2}t_3^{n_3}, \end{aligned} \end{equation} where we note that in the above both summations, $p+s+1$ runs over integers greater than 1. Since for any $x\in {\mathbb Q}$ and $k\ge0$ we have $B_k(x)\in{\mathbb Q}$, the real part of the coefficient of $t_1^{k_1}t_2^{k_2}t_3^{k_3}$ in the first term (resp. the second term) of the right-hand side of \eqref{eq4_2} is a ${\mathbb Q}$-linear combination of $\pi^{2n+1}S_{k-2n-1}(\frac{ac}{b})$ with $0\le n \le \frac{k-3}{2}$ (resp. $\pi^{2n}C_{k-2n}(\frac{ac}{b})$ and $\pi^{2n}\zeta(k-2n)$ with $0\le n \le \frac{k-3}{2}$). We therefore find that the real part of the coefficient of $t_1^{k_1}t_2^{k_2}t_3^{k_3}$ in the generating function $\frac{1}{2\pi i}F_{a,b}(2\pi it_1,2\pi it_2,2\pi it_3)$ can be expressed as ${\mathbb Q}$-linear combinations of $\pi^{2n+1}S_{k-2n-1}(\frac{ac}{b})$ and $\pi^{2n}C_{k-2n}(\frac{ac}{b})$ with $0\le n \le \frac{k-3}{2}$ and $c\in {\mathbb Z}/b{\mathbb Z}$. Thus by Corollary \ref{2_2} we complete the proof. 
\end{proof} \begin{remark}\label{4_1} As mentioned in the introduction, the value $\zeta_{a,b}(k_1,k_2,k_3)$ is expressible as ${\mathbb Q}$-linear combinations of double polylogarithms $Li_{r,s}(z_1,z_2)$ defined in \eqref{eq1_2}, where the expression is obtained from the partial fractional decomposition \[ \frac{1}{x^ry^s} = \sum_{\substack{p+q=r+s\\p,q\ge1}} \frac{1}{(x+y)^p}\left(\binom{p-1}{s-1} \frac{1}{x^{q}} + \binom{p-1}{r-1}\frac{1}{y^q} \right) \qquad (r,s\in {\mathbb Z}_{\ge1}) \] and the orthogonality relation \[ \frac{1}{N} \sum_{n\in {\mathbb Z}/N{\mathbb Z}} \mu_N^{dn} = \begin{cases} 1 & N\mid d \\ 0 & N\nmid d\end{cases},\] where $\mu_N=e^{2\pi i/N}$ and $d\in {\mathbb Z}$. For example, one can check \begin{equation}\label{eq4_3} \zeta_{1,3}(1,1,3) =\sum_{u\in {\mathbb Z}/3{\mathbb Z}} Li_{1,4}(\mu_3^{-u},\mu_3^u) + \sum_{u\in {\mathbb Z}/3{\mathbb Z}} Li_{1,4}(\mu_3^u,1). \end{equation} From this, Theorem \ref{1_1} might be proved by the parity theorem for double polylogarithms examined in \cite[Eq.~(3.2)]{Erik}. Although we do not proceed this in general, let us illustrate an example. As a special case of \cite[Eq.~(3.2)]{Erik}, one obtains \begin{align*} Li_{1,4}(z_1,z_2)+Li_{1,4}(z_1^{-1},z_2^{-1}) &= \sum_{n=1}^5 (-1)^{n+1} Li_n(z_1) \mathcal{B}_{5-n}(z_1z_2) - Li_1(z_1)\mathcal{B}_4(z_2) \\ &+\sum_{n=4}^5 \binom{n-1}{3} Li_n(z_2^{-1}) \mathcal{B}_{5-n}(z_1z_2) - Li_5(z_1z_2), \end{align*} where for each integer $k\ge0$ we set $\mathcal{B}_k(z)=\frac{(2\pi i)^k}{k!} B_k \left(\frac{1}{2} +\frac{\log(-z)}{2\pi i}\right)$. We note that $Li_k(\mu_3^u)= C_k(\frac{u}{3})+iS_k(\frac{u}{3})$ and $\mathcal{B}_k(\mu_3)=\frac{(2\pi i)^k}{k!} B_k(\frac{1}{3})$ since $\log(-\mu_3)=-\frac{\pi i}{3}$. 
With this, the above formula gives \begin{align*} &{\rm Re}\ (Li_{1,4}(\mu_3^{-1},\mu_3)+Li_{1,4}(\mu_3^{-2},\mu_3^2))=\frac{1}{243}\left(-843\zeta(5) +36\pi^2\zeta(3)+ 4\pi^4\log3\right),\\ &{\rm Re}\ (Li_{1,4}(\mu_3,1)+Li_{1,4}(\mu_3^2,1))=\frac{1}{243}\left(972\zeta(5)-12\pi^2\zeta(3)-4\pi^4 \log3- 81 \pi S_4(\tfrac13) -12\pi^3 S_2(\tfrac13)\right),\\ &2Li_{1,4}(1,1)=4\zeta(5) - \frac13 \pi^2 \zeta(3). \end{align*} where we have used $C_k(\frac13)=C_k(\frac23)=\frac{1-3^{k-1}}{2\cdot 3^{k-1}}\zeta(k)$ for $k\ge2$ and $C_1(\frac13)=C_1(\frac23)=-\frac{1}{2} \log 3$. Substituting the above formulas to \eqref{eq4_3}, one gets \eqref{eq1_1}. We have checked Theorem \ref{1_1} for $(a,b)=(1,3)$ and $(2,3)$ in this direction. \end{remark} \section{The zeta function of the root system $G_2$} In this section, we give an affirmative answer to the question posed by Komori, Matsumoto and Tsumura \cite[Eq.~(7.1)]{KMT5}. The zeta-function associated with the exceptional Lie algebra $G_2$ is defined for complex variables ${\bf s}=(s_1,s_2,\ldots,s_6)\in {\mathbb C}^6$ by \[ \zeta({\bf s};G_2) := \sum_{m,n>0} \frac{1}{m^{s_1}n^{s_2}(m+n)^{s_3}(m+2n)^{s_4}(m+3n)^{s_5}(2m+3n)^{s_6}} .\] The function $\zeta ({\bf s};G_2) $ was first introduced by Komori, Matsumoto and Tsumura (see \cite{KMT4,KMT5}), where they developed its analytic properties and functional relations. They also examined explicit evaluations of the special values of $\zeta({\bf k};G_2)$ at ${\bf k} \in {\mathbb Z}_{>0}^6$ (see \cite{Zhao} for ${\bf k} \in {\mathbb Z}_{\ge0}^6$), where we note that the series $\zeta({\bf k};G_2) $ converges absolutely for ${\bf k}\in {\mathbb Z}_{>0}^6$. 
For example, they showed \[ \zeta(2,1,1,1,1,1;G_2)=- \frac{109}{1296} \zeta(7)+\frac{1}{18} \zeta(2)\zeta(5) .\] Komori, Matsumoto and Tsumura \cite[Eq.~(7.1)]{KMT5} suggested a conjecture, which we now prove, that the value $\zeta(k_1,\ldots,k_6;G_2)$ with $k_1+\cdots+k_6$ odd lies in the polynomial ring over ${\mathbb Q}$ generated by $\zeta(k) \ (k\in {\mathbb Z}_{\ge2})$ and $L(k,\chi_3) \ (k\in {\mathbb Z}_{\ge1})$, where $L(s,\chi_3)$ is the Dirichlet $L$-function associated with the character $\chi_3$ defined by \[ L(s,\chi_3) = \sum_{m>0} \frac{\chi_3(m)}{m^s}\] and the character $\chi_3$ is determined by $\chi_3(n)=1$ if $n\equiv 1 \mod 3$, $\chi_3(n)=-1$ if $n\equiv 2\mod 3$ and $\chi_3(n)=0$ if $n\equiv 0 \mod 3$. We remark that the second author \cite{Okamoto1} showed that the value $\zeta(k_1,\ldots,k_6;G_2)$ with $k_1+\cdots+k_6$ odd can be written in terms of $\zeta(s),L(s,\chi_3),S_r(\frac{d}{N}),C_r(\frac{d}{N})$ for $N=4,12$ and $0<d<N, \ (d,N)=1$ (see also \cite[\S7]{KMT5}). The following theorem gives an affirmative answer to the question. \begin{theorem} For any integers $k,k_1,\ldots,k_6\ge1$ with $k=k_1+\cdots+k_6$ odd, the value $\zeta(k_1,\ldots,k_6;G_2)$ can be expressed as ${\mathbb Q}$-linear combinations of $\zeta(2n) \zeta(k-2n) \ (0\le n \le \frac{k-3}{2})$ and $L(2n+1,\chi_3)L(k-2n-1,\chi_3) \ (0\le n \le\frac{k-3}{2})$, where $\zeta(0)=-\frac{1}{2}$. \end{theorem} \begin{proof} In \cite[Theorem 2.3]{Okamoto1}, the second author proved that for any integers $l_1,\ldots,l_6\ge1$, the value $\zeta(l_1,\ldots,l_6;G_2)$ can be expressed as ${\mathbb Q}$-linear combinations of $\zeta_{a,b}(n_1,n_2,n_3)$ with $(a,b)=(1,1),(1,2),(1,3),(2,3)$, $n_1+n_2+n_3 =l_1+\cdots+l_6$ and $n_1,n_2,n_3\in{\mathbb Z}_{>0}$. 
As a consequence, it follows from Theorem \ref{1_1} that the value $\zeta(k_1,\ldots,k_6;G_2)$ can be written as ${\mathbb Q}$-linear combinations of $\pi^{2n}C_{k-2n}(\frac{d}{6})$ and $\pi^{2n+1} S_{k-2n-1}(\frac{d}{6})$ with $0\le n\le \frac{k-3}{2}$ and $d\in {\mathbb Z}/6{\mathbb Z}$. For any $d\in {\mathbb Z}/6{\mathbb Z}$ and $k\ge2$ we have $C_k(\frac{d}{6})\in {\mathbb Q}\zeta(k) $ and $S_k(\frac{d}{6}) \in {\mathbb Q} \sqrt{3}L(k,\chi_3)$, and then the result follows from the well-known formula: $\zeta(2n)\in {\mathbb Q}\pi^{2n}, \ L(2n+1,\chi_3)\in {\mathbb Q} \sqrt{3} \pi^{2n+1}$ for any $n\in{\mathbb Z}_{\ge0}$ (see \cite[Theorem 9.6]{AIK}). \end{proof} Let us illustrate an example of the formula for $\zeta(k_1,\ldots,k_6;G_2)$. Applying the partial fractional decomposition repeatedly to the form $(m+n)^{-k_3}(m+2n)^{-k_4}(m+3n)^{-k_5}(2m+3n)^{-k_6}$, we get \begin{align*} &\zeta(1,1,1,1,1,2;G_2)\\ &=\frac{1}{2} \zeta_{1,1}(5,1,1)-16\zeta_{1,2}(5,1,1)+\frac{9}{2} \zeta_{1,3}(5,1,1)+9\zeta_{2,3}(4,1,2)+18\zeta_{2,3}(5,1,1). \end{align*} Then, by Theorem \ref{1_1} (actually we use Corollary \ref{2_2} together with Propositions \ref{2_3}, \ref{3_4} and \ref{3_5}), we have \begin{align*} \zeta(1,1,1,1,1,2;G_2) &= \frac{2507}{1296}\zeta(7)-\frac{505}{648}\pi^2\zeta(5)+\frac{9}{4}\pi S_6(\tfrac{1}{3})\\ &=\frac{2507}{1296}\zeta(7)-\frac{505}{108}\zeta(2)\zeta(5)+\frac{3}{8}L(1,\chi_3) L(6,\chi_3), \end{align*} where $L(1,\chi_3)=\frac{\pi}{3\sqrt{3}}$. \end{document}
\begin{document} \columnsversion{}{ \setcounter{page}{0} } \title{Price Competition in Online Combinatorial Markets} \columnsversion{ \numberofauthors{3} }{} \author{ \columnsversion{\alignauthor}{} Moshe Babaioff\\ \columnsversion{\affaddr}{}{Microsoft Research}\\ \email{[email protected]} \columnsversion{\alignauthor}{\and} Noam Nisan\\ \columnsversion{\affaddr}{}{Microsoft Research and HUJI}\\ \email{[email protected]} \columnsversion{\alignauthor}{\and} Renato Paes Leme\columnsversion{\titlenote}{\footnote}{This work was done while the author was a post-doctoral researcher at Microsoft Research Silicon Valley.}\\ \columnsversion{\affaddr}{}{Google Research NYC}\\ \email{[email protected]} } \columnsversion{}{ \date{} } \maketitle \begin{abstract} We consider a single buyer with a combinatorial preference that would like to purchase related products and services from different vendors, where each vendor supplies exactly one product. We study the general case where subsets of products can be substitutes as well as complementary and analyze the game that is induced on the vendors, where a vendor's strategy is the price that he asks for his product. This model generalizes both Bertrand competition (where vendors are perfect substitutes) and Nash bargaining (where they are perfect complements), and captures a wide variety of scenarios that can appear in complex crowd sourcing or in automatic pricing of related products. We study the equilibria of such games and show that a pure efficient equilibrium always exists. In the case of submodular buyer preferences we fully characterize the set of pure Nash equilibria, essentially showing uniqueness. For the even more restricted ``substitutes'' buyer preferences we also prove uniqueness over {\em mixed} equilibria. 
Finally we begin the exploration of natural generalizations of our setting such as when services have costs, when there are multiple buyers or uncertainty about the the buyer's valuation, and when a single vendor supplies multiple products. \end{abstract} \columnsversion{ \category{J.4}{Social and Behavioral Sciences}{Economics} \category{F.2.2}{Analysis of Algorithms and Problem Complexity}{Nonnumerical Algorithms and Problems} \terms{games, economics, algorithms} }{ } \section{Introduction} It is a common practice in electronic commerce for sellers to use algorithmic techniques to automatically adjust the price of goods and services in response to prices posted by other sellers. Such practices are specially in evidence when malfunctioning algorithms lead to rather amusing results: Eisen \cite{eisen_blog} observed that the biology book 'The Making of a Fly' was priced at 23.6 million dollars by a seller in Amazon and at 18.6 million dollars by another seller. By observing how prices changed over time, Eisen concluded that this result was reached by sellers recurrently applying price update rules $p_1 \leftarrow .998 \cdot p_2$ and $p_2 \leftarrow 1.27 \cdot p_1$, without taking the valuation of the prospective buyers into account. Prices reached these high values as after each time both sellers updated their prices, the prices grew by a factor of $.998 \cdot 1.27 > 1$. Such absurd outcomes are uncommon (but this example is by no means unique) and are often the result of faulty algorithms, but they hint at at a complex game between different sellers that adjust their prices based on the price of their competitors and based on the dependencies among products (substitutabilities and complementarities). The fact that sellers frequently update their prices in response to other sellers and the resulting fluctuations have been traditionally observed in financial markets as well as in prices for airline tickets. 
A recent news article \cite{wsj_article} observed that the same technique is being adopted by online retailers for more mundane consumer goods. The article tracks the price of a microwave across three different online retailers: Amazon, Best Buy and Sears. They observed the price being changed $9$ times during the day by Amazon and twice by Best Buy, in response to Amazon prices. Our goal in this paper is to formally model the \emph{pricing game} played by the sellers and study properties of its equilibria as a function of the dependencies among goods/services offered by the sellers. Such phenomena are not an exclusive feature of internet markets. Traditional economic models such as Bertrand competition \cite{bertrand} and Nash Bargaining \cite{nash_bargaining} explore versions of this question. Two aspects of online markets, however, are particularly relevant. The first is operational: the internet provides quick access to the information on prices of other sellers and allows sellers (or software agents acting on their behalf) to respond in real-time. The second aspect is structural: products and services exhibit complex dependencies. We exemplify below some situations in which such dependencies arise: \begin{itemize} \item \emph{cloud services}: building a cloud service involves assembling various components such as storage, databases, bandwidth, etc. . Each of those is available through a variety of vendors. Complex dependencies arise from the fact that some product are more compatible with each other and some are less. For example, a certain database might place some requirements on storage and bandwidth, making some combinations of services infeasible. \item \emph{information and data streams}: it is common for internet advertisers to use behavioral and demographic information about the users to bid on ads and to choose which content to display. 
This information is typically purchased from third party data providing agencies who collect and curate databases for this purpose. Each data providing agency has a partial view of the user and complex queries often require advertisers to purchase data from multiple vendors. Two pieces of data might exhibit subtitutability (e.g. if they provide the same information about the user) or might be complements (e.g. if they have a common attribute that allows the advertiser to perform a \emph{join} and link two different views about the same user). \item \emph{crowdsourcing and online marketplaces}: online platforms such as oDesk allow workers to post hourly wages and buyers to assemble teams of workers with different skills. Workers with similar skills are substitutes while two workers that are in same geographic location and can more easily work together might be complements. \item \emph{routing}: given a source and a destination in a network, one needs to buy a path connecting them. Two edges needed to complete a path are complements. Two parallel edges are perfect substitutes. This corresponds to the path auctions of Immorlica et al \cite{immorlica05}. \end{itemize} {\bf Our results.} We first define a simple model of a \emph{pricing game}, in which we wish to capture the essential aspects of the competition among sellers. Our basic game consists of $n$ sellers and one buyer: the buyer has a public\footnote{the case in which there is uncertainty about the buyer's valuation is briefly discussed in Section~\ref{subsec:value_uncertainty}.} combinatorial valuation over the items and reacts to item prices by purchasing the bundle of items/services that maximizes his utility (total value minus total price), breaking ties according to a fixed rule. The sellers are strategic agents and each seller's strategy consists of setting a price for the service he is providing. 
Our first result is the existence of pure Nash equilibria for every combinatorial valuation if the buyer breaks ties maximally, i.e., favoring supersets. For arbitrary tie breaking rules, we show existence of $\epsilon$-Nash equilibria for all $\epsilon > 0$. {Both results rely on a structural characterization of the set of equilibria. } Although pure Nash equilibrium always exists, it might fail to be unique, as in the case of Nash Bargaining. {When two sellers are perfect substitutes from the buyer's perspective, the classical Bertrand competition model asserts that the unique equilibrium consists of both buyers pricing at zero. For combinatorial valuations, are there properties encoding that items are \emph{substitutes} that lead to uniqueness of equilibrium ? We explore four common notions of substitutes in economics and study their effect on the set of equilibria of the pricing game. In increasing generality, we study the following models of substitutability: gross substitutability (a traditional notion of substitutes in economics due to Kelso and Crawford \cite{KelsoCrawford}), submodularity, XOS and subadditivity. We refer to \cite{LehmannLehmannNisan} for an extensive discussion on the various notions of substitutability. } {First we show that for subadditive and XOS valuations, utilities of players might not be unique across all pure Nash equilibria.} If the buyer valuation is submodular, however, each player has the same utility at every pure Nash equilibrium.\footnote{The equilibrium might fail to be unique, but only because sellers with zero utility have different strategies resulting with that utility.} For the subclass of gross substitutes valuations a stronger claim is true: utilities are the same across all mixed Nash equilibrium, under mild assumptions on the tie breaking rules used by the buyer. 
{\bf Extensions.} We extend our basic model in three different directions, relaxing assumptions made on the basic model: \begin{itemize} \item \emph{service costs}: we consider each seller as having a cost to perform the service if selected by the buyer. The utility of the seller then becomes, zero if not selected and the price posted minus his cost if selected. We show that the existence of equilibrium still holds in this setting, moreover, there is always an equilibrium maximizing the total welfare of the system, which is defined by the total value obtained by the buyer minus the total cost by the sellers. In other words, the Price of Stability\footnote{the Price of Stability measures the ratio between the optimal welfare achievable and the optimal welfare in equilibrium. The Price of Anarchy measures the ratio between the optimal welfare achievable and the worse welfare in equilibrium.} is $1$. We show that if valuations are gross substitutes, all equilibria are welfare maximizing, i.e., the Price of Anarchy is $1$. For submodular functions, however, we show that inefficient equilibria might arise. Moreover, we show that if there are costs, even if the valuation is submodular, the utilities in equilibria might not be unique. \item \emph{value uncertainty and multiple buyers}: we relax the assumption that the valuation of the buyer is public knowledge of the sellers, and assume instead that sellers only know that the valuation is drawn from a certain distribution. This framework also captures the model where there are multiple buyers and sellers are not allowed to price-discriminate between them. We show that pure Nash equilibrium might not exist in this setting. Moreover, not even $\epsilon$-Nash are guaranteed to exist for small $\epsilon > 0$. \item \emph{multiple services per seller}: in the basic model, we assumed that each service is controlled by a single seller. 
We consider the model where each seller sets item prices and collects utility from potentially many items. We note that even the special case of a \emph{monopolist seller}, where there is only one seller controlling all items, is quite non-trivial. We show that for this case, both the Price of Anarchy and Price of Stability are $\Theta(\log n)$. Moreover, the problem of best-responding in this game is a generalization of the Unique Coverage problem, which was shown to be $O(\log n)$-hard to approximate by Demaine et al \cite{Demaine08}. In this paper, here we provide an $O(\log n)$ approximation to the pricing problem faced by the monopolistic seller for the case where the buyer's valuation is submodular. \end{itemize} {\bf Conclusions and Open Problems.} In this paper we propose a model for the price competition in combinatorial markets generalizing traditional models such as Bertrand Competition and Nash Bargaining to scenarios with more complex dependencies over goods. Through our model we seek to understand which conditions are necessary so that the price competition leads to an equilibrium and also under which conditions is this equilibrium unique or efficient. The lack of equilibrium where there is uncertainty about the valuation of the buyer (Section \ref{subsec:value_uncertainty}) offers a possible explanation for the price cycles observed in \cite{wsj_article}. Given such interpretation, it leads to the interesting open question of trying to learn the belief of the sellers about buyers based on their history of price updates. Our extensions also lead to other open questions: \begin{itemize} \item bounding the Price of Anarchy over the set of mixed Nash equilibria when there is uncertainty about the buyer valuation. 
\comment{\item bounding the Price of Anarchy over pure Nash equilibria when there are service costs and the buyer's value is submodular.} \item characterizing the set of Nash equilibria when there are multiple services per seller beyond the monopolistic seller case. \end{itemize} {{\bf Related Work} Our work situates in the broader agenda of understanding the structure of Nash equilibria of games motivated by auction mechanisms -- both direct and reverse. Closely related to our work is the study of pay-your-bid mechanisms for the reverse auction settings, in which a buyer solicits price quotes from different sellers and decides to buy from a certain subset based on the quotes received. The same problem is often phrased as hiring a team of agents, in which a firm solicits wages from different workers and chooses a subset of them to hire. Immorlica et al \cite{immorlica05,immorlica10} study the Nash equilibria for procurement settings in which a firm needs to choose a subset of workers subject to some feasibility constraint. Their model differs from ours in two aspects: (i) while in our paper the buyer has a value for each subset and chooses the one maximizing his utility, in \cite{immorlica10} the buyer is \emph{required} to buy some feasible set. In other words, their setting corresponds to the special case of our setting where the value of the buyer is either $-\infty$ for infeasible sets or $\infty$ for feasible sets. (ii) their focus is on bounds on the total payment of the buyer (which is commonly referred as \emph{frugality}), while our focus is on properties of the equilibrium and on measuring \emph{welfare}. Welfare makes sense in our model since there are quantitative values associated with each outcomes rather then feasibility constraints. 
{For direct auction settings, in which a seller holds a set of items and solicits bids from various buyers on those items, Bernheim and Whinston \cite{BernheimWhinston} study the set of equilibria of an auction where agents bid on generic outcomes (menu auctions). Christodoulou, Kov{\'a}cs and Schapira \cite{CKS08} study the case where outcomes are partitions over items and agents bid on each item individually despite having combinatorial valuations over items (item bidding auctions). This research line was followed-up by a series of subsequent papers \cite{BR11,HKMN11,Feldman13,PLST12}. We refer to \cite{dining} for a survey.} The previously described related work focuses on equilibrium analysis for games that are likely to arise naturally in direct and reverse markets. A different line of research studies the \emph{mechanism design} problem, in which a (direct or reverse) auctioneer engineers the rules of the market in order to guarantee desired outcome such as efficiency and frugality. For work in mechanism design more closely related to our setting, we refer to \cite{frugal0,frugal1,frugal2,frugal3,frugal4}.} {Another stream of related work consists of papers who extend the traditional price competiton model due to Bertrand \cite{bertrand} to combinatorial settings. Babaioff, Lucier and Nisan \cite{bertrand_networks} model the case each buyer can only access some of the sellers but not others. Chawla and Roughgarden \cite{ChawlaR08} and Chawla and Niu \cite{ChawlaN09} study Bertrand competition in the context of network routing. } \section{Preliminaries} We first define a basic version of the \emph{pricing game}, which is the central object of study in this paper. Later we discuss and analyze extensions of the basic model. 
A (basic) \emph{pricing game} is defined by a set $N = [n]$ of services (items), each controlled by a different provider (a seller) and one buyer with valuation $v:2^{N} \rightarrow \ensuremath{\mathbb R}_+$, where $v(S)$ is the value for acquiring a subset $S\subseteq N$ of those services. We assume that $v(\emptyset) = 0$ and that the valuation is monotone $v(S) \leq v(T)$ for $S \subseteq T$. The strategy of each seller is to set a price $p_i$ for his service. While facing a price vector $p$, the buyer chooses to purchase a set according to a decision map $X : \ensuremath{\mathbb R}^n_+ \rightarrow 2^{N}$ that associated each price vector $p$ to a set that maximizes the quasi-linear utility of the buyer $X(p) \in D(v;p) := \operatorname{argmax}_{S \subseteq N} v(S) - \textstyle\sum_{i \in S} p_i$. We refer to $D(v;p)$ as the \emph{demand correspondence}. The decision map essentially fixes how ties are broken between sets in the demand correspondence. We will say that a decision map is \emph{maximal} if there is no $X' \in D(v;p)$ such that $X(p) \subsetneq X'$. A valuation function $v$ together with a decision map define a game among the providers in which each provider strategy is to set a price $p_i$ for his service. As in the basic model we assume that providers have no cost providing their services,\footnote{We discuss the extension to providers with costs in Section~\ref{sec:costs}.} the utility of each provider $i$ equals to his revenue and is: $$u_i^X(p) = p_i \cdot \ensuremath{\mathbb{I}} \{i \in X(p) \}$$ where $\ensuremath{\mathbb{I}} \{\cdot\}$ is the indicator function. We are interested in studying the set of \emph{pure Nash equilibria} of this game: $$\textsc{Nash}^X = \{p \in \ensuremath{\mathbb R}^n_+; u_i^X(p_i, p_{-i}) \geq u_i^X(p'_i, p_{-i}), \forall p'_i \in \ensuremath{\mathbb R}_+ \}$$ i.e., strategy profiles where no seller can increase his utility by deviating. 
We say that a strategy profile is a (pure) $\epsilon$-\emph{Nash equilibrium} if no seller can improve his utility by more then $\epsilon$ by deviating: $$\textsc{Nash}_\epsilon^X = \{p \in \ensuremath{\mathbb R}^n_+; u_i^X(p_i, p_{-i}) \geq u_i^X(p'_i, p_{-i}) - \epsilon, \forall p'_i \in \ensuremath{\mathbb R}_+ \}$$ We also consider the sets of \emph{mixed Nash equilibria}: $$\textsc{mNash}^X = \{p \in (\Delta\ensuremath{\mathbb R}_+)^n; \mathbb{E} u_i^X(p_i, p_{-i}) \geq \mathbb{E} u_i^X(p'_i, p_{-i}), \forall p'_i \}$$ where the elements of $(\Delta\ensuremath{\mathbb R}_+)^n$ are vectors of $n$ independent random variable taking values in $\ensuremath{\mathbb R}_+$. When the decision map $X$ is clear from the context, we omit it from the notation, for example, we refer to $u_i$, $\textsc{Nash}$, $\textsc{Nash}_\epsilon$, $\textsc{mNash}$ instead of $u_i^X$, $\textsc{Nash}^X$, $\textsc{Nash}_\epsilon^X$ and $\textsc{mNash}^X$. \comment{We will also be interested in correlated Nash equilibria, in which the strategies of the sellers are correlated random variables: $$\textsc{cNash} = \left\{p \in \Delta(\ensuremath{\mathbb R}_+^n); \begin{aligned} & \mathbb{E} [u_i(p_i, p_{-i} ) \vert p_i] \geq \mathbb{E} [ u_i(p'_i, p_{-i}) \vert p_i] \\ & \forall p_i, p'_i \in \ensuremath{\mathbb R}_+ \end{aligned} \right\}$$} We will characterize the set of equilibria in terms of properties of the buyer's valuation $v$. Consider the following classes of valuation functions, from more general to more specific: \begin{itemize} \item \emph{combinatorial}: no assumptions on the valuation besides monotonicity. \item \emph{subadditive}: $v(S\cup T) \leq v(S) + v(T), \forall S, T$. \item \emph{XOS}: $v(S) = \max_{t \in I} \sum_{i \in S} w_{it}$ for $w_{it} \in \ensuremath{\mathbb R}_+$. \item \emph{submodular}: $v(S\cup T) + v(S \cap T) \leq v(S) + v(T), \forall S, T$. 
\item \emph{gross substitutes}: given a price vector $p \in \ensuremath{\mathbb R}^n_+$, if $S \in D(v; p)$ then for any vector $p' \geq p$, there is $T \in D(v; p')$ such that $S \cap \{j; p_j = p'_j\} \subseteq T$. \end{itemize} It is known that every class in the above list is a strict subclass of the previous one. We refer to Lehmann, Lehmann and Nisan \cite{LehmannLehmannNisan} for a comprehensive discussion on such classes and on their relations. We fix some additional notation that will be useful for the rest of the paper: given a valuation $v$ and sets $S$ and $T$ such that $S \cap T = \emptyset$, the \emph{marginal values} of $T$ with respect to $S$ is defined to be $v(T \vert S) = v(S \cup T) - v(S)$. Given a price vector $p \in \ensuremath{\mathbb R}^n_+$, denote $p(S) = \sum_{j \in S} p_j$. When clear from the context, we sometimes omit braces in the representation of sets. For example, we represent $v(\{i\} \vert S)$ by $v(i \vert S)$, $A \cup \{i\}$ by $A \cup i$ and $S \setminus \{j\}$ by $S \setminus j$. We keep our model as simple as possible to highlight its interesting features. Later in Section \ref{sec:extensions} we consider extensions of the basic model to incorporate service costs, multiple services provided by the same seller, a market with multiple sellers and multiple buyers and settings with incomplete information. We discuss how such additional features influence the results for the basic model. \section{Examples, Existence and Characterization of Equilibrium}\label{sec:existence} First notice that the pricing game has, as special cases, the classical models of Bertrand competition \cite{bertrand} and Nash bargaining \cite{nash_bargaining}. \begin{example}[Bertrand competition] If $n > 1$, $v(\emptyset) = 0$ and $v(S) = c$ for $S \neq \emptyset$ for some constant $c>0$, all services are perfect substitutes and the buyer has no utility for purchasing more then one. 
In this case $X(p)$ will be either empty if $p_i > c$ for all $i$ or will contain at most one service of positive price. It is known that for this case there is a unique Nash equilibrium that corresponds to every seller posting price $p_i = 0$. \end{example} \begin{example}[Nash bargaining]\label{ex:nash_bargaining} The other extreme case is when $v(N) = c>0$ and $v(S) = 0$ for any $S \neq N$. This models the scenario where all the services are necessary components for the buyer. Let $X(p) = N$ if $\sum_i p_i \leq c$ and $X(p) = \emptyset$ otherwise. The set of pure Nash equilibria of this game correspond to: $$\textsc{Nash} = \{p \in \ensuremath{\mathbb R}^n_+; \textstyle\sum_i p_i = c\} \cup \{p \in \ensuremath{\mathbb R}^n_+; \textstyle\sum_i p_i \geq c + \max_i p_i\}$$ In order to see that the profiles above are Nash equilibria, notice that if $\textstyle\sum_i p_i = c$, then $X(p) = N$ and no seller has incentive to change his price, since by decreasing his price, he can only decrease his utility. By increasing his price, he can only make $X(p) = \emptyset$. If $\textstyle\sum_i p_i \geq c + \max_i p_i$, then clearly $X(p) = \emptyset$ and for any seller $i$ and price $p'_i > 0$, $X(p'_i, p_{-i})$ is still $\emptyset$, since $\sum_{j \neq i} p_i + p'_i > \sum_{j} p_i - \max_i p_i \geq c$. For the converse, notice that if $p \in \textsc{Nash}$, then either (i) $X(p) = N$ in which case $\sum_i p_i \leq c$. If $\sum_i p_i < c$, any player can deviate to $p'_i = c - \sum_{j \neq i} p_j > p_i$ and improve his utility. So, it must be the case that $\sum_i p_i = c$ or: (ii) $X(p) = \emptyset$, in which case it must be the case that no seller can decrease his price such that $p_i > 0$ and $\sum_i p_i \leq c$, so it must be the case that $\sum_i p_i \geq c + \max_i p_i$. \end{example} Now we extend the characterization of Example \ref{ex:nash_bargaining} to a generic combinatorial valuation. 
\begin{lemma}\label{lemma:characterization} Given a price vector $p \in \ensuremath{\mathbb R}^n_+$ and $S = X(p) \in D(v;p)$, then $p$ is a Nash equilibrium if the following two properties hold: $$\forall i \in S, \exists T \not\ni i \text{ s.t. } v(S) - p(S) = v(T) - p(T)$$ $$\forall i \notin S, \forall T \ni i, v(S) - p(S) \geq v(T) - p(T \setminus i)$$ Moreover, if the map $X$ is maximal, the above statement holds with \emph{'if and only if'}. \end{lemma} {The first condition ensures that a provider that is picked cannot gain by slightly increasing his price. The second condition ensure that a provider that is not picked cannot gain by being picked even if he posts a positive price that is arbitrarily low.} \begin{proof} If a price vector $p$ is a Nash equilibrium, then for any $i \in S$ and $\epsilon > 0$, $u_i(p_i + \epsilon, p_{-i}) \leq u_i(p_i, p_{-i})$, so it must be the case that $u_i(p_i + \epsilon, p_{-i}) = 0$ as with $p_i + \epsilon$ seller $i$ will not sell. So, for every $\epsilon$, there is a set $T_\epsilon$ such that $i \notin T_\epsilon$ and $v(S) - p(S) - \epsilon \leq v(T_\epsilon) - p(T_\epsilon)$. Taking $\epsilon_t = \frac{1}{t}$ for $t \in \ensuremath{\mathbb Z}_+$, since there are finitely many values for $T_{\epsilon_t}$, there must be one that occurs infinitely often. Let this be $T$. Then taking the limit as $t \rightarrow \infty$ in this subsequence one gets that: $v(S) - p(S) \leq v(T) - p(T)$. Since $S \in D(v;p)$, then $v(S) - p(S)\geq v(T) - p(T)$ thus it must be the case that $v(S) - p(S) = v(T) - p(T)$. Given $i \notin S$, then it must be the case that $0 = u_i(p_i, p_{-i}) \geq u_i(\epsilon, p_{-i})$ for every $\epsilon\geq 0$, so for any set $T \ni i$, it must be the case that $v(S) - p(S) \geq v(T) - p(T \setminus i) + \epsilon$. Taking the limit as $\epsilon \rightarrow 0$, we get the desired condition. 
For the converse direction, \comment{in the case of a \emph{maximal} $X$ map, assume that $p$ satisfy the properties above. Then the second property guarantees that $X(p) \subseteq S$, since for all sets containing an element outside $S$ are dominated by $S$. Since the map $S \in D(v;p)$ and $X$ is maximal, then $X(p) = S$. To see that this is a Nash equilibrium,} observe that for $i \in S$, there is no incentive to decrease his price. Also, the first property guarantees that for any $p'_i > p_i$, $u_i(p'_i, p_i) = 0$, since there is a set $T \ni i$ such that $v(S) - p(S) = v(T) - p(T)$. For $i \notin S$, the second property guarantees that for any $p'_i > 0$, $X(p'_i, p_{-i}) = S$, so $u_i(p'_i, p_{-i}) = 0$. \end{proof} {One can define the \emph{welfare} $W(p)$ of a decision map $X$ at a price vector $p$ as the sum of the utilities of the agents (all sellers plus the buyer) when the buyer buys $X(p)$, i.e., $W(p) = v(X(p))$. We show that for any valuation function $v$, when the decision map is maximal the set of Nash equilibrium in non-empty, moreover, there is always a welfare maximizing equilibrium, one in which the buyer buys all services. The next theorem can be seen as showing that the \emph{Price of Stability} is one, i.e., it states that there is always an equilibrium that produces maximal welfare.} \begin{theorem}\label{cor:non_empty} If $X$ is maximal, then the set of pure Nash equilibria with $X(p) = N$ is non-empty. \end{theorem} \begin{proof} Define the set $F$ as: $$F = \{p \in \ensuremath{\mathbb R}^n_+; p(T) \leq v(T \vert N \setminus T), \forall T \subseteq N\}$$ and notice the condition defining $F$ corresponds to $v(N) - p(N) \geq v(N \setminus T) - p(N \setminus T)$ for all $T$, so $N \in D(v;p)$. Note that $0\in F$ and thus $F\neq \emptyset$. Now, define the set of Pareto vectors in $F$ as: $$\textsc{Par}(F) = \{p \in F; \not\exists p' \in F \text{ s.t. 
} p' \geq p \text{ and } \textstyle\sum_i p'_i > \textstyle\sum_i p_i\}$$ Since $F$ is non-empty and compact, $\textsc{Par}(F)$ is also non-empty. Now, we argue that $\textsc{Par}(F)$ is exactly the set of Nash equilibria with $X(p) = N$. Since the full set of sellers is selected, it is enough to argue about the first condition in Lemma \ref{lemma:characterization}. If $p \in \textsc{Par}(F)$ then for any $i \in N$ and $t \in \ensuremath{\mathbb Z}_+$, $X(p_i + \frac{1}{t}, p_{-i}) \neq N$, so there must be some $T_t \subsetneq N$ with $i \notin T_t$ such that $v(T_t) - p(T_t) \geq v(N) - p(N)-\frac{1}{t}$. Since there are finitely many values for $T_t$, some set $T$ must occur infinitely often. Taking the limit $t \rightarrow \infty$ for this subsequence we get $v(T) - p(T) \geq v(N) - p(N)$; since $N \in D(v;p)$, this must hold with equality. Therefore, by Lemma \ref{lemma:characterization}, this is a Nash equilibrium. Conversely, if $p \notin \textsc{Par}(F)$, we want to show that $p$ is not a Nash equilibrium with $X(p) = N$. If $p \notin F$, then there is some $T \subset N$ such that $v(N) - p(N) < v(N \setminus T) - p(N \setminus T)$, so we can't have $X(p) = N$. If $p \in F \setminus \textsc{Par}(F)$, then there is $p' \in F$ with $p' \geq p$ and $i$ such that $p'_i > p_i$. In particular, for all $T \ni i$, $p(T) < p'_i + p(T\setminus i) \leq v(T \vert N \setminus T)$. That is, for all such $T \ni i$, $v(N) - p(N) > v(N \setminus T) - p(N \setminus T)$, which contradicts the first condition in Lemma \ref{lemma:characterization} and therefore can't be an equilibrium. \end{proof} If $X(p)$ is not maximal, then Nash equilibria are not guaranteed to exist. Consider for example the pricing game with one seller and one buyer with valuation $v(\{1\}) = 1$ and a decision map such that $X(p_1) = \{1\}$ for $p_1 < 1$ and $X(p_1) = \emptyset$ otherwise. Note that $X(p)$ is not maximal for $p_1=1$ as $\{1\}$ is also in the demand correspondence yet $X(1)=\emptyset$. 
This game has no Nash equilibria, since for $p_1 < 1$ the deviation $p'_1 = \frac{1}{2}(1+p_1)$ is a strict best-response. For $p_1 \geq 1$, the deviation $p'_1 = 0.9$ is a strict best-response. This is the same phenomenon that happens in first price auctions. Yet, similarly to first price auctions, it is possible to show that given any decision map (which corresponds to tie breaking rules in first price auctions), there exists an $\epsilon$-Nash equilibrium for every $\epsilon > 0$. {To prove that we first show that if $p$ is an equilibrium with respect to some maximal tie breaking rule $X$, then for any other tie breaking rule $X'$ there always exists $p^\epsilon$ that is an $\epsilon$-Nash equilibrium with respect to $X'$ and results in the same welfare, $W(X'(p^\epsilon)) = W(X(p))$.} \comment{ The following result allows us to export results on equilibria for maximal decision maps to approximate equilibria of generic decision maps. Note that when we say \emph{generic}, we still require that $X(p) \in \operatorname{argmax} v(S) - p(S)$, but we don't require that $X(p)$ is maximal among the sets in $\operatorname{argmax} v(S) - p(S)$ anymore. } \begin{theorem}\label{lemma:any_map} Fix a combinatorial valuation $v$. Let $X$ be a maximal decision map and $p \in \textsc{Nash}^{X}$, i.e., $p$ is a pure Nash equilibrium with respect to the game defined by $X$. Now, for any (not necessarily maximal) decision map $X'$, there are $\epsilon$-Nash equilibria with respect to $X'$ converging to $p$, i.e., $\forall \epsilon > 0, \exists p^\epsilon \in \textsc{Nash}_\epsilon^{X'}$ such that $W(X'(p^\epsilon)) = W(X(p))$ and $p^\epsilon \rightarrow p$. \end{theorem} The main idea of the proof is to construct for every $p \in \textsc{Nash}^{X}$ and $\epsilon > 0$ a price vector $p^\epsilon$ such that $p^\epsilon_i = [p_i - \frac{\epsilon}{n}]^+$ for $i \in X(p)$ and $p^\epsilon_i = p_i$ otherwise. 
Then we argue that $p^\epsilon$ must be an $\epsilon$-Nash equilibrium of the game induced by any decision map $X'$. We defer the details of the proof \columnsversion{to the full version of this paper}{to the appendix}. {We note that the combination of Theorem~\ref{cor:non_empty} and Theorem~\ref{lemma:any_map} implies that:} \begin{corollary} For any combinatorial valuation $v$, any decision map $X'$ and any $\epsilon > 0$, the set of welfare maximizing $\epsilon$-Nash equilibria is non-empty. \end{corollary} \section{Uniqueness of Equilibria}\label{sec:uniqueness} In the previous section, we showed that if the decision map is maximal, equilibria are guaranteed to exist for any combinatorial valuation. This equilibrium might not be unique, as the Nash Bargaining Example (Example \ref{ex:nash_bargaining}) shows. In this section we show that submodularity of the valuation function is a sufficient condition to guarantee uniqueness of utilities for pure Nash equilibria. Moreover, equilibrium prices are unique for every seller with non-zero utility. For the subclass of gross-substitute valuations, the same claim also holds for mixed Nash equilibria. \subsection{\columnsversion{Submodular valuations: uniqueness of \\ pure equilibria}{Submodular valuations: uniqueness of pure equilibria}} We start with a few observations: \begin{lemma}\label{lemma:pre_uniqueness} If the buyer valuation is submodular, then for any pure Nash equilibrium $p$, $p_i \geq v(i \vert N \setminus i)$. Moreover, if $S = X(p)$ and $i \notin S$, then $v(i \vert S) = 0$. \end{lemma} \begin{proof} Setting price $p_i < v(i \vert N \setminus i)$ makes seller $i$ always be chosen, since for any $S \subseteq N \setminus i$, $v(S \cup i) - p(S \cup i) > v(S) - p(S) + [v(i \vert S) - v(i \vert N \setminus i)] \geq v(S) - p(S)$, where the last step follows by submodularity. 
Therefore, no price $p_i < v(i \vert N \setminus i)$ can be in equilibrium, since a deviation to any price between $p_i$ and $v(i \vert N \setminus i)$ is an improvement. Also, if $i \notin S = X(p)$, then $v(i \vert S) = 0$, otherwise he could deviate to $p'_i = \frac{1}{2} v(i \vert S)$, get selected by the buyer and have positive utility. \end{proof} The following simple fact about submodular functions will be useful in the following proof: \begin{lemma}\label{lemma:submodular_fact} If $v$ is a submodular function and $S \cap T = \emptyset$, then $v(T \vert S) = 0$ iff $v(t \vert S) = 0$ for all $t \in T$. \end{lemma} \begin{proof} If $v(T \vert S) = 0$, then by monotonicity of $v$, $v(t \vert S) \leq v(T \vert S) = 0$. Now, if $v(t \vert S) = 0$ for all $t \in T$, then let $T = \{t_1, \hdots, t_k\}$ and $$v(T \vert S) = \sum_{i=1}^k v(t_i \vert S \cup \{t_j; j < i\}) \leq \sum_{i=1}^k v(t_i \vert S ) = 0 $$ \end{proof} The previous lemma together with Lemma \ref{lemma:pre_uniqueness} imply that if $v$ is submodular, then for any Nash equilibrium $p$ and $S = X(p)$, it must hold that $v(S) = v(N)$. Looking again from the perspective of the welfare function, $W(p) = v(X(p))$, the observation above gives a \emph{Price of Anarchy} result for pricing games with submodular valuations: it states that all Nash equilibria maximize the welfare, and thus the Price of Anarchy is exactly one. \begin{theorem} If the valuation $v$ is submodular, then the utility of each seller $i$ in any pure Nash equilibrium is $u_i = v(i \vert N \setminus i)$. In particular, for every pure Nash equilibrium profile $p$, if $v(i \vert N \setminus i) > 0$, then $p_i = v(i \vert N \setminus i)$ and $i\in X(p)$. \end{theorem} \begin{proof} Let $p$ be a pure Nash equilibrium and $S = X(p)$. From Lemma \ref{lemma:pre_uniqueness}, we know that $p_i \geq v(i \vert N \setminus i)$ for all $i$. 
Additionally, for $i \notin S$, $v(i \vert S) = 0$ and therefore by sub-modularity $v(i \vert N \setminus i) = 0$. We are left to prove that for $i \in S$, $p_i \leq v(i \vert N \setminus i)$. Fixing $i \in S$, we use the first condition in Lemma \ref{lemma:characterization} to obtain a set $T \not\ni i$ such that $v(S) - p(S) = v(T) - p(T)$. We consider two cases: \emph{Case (i)} $T \not\subset S$. Observe that for all $j \in T \setminus S$, $p_j = 0$, otherwise this seller could deviate to $\frac{1}{2} p_j$, making $T$ be selected by the buyer instead of $S$ and getting positive utility. Given that, since $S \in D(v;p)$: $$v(S) - p(S) \geq v(S \cup T \setminus i) - p(S \cup T \setminus i) = v(S \cup T \setminus i) - p(S \setminus i)$$ which implies that: $$p_i \leq v(S) - v(S \cup T \setminus i) = v(N) - v(N \setminus i) = v(i \vert N \setminus i)$$ where $v(S) = v(N)$ follows from Lemma \ref{lemma:pre_uniqueness}. The fact that $v(S \cup T \setminus i) = v(N \setminus i)$ comes from the observation that for $k \notin S \cup T$, $v(k \vert T) = 0$, otherwise seller $k$ could set his price to $\frac{1}{2} v(k \vert T)$ and be selected, since $T \cup \{k\}$ would be preferable to $S$ at such prices. Since $v(k \vert T) = 0$, by submodularity $v(k \vert S \cup T \setminus i) = 0$ and therefore $v(S \cup T \setminus i) = v(N \setminus i)$ by Lemma \ref{lemma:submodular_fact}. \emph{Case (ii)} $T \subset S$. We have that $v(S) - p(S) = v(T) - p(T) \geq \max ( v(S\setminus i) - p(S \setminus i), v(T \cup \{i\}) - p(T \cup \{ i \}))$. In particular: $v(i \vert T) \leq p_i \leq v(i \vert S \setminus i)$. Since by submodularity $v(i\vert T) \geq v(i \vert S \setminus i)$, the inequalities must hold with equality, so $p_i = v(i \vert S \setminus i) = v(S) - v(S\setminus i) = v(N) - v(N\setminus i) = v(i \vert N \setminus i)$. We know $v(S) = v(N)$ from Lemma \ref{lemma:pre_uniqueness}. 
In order to see that $v(S\setminus i) = v(N\setminus i)$, notice that for any $j \notin S$ by the second condition of Lemma \ref{lemma:characterization}: $$v(S\setminus i) - p(S \setminus i) = v(S) - p(S) \geq v(S \cup j \setminus i) - p(S \setminus i)$$ and therefore $v(j \vert S \setminus i) = 0$. Using Lemma \ref{lemma:submodular_fact}, we get that $v(S \setminus i) = v(N \setminus i)$. \end{proof} We emphasize that even if $X(p)=S\neq N$, the utility of $i\in S$ is $v(i \vert N\setminus i)$ and not $v(i \vert S\setminus i)$. These two might be different, for example, this is the case for Bertrand competition. The next example shows that submodularity is in some sense necessary. Weaker concepts of `substitutability' such as XOS or subadditivity are not enough to ensure uniqueness of equilibria. \begin{example} Consider the following instance of the pricing game with three sellers and a buyer with valuation $v$ such that $v(\emptyset) = 0$, $v(S) = 2$ for $1\leq \abs{S} \leq 2$ and $v(S) = 3$ for $\abs{S} = 3$. This function is in XOS and is subadditive, but it is not submodular, since $v(3 \vert \{1\}) = 0 < 1 = v(3 \vert \{1,2\})$. The utilities in equilibrium are not unique, indeed, using the conditions in Lemma \ref{lemma:characterization} it is easy to see that the pure equilibria $p$ with $X(p) = N$ are given by: $$p = (x,x,1-x) \text{ for } 0 \leq x \leq \textstyle\frac{1}{2}$$ and permutations thereof. \end{example} \subsection{Gross substitute valuations: uniqueness of mixed equilibria} One other question that arises from the previous theorem is whether the uniqueness in utilities also holds for mixed Nash equilibria. In what follows we give a mixed Nash uniqueness result for the subclass of gross substitute valuations. Recall that a valuation $v$ is gross substitute if {when some prices are increased, there is always a demanded set which contains all previously demanded items for which the price did not change. 
Formally, $v$ is gross substitute if } for any price vectors $p' \geq p$ and $S \in D(v;p)$, there is $S' \in D(v;p')$ such that $S \cap \{j; p_j = p'_j\} \subseteq S'$. We say that a decision map $X$ is \emph{\textsc{Gs}-consistent} with a gross substitute valuation $v$ if given $p' \geq p$, $X(p) \cap \{j; p_j = p'_j\} \subseteq X(p')$. It is known that when $v$ is a gross substitute valuation, then the \emph{greedy algorithm} with lexicographic tie-breaking implements a \textsc{Gs}-consistent decision map. This greedy algorithm starts with $S = \emptyset$ and while $\max_{i \notin S} v(i \vert S) - p_i \geq 0$, adds to $S$ the lexicographically first element for which the maximum is reached. Let $X(p)$ be the resulting set from this process. It follows from \cite{DressTerhalle_WellLayered, Murota96} that $X(p) \in \operatorname{argmax}_S v(S) - p(S)$. Before stating and proving the uniqueness result for gross substitutes, we give a version of Lemma \ref{lemma:pre_uniqueness} for mixed Nash equilibria and a technical lemma about gross substitute valuations. \begin{lemma}\label{lemma:pre_uniqueness_mixed} If the buyer valuation is submodular, then for any mixed Nash equilibrium $p$, $p_i \geq v(i \vert N \setminus i)$ with probability $1$. Moreover, if $\mathbb{E}[u_i(p)] = 0$, then $\mathbb{P}[v(i \vert X(p)) > 0] = 0$. \end{lemma} \begin{proof} The first part follows directly from the proof of Lemma \ref{lemma:pre_uniqueness}. For the second part, if $\mathbb{P}[v(i \vert X(p)) > 0] > 0$, then there is $\epsilon > 0$ for which $\mathbb{P}[v(i \vert X(p)) > \epsilon] > 0$. Therefore, by deviating to price $p_i = \epsilon$, seller $i$ can obtain utility $\epsilon$ with positive probability, contradicting that $\mathbb{E}[u_i(p)] = 0$ for this equilibrium. \end{proof} \begin{lemma}\label{lemma:technical_gs} Consider a gross substitute valuation function $v$ over $N$, disjoint sets $A,B$ and $j \notin A \cup B$. 
Assume that $v(i \vert A \cup j) = 0$ for all $i \in B$ and that $0 \leq v(j \vert A) - x < v(B \vert A)$ for some $x \geq 0$. Then there is $i \in B$ such that $v(j \vert A) - x < v(i \vert A)$. \end{lemma} \begin{proof} Let $\epsilon = \frac{1}{2n}[v(B \vert A) - v(j \vert A) + x]$ and define a price vector $p$ such that $p_j = x$, $p_i = 0$ for $i \in A$, $p_i = \epsilon$ for $i \in B$ and $p_i =\infty$ otherwise. Let $S$ be a maximal set in $D(v;p)$. Clearly $A \subseteq S$ since the elements in $A$ have zero price. Now, we argue that $S\setminus A \subseteq B$. We note that if $j \in S$, then no element in $B$ can be in $S$, since $v(i \vert A \cup j) = 0$ and $p_i > 0$ for all $i \in B$. But $A \cup j$ is not optimal, since $v(B \vert A) - \epsilon \abs{B} > v(j \vert A) - x $. Therefore, $S$ must be a subset of $A \cup B$. Also, since $v(B \vert A) - \epsilon \abs{B} > v(j \vert A) - x \geq 0$, there must be some $i \in B \cap S$. Consider such $i$ and the price vector $p'$ such that $p'_k = p_k$ for all $k \in A \cup \{i,j\}$ and $p'_k = \infty$ otherwise. By the definition of gross substitutes, there must be $S' \in D(v;p')$ such that $i \in S'$. Since items in $A$ have zero price, we can assume that $A \subseteq S'$. Since $i$ has positive price and $v(i \vert A \cup j) = 0$, then $j$ can't be in $S'$. So, $S' = A \cup i$. This implies that $v(A \cup i) - p(A \cup i) \geq v(A \cup j) - p(A \cup j)$, which can be re-written as: $v(i \vert A) > v(i \vert A) - \epsilon \geq v(j \vert A) - x$. \end{proof} \begin{theorem} Let the valuation $v$ be gross substitutes, $X$ be a \textsc{Gs}-consistent decision map and $p$ be a vector of independent random variables forming a mixed Nash equilibrium. It holds that if $v(i \vert N \setminus i) > 0$ then $p_i = v(i \vert N \setminus i)$ and $i\in X(p)$ deterministically. 
Additionally, if $v(i \vert N \setminus i) = 0$, then seller $i$ has expected utility of zero at any mixed Nash equilibrium, i.e., $\mathbb{E}[u_i(p)] = 0$. \end{theorem} \begin{proof} We know by Lemma \ref{lemma:pre_uniqueness_mixed} that $p_i \geq v(i \vert N \setminus i)$ with probability $1$. Let $S = \{i \in N; \mathbb{P} [ p_i > v(i \vert N \setminus i) ] > 0\}$. If we show that for every $i \in S$, $\mathbb{E}[u_i(p)] = 0$, then we are done, since then clearly $v(i \vert N \setminus i) = 0$, otherwise this seller would be able to get positive utility by posting a price $\frac{1}{2} v(i \vert N \setminus i)$. Let $p_i^t$ be some price in the support of $p_i$ for which $\mathbb{E}[u_i(p)] = \mathbb{E}[u_i(p_i^t, p_{-i})]$, $\mathbb{P}[ p_i \geq p_i^t ] \geq \textstyle\frac{1}{t}$ and $\mathbb{P}[ p_i \leq p_i^t ] \geq 1-\textstyle\frac{1}{t}$. Define $p^t = (p_1^t, \hdots, p_n^t)$. Since $X(p^t)$ can take finitely many values, there is a set $T \subset N$ such that $T = X(p^t)$ infinitely often. We know that $S \not\subseteq T$, since for very large $t$, $p_i^t > v(i \vert N \setminus i)$ for all $i \in S$. For any $i \notin T$ we know that $$\mathbb{E}[u_i(p)] = \mathbb{E}[u_i(p_i^t, p_{-i})] = p_i^t \cdot \mathbb{P}[ i \in X(p_i^t, p_{-i})]$$ where the first equality comes from $p_i^t$ being a best response to $p_{-i}$. Now, since $X$ is a \textsc{Gs}-consistent decision map, if $p_{-i} \leq p^t_{-i}$, then $i \notin X(p_i^t, p_{-i})$, therefore: $$\begin{aligned}\mathbb{P}[ i \in X(p_i^t, p_{-i})] & \leq \mathbb{P}[ \exists j \neq i ; p_{j} > p_{j}^t ] \leq \\ & \leq \textstyle\sum_{j \neq i} \mathbb{P}[ p_{j} > p_{j}^t ] \leq \textstyle\frac{n-1}{t}\end{aligned}$$ Taking $t \rightarrow \infty$ we get that $\mathbb{E}[u_i(p)] = 0$. Now we claim that $v(i \vert T) = 0$. 
If not, then the price $p'_i = \frac{1}{2} v(i \vert T)$ would guarantee that: $i \in X(p')$ for $p' = (p'_i, p^t_{-i})$ since $v(T \cup \{i\}) - p'(T \cup \{i\}) > v(T) - p'(T) = \max_{S'; i \notin S'} v(S') - p'(S')$. Therefore, by the fact that the valuations are gross substitutes: $$\begin{aligned} \mathbb{E}[u_i(p)] & \geq \mathbb{E}[u_i(p'_i, p_{-i})] = p'_i \cdot \mathbb{P}[i \in X(p'_i, p_{-i})]\\ & \geq p'_i \cdot \mathbb{P}[p_{j} \geq p^t_{j}, \forall j \neq i] \geq \frac{p'_i}{t^{n-1}}> 0\end{aligned}$$ which contradicts that $\mathbb{E}[u_i(p)] = 0$. So, it must be the case that $v(i \vert T) = 0$. In particular, $v(N) = v(T)$ by Lemma \ref{lemma:submodular_fact}. Now, in order to complete the proof, we want to show that $S \cap T = \emptyset$, since this implies that if for some seller $j$, $\mathbb{P} [ p_j > v(j \vert N \setminus j) ] > 0$, then $j \notin T$ and therefore $\mathbb{E}[u_j(p)]= 0$. Assume for contradiction that there is $j \in S \cap T$; taking $t$ large enough, we can assume that $p^t_j > v(j \vert N \setminus j)$. Then: $$v(j \vert T\setminus j) - p_j^t < v(j \vert T\setminus j) - v(j \vert N \setminus j) = v(N \setminus T \vert T \setminus j)$$ since $v(T) = v(N)$. \comment{ Since $$\begin{aligned} v(j \vert T \setminus j) - v(j \vert N \setminus j) &= v(T) - v(T \setminus j) - v(N) + v(N \setminus j) \\ &= v(N \setminus j) - v(T \setminus j) = v( N\setminus T \vert T \setminus j) \end{aligned}$$} Applying Lemma \ref{lemma:technical_gs} with $j$, $A = T \setminus j$, $B = N \setminus T$ and $x = p_j^t$, we get that there is $i \notin T$ such that: $v(j \vert T \setminus j) - p_j^t < v(i \vert T \setminus j)$. Therefore, if such $i$ deviates to price $p'_i > 0$ such that $v(j \vert T \setminus j) - p_j^t < v(i \vert T \setminus j) - p'_i$, then $i \in X(p')$ for $p' = (p'_i, p^t_{-i})$, since $v(T \cup i \setminus j) - p'(T \cup i \setminus j) > v(T) - p'(T) \geq \max_{S' \not\ni i} v(S') - p'(S')$. 
Therefore: $$\mathbb{E}[u_i(p)] \geq \mathbb{E}[u_i(p'_i, p_{-i})] \geq p'_i \cdot \mathbb{P}[p_{j} \geq p^t_{j}, \forall j \neq i] > 0$$ contradicting that $\mathbb{E}[u_i(p)]=0$. This shows that there can't be a seller $j$ in $S \cap T$, concluding the proof. \end{proof} \section{Extensions of the basic model}\label{sec:extensions} In this section we explore some natural generalizations of the basic model. We first consider costly services, then we consider valuation uncertainty and the connection to the model of multiple buyers with no price discrimination. Finally, we consider the case that a single seller controls multiple services but can only price individual services. \subsection{Service costs} \label{sec:costs} In the basic model we assumed that the seller has no cost in providing the service. Now, we consider the extension in which each seller $i$ has cost $c_i \geq 0$ for providing the service. We model this by changing the utility of each seller $i$ to: $$u_i(p) = (p_i - c_i) \cdot \ensuremath{\mathbb{I}}\{ i \in X(p)\}$$ Clearly in this new game, it is a dominated strategy to post a price $p_i < c_i$. The first thing we notice is that maximality of the map is not enough to guarantee existence of pure Nash equilibrium anymore. Consider the following example: \begin{example} Consider a game with a buyer with valuation $v(S) = 3$ if $\abs{S} \geq 1$ and $v(\emptyset) = 0$, and two sellers with costs $c_1 = 1$ and $c_2 = 2$. Also, let the buyer break ties in favor of the costlier seller, i.e., $X(p) = \{i\}$ if $p_i < p_{3-i}$ and $p_i \leq 3$, $X(p) = \{2\}$ if $p_1 = p_2 \leq 3$ and $X(p) = \emptyset$ otherwise. The map $X$ is maximal, and yet there is no pure Nash equilibrium: there are no equilibria with $\min\{p_1, p_2\} > 3$, since one of the sellers can decrease his price and be selected. 
For the case $\min\{p_1, p_2\} \leq 3$, there are no equilibria with $p_1 \neq p_2$ since the seller with the lowest price can improve his utility by slightly increasing his price. We are left with $p_1 = p_2 \leq 3$. By the tie breaking rule, $X(p) = \{2\}$ and since $c_2 = 2$, we must have $p_1 = p_2 \geq 2$, otherwise seller $2$ would be getting negative utility. This can't be an equilibrium since seller $1$ can decrease his price and be selected. \end{example} However, we show that with an additional mild assumption on the decision map $X$, we can prove the existence of $\epsilon$-Nash equilibria for any $\epsilon > 0$. We say that a map is \emph{up-consistent} if for any price vector $p$ if $X(p) = S$ and $p'_i > p_i$, then either $X(p'_i, p_{-i}) = S$ or $i \notin X(p'_i, p_{-i})$. Lexicographically breaking ties is up-consistent as we show next. \begin{lemma} The map that chooses the lexicographically first set among the (maximal) sets of $D(v;p)$ is (maximal) up-consistent. \end{lemma} \begin{proof} Let $S = X(p)$, $i \in S$, $p'_i > p_i$ and $p'=(p'_i, p_{-i})$. We want to show that if $i \in X(p')$, then $X(p') = S$. By the lexicographic rule, if $X(p') \neq S$, there must be $T \in D(v;p') \setminus D(v;p)$ such that $i \in T$. Given that $T \notin D(v;p)$, there must be a set $T'$ such that $v(T) - p(T) < v(T') - p(T')$. Since $p$ coincides with $p'$ except for coordinate $i$ and $i \in T$, then: $v(T) - p'(T) < v(T') - p'(T')$, therefore $T$ can't be in $D(v;p')$. \end{proof} Using the definition of up-consistency, we show the existence of $\epsilon$-Nash for any $\epsilon > 0$. Moreover, the following theorem also states that there are $\epsilon$-Nash equilibria with optimal welfare, where the welfare function is defined as the sum of the utilities of the agents (the sellers and the buyer): $W(p) = v(X(p)) - c(X(p))$. This implies a Price of Stability of one. 
\begin{theorem} Consider any combinatorial valuation $v$, a vector of service costs $c$ and an up-consistent decision map $X$. For every $\epsilon > 0$, there exists an $\epsilon$-Nash equilibrium $p$ with $X(p) = X(c)$. \end{theorem} \begin{proof} Fix $\epsilon > 0$, let $S = X(c)$ and define: $$F = \left\{p \in \ensuremath{\mathbb R}_+^{N} ; p \geq c, X(p) = S \right\}$$ Clearly $c \in F$. Set initially $p = c$ and while there exists some $i \in S$ such that $i \in X(p'_i, p_{-i})$, for some $p'_i \geq p_i + \epsilon$, update $p_i$ to $p'_i$. By up-consistency we maintain the invariant that $X(p) = S$ during this procedure. When it stops, we have a vector $p$ such that $X(p) = S$, for all $i \in S$ and $p'_i \geq p_i + \epsilon$, $X(p'_i, p_{-i}) \not\ni i$ and for $i \notin S$, $p_i = c_i$. Now, it is easy to see that this is an $\epsilon$-Nash equilibrium: for $i \in S$, $u_i = p_i - c_i$. In order to increase his utility by $\epsilon$, each seller needs to deviate to a price $p'_i \geq p_i + \epsilon$, but from the construction he won't be allocated at that price. For $i \notin S$, we know that for any set $T \ni i$, $v(S) - p(S) \geq v(T) - p(T)$, so if $i$ deviated to any price $p'_i > c_i$, then for $p' = (p'_i, p_{-i})$ we would have $v(S) - p'(S) > v(T) - p'(T)$, hence $i \notin X(p')$. \end{proof} The following result characterizes the sets picked in equilibrium: \begin{lemma} If $p$ is a pure Nash equilibrium (or the limit of $\epsilon$-Nash equilibria as $\epsilon \rightarrow 0$), then: $$v(i \vert S \setminus i) \geq c_i, \forall i \in S$$ $$v(T \cup j) + c(S \setminus T) - c_j \leq v(S), \forall T \subseteq S \text{ and } j \notin S$$ \end{lemma} \begin{proof} The first part follows from the fact that $S \in D(v;p)$ and therefore $v(S) - p(S) \geq v(S \setminus i) - p(S \setminus i)$ and the fact that $p_i \geq c_i$. 
The second statement comes from the fact that for $j \notin S$ and $p'_j > c_j$, $j \notin X(p'_j, p_{-j})$, in particular: $v(S) - p(S) \geq v(T \cup j) - p(T) - p'_j$. Taking $p'_j \rightarrow c_j$ and using the fact that $p_i \geq c_i$, we get the desired result. \end{proof} In particular, $v(S) - c(S) \geq \max\{ v(S \setminus i) - c(S \setminus i), v(S \cup j) - c(S \cup j), v(S \cup j \setminus i) - c(S \cup j \setminus i), \forall i \in S, j \notin S\}$. This corresponds to a minimum of the local search procedure that seeks to optimize $v(S) - c(S)$ by either adding an element, removing an element or swapping an element in the set by an element outside. Gul and Stacchetti \cite{GulStachetti} show that if a valuation function $v$ is gross substitutes, then this procedure doesn't get stuck in suboptimal local maxima. In particular, this implies that: \begin{corollary} If the valuation $v$ is gross substitutes and $p$ is a Nash equilibrium (or a limit of $\epsilon$-Nash equilibria), then the welfare of the allocation $W(p) = v(X(p)) - c(X(p))$ is optimal, i.e., $W(p) = \max_{S \subseteq N} v(S) - c(S)$. In other words, the Price of Anarchy of this game, for gross substitute valuations, is $1$. \end{corollary} In the following example we show that submodularity is not enough to guarantee that all Nash equilibria have optimal welfare, unlike the case without costs. In fact, the welfare of a Nash equilibrium might be arbitrarily smaller than the optimal welfare, i.e., the Price of Anarchy is unbounded. \begin{example} \label{ex:unbounded_poa_submodular} {Let $k$ and $\ell$ be integers with $k \gg \ell$ and $k$ odd. Now, consider a pricing game with $k + \ell+1$ sellers indexed by $\{0, 1, \hdots, k+\ell \}$ with costs $c_i = k-\ell$ for $i=0,...,\ell$ and $c_i = 0$ for $i=\ell+1,...,\ell+k$. Define the valuation of the buyer as follows: $v(S) =\min \{k\cdot \ell, \sum_{i \in S} w_i \}$ where $w_i = k$ for $i=0,...,\ell$ and $w_i = \ell$ for $i=\ell+1,...,\ell+k$. 
Assume that the buyer breaks ties lexicographically.} {Consider now the following price vector: $p_i = k-\ell$ for $i=0,...,\ell$ and $p_i = \ell$ for $i=\ell+1,...,\ell+k$. First we claim that this price vector is a Nash equilibrium. By the lexicographic rule, $X(p) =\{0, \hdots, \ell-1\}$. For any seller $i \in X(p)$, if $i$ increases his price, the buyer will replace him by seller $\ell$, choosing the set $\{0,1, \hdots, \ell\} \setminus i$. For seller $\ell$, he cannot become selected by increasing his price and if he decreases his price he gets negative utility since his cost is $k -\ell$. Now, for any seller $i \in \{\ell+1, \hdots, \ell+k\}$, they are not selected even if they decrease their price to zero. Now, notice that $W(p) = v(X(p)) - c(X(p)) = \ell \cdot \ell$. For the set $S^* = \{\ell+1, \hdots, \ell+k\}$, $v(S^*) - c(S^*) = \ell \cdot k$. As $k/\ell \rightarrow \infty$ the gap between the optimal welfare and the welfare of this Nash equilibrium goes to infinity.} \end{example} \comment{ \begin{example} {Let $k$ be a large odd integer and $\ell= \frac{1}{2}(k+1)$. Now, consider a pricing game with $k + \ell+1$ sellers indexed by $\{0, 1, \hdots, k+\ell \}$ with costs $c_i = k-\ell$ for $i=0,...,\ell$ and $c_i = 0$ for $i=\ell+1,...,\ell+k$. Define the valuation of the buyer as follows: $v(S) =\min \{k\cdot \ell, \sum_{i \in S} w_i \}$ where $w_i = k$ for $i=0,...,\ell$ and $w_i = \ell$ for $i=\ell+1,...,\ell+k$. Assume that the buyer breaks ties lexicographically.} {Consider now the following price vector: $p_i = k-\ell$ for $i=0,...,\ell$ and $p_i = \ell$ for $i=\ell+1,...,\ell+k$. First we claim that this price vector is a Nash equilibrium. By the lexicographic rule, $X(p) =\{0, \hdots, \ell-1\}$. For any seller $i \in X(p)$, if $i$ increases his price, the buyer will replace him by seller $\ell$, choosing the set $\{0,1, \hdots, \ell\} \setminus i$. 
For seller $\ell$, he cannot become selected by increasing his price and if he decreases his price he gets negative utility since his cost is $k -\ell$. Now, for any seller $i \in \{\ell+1, \hdots, \ell+k\}$, they are not selected even if they decrease their price to zero. Now, notice that $W(p) = v(X(p)) - c(X(p)) = \ell \cdot \ell = \frac{1}{4}(k+1)^2$. for the set $S^* = \{\ell+1, \hdots, \ell+k\}$, $v(S^*) - c(S^*) = \ell \cdot k = \frac{1}{2}k(k+1)$. As $k \rightarrow \infty$ the gap between the optimal welfare and the welfare of this Nash equilibrium goes to $2$.} \end{example} } We also note that even when equilibria exist, they are not necessarily unique, even when the valuations are submodular. Consider the following example: \begin{example} Consider the game with three sellers $\{1,2,3\}$ with costs $0.1$, $0.1$ and $0.3$ and a buyer with the following submodular valuation $$v(S) = \min\{2, \ensuremath{\mathbb{I}}\{1\in S\} + \ensuremath{\mathbb{I}}\{2\in S\} + 2 \cdot \ensuremath{\mathbb{I}}\{3\in S\} \}$$ Also, let $X$ be the decision map that picks the lexicographically first element in $D(v;p)$. Now, note that all the vectors $(p_1, p_2, 0.3)$ with $p_1 \geq 0.1$, $p_2 \geq 0.1$ and $p_1 + p_2 \leq 0.3$ are Nash equilibria. \end{example} \subsection{Value uncertainty and Multiple buyers}\label{subsec:value_uncertainty} The basic pricing game assumes that the valuation function of the buyer is public information. We consider here the case where sellers have uncertainty about the true valuation of the buyer, i.e., sellers only know that $v$ is drawn from a certain distribution $\mathcal{D}$. 
In such case each seller $i$ seeks to maximize: $$u_i(p) = \mathbb{E}_{v \sim \mathcal{D}}[p_i \cdot \ensuremath{\mathbb{I}}\{ i \in X_v(p) \}]$$ If $\mathcal{D}$ is the uniform distribution, this is equivalent to the scenario where there are multiple buyers, each buyer $k\in [m]$ with a valuation function $v_k:2^{N} \rightarrow \ensuremath{\mathbb R}_+$ and sellers are not allowed to price discriminate, i.e, they need to post the same price for all the buyers. Upon facing a price vector $p$, each buyer purchases the bundle $X_k(p) \in D(v_k, p)$ and the revenue of seller $i$ is: $$u_i(p) = \textstyle\sum_{k=1}^m p_i \cdot \ensuremath{\mathbb{I}} \{ i \in X_k(p) \}$$ We observe that the above model is a generalization of \emph{Bertrand networks} that were defined by Babaioff, Lucier and Nisan \cite{bertrand_networks}. A Bertrand Network is a game defined on a graph where each node is a seller and his strategy is to set a price $p_i \in [0,1]$. Each edge corresponds to a set of buyers each interested in buying one item for price at most $1$ and who chooses to purchase from the accessible seller (incident node) offering the cheaper price, breaking ties in a fixed but arbitrary manner. This defines a game among the sellers whose utility is given by their total revenue, i.e., the price posted multiplied by number of buyers that decide to purchase from this seller. This naturally maps to an instance of our pricing game, where each node corresponds to a seller and each edge $e = (i,j)$ corresponds to a buyer $k$ whose valuation is $v_k(S_k) = 1$ if $S_k \cap \{i,j\} \neq \emptyset$ and zero otherwise. Babaioff et al \cite{bertrand_networks} show that pure Nash equilibria might not exist, but mixed Nash equilibria always exist. Both results carry over to our setting. The non-existence of pure Nash for some instances follows directly, since Bertrand Networks is a particular case of our pricing game. 
The existence of mixed Nash equilibria proof in \cite{bertrand_networks} is non-trivial and it \emph{doesn't} follow from Nash's Theorem, as usual, since the strategy space is infinite and the utility functions are discontinuous. The strategy used for proving existence of equilibrium, follows by applying a general result by Simon and Zame \cite{simon_zame}. The same type of technique yields the existence of mixed Nash in our setting as well. We defer the proof to the full version of this paper. We remark that the example in \cite{bertrand_networks} can also be used to show an instance of the game with multiple buyers without $\epsilon$-Nash equilibria for small values of $\epsilon$: \begin{example} Consider a setting with two sellers $\{A,B\}$ and two buyers with valuations $v_1(S_1) = \ensuremath{\mathbb{I}}\{A \in S_1\}$ and $v_2(S_2) = \ensuremath{\mathbb{I}}\{ S_2 \neq \emptyset\}$. We show that for sufficiently small $\epsilon$, no (pure) $\epsilon$-Nash exist for any tie breaking rule. \comment{Therefore, if $p_1 \leq p_2 \leq 1$, then: $u_1(p) = 2p_1$ and $u_2(p) = 0$. For $p_2 < p_1 \leq 1$, $u_1(p) = p_1$, $u_2(p) = p_2$.} Fix some value of $\epsilon$ such that $0 < \epsilon < \frac{1}{10}$ and assume that $p$ is an $\epsilon$-Nash equilibrium. First we note that it must be the case that $u_1(p) \geq 1-\epsilon$ since for any $p_2$, $u_1(1,p_2) \geq 1$. Now, consider two cases: Case (i) $p_1 \geq 1-\epsilon$. If $p_2 \geq p_1$, then $u_2(p) = 0$ and seller $2$ can deviate to $p_1 - \epsilon$ giving him utility: $u_2(p_1, p_1-\epsilon) = p_1-\epsilon \geq 1-2\epsilon > \epsilon$. If $p_1 \geq p_2 \geq p_1 - \epsilon$, then seller $1$ can deviate to $ p_2 - 2\epsilon$ and get utility $u_1(p_2 - 2\epsilon, p_2) = 2(p_2 - 2\epsilon) \geq 2-8\epsilon \geq p_1 + (1-8\epsilon) > p_1 + \epsilon$. Now, if $p_2 < p_1 - \epsilon$, then seller $2$ can increase his utility by raising his price by $\epsilon$ and still be chosen. Case (ii): $p_1 < 1-\epsilon$. 
If $p_2 < p_1$ or $p_1 < \frac{1-\epsilon}{2}$, then $u_1(p) < 1-\epsilon$ and player $1$ can improve his utility by more than $\epsilon$ by raising his price to $1$. If $\frac{1-\epsilon}{2} \leq p_1 \leq p_2$, then $u_2(p) = 0$, but seller $2$ can lower his price to $p_1 - \epsilon$, getting utility at least $\frac{1-\epsilon}{2} - \epsilon \geq \epsilon$. \end{example} \subsection{Multiple services per seller} \comment{ \mbcomment{We need to present POA and POS results that are independent of computation, and then remark that the "positive" side, of achieving O(log n) approximation is also poly-time computable.}} Finally, we relax the assumption that each seller controls a single service. Let $N$ be the total set of services and let $N = \cup_{i=1}^r N_i$ be a disjoint partition ($N_i \cap N_{i'} = \emptyset$ for $i \neq i'$) where $N_i$ represents the services controlled by seller $i$. Given a buyer with valuation $v:2^N \rightarrow \ensuremath{\mathbb R}$ and a decision map $X$, this defines a game between sellers where the strategy of each seller is to set prices for each service in $N_i$, i.e., $p_i \in \ensuremath{\mathbb R}_+^{N_i}$. The utility of seller $i$ is then given by: $$u_i(p) = \textstyle\sum_{j \in N_i} p_j \cdot \ensuremath{\mathbb{I}} \{ j \in X(p) \}$$ Observe that with two goods that are perfect substitutes but controlled by the same seller, their price won't go down to zero as in Bertrand competition, since a seller won't be competing with himself. We illustrate the subtleties of this variant by analyzing the special case of the \emph{monopolistic seller}, i.e., where one seller (seller $1$) controls all the services and posts prices for each of them individually. 
This boils down to an optimization problem where the goal of the seller is to find a price vector $p$ that optimizes his utility: $$u_1(p):=\sum_{j \in N} p_j \cdot \ensuremath{\mathbb{I}} \{ j \in X(p) \}$$ {Note that the constraint that the seller prices {\em individual services} and not bundles might prevent the seller, although a monopolist, from extracting the entire surplus.} First we show that, for the case of submodular valuations, this can be phrased as an optimization over sets: \begin{lemma}\label{lemma:monopolistic_seller_format} If the valuation $v$ is submodular and $X$ is maximal, then $\max_{p \in \ensuremath{\mathbb R}^N_+} u_1(p) = \max_{S \subseteq N} \sum_{i \in S} v(i \vert S \setminus i)$ \end{lemma} \begin{proof} Given $p \in \ensuremath{\mathbb R}^N_+$, let $S = X(p)$. Since $S \in D(v;p)$ it must be the case that $p_i \leq v(i \vert S \setminus i)$, so $u_1(p) = \sum_{i \in S} p_i \leq \sum_{i \in S} v(i \vert S \setminus i)$. Conversely, given $S \subseteq N$, setting prices $p_i = v(i \vert S \setminus i)$ for $i \in S$ and $p_i = \infty$ otherwise, we get $X(p) = S$ and $u_1(p) = \sum_{i \in S} v(i \vert S \setminus i)$. \end{proof} {Since there are no costs, the welfare of this game can be defined as $W(p) = v(X(p))$. We note that unlike the case where each seller controls one good, the welfare in equilibrium can be far from efficient even if the buyer's valuation is gross substitutes. Let $H_k = \sum_{j=1}^k \frac{1}{j} = \Theta(\log k)$ be the $k$-th harmonic number and define the valuation $v(S)$ such that: $v(S) = 1+\epsilon$ if $\abs{S} = 1$ and $v(S) = H_{\abs{S}}$ if $\abs{S} \geq 2$. This function is clearly submodular (in fact, it is also gross substitutes). By Lemma \ref{lemma:monopolistic_seller_format}, the revenue in a Nash equilibrium is of the form $\sum_{i \in S} v(i \vert S \setminus i)$ for $S = X(p)$, which is maximized for sets $S$ of size $1$. 
Therefore, for all equilibria, the buyer buys at most one item, generating $1+\epsilon$ welfare, while the optimal achievable welfare is $H_n$. Since this is the unique equilibrium, this implies a logarithmic lower bound on the Price of Anarchy and Price of Stability.} {In what follows, we show that this bound is tight, i.e., for all Nash equilibria the welfare is at least an $\Omega(1/\log n)$ fraction of the optimal welfare. In other words, this implies an upper bound of $\log n$ on the Price of Anarchy (and thus also on the Price of Stability). We show that by proving that there is always a set $S$ such that $\sum_{i \in S} v(i \vert S \setminus i) \geq \frac{1}{H_n} v(N)$. Therefore, the seller can always secure that much revenue. Since the seller revenue is a lower bound to the welfare we have:} \begin{theorem} If the valuation $v$ is submodular and $X$ is maximal, then for every pure Nash equilibrium $p$ of the monopolistic seller game\footnote{since it is a one-player game, a Nash equilibrium corresponds to an optimal price vector}, $v(X(p)) \geq \frac{1}{H_n} v(N)$. In other words, the Price of Anarchy is bounded by $H_n = O(\log n)$. Moreover, there is a randomized polynomial-time algorithm that finds a vector $p$ with $u_1(p) \geq v(N) /2H_n $ with constant probability. \end{theorem} This is a consequence of the following lemma: \begin{lemma} \label{lemma:sample} Given a function $v:2^N \rightarrow \ensuremath{\mathbb R}_+$, consider the following randomized algorithm: pick a size $k \in \{1, \hdots, n\}$ with probability $(k \cdot H_n)^{-1}$ and then pick a random set $S$ of size $k$. Then: $\mathbb{E} \left[ \sum_{i\in S} v(i \vert S \setminus i) \right] = \frac{1}{H_n} v(N)$. 
\end{lemma} \begin{proof} Observe that we can rewrite the expectation as: $$\mathbb{E} \left[ \sum_{i\in S} v(i \vert S \setminus i) \right] = \sum_{k=1}^n \frac{1}{k H_n} \mathbb{E}_{\abs{S} = k} \left[ k \cdot v(S) - \sum_{i \in S} v(S \setminus i)\right]$$ Let $\tilde{v}(k) = \mathbb{E}_{\abs{S} = k} v(S)$. We note that: $$\begin{aligned}& \mathbb{E}_{\abs{S} = k} \sum_{i \in S} v(S \setminus i) = \sum_{S:\abs{S} = k} \frac{1}{{n \choose k}} \sum_{i \in S} v(S\setminus i) \\ & \quad = \sum_{T:\abs{T} = k-1} \frac{n-k+1}{{n \choose k}} v(T) = \sum_{T:\abs{T} = k-1} \frac{k}{{n \choose k-1}} v(T) = k \tilde{v}(k-1) \end{aligned}$$ Therefore: $$H_n \cdot \mathbb{E} \left[ \sum_{i\in S} v(i \vert S \setminus i) \right] = \sum_{k=1}^n \left( \tilde{v}(k) - \tilde{v}(k-1) \right) = \tilde{v}(n) = v(N)$$ \end{proof} This in particular implies a polynomial time randomized approximation algorithm for the optimization problem faced by the seller. We remark that since $u_1(p)$ is bounded from above by $v(N)$, we can get expected utility of $\Omega(v(N)/H_n)$ with high probability by running the algorithm $\Theta(\log n)$ times and taking the best output. We make this statement precise in the following Lemma: \begin{lemma}\label{lemma:high_prob} Given a submodular function $v$ and $r(S) := \sum_{i \in S} v(i \vert S \setminus i)$, then if $S_1, \hdots, S_k$ are independent samples according to the procedure described in Lemma~\ref{lemma:sample} for $k= s \cdot H_n$, then $\mathbb{P}[\max_i r(S_i) \geq \frac{v(N)}{2 H_n}] \geq 1-e^{-s/2}$. \end{lemma} \begin{proof} Since $v$ is a submodular function $r(S) = \sum_{i \in S} v(i \vert S \setminus i) \leq v(N)$ for all $S$. Therefore, for all $t \in [0, v(N)]$ $$\frac{v(N)}{H_n} = \mathbb{E}[r(S_i)] \leq t \cdot \mathbb{P}\left[ r(S_i) < t \right] + v(N) \cdot \mathbb{P}\left[ r(S_i) \geq t \right]$$ so $\mathbb{P}\left[ r(S_i) \geq t \right] \geq \frac{\frac{v(N)}{H_n} -t }{v(N) - t} \geq \frac{1}{H_n} - \frac{t}{v(N)}$. 
Taking $t = \frac{v(N)}{2 H_n}$ we get $\mathbb{P} \left[ r(S_i) \geq \frac{v(N)}{2 H_n} \right] \geq \frac{1}{2 H_n} $. Therefore, $$\mathbb{P} \left[ \max_i r(S_i) \geq \frac{v(N)}{2 H_n} \right] \geq 1-(1-\frac{1}{2H_n})^{s H_n} \geq 1-e^{-s/2}$$ \end{proof} Finally, we note that the optimization problem faced by the monopolistic seller is a particular case of the \emph{Unique Coverage Problem} studied by Demaine et al \cite{Demaine08}: given an universe set $U$ and subsets $Y_1, Y_2, \hdots, Y_n$, find a collection of subsets that maximizes the number of elements covered by \emph{exactly} one set. In other words, find $S \subseteq [n]$ in order to maximize $\sum_{i \in S} \abs{Y_i \setminus \cup_{j \in S \setminus i} Y_j}$. This is exactly the optimization problem faced by the monopolistic seller when $v(S) = \abs{\cup_{i \in S} Y_i}$. Demaine et al \cite{Demaine08} give a $O(\log n)$ approximation of the Unique Coverage problem and show an $\Omega(\log^\sigma n)$ hardness of approximation for some constant $\sigma > 0$ under suitable complexity assumptions. This in particular implies logarithmic hardness of approximation for the monopolistic seller problem. \columnsversion{}{ \appendix \section{Proof of Existence of $\epsilon$-Nash} In this appendix, we prove Theorem \ref{lemma:any_map}. First, consider the following Lemma: \begin{lemma}\label{lemma:pre_any_map} Given a combinatorial valuation $v$ and a maximal decision map $X$, if $X(p) = S$, and $p^\epsilon_i = [p_i - \frac{\epsilon}{n}]^+$ for $i \in S$ and $p^\epsilon_i = p_i$ otherwise, then for any decision map $X'$, and $S^\epsilon = X'(p^\epsilon)$, it holds that $S \cap \{j; p_j > 0 \} \subseteq S^\epsilon \subseteq S$. \end{lemma} \begin{proof} First we show that if $S^\epsilon \setminus S \neq \emptyset$, then $S^\epsilon \notin D(v;p^\epsilon)$, contradicting the definition of $S^\epsilon$. Since $X$ is maximal, we must have $p_j > 0$ for all $j \in S^\epsilon \setminus S$. 
Thus, if $S^\epsilon \setminus S \neq \emptyset$, then for $j\in S^\epsilon \setminus S$, by the second condition in Lemma \ref{lemma:characterization}, $v(S) - p(S) \geq v(S^\epsilon) - p(S^\epsilon) + p_j> v(S^\epsilon) - p(S^\epsilon)$ so it must also be the case that $v(S) - p^\epsilon(S) > v(S^\epsilon) - p^\epsilon(S^\epsilon)$ since for $i \in S^\epsilon \setminus S$, $p^\epsilon_i = p_i$. We conclude that if $S^\epsilon \setminus S \neq \emptyset$, then $S^\epsilon \notin D(v;p^\epsilon)$. So, it must be the case that $S^\epsilon \subseteq S$. Now, we show that $S \cap \{j; p_j > 0\}\subseteq S^\epsilon$. If not, we show that $S^\epsilon \notin D(v;p^\epsilon)$. Indeed, since $S \in D(v;p)$, then $v(S) - p(S) \geq v(S^\epsilon) - p(S^\epsilon)$. Now, given that $p^\epsilon \leq p$ and there is some $j \in S \setminus S^\epsilon$ with $p_j^\epsilon < p_j$, $v(S) - p^\epsilon(S) > v(S^\epsilon) - p^\epsilon(S^\epsilon)$. \end{proof} \begin{proofof}{Theorem \ref{lemma:any_map}} Let $S = X(p)$ and let $p^\epsilon$ and $S^\epsilon$ be as in Lemma \ref{lemma:pre_any_map}. We want to show that $p^\epsilon$ is an $\epsilon$-Nash equilibrium. First we consider $i \in S$. Since $S \cap \{j; p_j > 0\} \subseteq S^\epsilon$ (by Lemma \ref{lemma:pre_any_map}), we know that $u^{X'}_i(p^\epsilon) = p_i^\epsilon$. In order to increase his utility by more than $\epsilon$, it is necessary for him to deviate to $\tilde{p}_i > p_i^\epsilon + \epsilon$. Let $\tilde{p} = (\tilde{p}_i, p_{-i}^\epsilon)$. We argue that $i \notin \tilde{S} := X'(\tilde{p})$. Assume otherwise. We note that by the first condition in Lemma \ref{lemma:characterization}, there is $T \not\ni i$, such that $v(T) - p(T) = v(S) - p(S) \geq v(\tilde{S}) - p(\tilde{S})$. 
We note that: $$\begin{aligned} v(T) - \tilde{p}(T) & = v(T) - p^\epsilon(T) \geq v(T) - p(T) \\ & \geq v(\tilde{S}) - p(\tilde{S}) > v(\tilde{S}) - \tilde{p}(\tilde{S}) \end{aligned}$$ where the last inequality follows from $\tilde{p}(\tilde{S}) = \tilde{p}_i + p^\epsilon(\tilde{S} \setminus i) > (p_i + \epsilon - \frac{\epsilon}{n}) + (p(\tilde{S} \setminus i) - (n-1) \frac{\epsilon}{n}) = p(\tilde{S})$, which contradicts that $\tilde{S} \in D(v;\tilde{p})$, so it must be the case that $i \notin \tilde{S}$. Now, consider a seller $i \notin S$. His utility $u^{X'}_i(p^\epsilon) = 0$ since $X'(p^\epsilon) \subseteq S$. In order to increase it by more than $\epsilon$, he needs to deviate to $\tilde{p}_i > \epsilon$. Again, we argue that for $\tilde{p} = (\tilde{p}_i, p_{-i}^\epsilon)$, $i \notin \tilde{S} := X'(\tilde{p})$. Assume otherwise. Since $i \in \tilde{S}$, by the second condition of Lemma \ref{lemma:characterization} we have $v(S) - p(S) \geq v(\tilde{S})-p(\tilde{S} \setminus i)$. Therefore: $$\begin{aligned} v(S) - p^\epsilon(S) & = v(S) - p(S) + [p(S) - p^\epsilon(S)] \\ & \geq v(\tilde{S}) - p(\tilde{S}\setminus i) + [p(\tilde{S} \setminus i) - p^\epsilon(\tilde{S} \setminus i)] \\ & = v(\tilde{S})-p^\epsilon(\tilde{S} \setminus i) > v(\tilde{S})-\tilde{p}(\tilde{S})\end{aligned}$$ which contradicts the fact that $\tilde{S} \in D(v;\tilde{p})$.\\ In order to see that $S^\epsilon = X'(p^\epsilon)$ and $S = X(p)$ produce the same welfare, notice that $S^\epsilon \subseteq S$ and all $j \in S \setminus S^\epsilon$ are priced at zero. Therefore: $v(S \vert S^\epsilon) = 0$. \end{proofof} \comment{ Let $p \in \textsc{Nash}^X$ and $S = X(p)$. Now, define $p^\epsilon_i = [p_i - \frac{\epsilon}{n}]^+$ for $i \in S$ and $p^\epsilon_i = p_i$ otherwise. Let $T = X'(p^\epsilon)$. First we argue that $S \cap \{j; p_j > 0\} \subseteq T \subseteq S$. First we show that if $T \setminus S \neq \emptyset$, then $T \notin D(v;p^\epsilon)$, contradicting the definition of $T$. 
Since $X$ is maximal, we must have $p_j > 0$ for all $j \in T \setminus S$. Thus, if $T \setminus S \neq \emptyset$, then for $j\in T \setminus S$, by the second condition in Lemma \ref{lemma:characterization}, $v(S) - p(S) \geq v(T) - p(T) + p_j> v(T) - p(T)$ so it must also be the case that $v(S) - p^\epsilon(S) > v(T) - p^\epsilon(T)$ since for $i \in T \setminus S$, $p^\epsilon_i = p_i$. We conclude that if $T \setminus S \neq \emptyset$, then $T \notin D(v;p^\epsilon)$. So, it must be the case that $T \subseteq S$. Now, we show that $S \cap \{j; p_j > 0\}\subseteq T$. If not, we show that $T \notin D(v;p^\epsilon)$. Indeed, since $S \in D(v;p)$, then $v(S) - p(S) \geq v(T) - p(T)$. Now, given that $p^\epsilon \leq p$ and there is some $j \in S \setminus T$ with $p_j^\epsilon < p_j$, $v(S) - p^\epsilon(S) > v(T) - p^\epsilon(T)$. Now we argue that $p^\epsilon \in \textsc{Nash}_\epsilon^{X'}$. First consider $i \in S$, we know that $u'_i(p^\epsilon) = p_i^\epsilon$, where $u'_i$ refers to the utility in the game defined by $X'$. In order to increase his utility by more then $\epsilon$, it is necessary for him to deviate to $p'_i > p_i^\epsilon + \epsilon$. Let $p' = (p'_i, p_{-i}^\epsilon)$. We argue that $i \notin S' := X'(p')$. Assume otherwise. We note that by the first condition in Lemma \ref{lemma:characterization}, there is $T \not\ni i$, such that $v(T) - p(T) = v(S) - p(S) \geq v(S') - p(S')$. We note that: $$\begin{aligned} v(T) - p'(T) & = v(T) - p^\epsilon(T) \geq v(T) - p(T) \\ & \geq v(S') - p(S') > v(S') - p'(S') \end{aligned}$$ where the last inequality follows from $p'(S') = p'_i + p^\epsilon(S' \setminus i) > (p_i + \epsilon - \frac{\epsilon}{n}) + (p(S' \setminus i) - (n-1) \frac{\epsilon}{n}) = p(S')$. Which contradicts that $S' \in D(v;p')$, so it must be the case that $i \notin S'$. Now, consider a seller $i \notin S$. His utility $u'_i(p^\epsilon) = 0$ since $X'(p^\epsilon) \subseteq S$. 
In order to increase it by more then $\epsilon$, he needs to deviate to $p'_i > \epsilon$. Again, we argue that for $p' = (p'_i, p_{-i}^\epsilon)$, $i \notin S' := X'(p')$. Assume otherwise. Since $i \in S'$, by the second condition of Lemma \ref{lemma:characterization} we have $v(S) - p(S) \geq v(S')-p(S' \setminus i)$. Therefore: $$\begin{aligned} v(S) - p^\epsilon(S) & = v(S) - p(S) + [p(S) - p^\epsilon(S)] \\ & \geq v(S') - p(S'\setminus i) + [p(S' \setminus i) - p^\epsilon(S' \setminus i)] \\ & = v(S')-p^\epsilon(S' \setminus i) > v(S')-p^\epsilon(S')\end{aligned}$$ } } \end{document}
\begin{document} \title{Is it possible that the Goldbach's and Twins primes conjectures are true with an algebraic approach?} \author{Juan Carlos Riano-Rojas} \address{Department of Mathematics, Universidad Nacional de Colombia, Caldas, Manizales} \curraddr{Departamento de Matemáticas y Estadística, Universidad Nacional de Colombia, Caldas, Manizales.} \email{[email protected]} \subjclass[2020]{Primary 11A41; Secondary 06A11} \dedicatory{This paper is dedicated to my wife Elisabeth and my daughters Tata and Jana, and in memory of my friend Omar } \keywords{Algebra, Goldbach's conjecture, coprimes} \begin{abstract} In this paper, using an algebraic approach, it is intended to show that the Goldbach's and Twin primes conjectures are true, building, for each $m>2$, an isomorphism between posets. One of the posets is the set of coprimes less than $m$, while the other is endowed with an operation that grants it an abelian group structure. Special features of this operation are demonstrated in the document, which allow characterizing the even numbers, as if they were their fingerprint; furthermore, such an operation locates, in a natural way, the pairs that satisfy the conjecture. Moreover, an algorithm that generates pairs of numbers that satisfy the conjecture is presented, and examples of some of the beautiful symmetries of the orbits of cyclic subgroups are shown, for the proposed abelian group. \end{abstract} \maketitle \section{Introduction} Goldbach's conjecture has been considered one of the great problems, still open, of mathematics, according to G.H. Hardy, in 1921. The interest of scientists in this conjecture can be verified by several investigations, which show results aimed at establishing bounds on numbers that meet the conjecture. 
Other articles transform the problem, seeking to represent some class of numbers, trying to reduce the bounds that guarantee these new representations; for instance, Sinisalo \cite{Sinisalo} reported a study checking this conjecture up to $4\cdot 10^{11}$ by the IBM 3083 mainframe with a vector processor; however, these authors did not develop an explicit proof. They only tested the conjecture. In \cite{Wu}, Wu tried to give a more comprehensive treatment of Chen's double sieve and prove an upper bound, sharper than $D(N) \leq 7.8209 \Theta(N)$.\\ In \cite{Tao}, Tao proved that every odd number $N$ greater than $1$ can be expressed as the sum of at most five primes, improving the result of Ramaré, where every even natural number can be expressed as the sum of at most six primes. They follow the circle method of Hardy-Littlewood and Vinogradov, together with Vaughan's identity; their additional techniques, which may be of interest for other Goldbach-type problems, include the use of smoothed exponential sums and optimization of the Vaughan identity parameters to save or reduce some logarithmic losses. Despite their good approximation, these authors do not use the construction of coprimes, nor the algebraic structure to prove that the conjecture holds for all even numbers greater than four. \\ In \cite{Zhao}, Zhao proved that every sufficiently large even integer can be represented as a sum of four squares of primes and $46$ powers of $2$, but they do not directly use the coprimes and again, they only establish a bound that exceeds the existing bound in previous works. In \cite{Ren}-\cite{Garaev}, the distribution of values of primes plus powers of two is investigated, seeking to represent natural numbers to improve the bounds that achieve such a representation. In \cite{Helfgott}, Helfgott presented a proof of the ternary Goldbach conjecture for all $n\geq C = 10^{27}$, following an approach based on the circle method, the large sieve and exponential sums. 
Some ideas coming from Hardy, Littlewood and Vinogradov are reinterpreted from a modern perspective. While all work has to be explicit, the focus is on qualitative gains. Nevertheless, they do not prove the strong Goldbach conjecture. They neither use the algebraic nature of the coprimes, nor do they generalize the multiplication of coprimes, over $\mathbb{Z}$, as it is carried out in this research. The Twins Primes Conjecture is one of the oldest in number theory. Many mathematicians continue to research and develop new techniques and approaches to try to address this conjecture. Tools such as sieve theory, harmonic analysis, and analytical methods have been used, but no definitive proof has been found so far. In \cite{Wang}, Wang presented a formal proof of the twin prime conjecture based on a novel mathematical model of two dimensional mirror primes $\mathbb{P}_\mu \subset \mathbb{P}\times \mathbb{P}$, and their symmetric properties, but the construction differs from that of this work, since it does not use the proposed isomorphism between posets. In this paper, the Goldbach's conjecture and the Twins primes conjecture are proven by building, for each $m>2$, an isomorphism between posets. One of the posets is the set of coprimes less than $m$, while the other is endowed with an operation that grants it an abelian group structure. Certain special characteristics of this operation are demonstrated, which serve to locate the pairs that satisfy the conjecture. This work is organized as follows: in the second section the basic theoretical notions of the coprime and non-coprime numbers generalized in $\mathbb{Z}$ are presented. In the third section the coprimes that are used to construct Euler's $\phi$ function are defined. An algorithm that generates pairs of numbers that satisfy the conjecture is presented. Furthermore, for each $m > 2$, an isomorphism between the posets is defined, in order to guarantee the proof of the central theorem. 
In the fourth section, the Goldbach's conjecture is proven. In the fifth section the twin primes conjecture is proved. Finally, in the last section, figures obtained from the cyclical subgroups of the group proposed in this work are included, which reflect underlying symmetries and regularity patterns, showing the implicit harmony in the coprimes that can be studied in future works. \section{Theoretical development} \subsection{Primes and Coprimes in $\mathbb{N}$} Primes and Coprimes: In arithmetic, Euler was one of the great mathematicians who saw the important relationship that prime numbers have in Mathematics. Euler proposed the function $\phi (m)$ that counts the number of coprimes or relative primes less than a natural number $m$. This function is a key to the development of this work. For this reason, a theoretical framework, necessary for the proof of the main result of this research, is presented below. \begin{definition}\label{def1} Let $\langle \mathbb{N},+,\cdot,|,0,1 \rangle $, be the set of natural numbers, with the operations sum, product and the conventional divisibility relation. Let $x , y \in \mathbb{N}$, be denoted by $ x \wedge y $, the greatest common divisor of $x$ and $y$; $x$ and $y$ are said to be coprimes (or relatively prime) if and only if $x \wedge y = 1$. Let $m \in \mathbb{N}$ be a fixed number. The set of all natural numbers coprime with the number $m$ will be denoted by $C(m)$, which is explicitly stated as: \[ C(m) = \lbrace s \in \mathbb{N} : m \wedge s = 1 \rbrace.\] In a complementary way, the set of non-coprime naturals with the number $m$, is constructed. This set will be denoted by $\lambda(m)$, that can be written as: \[ \lambda(m) = \lbrace s \in \mathbb{N} : m \wedge s \neq 1 \rbrace\] \end{definition} Remark \ref{remar1} indicates the basic notation and a property of number theory, which will be used to prove some theorems. 
\begin{remark}\label{remar1} The ideas discussed in this document use the following conventions: \begin{enumerate} \item $\mathbb{N}^{*}$, are the nonzero natural numbers. \item $\mathbb{P}$, represents the set of prime numbers. \item Given a natural number $m$ and $x \in \mathbb{N}, x \leq m $, it is said that, $m - x$, is the relative complement of $x$, with respect to $m$; then it will be called the co-opposite of $x$. \end{enumerate} \end{remark} In example \ref{ejemp1}, we recall the notions of coprimes and non-coprimes, for a number $m$. \begin{example}\label{ejemp1} Consider $m = 6$, its decomposition into prime factors $m = 2\cdot3 $, allows to identify the coprimes as the natural ones that, when decomposed into prime factors, none of their powers is divisible by $2$ or $3$, this is: \[C(6) = \lbrace s \in \mathbb{N} : 6 \wedge s = 1 \rbrace\] \[C(6) = \lbrace 1,5,7,11,13,17,19,23,29,31,35,37,41,43,...,55, ..., 65,... \rbrace. \] Another way of looking at it is: \[C(6) = \lbrace s \in \mathbb{N} : s = \prod^{\nu}_{j=0}p_{j}^{n_j}, p_{j} \in \mathbb{P}, p_{j} \neq 2, p_{j} \neq 3, n_j, \nu \in \mathbb{N}. \rbrace\] Figure \ref{fig1} shows the tree of natural numbers connected by the order relationship generated by divisibility. A partition of the two blue and magenta sets shows coprime and non-coprime numbers of the number $6$, respectively. In the second level of the tree, the primes $2,3,5,7,11...$, can be observed; the third level is made up of products of pairs of primes belonging to the second level; in the fourth level, products of triples of primes are shown, etc. Edges of the same color show the divisibility relationship between elements of the same set, while gray edges represent non-coprime numbers, which have some coprime factor; For example, the number $75$ has factors of the coprime and non-coprime set. 
\end{example} \begin{figure} \caption{$C(6)$ in blue and $\lambda(6)$ in magenta} \label{fig1} \end{figure} \subsection{Primes and Coprimes in $\mathbb{Z}$} The notion of coprimes and non-coprimes is generalized to $\mathbb{Z}$, seeking to build one of the required posets. \begin{definition}\label{def2} Let $\langle \mathbb{Z},+,\cdot,|,0,1 \rangle $, be the set of integer numbers, with the operations sum, product and the conventional divisibility relation. Let $x , y \in \mathbb{Z}$, be denoted by $ x \wedge y $, the greatest common divisor of $x$ and $y$; $x$ and $y$ are said to be coprimes (or relatively prime) if and only if $x \wedge y = 1$. The properties of arithmetic that help to characterize the problem are based on a well-known property that is recalled in the comment \ref{remar2}. \begin{remark}\label{remar2} Let $ x, y \in \mathbb{Z}^{*}$, then, it is had that $ x \wedge y = d \Leftrightarrow (\exists s, t \in \mathbb{Z}) ( xs + yt = d )$ (greatest common divisor property)\label{ecua1} \end{remark} Let $m \in \mathbb{Z}$ be fixed, to the set of all coprime numbers whith number $m$, that will be noted by $\mathbb{C}(m)$, which is explicitly stated as: \[\mathbb{C}(m) = \lbrace s \in \mathbb{Z} : m \wedge s = 1 \rbrace.\] In a complementary way, the set of non-coprime naturals with the number $m$, is constructed. This set will be denoted by $\Lambda(m)$, that can be written as: \[ \Lambda(m) = \lbrace s \in \mathbb{Z} : m \wedge s \neq 1 \rbrace\]. \end{definition} The notion of congruence modulo $m$, can be restricted to $\mathbb{C}(m)$, as follows \begin{definition}\label{def3} Let $x,y \in \mathbb{C}(m)$. It is said that, \[ x \equiv_{\mathbb{C}(m)} y \Leftrightarrow \exists k \in \mathbb{Z} \left(x - y = mk\right)\] \end{definition} In Appendix \ref{appendix}, the proof of the following result is performed. 
\begin{theorem}\label{teo1} For each $m>1$, it holds that $\equiv_{\mathbb{C}(m)}$ is an equivalence relation on $\mathbb{C}(m)$ \end{theorem} The quotient set is built in the traditional way, with the equivalence relation $\equiv_{\mathbb{C}(m)}$. \begin{definition}\label{def4} Let $a \in \mathbb{C}(m)$. The equivalence class of $a$, for the relationship $\equiv_{\mathbb{C}(m)}$ is built as: \[ \overline{a} = \left\lbrace z \in \mathbb{C}(m) : z \equiv_{\mathbb{C}(m)} a \right\rbrace \] The set of equivalence classes is called the quotient set and will be denoted by \[\mathbb{C}(m)/\equiv_{\mathbb{C}(m)}=\left\lbrace \overline{z}: z \in \mathbb{C}(m) \right\rbrace \] \end{definition} \begin{remark}\label{remar3} The cardinal of the quotient set coincides with the number of coprimes less than $m$, which is the value of the function $\phi(m)$, which was proposed by Euler. This quotient set is also noted by $\Gamma(m)=\mathbb{C}(m)/\equiv_{\mathbb{C}(m)}$. This will be used in section \ref{secc3}, in more detail. \end{remark} The multiplication $\cdot$ of $\mathbb{Z}$ is restricted to $\mathbb{C}(m)$, to endow it with an algebraic structure in the natural way, as follows: \begin{definition}\label{def5} Let $x,y \in \mathbb{C}(m)$. The product in $\mathbb{C}(m)$, is defined as: \[ \cdot:\mathbb{C}(m)\times \mathbb{C}(m) \longrightarrow \mathbb{C}(m)\] \[ \; \; \; (x , y) \longmapsto \cdot(x,y)= x\cdot y\] \end{definition} All product properties are inherited to $\mathbb{C}(m)$. In appendix \ref{appendix}, only the closure property is demonstrated, as it is the least obvious. \begin{theorem}\label{teo2} For each $m>1$, the $\cdot$ is an operation on $\mathbb{C}(m)$, which is closed, modulative, associative and commutative \end{theorem} The $\equiv_{\mathbb{C}(m)}$, is compatible with the product $\cdot$ over $\mathbb{C}(m)$. 
In appendix \ref{appendix} the proof of the compatibility of the relation is included, with the aim of providing the quotient set $\mathbb{C}(m)/ \equiv_{\mathbb{C}(m)}$, with an algebraic structure. \begin{theorem}\label{teo3} For each $m>1$, it is had that \\ $\forall x,y,u,v \in \mathbb{C}(m) \left( x \equiv_{\mathbb{C}(m)} y \wedge u \equiv_{\mathbb{C}(m)} v \Rightarrow x \cdot u \equiv_{\mathbb{C}(m)} y\cdot v \right) $ \end{theorem} This allows us to extend the product in $\mathbb{C}(m)$, to the quotient set $\mathbb{C}(m)/\equiv_{\mathbb{C}(m)}$ as follows \begin{definition}\label{def6} Let $\overline{a},\overline{b} \in \mathbb{C}(m)/\equiv_{\mathbb{C}(m)}$, the product $\odot$ is defined as: \[ \odot :\mathbb{C}(m)/\equiv_{\mathbb{C}(m)} \times \mathbb{C}(m)/\equiv_{\mathbb{C}(m)} \longrightarrow \mathbb{C}(m)/\equiv_{\mathbb{C}(m)}\] \[ \; \; \; \; \; (\overline{a} , \overline{b}) \longmapsto \odot(\overline{a},\overline{b})= \overline{a}\odot\overline{b}=\overline{a\cdot b} \] \end{definition} This multiplication $\odot$, gives algebraic structure to the set quotient. \begin{theorem}\label{teo4} For each $m>1$, it is had that $\left\langle \mathbb{C}(m)/\equiv_{\mathbb{C}(m)},\odot,\overline{1} \right\rangle $, is an abelian group. \end{theorem} \begin{proof} Only the invertive property will be proved, since associativity, commutative and modulative are inherited from $\mathbb{C}(m)$. Let $\overline{x} \in \mathbb{C}(m)/\equiv_{\mathbb{C}(m)}$, be arbitrary, such that: \begin{enumerate} \item $x \wedge m = 1$, by definition of $x \in \mathbb{C}(m)$. \item $\exists s , t \in \mathbb{Z} \left( x\cdot s + m\cdot t = 1 \right) $ is had by \ref{ecua1}. \item $\exists s , t \in \mathbb{Z} \left( x\cdot s - 1 = m\cdot (-t)\right) $, solving for $m\cdot t$ on the previous line. 
\item $\exists s \in \mathbb{Z} \left( x \cdot s \equiv_{\mathbb{C}(m)} 1 \right) $, by definition of $\equiv_{\mathbb{C}(m)}$ \item $\exists s \in \mathbb{C}(m) \left( \overline{x\cdot s}=\overline{1}\right) $, since the classes are the same for related elements. \item $\exists s \in \mathbb{C}(m) \left( \overline{x}\odot \overline{s}=\overline{1} \right) $, for compatibility. \end{enumerate} The last one is equivalent to the fact that $x$ has a multiplicative inverse \end{proof} In appendix \ref{appendix}, it is shown that the co-opposites of module $\overline{1}$ are nilpotent. \begin{theorem}\label{teo5} For each $m>1$, in $\left\langle \mathbb{C}(m)/\equiv_{\mathbb{C}(m)},\odot,\overline{1} \right\rangle $, it is had that $\overline{m-1}\odot\overline{m-1}=\overline{1}$. \end{theorem} With the usual order of the natural numbers, between $1$ and $m$, in definition \ref{def7}, it is intended to provide the quotient set, with a structure of total order. \begin{definition}\label{def7} Let $\overline{x},\overline{y} \in \mathbb{C}(m)/\equiv_{\mathbb{C}(m)}$. We will say that $\overline{x} \preceq \overline{y}$, if only if, $\exists s, t \in \left\lbrace 1,2,...,m-1\right\rbrace \left(s\leq t \wedge s\equiv_{\mathbb{C}(m)} x \wedge t\equiv_{\mathbb{C}(m)} y \right)$. \end{definition} The quotient set has the structure of poset, see proof in appendix \ref{appendix}. \begin{theorem}\label{teo6} For each $m>1$, it follows that $\left\langle \mathbb{C}(m)/\equiv_{\mathbb{C}(m)},\preceq \right\rangle $ is a poset. Also, $\preceq$ is total order. \end{theorem} The following property shows an important characteristic that relates the module $\overline{1}$, the co-opposite to the module $\overline{m-1}$, the operation $\odot$ and the order $\preceq$ previously defined. 
\begin{theorem}\label{teo7} For each $m>1$, and for each $\overline{x} \in \mathbb{C}(m)/\equiv_{\mathbb{C}(m)}$, if $\overline{1} \preceq \overline{x} \preceq \overline{m-1}$ then, when operating by $\odot$ on the inequality, each term by $\overline{m-1}$ (the co-opposite of $\overline{1}$) has that $\overline{m - 1}\odot \overline{1} \succeq \overline{m - 1}\odot \overline{x} \succeq \overline{m - 1}\odot\overline{m-1}$, which is equivalent to that $\overline{1} \preceq \overline{m - x} \preceq \overline{m-1}$. \end{theorem} \begin{proof} Let $\overline{x} \in \mathbb{C}(m)/\equiv_{\mathbb{C}(m)}$, such that $\overline{1} \preceq \overline{x} \preceq \overline{m-1}$: \begin{enumerate} \item $\overline{m - 1}\odot \overline{1} = \overline{m -1}$, by modulative property. \item $\overline{m - 1}\odot\overline{x} = \overline{(m - 1)\cdot x} = \overline{m\cdot x - x}= \overline{m - x}$, by compatibility and distributive property. \item $\overline{m - 1}\odot\overline{m-1} = \overline{1}$, by the idempotency property of the co-opposite. \end{enumerate} It only remains to show that the order is preserved, for this without loss of generality, taking $1\leq x \leq m-1$, the usual order in integers, and multiplying by $-1$, then adding $m$ to each term, we obtain $m-1\geq m - x \geq 1$. Finally, the inequality is given, by applying the equivalence classes to each member of the inequality, this is $\overline{m - 1}\succeq \overline{m - x} \succeq \overline{1}$. 
\end{proof} \begin{theorem}\label{teo8} For each $m>1$, and for each $\overline{x} \in \mathbb{C}(2m)/\equiv_{\mathbb{C}(2m)}$, if $\overline{1} \preceq \overline{x} \preceq \overline{m}$ then, by operating $\odot$ to the inequality each term times $\overline{2m-1}$ (the co-opposite of $\overline{1}$ in $\mathbb{C}(2m)/\equiv_{\mathbb{C}(2m)}$), we have that $\overline{2m - 1}\odot \overline{1} \succeq \overline{2m - 1}\odot \overline{x} \succeq \overline{2m - 1}\odot\overline{m}$, which is equal to $\overline{m} \preceq \overline{2m - x} \preceq \overline{ 2m-1}$. \end{theorem} \begin{proof} Let $\overline{x} \in \mathbb{C}(2m)/\equiv_{\mathbb{C}(2m)}$, such that $\overline{1} \preceq \overline{x} \preceq \overline{m}$: \begin{enumerate} \item $\overline{2m - 1}\odot \overline{1} = \overline{2m -1}$, by modulative property. \item $\overline{2m - 1}\odot\overline{x} = \overline{(2m - 1)\cdot x} = \overline{2m\cdot x - x}= \overline{2m - x}$, for compatibility and distributive properties in $\mathbb{Z}$. \item $\overline{2m - 1}\odot\overline{m} = \overline{2m^2 - m} =\overline{2m - m} = \overline{m} $, by distributive property and $2m^2 \equiv_{2m} 2m $. \end{enumerate} It only remains to show that order preservation is obtained, without loss of generality, taking $1\leq x \leq m$ the usual order in integers when multiplied by $-1$ and adding $2m$ to each term, we obtain $2m-1\geq 2m - x \geq m$. Finally, the inequality is given by applying equivalence classes. \end{proof} \section{Finite Euler coprimes}\label{secc3} In this section we will use the set defined by Euler to build the function $\phi$ that counts the coprimes, less than a natural number. This given set with a special relationship turns out to be a partially ordered set with a minimum, and with atomic or prime elements. In addition, the function $\phi$ and its most important properties that were used in the construction of an algorithm will be remembered. 
Such an algorithm is implemented in Matlab2020b, as is presented in the last section. It calculates all the primes that satisfy the conjecture when the user enters a number $2m\geq 4$. \begin{definition}\label{def8} Let $m \in \mathbb{N}^{*}$ be fixed. The set of coprimes less than $m$ will be denoted by $\gamma(m)$. Then, we have: $\gamma(m)= \{s \in \mathbb{N} : s \wedge m = 1, s < m \}$. We define the function $\phi(m)$ as the cardinal of $\gamma(m)$. Furthermore, the isomorphism between the posets $\langle \gamma(m), \leq\rangle$ and $\langle \Gamma(m), \preceq \rangle$ is given by $\theta: \gamma(m)\longrightarrow \Gamma (m)$ defined by $\theta(s)= \overline{s}$. For this reason, we have that $\phi(m) = \vert \gamma(m)\vert = \vert \Gamma(m)\vert$. \end{definition} The basic properties of the $\phi$ function that optimize computing time are indicated. \begin{theorem}\label{teo10} The main properties used in the algorithm proposed in this work are presented. \begin{enumerate} \item $\phi(1) = 1$. \item $\phi(p) = p - 1$ if and only if $p$ is prime. \item If $m$ and $n$ are coprime then $\phi(m\cdot n) =\phi(m)\cdot \phi(n)$. \item If $m = \prod^{\nu}_{j=1}p_{j}^{n_j}, p_{j} \in \mathbb{P}, n_j, \nu \in \mathbb{N^{*}}$ is the prime factorization of $m$, unique except for the order of the factors, then $\phi(m) = \prod^{\nu}_{j=1}p_{j}^{n_j-1}(p_{j} - 1)$. \item If $m$ and $n$ are nonzero then $\phi(m\cdot n) \geq \phi(m)\cdot \phi(n)$. \end{enumerate} \end{theorem} The following lemma allows us to guarantee the existence of primes in $\gamma(m)$. \begin{lemma}\label{lema1} $\forall m>2 \exists p \in \mathbb{P} \left( p \in \gamma(m)\right) $. \end{lemma} \begin{proof} \textbf{Reductio ad absurdum.} Suppose that there exists $m'>2$ such that, for every prime $p$, we have that $p \not\in \gamma(m')$. This implies that, for any prime $p$, one has that $p \wedge m' \neq 1$ or $p>m'$.
This means that, all primes less than $m'$ divide it, then $\phi(m')=1$, absurd, so $\forall m>2 \left( \phi(m)>1\right)$. \end{proof} In figure \ref{fig2}, it can be seen that the pairs that satisfy the conjecture are in the set $\gamma(2m) \times \gamma(2m)$ or the pairs of the classes that satisfy the conjecture are in the quotient set $\Gamma(2m)\times \Gamma(2m)$. \begin{figure} \caption{$2m = 36$ in blue are coprime and non-coprime in magenta, those that satisfy the conjecture have a circle with an cross} \label{fig2} \end{figure} An algorithm is presented that calculates the pairs of primes $p,q \in \mathbb{P}$, that satisfy Goldbach's theorem for any $2m > 4$ pair, using Euler's $\phi(2m)$ function. In figure \ref{fig3}, we can see the coprimes in blue, that are in the lower part of the line. The non-coprimes, also shown with magenta color are always on top like in a kind of arc. \begin{algorithm} \small \caption{Search algorithm for prime pairs that satisfy the conjecture}\label{alg1} \begin{flushleft} $2m \gets input(\text{"Enter an even number: "})$ \\ $Meet := $ Meet a couple of primes that satisfy the conjecture for $2m$\\ $obj1 \gets primes(2m)$\\ $Meet \gets $ $\emptyset$\\ For $i = 1: length(obj1)$\\ \, \, if $obj1(i) \wedge 2m - obj1(i))==1$\\ \, \, \, \, if $\phi(2m - obj1(i)) == 2m - obj1(i) -1$\\ \, \, \, \, \, \, $Meet = [Meet ;[obj1(i),2n - obj1(i);2n - obj1(i),obj1(i)]]$\\ \, \, if $obj1(i) - 1 == \phi(2n - obj1(i))$\\ \, \, \, \, $Meet = [Meet ;[obj1(i),2n - obj1(i);2n - obj1(i),obj1(i)]]$\\ Return $Meet$ \end{flushleft} \end{algorithm} \begin{example}\label{ejemp2} Given $2m = 36$, calculating $\Gamma (2m)$, the set of coprimes of $2m$ and the result of operating $\odot$ on $\Gamma(2m)$ is obtained, validating the guaranteed properties for any pair $2m$ with $m>1$. 
\begin{enumerate} \item $\Gamma(36) = \left\lbrace \overline{1},\overline{5},\overline{7},\overline{11},\overline{13},\overline{17},\overline{19},\overline{23},\overline{25},\overline{29},\overline{31},\overline{35}\right\rbrace$. \item It is observed $\odot$ in table\ref{tabla1} of Cayley. \begin{table}[ht] \centering \begin{tabular}{|>{\columncolor{gray!30}}c|>{\columncolor{blue!30}}c|c|c|c|c|>{\columncolor{blue!30}}c|>{\columncolor{magenta!30}}c|c|c|c|c|>{\columncolor{magenta!30}}c|} \hline \rowcolor{gray!30} \cellcolor{white}{$\odot$} & $\overline{1}$ & $\overline{5}$ & $\overline{7}$ & $\overline{11}$ & $\overline{13}$ & $\overline{17}$ & $\overline{19}$ & $\overline{23}$ & $\overline{25}$ & $\overline{29}$ & $\overline{31}$ & $\overline{35}$\\ \hline \rowcolor{blue!30} \cellcolor{gray!30}{$\overline{1}$} & \cellcolor{green!20}{$\overline{1}$} & $\overline{5}$ & $\overline{7}$ & $\overline{11}$ & $\overline{13}$ & $\overline{17}$ & $\overline{19}$ & $\overline{23}$ & $\overline{25}$ & $\overline{29}$ & $\overline{31}$ & \cellcolor{orange!20}{$\overline{35}$} \\ \hline $\overline{5}$ & $\overline{5}$ & $\overline{25}$ & \cellcolor{orange!20}{$\overline{35}$} & $\overline{19}$ & $\overline{29}$ & $\overline{13}$ & $\overline{23}$ & $\overline{7}$ & $\overline{17}$ & \cellcolor{green!20}{$\overline{1}$} & $\overline{11}$ & $\overline{31}$\\ \hline $\overline{7}$ & $\overline{7}$ & \cellcolor{orange!20}{$\overline{35}$} & $\overline{13}$ & $\overline{5}$ & $\overline{19}$ & $\overline{11}$ & $\overline{25}$ & $\overline{17}$ & $\overline{31}$ & $\overline{23}$ & \cellcolor{green!20}{$\overline{1}$} & $\overline{29}$\\ \hline $\overline{11}$ & $\overline{11}$ & $\overline{19}$ & $\overline{5}$ & $\overline{13}$ & \cellcolor{orange!20}{$\overline{35}$} & $\overline{7}$ & $\overline{29}$ & \cellcolor{green!20}{$\overline{1}$} & $\overline{23}$ & $\overline{31}$ & $\overline{17}$ & $\overline{25}$\\ \hline $\overline{13}$ & $\overline{13}$ & $\overline{29}$ & 
$\overline{19}$ & \cellcolor{orange!20}{$\overline{35}$} & $\overline{25}$ & $\overline{5}$ & $\overline{31}$ & $\overline{11}$ & \cellcolor{green!20}{$\overline{1}$} & $\overline{17}$ & $\overline{7}$ & $\overline{23}$\\ \hline \rowcolor{blue!30} \cellcolor{gray!30}{$\overline{17}$} & $\overline{17}$ & $\overline{13}$ & $\overline{11}$ & $\overline{7}$ & $\overline{5}$ & \cellcolor{green!20}{$\overline{1}$} & \cellcolor{orange!20}{$\overline{35}$} & $\overline{31}$ & $\overline{29}$ & $\overline{25}$ & $\overline{23}$ & $\overline{19}$\\ \hline \rowcolor{magenta!30} \cellcolor{gray!30}{$\overline{19}$} & $\overline{19}$ & $\overline{23}$ & $\overline{25}$ & $\overline{29}$ & $\overline{31}$ & \cellcolor{orange!20}{$\overline{35}$} & \cellcolor{green!20}{$\overline{1}$} & $\overline{5}$ & $\overline{7}$ & $\overline{11}$ & $\overline{13}$ & $\overline{17}$\\ \hline $\overline{23}$ & $\overline{23}$ & $\overline{7}$ & $\overline{17}$ & \cellcolor{green!20}{$\overline{1}$} & $\overline{11}$ & $\overline{31}$ & $\overline{5}$ & $\overline{25}$ & \cellcolor{orange!20}{$\overline{35}$} & $\overline{19}$ & $\overline{29}$ & $\overline{13}$\\ \hline $\overline{25}$ & $\overline{25}$ & $\overline{17}$ & $\overline{31}$ & $\overline{23}$ & \cellcolor{green!20}{$\overline{1}$} & $\overline{29}$ & $\overline{7}$ & \cellcolor{orange!20}{$\overline{35}$} & $\overline{13}$ & $\overline{5}$ & $\overline{19}$ & $\overline{11}$\\ \hline $\overline{29}$ & $\overline{29}$ & \cellcolor{green!20}{$\overline{1}$} & $\overline{23}$ & $\overline{31}$ & $\overline{17}$ & $\overline{25}$ & $\overline{11}$ & $\overline{19}$ & $\overline{5}$ & $\overline{13}$ & \cellcolor{orange!20}{$\overline{35}$} & $\overline{7}$\\ \hline $\overline{31}$ & $\overline{31}$ & $\overline{11}$ & \cellcolor{green!20}{$\overline{1}$} & $\overline{17}$ & $\overline{7}$ & $\overline{23}$ & $\overline{13}$ & $\overline{29}$ & $\overline{19}$ & \cellcolor{orange!20}{$\overline{35}$} & $\overline{25}$ & 
$\overline{5}$\\ \hline \rowcolor{magenta!30} \cellcolor{gray!30}{$\overline{35}$} & \cellcolor{orange!20}{$\overline{35}$} & $\overline{31}$ & $\overline{29}$ & $\overline{25}$ & $\overline{23}$ & $\overline{19}$ & $\overline{17}$ & $\overline{13}$ & $\overline{11}$ & $\overline{7}$ & $\overline{5}$ & \cellcolor{green!20}{$\overline{1}$}\\ \hline \end{tabular} \caption{Table of Cayley of $\odot$ on $\Gamma(36)$}\label{tabla1} \end{table} \end{enumerate} \end{example} \begin{remark}\label{remar4} Table \ref{tabla1}, of Cayley shows the operation $\odot$ on $\Gamma ( 2m = 36)$, in addition to having beautiful symmetries, the following properties are observed: \begin{enumerate} \item $\odot$ is therefore closed, the results of the operation at $\overline{a} \odot \overline{b} \in \Gamma(2m)$ for each $\overline{a}, \overline{b} \in \Gamma(2m)$. \item $\odot$ is commutative only observing the equality between the upper triangular submatrix and the lower triangular submatrix. \item $\odot$ is modulative and the module is $\overline{1}$. The blue the rows columns can verify this property. \item In $\odot$ not only the co-opposite elements (orange cells), but also The module (green cells) are symmetry. \item $\odot$ is invertive, because the green cells guarantee the pair of inverses whose product is modulo $\overline{1}$. \item The magenta y column guarantees property (b) of Theorem \ref{teo5}, since operating any element $\overline{x} \in \Gamma(2m)$ with $\overline{2m - 1}$ through $\odot$ gives its co-opposite $\overline{2m - x}$ as a result. Furthermore, while the elements of the module $\overline{1}$ column increase, the elements of the $\overline{2m - 1}$ column decrease. Just to fix ideas, taking the element $x = \overline{19}$ and operating it with $\overline{2m - 1}$, it results in $\overline{17}$, which is the co-opposite of $\overline{19}$, since $\overline{2m - 19} = \overline{17}$. 
\item In the dark blue and magenta rows and columns, it can be seen that property (c) of Theorem \ref{teo5} holds; for example, when taking $a_{m} = \overline{17}$ and operating it with $\overline{2m - 1}$ through $\odot$, it gives its co-opposite $2m - a_{m} = \overline{19} = a_{m+1}$ as a result. \end{enumerate} \end{remark} Let's observe in the example that, the $(\overline{5},\overline{31}),(\overline{7},\overline{29}),(\overline{13},\overline{23}),(\overline{17},\overline{19})$ couples satisfy the conjecture and these are found in the blue and magenta columns. There are also the $(\overline{1},\overline{35}),(\overline{11},\overline{25})$ couples that, although their sum is even, do not satisfy the conjecture, because one of the coordinates is not a prime number. Being the commutative operation symmetric pairs that also satisfy the conjecture need not be analyzed; then, only half the elements of $\Gamma(2m)$ need to be traversed. Another present relationship is that, in the blue column, going through the coprimes smaller than $m$, an ascending order is observed; nevertheless, in the magenta column, the co-opposites are ordered in a decreasing manner, but the coprimes greater than $m$ are located there. This feature is important for proving the central outcome. \section{Proof of Goldbach's Conjecture} The Theorem \ref{teo11} is presented in the section, which is the main result of this investigation. When reviewing the possible regions where the conjecture is fulfilled, one must search in the line $x + y = 2m$, when solving for $y$ as a function of $x$. The search is refined when $x$ is prime, leading to dealing with primes between $1$ and $2m$; for $x$ and, for $y$, their values vary between $1$ and $2m$. It should select those and values that are prime. Figure \ref{fig3} shows all the co-primes with blue for the number $2m$ and the non-coprimes in magenta. 
The couples that comply with a circle and a cross are also marked if the number is also the co-opposite of $x$. \begin{figure} \caption{In $2m = 36$ the coprimes are in blue and the non-coprimes are in magenta. Those that satisfy the conjecture have a circle with a cross} \label{fig3} \end{figure} In the previous section, the operation $\odot$ on $\Gamma (2m)$ was built. Some properties that are necessary for the proof of the conjecture were demonstrated. \begin{theorem}[Goldbach's Conjecture]\label{teo11} $\forall m > 2 \exists p , q \in \mathbb{P} \left( p + q = 2m \right)$, that is, "for all natural $m \geq 2$, there exist primes $p, q$, such that $p + q = 2m$". \end{theorem} \begin{proof} \textbf{Reductio ad absurdum} Let's suppose that, there is $2m' > 4$, such that, for all $p, q \in \mathbb{P}$, we have to have $p + q \neq 2m'$ , which is equivalent to the fact that the ordered pairs $(p,q)\in \mathbb{P} \times \mathbb{P}$ are not on the line $x + y = 2m'$, solving for $y$ in terms of $x$, one has that of all the pairs $(x, 2m' -x)$ that are on the line, one of the two coordinates must not be a prime number. By constructing the group $\left\langle \Gamma(2m'), \odot, \overline{1}\right\rangle $ for $2m'>2$ by theorem \ref{teo4}, where are realized in the Cayley table the operation $\odot$. In the columns $\overline{1}$ and $\overline{2m' -1}$, find all the pairs of coprimes that are in line $x + y = 2m '$ and which are candidates to be the pairs that satisfy the conjecture. 
On the other hand, the Lemma \ref{lema1} guarantees the existence of primes in $\gamma(m')$, therefore, taking all the primes $x \in \gamma(2m')$, such that, we have that $1 \leq x \leq m'$ are coprime with $m'$, and their co-opposites $2m' - x \in \gamma(2m')$ must not be prime by hypothesis, Thus, all $2m ' - x$ between $m'$ and $2m'-1$ must be composite, this implies that for each $2m'-x$ in this region, there exist $s,t \in \gamma(2m')$ with $1<s <2m'$ and $1<t <2m'$ such that $2m' - x = s\cdot t$, applying the isomorphism $\theta$ by definition \ref{def8}, we have that, $\overline {m'} \preceq \overline{2m' - x}=\overline{s\cdot t} \preceq \overline{2m'-1}$. By theorem \ref{teo8} operating each term of the last inequality by $\overline{2m' - 1}$ through $\odot$, we have that, $\overline{1} \preceq \overline{x} = \overline{2m' -st} \preceq \overline{m'}$, which is equivalent by definition \ref{def6} to the inequality $\overline{1} \preceq \overline{x}= \overline{2m ' -s}\odot \overline{t} \preceq \overline{m'}$. Applying now the isomorphism $\theta^{-1}$ of poset, we have that $1 \leq x=(2m' -s)\cdot t \leq m'$, which means that all these coprimes $x$ between $1$ and $m'$ are composite numbers in $\gamma(m)$, absurd since it contradicts the existence of primes in this area, this is contradicting the Lemma \ref{lema1}. \end{proof} As a direct consequence of the proof of the previous theorem we have. \begin{corollary}\label{coro1} $\forall m > 2\exists p \in \mathbb{P}\left( m < p < 2m\right) $. 
\end{corollary} \section{Proof of the twin primes conjecture} The purpose of this section is to demonstrate the twin primes conjecture. To visualize the ideas, Table~\ref{tabla2} shows the operation $\odot$ on $\Gamma(2m = 26)$; in addition to having beautiful symmetries, the following properties are observed. \begin{definition}\label{def9} Given $p, q \in \mathbb{P}$, it is said that $p,q$ are twin primes, if and only if, $\vert p - q \vert = 2$. We will denote by $\mathbb{F} = \left\lbrace (p,q) \in \mathbb{P}\times \mathbb{P} : \{p,q\} \text{ is a pair of twin primes} \right\rbrace$ the set formed by pairs of twin primes. \end{definition} \begin{remark}\label{remark5} If $p \in \mathbb{P}$ is odd then $\phi(2p)=\phi(p) = p-1$; when explicitly calculating $\gamma(2p)$, it is observed that its elements are the odd numbers less than $2p$ other than $p$ itself. Note this result in the following example: $\phi(2p=26)=\phi(p=13)=12$; in detail, $\gamma(2p=26)=\left\lbrace 1,3,5,7,9,11,15,17,19,21,23,25 \right\rbrace$. \end{remark} \begin{example}\label{ejemp3} Given $2m = 26$, we calculate $\Gamma (2m)$, the set of coprimes of $2m$, and the result of operating $\odot$ on $\Gamma(2m)$; this reflects, for this even number $2m$, the properties proved in the previous section: \begin{enumerate} \item $\Gamma(26) = \left\lbrace \overline{1},\overline{3},\overline{5},\overline{7},\overline{9},\overline{11},\overline{15},\overline{17},\overline{19},\overline{21},\overline{23},\overline{25}\right\rbrace$. \item The operation $\odot$ is shown in Table~\ref{tabla2}.
\begin{table}[ht] \centering \begin{tabular}{|>{\columncolor{gray!30}}c|>{\columncolor{blue!30}}c|c|c|c|c|>{\columncolor{blue!30}}c|>{\columncolor{magenta!30}}c|c|c|c|c|>{\columncolor{magenta!30}}c|} \hline \rowcolor{gray!30} \cellcolor{white}{$\odot$} & $\overline{1}$ & $\overline{3}$ & $\overline{5}$ & $\overline{7}$ & $\overline{9}$ & $\overline{11}$ & $\overline{15}$ & $\overline{17}$ & $\overline{19}$ & $\overline{21}$ & $\overline{23}$ & $\overline{25}$\\ \hline \rowcolor{blue!30} \cellcolor{gray!30}{$\overline{1}$} & \cellcolor{green!20}{$\overline{1}$} & $\overline{3}$ & $\overline{5}$ & $\overline{7}$ & $\overline{9}$ & $\overline{11}$ & $\overline{15}$ & $\overline{17}$ & $\overline{19}$ & $\overline{21}$ & $\overline{23}$ & \cellcolor{orange!20}{$\overline{25}$} \\ \hline $\overline{3}$ & $\overline{3}$ & $\overline{9}$ & $\overline{15}$ & $\overline{21}$ & \cellcolor{green!20}{$\overline{1}$} & $\overline{7}$ & $\overline{19}$ & \cellcolor{orange!20}{$\overline{25}$} & $\overline{5}$ & $\overline{11}$ & $\overline{17}$ & $\overline{23}$\\ \hline $\overline{5}$ & $\overline{5}$ & $\overline{15}$ & \cellcolor{orange!20}{$\overline{25}$} & $\overline{9}$ & $\overline{19}$ & $\overline{3}$ & $\overline{23}$ & $\overline{7}$ & $\overline{17}$ & \cellcolor{green!20}{$\overline{1}$} & $\overline{11}$ & $\overline{21}$\\ \hline $\overline{7}$ & $\overline{7}$ & $\overline{21}$ & $\overline{9}$ & $\overline{23}$ & $\overline{11}$ & \cellcolor{orange!20}{$\overline{25}$} & \cellcolor{green!20}{$\overline{1}$} & $\overline{15}$ & $\overline{3}$ & $\overline{17}$ & $\overline{5}$ & $\overline{19}$\\ \hline $\overline{9}$ & $\overline{9}$ & \cellcolor{green!20}{$\overline{1}$} & $\overline{19}$ & $\overline{11}$ & $\overline{3}$ & $\overline{21}$ & $\overline{5}$ & $\overline{23}$ & $\overline{15}$ & $\overline{7}$ & \cellcolor{orange!20}{$\overline{25}$} & $\overline{17}$\\ \hline \rowcolor{blue!30} \cellcolor{gray!30}{$\overline{11}$} & $\overline{11}$ & 
$\overline{7}$ & $\overline{3}$ & \cellcolor{orange!20}{$\overline{25}$} & $\overline{21}$ & $\overline{17}$ & $\overline{9}$ & $\overline{5}$ & \cellcolor{green!20}{$\overline{1}$} & $\overline{23}$ & $\overline{19}$ & $\overline{15}$\\ \hline \rowcolor{magenta!30} \cellcolor{gray!30}{$\overline{15}$} & $\overline{15}$ & $\overline{19}$ & $\overline{23}$ & \cellcolor{green!20}{$\overline{1}$} & $\overline{5}$ & $\overline{9}$ & $\overline{17}$ & $\overline{21}$ & \cellcolor{orange!20}{$\overline{25}$} & $\overline{3}$ & $\overline{7}$ & $\overline{11}$\\ \hline $\overline{17}$ & $\overline{17}$ & \cellcolor{orange!20}{$\overline{25}$} & $\overline{7}$ & $\overline{15}$ & $\overline{23}$ & $\overline{5}$ & $\overline{21}$ & $\overline{3}$ & $\overline{11}$ & $\overline{19}$ & \cellcolor{green!20}{$\overline{1}$} & $\overline{9}$\\ \hline $\overline{19}$ & $\overline{19}$ & $\overline{5}$ & $\overline{17}$ & $\overline{3}$ & $\overline{15}$ & \cellcolor{green!20}{$\overline{1}$} & \cellcolor{orange!20}{$\overline{25}$} & $\overline{11}$ & $\overline{23}$ & $\overline{9}$ & $\overline{21}$ & $\overline{7}$\\ \hline $\overline{21}$ & $\overline{21}$ & $\overline{11}$ & \cellcolor{green!20}{$\overline{1}$} & $\overline{17}$ & $\overline{7}$ & $\overline{23}$ & $\overline{3}$ & $\overline{19}$ & $\overline{9}$ & \cellcolor{orange!20}{$\overline{25}$} & $\overline{15}$ & $\overline{5}$\\ \hline $\overline{23}$ & $\overline{23}$ & $\overline{17}$ & $\overline{11}$ & $\overline{5}$ & \cellcolor{orange!20}{$\overline{25}$} & $\overline{19}$ & $\overline{7}$ & \cellcolor{green!20}{$\overline{1}$} & $\overline{21}$ & $\overline{15}$ & $\overline{9}$ & $\overline{3}$\\ \hline \rowcolor{magenta!30} \cellcolor{gray!30}{$\overline{25}$} & \cellcolor{orange!20}{$\overline{25}$} & $\overline{23}$ & $\overline{21}$ & $\overline{19}$ & $\overline{17}$ & $\overline{15}$ & $\overline{11}$ & $\overline{9}$ & $\overline{7}$ & $\overline{5}$ & $\overline{3}$ & 
\cellcolor{green!20}{$\overline{1}$}\\ \hline \end{tabular} \caption{$\odot$ on $\Gamma(26)$}\label{tabla2} \end{table} \end{enumerate} \end{example} \begin{remark}\label{remar6} Table~\ref{tabla2} shows the operation $\odot$ on $\Gamma(2m = 26)$; in addition to having beautiful symmetries, the following properties are observed: \begin{enumerate} \item $\odot$ is closed, since the result of the operation satisfies $\overline{a} \odot \overline{b} \in \Gamma(2m)$ for each $\overline{a}, \overline{b} \in \Gamma(2m)$. \item The magenta column guarantees property (b) of Theorem \ref{teo5}, since operating any element $\overline{x} \in \Gamma(2m)$ with $\overline{2m - 1}$ through $\odot$ gives its co-opposite $\overline{2m - x}$ as a result. Furthermore, while the elements of the module-$\overline{1}$ column increase, the elements of the $\overline{2m - 1}$ column decrease. Just to fix ideas, taking the element $x = \overline{15}$ and operating it with $\overline{2m - 1}$, it results in $\overline{11}$, which is the co-opposite of $\overline{15}$, since $\overline{2m - 15} = \overline{11}$. \item In the dark blue and magenta rows and columns, it can be seen that property (c) of Theorem \ref{teo5} holds; for example, when taking $a_{m} = \overline{11}$ and operating it with $\overline{2m - 1}$ through $\odot$, it gives its co-opposite $2m - a_{m} = \overline{15} = a_{m+1}$ as a result. \end{enumerate} \end{remark} Let us observe in the example that the couples $(\overline{3},\overline{5}),(\overline{5},\overline{7}),(\overline{17},\overline{19})$ satisfy the conjecture, and these are found in the blue and magenta columns. There are also the couples $(\overline{7},\overline{9}),(\overline{15},\overline{17})$ that, although their distance is two, do not satisfy the conjecture, because one of the coordinates is not a prime number.
Since the operation is commutative, symmetric pairs that also satisfy the conjecture need not be analyzed; hence, only half the elements of $\Gamma(2m)$ need to be traversed. Another relationship present is that, in the blue column, going through the coprimes smaller than $m$, an ascending order is observed; nevertheless, in the magenta column, the co-opposites are ordered in a decreasing manner, but the coprimes greater than $m$ are located there. This feature is important for proving the central outcome. \begin{theorem}[Twin primes Conjecture]\label{teo12} There are infinitely many twin primes. That is, $\vert \mathbb{F} \vert = \infty$, where $\mathbb{F}$ is as in Definition~\ref{def9}. \end{theorem} \begin{proof} \textbf{Reductio ad absurdum.} Suppose that the set $\mathbb{F}$ is finite. Then, since the primes are not bounded, there exists $m' \in \mathbb{P}$ such that $m' > \max(S)$, where $S = \pi_{1}(\mathbb{F})$ is the set of first coordinates of $\mathbb{F}$. Then, outside of $S$, there are no twin primes; this implies that $\forall p, q \in \mathbb{P} \left( p,q \not\in S \Rightarrow \vert p - q \vert > 2\right) $. By Remark~\ref{remark5}, it is found that, taking $\gamma(2m')$, each pair of primes $p, q \in \mathbb{P}$ with $p, q \not\in S$, satisfying $m' < p < 2m'$ and $m' < q < 2m'$, cannot be twin primes. This implies that at least one of the consecutive coprimes $p,q$ must not be prime, also satisfying that $\vert p - q \vert > 2$. Using the theory proposed, we consider the group $\langle \Gamma(2m'), \odot, 1 \rangle $ and the corresponding isomorphism $\theta$ between the posets $\left\langle \gamma(2m'),\leq \right\rangle$ and $\left\langle \Gamma(2m'),\preceq \right\rangle $ given by Definition~\ref{def8}; applying the isomorphism $\theta$ to the previous inequalities, we obtain $\overline{m'} \prec \overline{p} \prec \overline{2m' - 1}$ and $\overline{m'} \prec \overline{q} \prec \overline{2m' - 1}$.
Multiplying these inequalities by $2m' - 1$ with the operation $\odot$, by Theorems~\ref{teo7} and~\ref{teo8}, we have that $\overline{m'} \succ \overline{2m' - p} \succ \overline{1}$ and $\overline{m'} \succ \overline{2m' - q} \succ \overline{1}$. Now applying the isomorphism $\theta^{-1}$ to these inequalities, it is obtained that $m' > 2m' - p > 1$ and $m' > 2m' - q > 1$. When we calculate the distance of these two consecutive co-opposite numbers, we have that $\vert (2m' - p) - (2m' - q)\vert = \vert p - q \vert> 2 $, absurd, since it contradicts that for all the primes in $S$, which by construction are also between $1$ and $m'$, their distance is exactly $2$ when they are consecutive. \end{proof} In the next section, images are presented that show the multiple symmetries obtained in the operation $\odot$; furthermore, when searching for the cyclic subgroups of the group $\langle \Gamma(2m) , \odot , 1 \rangle$ of the present work, some patterns are observed that should be further explored. \section{The beauty of co-primes} The algorithm was implemented in Matlab 2020b; in addition, the operation $\odot$ over $\Gamma(2m)$ was programmed. The user can enter the desired even number $2m$, with the aim of guaranteeing the Abelian group structure. The positions where the operation results in the co-opposite $2m - 1$ of the module $1$ are highlighted. Figure~\ref{fig4}, for example, shows Table~\ref{tabla1} and Table~\ref{tabla2} for the operation of $\Gamma(2m = 296)$. \begin{figure} \caption{Feature surface showing the Cayley table of $\odot$ acting on $\Gamma(2m = 296)$ } \label{fig4} \end{figure} Different symmetries are observed when taking each element of $\Gamma(2m)$. The different cyclic subgroups were calculated for the example of $\Gamma(2m = 296)$. There are cyclic subgroups of order: $1, 2, 3, 4, 6, 9, 12, 18$ and $36$. The patterns they generate showed some elements of these groups.
In Table~\ref{tabla1} they are painted in different colors, according to the order of the generating element. Locating the result in Figures~\ref{fig5}--\ref{fig6}, this exercise can be identified. \begin{figure} \caption{Patterns of the cycles of order 2,3,4,6,9,12,18,36 for the structure $\langle \Gamma(2m = 296) , \odot , 1 \rangle$ } \label{fig5} \end{figure} The following images show the symmetries and patterns present in the orbits of cyclic subgroups for $\Gamma(2m = 180)$. \begin{figure} \caption{Patterns of the cycles of order $2,3,4,6,12$ and the primes that satisfy the conjecture for the structure $\langle \Gamma(2m = 180) , \odot , 1 \rangle$ } \label{fig6} \end{figure} \appendix \section{Proof of Theorems}\label{appendix} \begin{theorem}\label{teo1a} For each $m>1$, we have that $\equiv_{\mathbb{C}(m)}$ is an equivalence relation on $\mathbb{C}(m)$. \end{theorem} \begin{proof} It will be proved that $\equiv_{\mathbb{C}(m)}$ satisfies the reflexive, symmetric and transitive properties.
\begin{description} \item[Reflexive] Taking $x \in \mathbb{C}(m)$ arbitrary, we have that $0 = 0\cdot m = x - x$; therefore, $x \equiv_{\mathbb{C}(m)} x$. \item[Symmetry] Let $x, y \in \mathbb{C}(m)$ be arbitrary and suppose $x \equiv_{ \mathbb{C}(m)} y$, which equals $\exists k \in \mathbb{Z} \left( x - y = mk \right)$; multiplying by $-1$ gives that $\exists -k \in \mathbb{Z} \left( y - x = m(-k) \right)$, which equals $y \equiv_{\mathbb{C}(m)} x$. \item[Transitive] Let $x, y , z \in \mathbb{C}(m)$ be arbitrary and suppose $x \equiv_{\mathbb{C}(m)} y$ and $y \equiv_{\mathbb{C}(m)} z$; this equals $\exists k \in \mathbb{Z} \left(x - y = mk\right)$ and $\exists l \in \mathbb{Z} \left (z - y = ml\right)$. Solving for $-y$ from the second equation and substituting it in the first equation, we have that $x + ml - z = mk$, which is equivalent to $x - z = m(k-l)$; that is, there exists $k' = k-l \in \mathbb{Z}$ such that $x - z = mk'$, which is equivalent to $ x\equiv_{\mathbb{C}(m)} z$. \end{description} \end{proof} \begin{theorem}\label{teo2a} For each $m>1$, $\cdot$ is an operation on $\mathbb{C}(m)$ that is closed, modulative (admits an identity element), associative and commutative. \end{theorem} \begin{proof} We will prove the closure of the product, since it is the least intuitive; the other properties are inherited from the product in $\mathbb{Z}$.\\ Let $x,y \in \mathbb{C}(m)$ be arbitrary; this equals \begin{enumerate} \item $x \wedge m = 1$, by definition. \item $y \wedge m = 1$, by definition. \item $\exists s_{1}, t_{1} \in \mathbb{Z} \left( s_{1}\cdot x + t_{1}\cdot m = 1\right)$, by property \ref{ecua1} applied to line (1). \item $\exists s_{2}, t_{2} \in \mathbb{Z} \left( s_{2}\cdot y + t_{2}\cdot m = 1\right)$, by property \ref{ecua1} applied to line (2). \item $\left( s_{1}\cdot x + t_{1}\cdot m \right)\cdot \left( s_{2}\cdot y + t_{2}\cdot m \right) = 1$, multiplying lines (3) and (4). 
\item $ s_{1}\cdot s_{2}\cdot x\cdot y + \left( s_{1}\cdot t_{2}\cdot x+t_{1}\cdot s_{2}\cdot y +t_{1}\cdot t_{2}\cdot m \right)\cdot m = 1$, distributing and factoring. \item $ s_{3}\cdot x\cdot y + t_{3}\cdot m = 1$ taking $s_{3}=s_{1}\cdot s_{2}$ and $t_{3}=s_ {1}\cdot t_{2}\cdot x+t_{1}\cdot s_{2}\cdot y+t_{1}\cdot t_{2}\cdot m$ from the previous line. \item $x\cdot y \wedge m = 1$ \end{enumerate} \end{proof} \begin{theorem}\label{teo3a} For each $m>1$, it is had that \\ $\forall x,y,u,v \in \mathbb{C}(m) \left( x \equiv_{\mathbb{C}(m)} y \wedge u \equiv_{\mathbb{C}(m)} v \Rightarrow x \cdot u \equiv_{\mathbb{C}(m)} y\cdot v \right) $. \end{theorem} \begin{proof} Let $x,y, u, v \in \mathbb{C}(m)$, be arbitrary, such that: \begin{enumerate} \item $x \equiv_{\mathbb{C}(m)} y $, by hypothesis. \item $u \equiv_{\mathbb{C}(m)} v $, by hypothesis. \item $\exists s \in \mathbb{Z} \left( x - y = m\cdot s\right) $, also $x \wedge m = 1$ and $y \wedge m = 1$ by definition of $\equiv_{\mathbb{C}(m)}$ in $\mathbb{C}(m)$ in line (1). \item $\exists t \in \mathbb{Z} \left( u - v = m\cdot t \right) $, also $u \wedge m = 1$ and $v \wedge m = 1$ by definition of $\equiv_{\mathbb{C}(m)}$ in $\mathbb{C}(m)$ in line (2). \item $\exists s \in \mathbb{Z} \left( x = m\cdot s + y\right)$, further $x \wedge m = 1$ and $y \wedge m = 1$ solving for $x $ in line (3). \item $\exists t \in \mathbb{Z} \left( u = m\cdot t + v\right) $, further $u \wedge m = 1$ and $v \wedge m = 1$ solving for $u $ in line (4). \item $x\cdot u = \left( m\cdot s + y \right)\cdot \left( m\cdot t + v \right)$, multiplying lines (5) and (6). \item $ x\cdot u = m\cdot \left(m\cdot s\cdot t+s\cdot v +y\cdot t \right) + y\cdot v $, distributing and factoring. \item $ x\cdot u - y\cdot v = m\cdot s'$, taking $s'=m\cdot s\cdot t+s\cdot v +y\cdot t$ from the previous line. \end{enumerate} Therefore, $x \cdot u \equiv_{\mathbb{C}(m)} y\cdot v$. 
\end{proof} \begin{theorem}\label{teo5a} For each $m>1$, in $\left\langle \mathbb{C}(m)/\equiv_{\mathbb{C}(m)},\odot,\overline{1} \right\rangle $, we have that $\overline{m-1}\odot\overline{m-1}=\overline{1}$. \end{theorem} \begin{proof} Suppose $m>1$; then $m-1>0$. \begin{enumerate} \item $\overline{m-1}\odot\overline{m-1}=\overline{(m-1)\cdot (m-1)}$, by compatibility. \item $\; \; \; \; \; \; \; \; \; \; \; \; \; \; \; \; \; \; \; \; \; \; \; \; =\overline{m^2 - 2m + 1}$, by distributive property. \item $\; \; \; \; \; \; \; \; \; \; \; \; \; \; \; \; \; \; \; \; \; \; \; \; =\overline{m\cdot(m - 2) + 1}$, by factoring. \item Since $m\cdot(m - 2)\equiv_{m} 0$, it follows that $m\cdot(m - 2) + 1\equiv_{\mathbb{C}(m)} 1$. \end{enumerate} Since the classes are equal when their elements are related and vice versa, and by the transitivity of equality, we obtain $\overline{m-1}\odot \overline{m-1}=\overline{1}$. \end{proof} \begin{theorem}\label{teo6a} For each $m>1$, it follows that $\left\langle \mathbb{C}(m)/\equiv_{\mathbb{C}(m)},\preceq \right\rangle $ is a poset. Also, $\preceq$ is a total order. \end{theorem} \begin{proof} The reflexivity, transitivity and totality properties are simple to verify, so only the antisymmetry proof is given. Let $\overline{x},\overline{y}\in \mathbb{C}(m)/\equiv_{\mathbb{C}(m)}$ be such that: \begin{enumerate} \item $\overline{x} \preceq \overline{y}$, by hypothesis. \item $\overline{y} \preceq \overline{x}$, by hypothesis. \item $\exists s, t \in \left\lbrace 1,2,...,m-1\right\rbrace \left(s\leq t \wedge s\equiv_{\mathbb{C}(m) } x \wedge t\equiv_{\mathbb{C}(m)} y \right)$, is equivalent to line (1) by definition of $\preceq$. \item $\exists s, t \in \left\lbrace 1,2,...,m-1\right\rbrace \left(t\leq s\wedge t\equiv_{\mathbb{C}(m) } y \wedge s \equiv_{\mathbb{C}(m)} x \right)$, is equivalent to line (2), by definition of $\preceq$. 
\item $\exists s, t \in \left\lbrace 1,2,...,m-1\right\rbrace \left(t = s\wedge t\equiv_{\mathbb{C}(m)} y \wedge s \equiv_{\mathbb{C}(m)} x \right)$, by the antisymmetry of the usual order $\leq$ on $\left\lbrace 1,2,...,m- 1\right\rbrace$. \item $x \equiv_{\mathbb{C}(m)} y $, by symmetry and transitivity of $\equiv_{\mathbb{C}(m)}$. \end{enumerate} Since the classes are equal when their elements are related and vice versa, $\overline{x} = \overline{y}$. Therefore, $\preceq$ is antisymmetric. \end{proof} \subsection*{Acknowledgments:} I thank Gonzalo Medina Arellano for his support with LaTeX, and my wife Elisabeth for her continued support in translating this document. I also want to thank Veritasium for their work in spreading scientific curiosities and mathematical conjectures that inspired me to tackle this challenge. \end{document}
\begin{document} \title{BV regularity near the interface for nonuniform convex discontinuous flux} \begin{abstract} In this paper, we discuss the total variation bound for the solution of scalar conservation laws with discontinuous flux. We prove the smoothing effect of the equation forcing the $BV_{loc}$ solution near the interface for $L^\infty$ initial data without the assumption on the uniform convexity of the fluxes made as in \cite{ARGS, ghoshaljde}. The proof relies on the method of characteristics and the explicit formulas. \end{abstract} \noindent Key words: Hamilton-Jacobi equation, conservation laws, discontinuous flux, explicit formula, characteristics, BV function. \section{Introduction} Let us consider the following conservation laws with discontinuous flux, \begin{equation}\label{bv11} \begin{array}{lllll} u_t+F(x,u)&=0\\ u(x,0)&=u_0(x), \end{array} \end{equation} the flux $F$ is given by, $F(x,u)=H(x)f(u)+(1-H(x))g(u)$, $H$ is the Heaviside function. Throughout this paper we assume the fluxes $f,g$ to be $C^2(\mathbb{R}),$ strictly convex, superlinear growth (see definition \ref{lt}) and $u_0\in L^\infty$. We denote by $f_+^{-1},g_+^{-1}$ to be the inverses of the increasing part of $f$, $g$ respectively and similarly $f_-^{-1},g_-^{-1}$, the inverses of the decreasing part of $f,g$ respectively. Let $\theta_f, \theta_g$ be the unique minimums of the fluxes $f$ and $g$ respectively. By uniform convexity of the fluxes $f,g$, we mean $f,g\in C^2$ and there exists a positive constant $\alpha$ such that $f^{\prime\prime}>\alpha,\ g^{\prime\prime}>\alpha.$ \par The first order partial differential equation of type (\ref{bv11}) has many applications namely, modeling gravity, continuous sedimentation in a clarifier-thickener unit, petroleum industry, traffic flow on a highway, semiconductor industry. For more details, one can see \cite{burKarlKlinRis, BurgKarRis, BurKarRisTow, BurKarTow, diehlSIAM, diehlengrg, geo, mochon} and the references therein. 
\par It is well known that even if the flux and the initial data are sufficiently smooth the global classical solution of scalar conservation laws, does not exist always, which allows to define the weak notion of the solution. In general, there might be infinitely many weak solutions even for smooth flux. Moreover, the well-posedness theory for the Cauchy problem for scalar conservation laws with Lipschitz flux function was completely settled by Kruzkov \cite{kruzkov}. \par In past few decades, the Cauchy problem for conservation laws with discontinuous flux of the type (\ref{bv11}) has been well studied, it has been tackled in several ways. For convergence analysis of several important numerical schemes we refer to \cite{siam, jhde, mathcomp, And2, karlsen, burger, BurgKarRis, BurKarRisTow, towers}. The solution of (\ref{bv11}) can be achieved by vanishing viscosity limit \cite{karlRisTowers,And1}, front tracking (\cite{burKarlKlinRis,gimseresebro, gimse, KlinRis}). On the other hand, one can obtain the solution of (\ref{bv11}) also via Hamilton-Jacobi equation \cite{Kyoto, jde, ostrov}, which is the one we exploit in the present paper. Consider the following Hamilton-Jacobi equation \begin{equation}\label{bv12} \begin{array}{lllll} v_t+g(v_x)&=&0 \ \ \mbox{if}\ x<0, t>0,\\ v_t+f(v_x)&=&0 \ \ \mbox{if} \ x>0, t>0,\\ v(x,0)&=&v_0 \ \mbox{if}\ x\in\mathbb{R}. \end{array} \end{equation} \par In \cite{jde}, they dealt with the Hamilton-Jacobi equation (\ref{bv12}) and proved the existence of the global Lipschitz solution of (\ref{bv12}). Then $u=v_x$, solves the discontinuous conservation laws (\ref{bv11}) with the initial data $v_{0_x}=u_0$. Also they proved the existence of infinitely many stable semigroups of entropy solutions based on $(A,B)$ interface entropy condition (see definition \ref{definitionInter}). For any such choice of $A,B$, they have given an explicit representation of the entropy solution for the conservation law (\ref{bv11}). 
Throughout our paper we use the $(A,B)$ entropy solution (see definition \ref{definitionSol}) obtained as in \cite{Kyoto, jde}. \par Despite the fact that the subject is well studied, the total variation bound near the interface remained unsolved for quite a long time. In \cite{karlsen}, they observed that the solution stays in BV, away from the interface $x=0$. Recently, in \cite{ARGS,ghoshaljde} the question regarding the total variation bound near interface has been answered. In general, BV regularity of the solution is an extremely important phenomenon in the theory of conservation laws. For the case when $f=g$ (not necessarily convex), total variation diminishing (TVD) property holds, that is to say, $TV(u(\cdot, t))\leq TV(u_0(\cdot))$, for all $t>0.$ Definitely, one cannot expect to have similar property in the case when $f\neq g,$ because of the fact that constant initial data may lead to a non constant solution. \par The first breakthrough results regarding the total variation bound have been obtained in \cite{ARGS}. First it was observed in \cite{ARGS} that the critical points plays a key role for the existence and nonexistence of total variation bound. It has been shown in \cite{ARGS} that if the connections $(A,B)$ avoid the critical points $(\theta_g,\theta_f)$ and $u_0\in BV$, then the BV regularity holds. They observed in \cite{ARGS} that $u_0\in BV$ is not enough, one needs to assume also $f^{-1}_+g(u_0),g^{-1}_-f(u_0)\in BV$ in order to prove that the solution is BV near the interface. Also they have constructed a counter example by rarefaction waves and shock waves separated by constant states near the interface to prove that total variation of the solution at time $t=1$ may blow up even if $u_0\in BV.$ Hence the assumption $f^{-1}_+g(u_0),g^{-1}_-f(u_0) \in BV$ are important. 
\par Later on in \cite{ghoshaljde}, it has been proved that if the connections $(A,B)$ avoid the critical points $(\theta_g,\theta_f)$ and $f^{\prime\prime},g^{\prime\prime}\geq \alpha>0,$ then $u\in BV$ for $u_0\in L^\infty.$ Also a very strong and surprising result has been obtained in \cite{ghoshaljde}: if the lower heights of both the fluxes are the same, that is, if $f(\theta_f)=g(\theta_g),$ then the solution stays in BV near the interface, even if the fluxes are not uniformly convex or even if $u_0\notin BV.$ On the other hand, if $f(\theta_f)\neq g(\theta_g),$ then the result does not hold in general (for instance the counter example in \cite{ARGS} at $t=1$). If $f(\theta_f)\neq g(\theta_g),$ then one needs to put the extra assumption that Supp $u_0$ is compact; then the result holds true for uniformly convex flux, but for large time. One cannot avoid assuming that the initial data $u_0$ is compactly supported due to the counter example (in \cite{ghoshaljde}), which shows that even for uniformly convex fluxes, $u(x,T_n)\notin BV$ while $u_0\in BV$, for all $n$ and $\lim\limits_{n\rightarrow\infty}T_n=\infty$. \par When $f=g,$ we have the following Lax-Oleinik formula: \begin{theorem}\label{thm1.1} (Lax-Oleinik formula) Let the initial data $u_0\in L^\infty(\mathbb{R})$. If the flux $f$ is $C^2$, uniformly convex and of super linear growth, then there exists a function $y (x, t)$ such that \begin{enumerate} \item $x\mapsto y(x,t)$ is non-decreasing. \item For a.e. $(x,t)\in \mathbb{R}\times (0,\infty)$, the solution $u$ of (\ref{bv11}) is given by \begin{align} u (x,t) = (f^\prime)^{-1}\left(\frac{x-y (x,t)}{t}\right). 
\label{eq1.2} \end{align} \end{enumerate}\end{theorem} \par An immediate observation from Theorem~\ref{thm1.1} is the following: \noindent If $f^{\prime\prime}\geq \alpha>0$, then for all $ t > 0, \ x \mapsto u (x, t)$ is in $ B V_{loc} (\mathbb{R}),$ even if $u_0\notin BV.$ When the flux is not uniformly convex, in the case $f=g$, BV regularity does not hold for $u_0\in L^\infty$, even for large time (one can see a counter example in \cite{Ssbv}, though $f^\prime(u)\in BV_{loc}$). For finer properties of characteristics in the case $f=g$, we refer to \cite{Ssbv,Ssh,ghoshal,duke,lax,Serre}. \par When $f\neq g$, a Lax-Oleinik type formula still holds: \begin{theorem} (Adimurthi, Gowda \cite{Kyoto}, \cite{jde}) Let $u_0\in L^{\infty}(\mathbb{R})$, then there exists an entropy solution $u$ of (\ref{bv11}) with $u\in L^{\infty}(\mathbb{R}).$ Also there exist Lipschitz continuous functions $R(t)\geq 0,L(t)\leq 0$ and monotone functions $y_{\pm}(x,t), \ t_{\pm}(x,t)$ such that for a fixed $t>0,$ \begin{itemize} \item $t\geq t_+(x,t)\geq 0$ is a non-increasing function of $x$ in $ [0,R(t)), $ \item $ y_+(x,t)\geq 0$ is a non-decreasing function of $x$ in $ [R(t),\infty), $ \item $t\geq t_-(x,t)\geq 0$ is a non-decreasing function of $x$ in $ (L(t),0], $ \item $ y_-(x,t)\leq 0$ is a non-decreasing function of $x$ in $ (-\infty,L(t)). $ \end{itemize} \begin{eqnarray} u(x,t)= \left\{\begin{array}{lll} (f^{\prime})^{-1}\left(\frac{x-y_+(x,t)}{t}\right) &\mbox{ if }& x\geq R(t),\\ (f^{\prime})^{-1}\left(\frac{x}{t-t_+(x,t)}\right) &\mbox{ if }& 0\leq x<R(t). \end{array}\right.\label{r2.12} \end{eqnarray} \begin{eqnarray} u(x,t)= \left\{\begin{array}{lll} (g^{\prime})^{-1}\left(\frac{x-y_-(x,t)}{t}\right) &\mbox{ if }& x\leq L(t), \\ (g^{\prime})^{-1}\left(\frac{x}{t-t_-(x,t)}\right) &\mbox{ if}& L(t)<x<0. 
\end{array}\right.\label{r2.13} \end{eqnarray} \end{theorem} \par One can ask the similar question as above that under which assumptions on $f,g,u_0$ can one expect to have $u \in BV_{loc}(-\infty,L(t))$, $u\in BV_{loc}(L(t),0)$, $ u\in BV_{loc}(0,R(t))$, $u\in BV_{loc}(R(t),\infty)?$ In this present article we prove that $ u\in BV_{loc}(L(t),0)$, $u\in BV_{loc}(0,R(t))$ with nonuniform convex flux and $u_0\in L^\infty.$ that is to say we relax $f^{\prime\prime},g^{\prime\prime}\geq \alpha>0$ and avoid $u_0\in BV$ to prove the existence of the BV regularity near the interface. For the case when the connections avoid the critical points, we prove that the solution stays in BV near the interface, even if the flux is not uniformly convex (e.g. $f(u)=u^4, g(u)=u^6+1$) and even if the initial data is highly oscillatory ($u_0\in L^\infty$). Also we prove that if one allows the connection to be the critical points, then similar results hold true without uniform convexity of fluxes and only with $u_0\in L^\infty$, but for large time. The result in this paper is very surprising because, even if $f=g,$ but not uniformly convex then in general there does not exist any region where the solution stays BV for $L^\infty$ initial data, in other words, for $f=g$ and nonuniform convex fluxes one can always choose some $u_0\in L^\infty$ such that $u(\cdot,t)\notin BV(K)$, where $K$ is an interval in $\mathbb{R}$ and $t>0$ (one can see the details in \cite{Ssbv}). This happens due to the lack of Lipschitz continuity of $(f^\prime)^{-1}$. Here in this article we prove that near the interface i.e, in the region $(L(t),0)\cup (0,R(t))$, either the solution does not oscillates too much or $(f^\prime)^{-1},(g^\prime)^{-1}$ are Lipschitz continuous. 
Note that for $x\in(L(t),0)\cup (0,R(t))$, the characteristic passing through the point $(x,t)$ is bent before time $t$ if and only if $f\neq g.$ The paper presents a special case in which one can prove BV regularity for the solution near the interface (starting from $L^\infty$ data) without enforcing the assumption on uniform convexity of the flux. However, the price to pay in order to relax the hypothesis on uniform convexity is not negligible. Our proof relies on the explicit Lax-Oleinik type formulas obtained in \cite{Kyoto, jde} and the finer analysis of the characteristic curves \cite{Kyoto, jde, Ssh}. \par In order to make the present article self-contained, we describe enough prerequisites in Section 2 before presenting the main results in Section 3. \section{Preliminaries} We recall some definitions and known results from \cite{ARGS, Ssh, Kyoto, jde, ghoshaljde}. \begin{definition} \textbf{Weak solution of} (\ref{bv11}): $u$ is said to be a weak solution of (\ref{bv11}) if $u\in L^\infty_{\mbox{loc}}(\mathbb{R}\times \mathbb{R}_+)$ and it satisfies the following integral equality, for all $\phi\in C^\infty_{0}(\overline{{\mathbb{R}\times \mathbb{R}_+}})$ \begin{equation}\label{bv21} \int\limits_{0}^{\infty} \int\limits_{-\infty}^{\infty}\left(u\frac{\partial \phi}{\partial t}+(H(x)f(u)+(1-H(x))g(u)) \frac{\partial \phi}{\partial x}\right)dxdt + \int\limits_{-\infty}^{\infty}u_0(x)\phi(x,0)dx=0. \end{equation} It is immediate to check that $u$ satisfies (\ref{bv21}) if and only if $u$ satisfies the following in the weak sense \begin{align}\label{bv4} \begin{array}{lllll} u_t+g(u)_x&=0 \ \ &\mbox{if}\ x<0, t>0,\\ u_t+f(u)_x&=0 \ \ &\mbox{if} \ x>0, t>0,\\ u(x,0)&=u_0 \ &\mbox{if}\ x\in\mathbb{R}. \end{array} \end{align} \end{definition} \noindent\textbf{Rankine-Hugoniot condition at interface}: Let us denote $u(0+,t)=\lim\limits_{x\rightarrow 0+}u(x,t)$ and $u(0-,t)=\lim\limits_{x\rightarrow 0-}u(x,t)$. 
Then at $x=0$, $u$ satisfies the following R-H condition \begin{equation}\label{bv22} f(u(0+,t))=g(u(0-,t)), \ \mbox{a.e.}\ t>0. \end{equation} \begin{definition} \textbf{Interior entropy condition}: A weak solution $u$ of (\ref{bv4}) is said to satisfy the interior entropy condition if \begin{equation}\label{bv23} \begin{array}{llll} \frac{\partial \phi_1(u)}{\partial t} + \frac{\partial \psi_1(u)}{\partial x}\leq 0 \ \mbox{for} \ x>0,t>0,\\ \frac{\partial \phi_2(u)}{\partial t} + \frac{\partial \psi_2(u)}{\partial x}\leq 0 \ \mbox{for} \ x<0,t>0, \end{array} \end{equation} in the sense of distributions, where $(\phi_i,\psi_i)$ are the convex entropy pairs such that $(\psi_1^\prime(u),\psi_2^\prime(u))=(\phi_1^\prime(u)f^\prime(u), \phi_2^\prime(u)g^\prime(u))$. \end{definition} \begin{definition} ($\mathbf{(A,B)}$ \textbf{Connection}): A pair $(A,B)\in\mathbb{R}^2$ is called a connection if it satisfies the following \\ (i) $g(A)=f(B)$.\\ (ii) $ g'(A)\leq0, f'(B)\geq0.$ \end{definition} \begin{definition}\label{definitionInter} \noindent \textbf{$\mathbf{(A,B)}$ Interface entropy condition}: Let $u\in L^{\infty}_{\textrm{loc}}{(\mathbb{R}\times\mathbb{R}_+)}$ be such that $u(0\pm,t)$ exist a.e. $t>0$. Define $I_{AB}(t)$ by \begin{equation} (g(u(0-,t))-g(A))\textrm{sign}(u(0-,t)-A)-(f(u(0+,t))-f(B))\textrm{sign}(u(0+,t)-B). \end{equation} Then $u$ is said to satisfy the $(A,B)$ interface entropy condition if for a.e. $t>0$, \begin{equation} I_{AB}(t)\geq 0.\label{interface} \end{equation} When $A=\theta_g$ or $B=\theta_f$, then (\ref{interface}) reduces to \begin{equation} \textrm{meas}\big\{t:f'(u(0+,t))>0,g'(u(0-,t))<0\big\}=0\label{interface2}. 
\end{equation} \end{definition} \begin{definition}\label{definitionSol} \noindent \textbf{$\mathbf{(A,B)}$ Interface entropy solution}: $u\in L^{\infty}_{\textrm{loc}}{(\mathbb{R}\times\mathbb{R}_+)}$ is said to be the $(A,B)$ Interface entropy solution of (\ref{bv11}), if it satisfy (\ref{bv21}), (\ref{bv23}) and (\ref{interface}). \end{definition} \begin{definition} \textbf{Control curve}: Let $0\leq t$ and $\gamma\in C([0,t],\mathbb{R})$. Then $\gamma $ is called a control curve if the following holds. \begin{enumerate} \item [(i).] $\gamma(t)=x$. \item[(ii).] $\gamma$ consists of at most three linear curves and each segment lies completely in either $x \geq 0$ or $x \leq 0 \,.$ \item[(iii).] Let $0 = t_3 \leq t_2 \leq t_1 \leq t_0 = t$ be such that for $i = 1, 2, 3, \quad \gamma_i = \gamma |_{[t_i, t_{i - 1}]}$ be the linear parts of $\gamma$. If $\gamma$ consists of three linear curves then $\gamma_2 = 0$ and $\gamma_1,\gamma_3>0$ or $\gamma_1,\gamma_3<0.$ \end{enumerate} \begin{figure} \caption{Control curves: fig (a), (b), (c) are positive control curves and fig (d), (e), (f) are negative control curves respectively.} \label{fig1} \end{figure} Let us denote the set of control curves by $\Gamma(x,t).$ \noindent Positive control curve: Let $x\geq 0$ then define the positive control curve $\Gamma_+(x,t)$ by \begin{eqnarray*} \Gamma_+(x,t)=\{\gamma\in \Gamma(x,t)\ : \ \gamma\geq 0 \}. \end{eqnarray*} \noindent Negative control curve: Let $x\leq 0$ then define the negative control curve $\Gamma_-(x,t)$ by \begin{eqnarray*} \Gamma_-(x,t)=\{\gamma\in \Gamma(x,t)\ : \ \gamma\leq 0 \}. \end{eqnarray*} Define $\bar{\Gamma}_\pm$ by \begin{eqnarray*} \bar{\Gamma}_\pm(x,t)=\big\{ \gamma\in \Gamma_\pm(x,t): \{s : \gamma(s)\neq 0\}=\mbox{the interval }(t_1,t),\ \mbox{for some}\ t_1\leq t\big\}. \end{eqnarray*} \end{definition} For simplicity, whenever we write the general flux $h$, it means that it is either $f$ or $g$ respectively in suitable sense. 
\begin{definition}\label{lt} \textbf{Convexity, super linear growth, Legendre transformation and some useful facts}: Let the flux $h$ be a $C^2$ function and strictly convex, i.e., $\forall \ a,b\in \mathbb{R}$, and $\forall \ r\in [0,1]$, the following holds \begin{eqnarray*} h(ra+(1-r)b)<rh(a)+(1-r)h(b). \end{eqnarray*} Also assume that the flux $h$ satisfies the superlinear growth property, i.e., \begin{eqnarray*} \lim\limits_{|u|\rightarrow \infty} \frac{h(u)}{|u|}=\infty. \end{eqnarray*} Then one can define the Legendre transform $h^*$ associated to $h$ by \begin{eqnarray*} h^*(p)=\sup_{q}\{pq-h(q)\}. \end{eqnarray*} It is easy to check that $h^*$ enjoys the following properties \begin{enumerate} \item $h^*$ is $C^1$ and strictly convex. \item $h^*$ has the superlinear growth property. \item $h^*{^{\prime}}=(h^\prime)^{-1}.$ \item $h=h^{**}.$ \item $h^*(h^\prime(p))=ph^\prime(p)-h(p).$ \item $h(h^{*^{\prime}}(p))=ph^{*^{\prime}}(p)-h^*(p).$ \item In addition, if $h^{\prime\prime}>\alpha>0,$ for some $\alpha$, then $(h^{\prime})^{-1}$ is Lipschitz continuous with Lipschitz constant bounded by $\frac{1}{\alpha}.$ \end{enumerate} \end{definition} \begin{definition} \textbf{Cost functional}: Let $v_0$ be the initial data for (\ref{bv12}) and let $h$ be the flux, then define the cost functionals as follows \begin{align} J(\gamma,v_0,h)&=v_0(\gamma(0)) +\int\limits_0^th^*(\gamma^\prime(\theta))d\theta,\\ J_+(\gamma,v_0,h)&=v_0(\gamma(0)) +\int\limits_{\{t\ :\ \gamma(t)>0\}}h^*(\gamma^\prime(\theta))d\theta,\\ J_-(\gamma,v_0,h)&=v_0(\gamma(0)) +\int\limits_{\{t\ :\ \gamma(t)<0\}}h^*(\gamma^\prime(\theta))d\theta. \end{align} \end{definition} Let $v_0\in C^1(\mathbb{R}\setminus \{0\})\cup \mbox{Lip}(\mathbb{R})$ and $v_0(0)=0$. Define the following auxiliary functions $b_\pm$ by \begin{align} b_+(t):=b_+(t,v_0,f):=\inf\limits_{\{\gamma\in \Gamma_+(0,t)\}} J(\gamma,v_0,f)\\ b_-(t):= b_-(t,v_0,g):=\sup\limits_{\{\gamma\in \Gamma_-(0,t)\} }J(\gamma,v_0,g). 
\end{align} \begin{definition} \textbf{Characteristics and other related functions}: Let us define the set of characteristics $ch_\pm$ by \begin{align} \begin{array}{lllllllllll} ch_\pm(t)&=&\{\gamma\in \Gamma_\pm(0,t)\ :\ b_\pm(t)=J(\gamma,u_0,h)\}. \end{array} \end{align} Define $y_\pm,t_\pm$ by \begin{align} \begin{array}{lllllllllll} y_+(t,u_0,h) =\inf\{\gamma(0)\ : \ \gamma\in ch_+(t)\},\\ y_-(t,u_0,h) =\sup\{\gamma(0)\ : \ \gamma\in ch_-(t)\},\\ y_+(x,t)=\mbox{min}\{\gamma(0)\ : \ \gamma\in ch_+(t)\},\\ y_-(x,t)=\mbox{max}\{\gamma(0)\ : \ \gamma\in ch_-(t)\},\\ t_+(x,t)=\mbox{max}\{t_1(\gamma)\ : \ \gamma\in ch_+(t)\},\\ t_-(x,t)=\mbox{min}\{t_1(\gamma)\ : \ \gamma\in ch_-(t)\},\\ R(t)=\mbox{min}\{x \ : \ t_+(x,t)=0\},\\ L(t)=\mbox{max}\{x \ : \ t_-(x,t)=0\}, \end{array} \end{align} where $t\mapsto y_-(t,u_0,h),\ t\mapsto y_+(t,u_0,h)$ are non-increasing and non-decreasing functions respectively. For a fix $t>0,$ $x\mapsto y_\pm(x,t), \ t_-(x,t)$ are non-decreasing functions and $x\mapsto t_+(x,t)$ is non-increasing function. The functions $t\mapsto R(t),\ t\mapsto L(t)$ are Lipschitz continuous functions and there exists some constant $C>0$, such that $Ct\geq R(t)\geq0, \ -Ct\leq L(t)\leq 0.$ \end{definition} \begin{definition} \textbf{Boundary data}: Let us define the boundary data $\lambda_\pm$ by: \begin{align} \lambda_+(t) &=\left\{\begin{array}{lllll}f_-^{-1}(-b^\prime_+(t)) & \mbox{if}\ -b^\prime_+(t)>\mbox{max}(-b^\prime_-(t),f(B))\\ f_+^{-1}(\mbox{max}(-b^\prime_-(t),f(B))) & \mbox{if}\ -b^\prime_+(t)\leq \mbox{max}(-b^\prime_-(t),f(B))\end{array}\right.\\ \lambda_-(t)&=\left\{\begin{array}{lllll}g_+^{-1} (-b^\prime_-(t)) & \mbox{if}\ -b^\prime_-(t)\geq \mbox{max}(-b^\prime_+(t),g(A))\\ g_-^{-1} (\mbox{max}(-b^\prime_+(t),g(A))) &\mbox{if}\ -b^\prime_-(t)<\mbox{max}(-b^\prime_+(t),g(A)). \end{array}\right. 
\end{align} \end{definition} \begin{definition} \noindent \textbf{Value functions}: Let us define the value functions $v_\pm$ by: \begin{align} v_-(x,t) =\sup_{\gamma\in \Gamma_-(x,t)}\left[J_-(\gamma,v_0,g)-\int_{\gamma=0}g(\lambda_-(\theta))d\theta\right] \ \mbox{if} \ x\leq 0,\\ v_+(x,t) =\sup_{\gamma\in \Gamma_+(x,t)}\left[J_+(\gamma,v_0,f)-\int_{\gamma=0}f(\lambda_+(\theta))d\theta\right] \ \mbox{if} \ x\geq 0. \end{align} \end{definition} \begin{theorem} (Adimurthi et al. \cite{jde}) Let the fluxes $f,g$ be strictly convex, smooth and of superlinear growth. Then \begin{enumerate} \item $f(\lambda_+(t))=g(\lambda_-(t))$. \item The value functions $v_\pm$ are Lipschitz continuous and $v_+(0,t)=v_-(0,t)$, for all $t>0$. $v_+,v_-$ are the solutions of (\ref{bv12}). \item Define the following Lipschitz continuous function $v$ by \begin{align} v(x,t)=\left\{\begin{array}{lllll} v_-(x,t) \ \mbox{if}\ x<0, \ t>0,\\ v_+(x,t) \ \mbox{if}\ x>0, \ t>0. \end{array}\right. \end{align} Then $u=v_x$ is the weak entropy solution of (\ref{bv11}) and satisfies the interface entropy condition (\ref{interface}) and the interior entropy condition (\ref{bv23}). \end{enumerate} \end{theorem} \noindent\textbf{Some useful formulas}: The case when $A=\theta_g$ or $B=\theta_f$. At the points of differentiability of $t_\pm,y_\pm$, we have \begin{eqnarray} u(x,t)= \left\{\begin{array}{lll} (f^{\prime})^{-1}\left(\frac{x-y_+(x,t)}{t}\right)=u_0(y_+(x,t)) &\mbox{ if }& 0\leq R(t)<x<\infty,\\ (f^{\prime})^{-1}\left(\frac{x}{t-t_+(x,t)}\right) &\mbox{ if }& 0\leq x<R(t)\\ =u(0+,t_+(x,t))\\ =f_+^{-1}g(u(0-,t_+(x,t)))\\ =f_+^{-1}g(g^{\prime})^{-1}\left(\frac{-y_-(0-,t_+(x,t))}{t_+(x,t)}\right)\\ =f_+^{-1}g(u_0(y_-(0-,t_+(x,t)))). 
\end{array}\right.\label{bv624} \end{eqnarray} \begin{eqnarray} u(x,t)= \left\{\begin{array}{lll} (g^{\prime})^{-1}\left(\frac{x-y_-(x,t)}{t}\right)=u_0(y_-(x,t)) &\mbox{ if }& -\infty<x\leq L(t)\leq 0, \\ (g^{\prime})^{-1}\left(\frac{x}{t-t_-(x,t)}\right) &\mbox{ if}& L(t)<x<0,\\ =u(0-,t_-(x,t))\\ =g_-^{-1}f(u(0+,t_-(x,t)))\\ =g_-^{-1}f(f^{\prime})^{-1}\left(\frac{-y_+(0+,t_-(x,t))}{t_-(x,t)}\right)\\ =g_-^{-1}f(u_0(y_+(0+,t_-(x,t)))). \end{array}\right.\label{bv625} \end{eqnarray} The case when $A\neq\theta_g$ and $B\neq\theta_f$. At the points of differentiability of $t_\pm,y_\pm$, we have \begin{eqnarray} u(x,t)= \left\{\begin{array}{lll} (f^{\prime})^{-1}\left(\frac{x-y_+(x,t)}{t}\right)=u_0(y_+(x,t)) &\mbox{ if }& 0\leq R(t)<x<\infty,\\ (f^{\prime})^{-1}\left(\frac{x}{t-t_+(x,t)}\right) &\mbox{ if }& 0\leq x<R(t)\\ =u(0+,t_+(x,t))\\ =\lambda_+(t_+(x,t))\\ =f_+^{-1}(\mbox{max}(-b^\prime_-(t_+(x,t)),f(B))).\\ \end{array}\right.\label{bv24} \end{eqnarray} \begin{eqnarray} u(x,t)= \left\{\begin{array}{lll} (g^{\prime})^{-1}\left(\frac{x-y_-(x,t)}{t}\right)=u_0(y_-(x,t)) &\mbox{ if }& -\infty<x\leq L(t)\leq 0, \\ (g^{\prime})^{-1}\left(\frac{x}{t-t_-(x,t)}\right) &\mbox{ if}& L(t)<x<0,\\ =u(0-,t_-(x,t))\\ =\lambda_-(t_-(x,t))\\ =g_-^{-1}(\mbox{max}(-b^\prime_+(t_-(x,t)),g(A))), \end{array}\right.\label{bv25} \end{eqnarray} where $b^\prime_\pm$ satisfies the following \begin{align} b^\prime_-(t)=\left\{\begin{array}{lll} \displaystyle-g\left((g^\prime)^{-1}\left(-\frac{y_-(t,u_0,g)}{t}\right)\right) &\mbox{if}&y_-(t,u_0,g)<0,\\ -g(\theta_g) &\mbox{if}& y_-(t,u_0,g)=0, \end{array}\right. \end{align} \begin{align} b^\prime_+(t)=\left\{\begin{array}{lll} \displaystyle-f\left((f^\prime)^{-1}\left(-\frac{y_+(t,u_0,f)}{t}\right)\right) &\mbox{if}&y_+(t,u_0,f)>0,\\ -f(\theta_f) &\mbox{if}& y_+(t,u_0,f)=0. \end{array}\right. \end{align} \begin{theorem} (Adimurthi et al. \cite{ARGS}). 
Let $u_0\in L^{\infty}(\mathbb{R})$ and $u$ be the solution of (\ref{bv11}). Let $\ t>0, \ \epsilon>0, M>\epsilon, \ I(M,\epsilon)=\{x:\epsilon\leq|x|\leq M\}$. Then \begin{enumerate} \item[(1).] Suppose there exists an $\alpha>0$ such that $f''\geq\alpha, \ g''\geq\alpha$, then there exist $C=C(\epsilon,M,\alpha)$ such that \begin{displaymath} \begin{array}{lll} \textrm{TV}\big(u(\cdot,t),I(M,\epsilon)\big)\leq C(\epsilon,M,t). \end{array} \end{displaymath} \item[(2).] Suppose $u_0\in\textrm{BV}$, and $T>0$. Then there exists $C(\epsilon,T)$ such that for all $0<t\leq T$ \begin{displaymath} \textrm{TV}\big(u(\cdot,t),|x|>\epsilon\big)\leq C(\epsilon,t)\textrm{TV}(u_0)+4||u_0||_{\infty}. \end{displaymath} \item[(3).] Let $u_0\in\textrm{BV}$, $T>0$ and $A\neq\theta_g$ and $B\neq\theta_f$. Then there exists $C>0$ such that for all $0<t\leq T$, \begin{displaymath} \textrm{TV}\big(u(\cdot,t)\big)\leq C \ \textrm{TV}(u_0)+6||u_0||_{\infty}. \end{displaymath} \item[(4).] Let $ u_0, \ f_+^{-1}(g(u_0)), \ g_-^{-1}(f(u_0))\in\textrm{BV}$, $T>0$ and $A=\theta_g$. Then for all $0<t\leq T$, \begin{displaymath} \begin{array}{lll} \textrm{TV}\big(u(\cdot,t)\big) \leq \textrm{TV}(u_0)+\displaystyle \max\big(TV(f_+^{-1}(g(u_0))),\textrm{TV}(g_-^{-1}(f(u_0)))\big)+6||u_0||_{\infty}.\\\\ \end{array} \end{displaymath} \item[(5).] For a certain choice of fluxes $f$ and $g$ there exists $u_0\in \textrm{BV}\cap L^{\infty}$ such that TV$(u(\cdot,1))=\infty$ if $A=\theta_g$ or $B=\theta_f$. \end{enumerate} \end{theorem} \begin{theorem}\label{th1}({Ghoshal \cite{ghoshaljde}}). Let $u_0\in L^{\infty}(\mathbb{R})$ and $u$ be a solution of (\ref{bv11}). Let $t>0,\ \epsilon>0,\ M>\epsilon $ and \begin{eqnarray*}I(M)&=&\{x\ :\ |x|<M\},\\ I(R_1(t))&=&\{x>0\ :\ x<R_1(t)\},\ I(L_1(t))=\{x<0\ :\ x>L_1(t)\}.\end{eqnarray*} \begin{enumerate} \item [(i).] 
Let $f(\theta_f)\neq g(\theta_g)$ and $f^{\prime\prime}\geq \alpha,\ g^{\prime\prime}\geq \alpha,$ for some $\alpha>0$, also assuming the fact that $\mbox{Supp}\ u_0\subset[-K,K],$ for some $K>0,$ then there exists a $T_0>0$ such that \mbox{for all} $t>T_0,$ \begin{eqnarray} TV\left(u(\cdotp,t),I(M)\right)\leq C(M,t).\label{r3.1} \end{eqnarray} As a consequence we have, \mbox{for all} $t>T_0,$ \begin{eqnarray} TV(u(\cdotp,t), \mathbb{R})\leq C(t),\label{r3.2} \end{eqnarray} where $C(t),C(M,t)>0$ are some constants. \item [(ii).] Let $f(\theta_f)= g(\theta_g)$ then for all $t>0,$ \begin{eqnarray} TV\left(u(\cdotp,t),I(R_1(t))\cup I(L_1(t))\right)\leq C(t).\label{r3.3} \end{eqnarray} In addition if $f^{\prime\prime}\geq \alpha,\ g^{\prime\prime}\geq \alpha,$ for some $\alpha>0,$ then for all $t>0,$ \begin{eqnarray} TV\left(u(\cdotp,t),I(M)\right)\leq C(M,t).\label{r3.4} \end{eqnarray} As a consequence, if $\mbox{Supp}\ u_0\subset[-K,K],$ for some $K>0,$ then for all $t>0,$ \begin{eqnarray} TV(u(\cdotp,t), \mathbb{R})\leq C(t).\label{r3.5} \end{eqnarray} \item[(iii).] Let $f(\theta_f)=g(\theta_g)$ and $u_0\in BV(\mathbb{R})$ then \mbox{for all} $t>0,$ \begin{eqnarray} TV\left(u(\cdotp,t)\right)\leq C(t)(TV(u_0)+1)+4\|u_0\|\infty.\label{r3.6} \end{eqnarray} \item[(iv).] Let $A\neq \theta_g$ and $B\neq \theta_f$. If $f^{\prime\prime},g^{\prime\prime}\geq\alpha>0,u_0\in L^\infty(\mathbb{R})$ then for all $t>0$, \begin{eqnarray} TV\left(u(\cdotp,t),I(\epsilon)\cup I(M,\epsilon)\right)\leq C_1(\epsilon)+C_2(\epsilon,M,t). \end{eqnarray} As a consequence, if $u_0\in BV(\mathbb{R})$ then for all $t>0,$ \begin{eqnarray} TV\left(u(\cdotp,t)\right)\leq C(\epsilon,t)(TV(u_0)+1)+4\|u_0\|\infty. \end{eqnarray} \end{enumerate} \textbf{Counter example} (Ghoshal \cite{ghoshaljde}): Let $f(u)=(u-1)^2-1, \ g(u)=u^2$. 
Then there exists initial data $u_0\in BV$ and a sequence $T_n,$ such that $\lim\limits_{n\rightarrow\infty} T_n=\infty$ and \begin{eqnarray} TV\left(u(\cdotp,T_n)\right)=\infty, \ \mbox{for all}\ n. \end{eqnarray} \end{theorem} \section{Main results} In the following theorems we have relaxed the assumptions of uniform convexity of the fluxes made in \cite{ARGS, ghoshaljde} and allowed $u_0\in L^\infty$. Let us denote $I(R(t))=\{x>0\ :\ x<R(t)\},\ I(L(t))=\{x<0\ :\ x>L(t)\}.$\\ \noindent \textbf{Hypothesis on the fluxes $\boldsymbol f,\boldsymbol g$}: Let the fluxes satisfy the following \noindent $\boldsymbol H\boldsymbol 1$. $f,g$ are $C^2$, strictly convex and of superlinear growth (see Definition \ref{lt}).\\ \noindent $\boldsymbol H\boldsymbol 2$. Either $f^{\prime\prime}>\alpha>0$, for some $\alpha$, or the zeros of $f^\prime$ and $f^{\prime\prime}$ are the same, i.e., if there exists $p\in\mathbb{R}$ such that $f^{\prime\prime}(p)=0$, then $f^{\prime}(p)=0.$\\ \noindent $\boldsymbol H\boldsymbol 3$. Either $g^{\prime\prime}>\alpha>0$, for some $\alpha$, or the zeros of $g^\prime$ and $g^{\prime\prime}$ are the same. \begin{theorem}{\label{m2}} Let $u_0\in L^\infty$. Let the fluxes satisfy $\boldsymbol H\boldsymbol 1$, $\boldsymbol H\boldsymbol 2$ and $\boldsymbol H\boldsymbol 3$ as above. Let the connection satisfy $A\neq \theta_g, B\neq \theta_f$; then \begin{eqnarray} TV(u(\cdotp,t), I(R(t))\cup I(L(t)))\leq C(t).\label{bv430} \end{eqnarray}\end{theorem} \begin{theorem}{\label{m1}} Let $u_0\in L^\infty$. Let the fluxes satisfy $\boldsymbol H\boldsymbol 1$, $\boldsymbol H\boldsymbol 2$ and $\boldsymbol H\boldsymbol 3$ as above. Let the connection satisfy $A=\theta_g$ or $B= \theta_f$.
If $f(\theta_f)\neq g(\theta_g),f(\theta_f)\neq g(0),f(0)\neq g(\theta_g),f(0)\neq g(0)$ and $\mbox{Supp}\ u_0 \subset [-M,M]$, for some $M>0,$ then there exists $T>0$ such that for all $t>T,$ \begin{eqnarray} TV(u(\cdotp,t), I(R(t))\cup I(L(t)))\leq C(t).\label{bv531} \end{eqnarray}\end{theorem} \begin{proof} (Proof of Theorem \ref{m2}). We consider the following three cases. \\ Case 1: $R(t)>0, L(t)=0$.\\ Case 2: $R(t)=0, L(t)<0$.\\ Case 3: $R(t)>0,L(t)<0$.\\ \noindent Case 1: Since the characteristic speed is positive, for $x\in(0,R(t))$, we have \begin{align} u(x,t)=&(f^\prime)^{-1}\left(\frac{x}{t-t_+(x,t)}\right)\label{bv431}\\ =& \lambda_+(t_+(x,t))\label{bv432}\\ =& f^{-1}_+(\mbox{max}(-b^\prime_-(t_+(x,t)),f(B))).\label{bv433} \end{align} Now $b^\prime_-$ satisfies the following relations \begin{align} b^\prime_-(t_+(x,t))=\left\{\begin{array}{lll} \displaystyle-g\left((g^\prime)^{-1}\left(-\frac{y_-(t_+(x,t),u_0,g)}{t_+(x,t)}\right)\right) &\mbox{if}&y_-(t_+(x,t),u_0,g)<0,\\ -g(\theta_g) &\mbox{if}& y_-(t_+(x,t),u_0,g)=0, \end{array}\right.\label{bv434} \end{align} where $t\mapsto y_-(t,u_0,g)$ is a non-increasing function. Let us define $\epsilon(t)=\sup\{ x>0 \ : y_-(t_+(x,t),u_0,g)<0\}$. If $\epsilon(t)=0$, then from (\ref{bv432}), (\ref{bv433}) and (\ref{bv434}), we have $u(x,t)=f^{-1}_+(\mbox{max}(g(\theta_g),f(B)))=\mbox{constant}$, for all $x\in (0,R(t))$, hence $u\in BV(0,R(t)).$ Now we assume that $\epsilon(t)>0.$ Due to the monotonicity of $y_-$, for all $x\in(0,\epsilon(t))$ \begin{align} y_-(t_+(0+,t),u_0,g)\leq y_-(t_+(x,t),u_0,g)\leq y_-(t_+(\epsilon(t),t),u_0,g)<0.
\label{bv435} \end{align} First we prove the result in $(0,\epsilon(t))$ then in $(\epsilon(t),R(t)).$ From the monotonicity of $y_-,t_+$, we conclude \begin{align} \begin{array}{llllllll} \displaystyle \inftyrac{-y_-(t_+(0+,t),u_0,g)}{t_+(\epsilon(t),t)}&\geq& \displaystyle\inftyrac{-y_-(t_+(x,t),u_0,g)}{t_+(\epsilon(t),t)}\\ &\geq&\displaystyle \inftyrac{-y_-(t_+(x,t),u_0,g)}{t_+(x,t)}\\ &\geq& \displaystyle\inftyrac{-y_-(t_+(\epsilon(t),t),u_0,g)}{t_+(x,t)}\\&\geq& \displaystyle\inftyrac{-y_-(t_+(\epsilon(t),t),u_0,g)}{t}>0. \end{array}\label{bv436} \end{align} From (\ref{bv436}), $\displaystyle \inftyrac{-y_-(t_+(x,t),u_0,g)}{t_+(x,t)}$ is away from $0$, hence \begin{align} (g^\prime)^{-1} \ \mbox{is Lipschitz continuous in} \ \left(\displaystyle\inftyrac{-y_-(t_+(\epsilon(t),t),u_0,g)}{t}, \displaystyle \inftyrac{-y_-(t_+(0+,t),u_0,g)}{t_+(\epsilon(t),t)} \right).\label{bv437} \end{align} Let us choose a partition $\{x_i\}^{N}_{i=1}$ in $(0,\epsilon(t))$. In view of the fact that $t_+,y_-$ are monotone and using (\ref{bv435}), (\ref{bv436}) to obtain \begin{equation} \begin{array}{lllllllllllll} \displaystyle\sum\limits_{i=1}^{N}\left|\inftyrac{-y_-(t_+(x_{i+1},t),u_0,g)}{t_+(x_{i+1},t)}+\inftyrac{y_-(t_+(x_i,t),u_0,g)}{t_+(x_i,t)}\right|\\ \leq \displaystyle\sum\limits_{i=1}^{N}\inftyrac{|y_-(t_+(x_{i+1},t),u_0,g)||t_+(x_{i},t)-t_+(x_{i+1},t)|} {|t_+(x_{i},t)t_+(x_{i+1},t)|}\\ + \displaystyle\sum\limits_{i=1}^{N}\inftyrac{|t_+(x_{i+1},t)| |y_-(t_+(x_{i+1},t),u_0,g)-y_-(t_+(x_{i},t),u_0,g)| }{|t_+(x_{i},t)t_+(x_{i+1},t)|}\\ \displaystyle\leq \inftyrac{|y_-(t_+(0+,t),u_0,g)||t-t_+(\epsilon(t),t)|}{|t_+(\epsilon(t),t)|^2}+\displaystyle\inftyrac{t |y_-(t_+(0+,t),u_0,g)|}{|t_+(\epsilon(t),t)|^2}\\ \displaystyle\leq \inftyrac{2t|y_-(t_+(0+,t),u_0,g)|}{|t_+(\epsilon(t),t)|^2}. 
\end{array}\label{bv438} \end{equation} Since $B\neq \theta_f$, we conclude $f_+(\lambda_+(t_+(x,t)))=(\mbox{max}(-b^\prime_-(t_+(x,t)),f(B)))\geq f(B)>f(\theta_f)$, therefore \begin{equation} f_+^{-1} \ \mbox{is Lipschitz continuous in the interval} \ \big(f(B), \sup_{x\in (0,R(t)) }-b^\prime_-(t_+(x,t))\big).\label{bv440} \end{equation} Since $f,g\in C^2$ using (\ref{bv431}), (\ref{bv432}), (\ref{bv433}), (\ref{bv440}), (\ref{bv438}), we get \begin{align} \begin{array}{lllllllllllll} \displaystyle\sum\limits_{i=1}^{N}|u(x_{i+1},t)- u(x_{i},t)|\\ = \displaystyle\sum\limits_{i=1}^{N}\left|f^{-1}_+(\mbox{max}(-b^\prime_-(t_+(x_{i+1},t)),f(B))) -f^{-1}_+(\mbox{max}(-b^\prime_-(t_+(x_i,t)),f(B)))\right|\\ \leq C(t)\displaystyle\sum\limits_{i=1}^{N}\left|\mbox{max}(-b^\prime_-(t_+(x_{i+1},t)),f(B)) -\mbox{max}(-b^\prime_-(t_+(x_i,t)),f(B))\right|\\ \leq \displaystyle C(t)\sum\limits_{i=1}^{N}\left|b^\prime_-(t_+(x_{i+1},t))-b^\prime_-(t_+(x_i,t))\right|\\ =\displaystyle C(t)\sum\limits_{i=1}^{N}\left|\inftyrac{-y_-(t_+(x_{i+1},t),u_0,g)}{t_+(x_{i+1},t)}+\inftyrac{y_-(t_+(x_i,t),u_0,g)}{t_+(x_i,t)}\right|\\ \displaystyle\leq C(t) \inftyrac{2t|y_-(t_+(0+,t),u_0,g)|}{|t_+(\epsilon(t),t)|^2}. \end{array}\label{bv439} \end{align} Hence $u\in BV(0,\epsilon(t))$. Next, we prove the results in $(\epsilon(t),R(t))$. Due to the monotonicity of $t_+$, for $x\in (\epsilon(t),R(t))$, we have \begin{align}\label{bv441} \begin{array}{llll} t_+(\epsilon(t),t)\geq t_+(x,t)\geq t_+(R(t)-, t)\\ t- t_+(\epsilon(t),t)\leq t- t_+(x,t)\leq t-t_+(R(t)-, t)<t,\\ \end{array} \end{align} hence for some $C>0$, \begin{equation}\label{bv442} \begin{array}{lll} \displaystyle 0<\inftyrac{\epsilon(t)}{t} \leq \inftyrac{\epsilon(t)}{t-t_+(R(t)-, t)} \leq \inftyrac{x}{t-t_+(x, t)} \displaystyle \leq \inftyrac{R(t)}{t-t_+(x, t)} &\leq& \displaystyle\inftyrac{R(t)}{t-t_+(\epsilon(t), t)}\\ &\leq& \displaystyle \inftyrac{Ct}{t-t_+(\epsilon(t), t)}. 
\end{array} \end{equation} Whence from (\ref{bv442}), $\displaystyle \inftyrac{x}{t-t_+(x, t)}$ is away from $0$, which allows \begin{align}\label{bv443} (f^\prime)^{-1} \ \mbox{is Lipschitz continuous in}\ \displaystyle\left[\inftyrac{\epsilon(t)}{t}, \inftyrac{Ct}{t-t_+(\epsilon(t), t)}\right] \end{align} and the Lipschitz constant depends on $t.$ Let us choose a partition $\{x_i\}^{N}_{i=1}$ in $(\epsilon(t),R(t))$. Because of the monotonicity of $t_+$, (\ref{bv441}) and (\ref{bv442}) we observe \begin{align}\label{bv444} \begin{array}{lllllllllllllll} \displaystyle\sum\limits_{i=1}^{N} \left|\inftyrac{x_{i+1}}{t-t_+(x_{i+1},t)}-\inftyrac{x_{i}}{t-t_+(x_{i},t)}\right|\\ \leq \displaystyle\sum\limits_{i=1}^{N} \inftyrac{t|x_{i+1}-x_i| + |x_{i+1}t_+(x_i,t)-x_it_+(x_{i+1},t)|}{(t-t_+(x_{i+1},t))(t-t_+(x_{i},t))}\\ \leq \displaystyle\sum\limits_{i=1}^{N} \inftyrac{t|x_{i+1}-x_i| + |x_{i+1}t_+(x_i,t)-x_it_+(x_{i+1},t)|}{(t-t_+(\epsilon(t),t))^2}\\ \leq \displaystyle\sum\limits_{i=1}^{N} \inftyrac{t|x_{i+1}-x_i| + |x_{i+1}||t_+(x_i,t)-t_+(x_{i+1},t)|+t_+(x_{i+1},t)|x_{i+1}-x_i|}{(t-t_+(\epsilon(t),t))^2}\\ \leq \displaystyle \inftyrac{3R(t)t}{(t-t_+(\epsilon(t),t))^2}\\ \leq \displaystyle \inftyrac{3Ct^2}{(t-t_+(\epsilon(t),t))^2}. \end{array} \end{align} (\ref{bv431}), (\ref{bv443}) and (\ref{bv444}) yields \begin{equation} \begin{array}{lllllllllllll} \displaystyle\sum\limits_{i=1}^{N}|u(x_{i+1},t)- u(x_{i},t)|\\ = \displaystyle\sum\limits_{i=1}^{N} \left|(f^\prime)^{-1}\left(\inftyrac{x_{i+1}}{t-t_+(x_{i+1},t)}\right) -(f^\prime)^{-1}\left(\inftyrac{x_{i}}{t-t_+(x_{i},t)}\right)\right|\\ \leq \displaystyle\sum\limits_{i=1}^{N} C(t) \left|\inftyrac{x_{i+1}}{t-t_+(x_{i+1},t)} -\inftyrac{x_{i}}{t-t_+(x_{i},t)}\right|\\ \leq \displaystyle \inftyrac{3CC(t)t^2}{(t-t_+(\epsilon(t),t))^2}. \end{array} \end{equation} Hence for the Case 1, $u(\cdot,t)\in BV(0,R(t)).$ \noindent Case 2 and Case 3: When $L(t)<0$, one can prove the result exactly as $R(t)>0$. 
Therefore both cases follow exactly as in Case 1. Hence the theorem follows. \end{proof} \begin{proof} (Proof of Theorem \ref{m1}). Let us assume that $g(\theta_g)>f(\theta_f)$. Denote $0<\Delta_1=g(\theta_g)-f(\theta_f)$. We have to consider the following three cases. \noindent Case 1: $R(t)>0, L(t)=0$.\\ \noindent Case 2: $R(t)=0, L(t)<0$.\\ \noindent Case 3: $R(t)>0,L(t)<0$. \noindent Case 1: In this case, \begin{equation}\label{bv31} u(x,t)=(f^\prime)^{-1}\left(\frac{x}{t-t_+(x,t)}\right)\ \mbox{for}\ x\in(0,R(t)). \end{equation} Let $\epsilon(t)>0$ be a small number such that $\epsilon(t)<R(t).$ First we prove the result in $(0,\epsilon(t))$ and then in $(\epsilon(t),R(t)).$ Suppose there exist constants $T_0>0, C >0$ such that \begin{equation}\label{bv32} |t_+(x,t)|<C, \ \mbox{for}\ t>T_0, x\in(0,R(t)). \end{equation} Since for $t>T_0$, $\epsilon(t),t_+(x,t)$ are bounded, from (\ref{bv31}), (\ref{bv32}) and the fact that the characteristic speed is positive for $x\in(0,R(t))$, there exists a small $\Delta_2>0$ such that \begin{equation}\label{bv33} u(x,t)\in [\theta_f,\theta_f+\Delta_2], \ \mbox{for} \ t>T_0,x\in (0,\epsilon(t)). \end{equation} One can re-choose $T_0$ large and $\Delta_2>0$ small such that $f(\theta_f+\Delta_2)<g(\theta_g)$ and (\ref{bv33}) still holds. From (\ref{bv33}), it is clear that for $t>T_0$, \begin{equation}\label{bv34} u(0+,t)\in [\theta_f,\theta_f+\Delta_2] \ \mbox{and so} \ f(u(0+,t))\in [f(\theta_f),g(\theta_g)). \end{equation} Therefore from (\ref{bv34}), it is easy to see that the R-H condition $f(u(0+,t))=g(u(0-,t))$ does not hold for $t>T_0,$ which is a contradiction. Hence (\ref{bv32}) is false and so for $ x\in(0,\epsilon(t)),$ \begin{align} \varlimsup\limits_{t\rightarrow \infty} t_+(x,t)=\infty .
\label{bv35} \end{align} From the R-H condition and the explicit formulas we have \begin{align} f(u(x,t))=f(u(0+,t_+(x,t)))&=g(u(0-,t_+(x,t)))\label{bv46}\\ &=g\left(\left(g^\prime\right)^{-1}\left(-\frac{y_-(0-,t_+(x,t))}{t_+(x,t)}\right)\right)\label{bv36}\\ &=g(u_0(y_-(0-,t_+(x,t)))).\label{bv37} \end{align} \par Now if for some $x_0\in (0,\epsilon(t))$, $y_-(0-,t_+(x_0,t))<-M,$ then by the monotonicity of $y_-$, $y_-(0-,t_+(x,t))<-M$ for all $x\in (0,x_0).$ Since $\mbox{Supp}\ u_0\subset [-M,M]$ and (\ref{bv37}), we obtain $u(x,t)=f^{-1}g(0)$. Hence, choosing $\epsilon(t)=x_0$, one has $u(\cdot,t)\in BV (0,\epsilon(t))$. \par Let us now consider the case when \begin{align} y_-(0-,t_+(x,t))\in [-M,0] \ \mbox{for all}\ x\in (0,\epsilon(t)).\label{bv38} \end{align} Using (\ref{bv35}), (\ref{bv36}) and (\ref{bv38}), it is immediate that $\displaystyle-\frac{y_-(0-,t_+(x,t))}{t_+(x,t)}\rightarrow 0$ as $t\rightarrow\infty$. In view of the fact that the characteristic speed is positive in $(0,R(t))$, there exist a small $\Delta_3>0$ and a large $T_0$ (re-choosing the previous one) such that for all $x\in(0,\epsilon(t)), t>T_0$ \begin{align} g\left(\left(g^\prime\right)^{-1}\left(-\frac{y_-(0-,t_+(x,t))}{t_+(x,t)}\right)\right)\in [g(\theta_g), g(\theta_g)+\Delta_3]. \label{bv39} \end{align} Thanks to $g(\theta_g)>f(\theta_f)$ and (\ref{bv39}), we get \begin{align}\label{bv44} f^{-1}\ \mbox{is Lipschitz continuous in} \ [g(\theta_g), g(\theta_g)+\Delta_3] \end{align} and the Lipschitz constant depends on $t$.
Since $y_+,t_+$ are monotone functions we have the following relations \begin{eqnarray} t_+(\epsilon(t),t)\leq t_+(x,t)<t, \ \mbox{for all} \ x\in (0,\epsilon(t))\label{bv40}\\ -M\leq y_-(0-,t_+(x,t))\leq y_-(0-,t_+(\epsilon(t),t))\leq 0,\ \mbox{for all} \ x\in (0,\epsilon(t)) \label{bv41}\\ 0<-\inftyrac{y_-(0-,t_+(\epsilon(t),t))}{t}< -\inftyrac{y_-(0-,t_+(\epsilon(t),t))}{t_+(x,t)}\\\leq -\inftyrac{y_-(0-,t_+(x,t))}{t_+(x,t)}\leq -\inftyrac{y_-(0-,t_+(x,t))}{t_+(\epsilon(t),t)} \leq \inftyrac{M}{t_+(\epsilon(t),t)}, \label{bv42}\\ \mbox{for all} \ x\in (0,\epsilon(t)).\nonumber \end{eqnarray} Therefore from (\ref{bv42}), it is clear that for a fixed $t>T_0$, $\displaystyle-\inftyrac{y_-(0-,t_+(x,t))}{t_+(x,t)}$ is away from $0$, hence \begin{align}\label{bv45} (g^\prime)^{-1}\ \mbox{is Lipschitz continuous in } \ \displaystyle\left(-\inftyrac{y_-(0-,t_+(\epsilon(t),t))}{t},\inftyrac{M}{t_+(\epsilon(t),t)}\right) \end{align} and the Lipschitz constant depends on $t$. Let us choose a partition $\{x_i\}^{N}_{i=1}$ in $(0,\epsilon(t))$. 
In view of the fact that $t_+,y_-$ are monotone and using (\ref{bv40}), (\ref{bv41}), (\ref{bv42}), to obtain \begin{equation} \begin{array}{lllllllllllll} \displaystyle\sum\limits_{i=1}^{N}\left|-\inftyrac{y_-(0-,t_+(x_{i+1},t))}{t_+(x_{i+1},t)}+\inftyrac{y_-(0-,t_+(x_{i},t))}{t_+(x_{i},t)}\right|\\ \leq \displaystyle\sum\limits_{i=1}^{N}\inftyrac{|y_-(0-,t_+(x_{i+1},t))||t_+(x_{i},t)-t_+(x_{i+1},t)|}{|t_+(x_{i},t)t_+(x_{i+1},t)|}\\ + \displaystyle\sum\limits_{i=1}^{N}\inftyrac{|t_+(x_{i+1},t)||y_-(0-,t_+(x_{i+1},t))-y_-(0-,t_+(x_{i},t))| }{|t_+(x_{i},t)t_+(x_{i+1},t)|}\\ \displaystyle\leq \inftyrac{M |t-t_+(\epsilon(t),t)|}{|t_+(\epsilon(t),t)|^2} + \inftyrac{Mt}{|t_+(\epsilon(t),t)|^2}\\ \displaystyle \leq \inftyrac{2Mt}{|t_+(\epsilon(t),t)|^2} \end{array}\label{bv43} \end{equation} As a results of (\ref{bv46}), (\ref{bv36}), (\ref{bv44}), (\ref{bv45}) and (\ref{bv43}), we get \begin{equation} \begin{array}{lllllllllllll} \sum\limits_{i=1}^{N}|u(x_{i+1},t)- u(x_{i},t)|\\ = \sum\limits_{i=1}^{N}\left|f^{-1}g\left(\left(g^\prime\right)^{-1}\left(-\inftyrac{y_-(0-,t_+(x_{i+1},t))}{t_+(x_{i+1},t)}\right)\right) -f^{-1}g\left(\left(g^\prime\right)^{-1}\left(-\inftyrac{y_-(0-,t_+(x_{i},t))}{t_+(x_i,t)}\right)\right)\right|\\ \leq C_1(t)\sum\limits_{i=1}^{N}\left|g\left(\left(g^\prime\right)^{-1}\left(-\inftyrac{y_-(0-,t_+(x_{i+1},t))}{t_+(x_{i+1},t)}\right)\right) -g\left(\left(g^\prime\right)^{-1}\left(-\inftyrac{y_-(0-,t_+(x_{i},t))}{t_+(x_i,t)}\right)\right)\right|\\ \leq C_2 C_1(t)\sum\limits_{i=1}^{N}\left|(g^\prime)^{-1}\left(-\inftyrac{y_-(0-,t_+(x_{i+1},t))}{t_+(x_{i+1},t)}\right) -(g^\prime)^{-1}\left(-\inftyrac{y_-(0-,t_+(x_{i},t))}{t_+(x_i,t)}\right)\right|\\ \leq C_3(t)C_2 C_1(t)\sum\limits_{i=1}^{N}\left|\left(-\inftyrac{y_-(0-,t_+(x_{i+1},t))}{t_+(x_{i+1},t)}\right) -\left(-\inftyrac{y_-(0-,t_+(x_{i},t))}{t_+(x_i,t)}\right)\right|\\ \leq C_3(t)C_2 C_1(t) \inftyrac{2M |t-t_+(\epsilon(t),t)|}{|t_+(\epsilon(t),t)|^2}. 
\end{array} \end{equation} Hence $u\in BV(0,\epsilon(t))$. Next, we prove the results in $(\epsilon(t),R(t))$. Due to the monotonicity of $t_+$, for $x\in (\epsilon(t),R(t))$, we have \begin{align}\label{bv47} \begin{array}{llll} t_+(\epsilon(t),t)\geq t_+(x,t)\geq t_+(R(t)-, t)\\ t- t_+(\epsilon(t),t)\leq t- t_+(x,t)\leq t-t_+(R(t)-, t)<t.\\ \end{array} \end{align} Whence \begin{equation}\label{bv48} \begin{array}{lll} \displaystyle 0<\inftyrac{\epsilon(t)}{t} < \inftyrac{x}{t-t_+(x, t)}< \displaystyle \inftyrac{C_4t}{t-t_+(\epsilon(t), t)}, \end{array} \end{equation} for some constant $C_4>0.$ For a fix $t>T_0$, it is clear from (\ref{bv48}) that $\displaystyle \inftyrac{x}{t-t_+(x, t)}$ is away from $0$, which allows \begin{align}\label{bv49} (f^\prime)^{-1} \ \mbox{is Lipschitz continuous in}\ \displaystyle\left[\inftyrac{\epsilon(t)}{t}, \inftyrac{C_4t}{t-t_+(\epsilon(t), t)}\right] \end{align} and the Lipschitz constant depends on $t.$ Let us choose a partition $\{x_i\}^{N}_{i=1}$ in $(\epsilon(t),R(t))$. 
Because of the monotonicity of $t_+$, (\ref{bv47}) and (\ref{bv48}) we observe \begin{align}\label{bv50} \begin{array}{lllllllllllllll} \displaystyle\sum\limits_{i=1}^{N} \left|\inftyrac{x_{i+1}}{t-t_+(x_{i+1},t)}-\inftyrac{x_{i}}{t-t_+(x_{i},t)}\right|\\ \leq \displaystyle\sum\limits_{i=1}^{N} \inftyrac{t|x_{i+1}-x_i| + |x_{i+1}t_+(x_i,t)-x_it_+(x_{i+1},t)|}{(t-t_+(\epsilon(t),t))^2}\\ \leq \displaystyle\sum\limits_{i=1}^{N} \inftyrac{t|x_{i+1}-x_i| + |x_{i+1}||t_+(x_i,t)-t_+(x_{i+1},t)|+t_+(x_{i+1},t)|x_{i+1}-x_i|}{(t-t_+(\epsilon(t),t))^2}\\ \leq \displaystyle \inftyrac{3R(t)t}{(t-t_+(\epsilon(t),t))^2}\\ \leq \displaystyle \inftyrac{3C_4t^2}{(t-t_+(\epsilon(t),t))^2} \end{array} \end{align} (\ref{bv31}), (\ref{bv49}) and (\ref{bv50}) yields \begin{equation} \begin{array}{lllllllllllll}\label{bv51} \displaystyle\sum\limits_{i=1}^{N}|u(x_{i+1},t)- u(x_{i},t)|\\ = \displaystyle\sum\limits_{i=1}^{N} \left|(f^\prime)^{-1}\left(\inftyrac{x_{i+1}}{t-t_+(x_{i+1},t)}\right) -(f^\prime)^{-1}\left(\inftyrac{x_{i}}{t-t_+(x_{i},t)}\right)\right|\\ \leq \displaystyle\sum\limits_{i=1}^{N} C_5(t) \left|\inftyrac{x_{i+1}}{t-t_+(x_{i+1},t)} -\inftyrac{x_{i}}{t-t_+(x_{i},t)}\right|\\ \leq \displaystyle \inftyrac{3CC_5(t)t^2}{(t-t_+(\epsilon(t),t))^2}. \end{array} \end{equation} Hence for the Case 1, $u(\cdot,t)\in BV(0,R(t)).$ \noindent Case 2: For $x\in (L(t),0)$, we have the following \begin{align} u(x,t)&=\left(g^\prime\right)^{-1}\left(\inftyrac{x}{t-t_-(x,t)}\right)\label{bv52}\\ &=u(0-,t_-(x,t))\label{bv53}\\ &=g^{-1}f\left((f^\prime)^{-1}\left(-\inftyrac{y_+(0+,t_-(x,t))}{t_-(x,t)}\right)\right)\label{bv54}\\ &=g^{-1}f\left((u_0(y_+(x,t))\right)\label{bv55}. \end{align} We consider the following four subcases. \noindent Subcase I. For all $x\in (L(t),0)$, and for all $t>T_0$, $y_+(0+,t_-(x,t))\leq M$, $t_-(x,t)<C$, for some constant $C>0.$ \noindent Subcase II. 
For some $x_0\in (L(t),0)$, $y_+(0+,t_-(x_0,t))> M$ and for all $t>T_0$, for all $x\in (L(t),0)$, $t_-(x,t)<C$, for some constant $C>0.$ \noindent Subcase III. For some $x_0\in (L(t),0)$, for all $t>T_0$, $y_+(0+,t_-(x_0,t))> M$ and for $x\in (L(t),0)$, $\varlimsup\limits_{t\rightarrow\infty}t_-(0-,t)=\infty$. \noindent Subcase IV. For all $x\in (L(t),0)$, for all $t>T_0$, $y_+(0+,t_-(x,t))\leq M$ and $\varlimsup\limits_{t\rightarrow\infty}t_-(0-,t)=\infty$. \noindent Subcase I: Since $t_-(x,t)<C$, for $t>T_0$ we obtain \begin{align}\label{bv56} u(0-,t)=\displaystyle \lim\limits_{x\rightarrow 0-} u(x,t)= \displaystyle \lim\limits_{x\rightarrow 0-} \left(g^\prime\right)^{-1}\left(\frac{x}{t-t_-(x,t)}\right) =\left(g^\prime\right)^{-1}\left(0\right)=\theta_g. \end{align} In this Case 2, $R(t)=0.$ For $x>0,$ we have $\displaystyle u(x,t)=\left(f^\prime\right)^{-1}\left(\frac{x-y_+(x,t)}{t}\right)$. If $y_+(0+,t)>M$, then due to the compact support of $u_0$ and the monotonicity of $y_+$, \begin{align}\label{bv57} u(0+,t)=\displaystyle \lim\limits_{x\rightarrow 0+} u(x,t)= \displaystyle \lim\limits_{x\rightarrow 0+} 0=0. \end{align} As a result of the R-H condition, (\ref{bv56}) and (\ref{bv57}), we obtain $f(0)=g(\theta_g),$ which contradicts the assumption $f(0)\neq g(\theta_g).$ \par If $y_+(0+,t)<M$, then there exist a large $T_0>0$ and a small $\Delta>0$ such that for $x\in (0,\epsilon(t))$ \begin{align}\label{bv58} u(0+,t) \in [\theta_f-\Delta,\theta_f]. \end{align} Since $\Delta>0$ is small and $g(\theta_g)>f(\theta_f)$, (\ref{bv58}) violates the R-H condition $f(u(0+,t))=g(u(0-,t))$. Therefore Subcase I can never occur. \\ \noindent Subcase II: Since for some $x_0\in (L(t),0)$, $y_+(0+,t_-(x_0,t))>M$, from the monotonicity of $y_+$ and Supp $u_0\subset [-M,M]$, we deduce $u(x,t)=g^{-1}f(0),$ for $x\in(x_0,0)$, therefore $u\in BV(x_0,0)$.
Then one can repeat the same argument as in (\ref{bv51}) to prove $u\in BV(L(t),x_0).$\\ \noindent Subcase III: Follows as in Subcase II.\\ \noindent Subcase IV: Since $y_+(0+,t_-(x,t))\leq M$, for $x\in (L(t),0),$ it follows from (\ref{bv54}), arguing similarly as in (\ref{bv58}), that the R-H condition is contradicted. Therefore Subcase IV can never occur. \par This proves Case 2. \noindent Case 3: This case is not allowed due to the interface entropy condition (\ref{interface2}). \par Similarly one can repeat the arguments above for the case when $f(\theta_f)>g(\theta_g).$ Hence the theorem follows. \end{proof} \end{document}
\begin{document} \title{Nonlinearity in oscillating bridges} \author{Filippo GAZZOLA\\ {\small Dipartimento di Matematica del Politecnico, Piazza L. da Vinci 32 - 20133 Milano (Italy)}} \date{} \maketitle \begin{abstract} We first recall several historical oscillating bridges that, in some cases, led to collapses. Some of them are quite recent and show that, nowadays, oscillations in suspension bridges are not yet well understood. Next, we survey some attempts to model bridges with differential equations. Although these equations arise from quite different scientific communities, they display some common features. One of them, which we believe to be incorrect, is the acceptance of the linear Hooke law in elasticity. This law should be used only in presence of small deviations from equilibrium, a situation which does not occur in strongly oscillating bridges. Then we discuss a couple of recent models whose solutions exhibit self-excited oscillations, the phenomenon visible in real bridges. This suggests a different point of view in modeling equations and gives a strong hint how to modify the existing models in order to obtain a reliable theory. The purpose of this paper is precisely to highlight the necessity of revisiting classical models, to introduce reliable models, and to indicate the steps we believe necessary to reach this target.\par\noindent {\em AMS Subject Classification 2010: 74B20, 35G31, 34C15, 74K10, 74K20.} \end{abstract} \tableofcontents \eject \section{Introduction} The story of bridges is full of many dramatic events, such as uncontrolled oscillations which, in some cases, led to collapses. To get into the problem, we invite the reader to have a look at the videos \cite{assago,london,tacoma,volgograd}. These failures have to be attributed to the action of external forces, such as the wind or traffic loads, or to macroscopic mistakes in the projects. 
{From} a theoretical point of view, there is no satisfactory mathematical model which, up to nowadays, perfectly describes the complex behavior of bridges. And the lack of a reliable analytical model precludes precise studies both from numerical and engineering points of views.\par The main purpose of the present paper is to show the necessity of revisiting existing models since they fail to describe the behavior of real bridges. We will explain which are the weaknesses of the so far considered equations and suggest some possible improvements according to the fundamental rules of classical mechanics. Only with some nonlinearity and with a sufficiently large number of degrees of freedom several behaviors may be modeled. We do not claim to have a perfect model, we just wish to indicate the way to reach it. Much more work is needed and we explain what we believe to be the next steps.\par We first survey and discuss some historical events, we recall what is known in elasticity theory, and we describe in full detail the existing models. With this database at hand, our purpose is to analyse the oscillating behavior of certain bridges, to determine the causes of oscillations, and to give an explanation to the possible appearance of different kinds of oscillations, such as torsional oscillations. Due to the lateral sustaining cables, suspension bridges most emphasise these oscillations which, however, also appear in other kinds of bridges: for instance, light pedestrian bridges display similar behaviors even if their mechanical description is much simpler.\par According to \cite{goldstein}, chaos is a disordered and unpredictable behavior of solutions in a dynamical system. With this characterization, there is no doubt that chaos is somehow present in the disordered and unpredictable oscillations of bridges. 
From \cite[Section 11.7]{goldstein} we recall a general principle (GP) of classical mechanics: \begin{center} \begin{minipage}{162mm} {\bf (GP)} {\em The minimal requirements for a system of first-order equations to exhibit chaos is that they be nonlinear and have at least three variables.} \end{minipage} \end{center} This principle suggests that \begin{center} {\bf any model aiming to describe oscillating bridges should be nonlinear and with enough degrees of freedom.} \end{center} Most of the mathematical models existing in literature fail to satisfy (GP) and, therefore, must be accordingly modified. We suggest possible modifications of the corresponding differential equations and we believe that, if solved, this would lead to a better understanding of the underlying phenomena and, perhaps, to several practical actions for the plans of future bridges, as well as remedial measures for existing structures. In particular, one of the major scopes of this paper is to convince the reader that linear theories are not suitable for the study of bridges oscillations whereas, although they are certainly too naive, some recent nonlinear models do display self-excited oscillations as visible in bridges.\par In Section \ref{story}, we collect a number of historical events and observations about bridges, both suspended and not. A complete story of bridges is far beyond the scopes of the present paper and the choice of events is mainly motivated by the phenomena that they displayed. The description of the events is accompanied by comments of engineers and of witnesses, and by possible theoretical explanations of the observed phenomena. The described events are then used in order to figure out a common behavior of oscillating bridges; in particular, it appears that the requirements of (GP) must be satisfied. 
Recent events testify that the problems of controlling and forecasting bridges oscillations is still unsolved.\par In Section \ref{howto}, we discuss several equations appearing in literature as models for oscillating bridges. Most of them use in some point the well-known linear Hooke law (${\cal LHL}$ in the sequel) of elasticity. This is what we believe to be a major weakness, but not the only one, of all these models. This is also the opinion of McKenna \cite[p.16]{mckmonth}: \begin{center} \begin{minipage}{162mm} {\em We doubt that a bridge oscillating up and down by about 10 meters every 4 seconds obeys Hooke's law.} \end{minipage} \end{center} {From} \cite{britannica}, we recall what is known as ${\cal LHL}$. \begin{center} \begin{minipage}{162mm} {\em The linear Hooke law (${\bf {\cal LHL}}$) of elasticity, discovered by the English scientist Robert Hooke in 1660, states that for relatively small deformations of an object, the displacement or size of the deformation is directly proportional to the deforming force or load. Under these conditions the object returns to its original shape and size upon removal of the load. ... At relatively large values of applied force, the deformation of the elastic material is often larger than expected on the basis of ${\cal LHL}$, even though the material remains elastic and returns to its original shape and size after removal of the force. ${\cal LHL}$ describes the elastic properties of materials only in the range in which the force and displacement are proportional.} \end{minipage} \end{center} Hence, by no means one should use ${\cal LHL}$ in presence of large deformations. In such case, the restoring elastic force $f$ is ``more than linear''. Instead of having the usual form $f(s)=ks$, where $s$ is the displacement from equilibrium and $k>0$ depends on the elasticity of the deformed material, it has an additional superlinear term $\varphi(s)$ which becomes negligible for small displacements $s$. 
More precisely, $$f(s)=ks+\varphi(s)\qquad\mbox{with}\qquad\lim_{s\to0}\frac{\varphi(s)}{s}=0\ .$$ The simplest example of such term is $\varphi(s)=\varepsilon s^p$ with $\varepsilon>0$ and $p>1$; this superlinear term may become arbitrarily small for $\varepsilon$ small and/or $p$ large. Therefore, the parameters $\varepsilon$ and $p$, which do exist, may be chosen in such a way to describe with a better precision the elastic behavior of a material when large displacements are involved. As we shall see, this apparently harmless and tiny nonlinear perturbation has devastative effects on the models and, moreover, it is amazingly useful to display self-excited oscillations as the ones visible in real bridges. On the contrary, linear models prevent to view the real phenomena which occur in bridges, such as the sudden increase of the width of their oscillations and the switch to different ones.\par The necessity of dealing with nonlinear models is by now quite clear also in more general elasticity problems; from the preface of the book by Ciarlet \cite{ciarletbook}, let us quote \begin{center} \begin{minipage}{162mm} {\em ... it has been increasingly acknowledged that the classical linear equations of elasticity, whose mathematical theory is now firmly established, have a limited range of applicability, outside of which they should be replaced by genuine nonlinear equations that they in effect approximate.} \end{minipage} \end{center} In order to model bridges, the most natural way is to view the roadway as a thin narrow rectangular plate. In Section \ref{elasticity}, we quote several references which show that classical linear elastic models for thin plates do not describe with a sufficient accuracy large deflections of a plate. But even linear theories present considerable difficulties and a further possibility is to view the bridge as a one dimensional beam; this model is much simpler but, of course, it prevents the appearance of possible torsional oscillations. 
This is the main difficulty in modeling bridges: find simple models which, however, display the same phenomenon visible in real bridges.\par In Section \ref{models} we survey a number of equations arising from different scientific communities. The first equations are based on engineering models and mainly focus the attention on quantitative aspects such as the exact values of the parameters involved. Some other equations are more related to physical models and aim to describe in full detail all the energies involved. Finally, some of the equations are purely mathematical models aiming to reach a prototype equation and to prove some qualitative behavior. All these models have to face a delicate choice: either consider coupled behaviors between vertical and torsional oscillations of the roadway or simplify the model by decoupling these two phenomena. In the former case, the equations have many degrees of freedom and become terribly complicated: hence, very few results can be obtained. In the latter case, the model fails to satisfy the requirements of (GP) and appears too far from the real world.\par As a compromise between these two choices, in Section \ref{blup} we recall the model introduced in \cite{gazpav,gazpav3} which describes vertical oscillations and torsional oscillations of the roadway within the same simplified beam equation. The solution to the equation exhibits self-excited oscillations quite similar to those observed in suspension bridges. We do not believe that the simple equation considered models the complex behavior of bridges but we do believe that it displays the same phenomena as in more complicated models more closely related to bridges. In particular, finite time blow up occurs with wide oscillations. These phenomena are typical of differential equations of at least fourth order since they do not occur in lower order equations, see \cite{gazpav}. 
We also show that the same phenomenon is visible in a $2\times2$ system of nonlinear ODE's of second order related to a system suggested by McKenna \cite{mckmonth}.\par Putting it all together, in Section \ref{afford} we afford an explanation in terms of the energies involved. Starting from a survey of milestone historical sources \cite{bleich,tac2}, we attempt a qualitative but detailed energy balance and we attribute the appearance of torsional oscillations in bridges to some ``hidden'' elastic energy which is not visible since it involves second order derivatives of the displacement of the bridge: this suggests an analogy between bridge oscillations and a ball bouncing on the floor. The discovery of the phenomenon usually called in the literature {\em flutter speed} has to be attributed to Bleich \cite{bleichsolo}; in our opinion, the flutter speed should be seen as a {\bf critical energy threshold} which, if exceeded, gives rise to uncontrolled phenomena such as torsional oscillations. We give some hints on how to determine the critical energy threshold, according to some eigenvalue problems whose eigenfunctions describe the oscillating modes of the roadway.\par In bridges one should always expect vertical oscillations and, in case they become very large, also torsional oscillations; in order to display the possible transition between these two kinds of oscillations, in Section \ref{newmodel} we suggest a new equation as a model for suspension bridges, see \eq{truebeam}. With all the results and observations at hand, in Section \ref{possibleTacoma} we also attempt a detailed description of what happened on November 10, 1940, the day when the Tacoma Narrows Bridge collapsed. As far as we are aware a universally accepted explanation of this collapse is not yet available. Our explanation fits with all the material developed in the present paper. 
This allows us to suggest a couple of precautions when planning future bridges, see Section \ref{howplan}.\par We recently had the pleasure to participate to a conference on bridge maintenance, safety and management, see \cite{iabmas}. There were engineers from all over the world, the atmosphere was very enjoyable and the problems discussed were extremely interesting. And there was a large number of basic questions still unsolved, most of the results and projects had some percentage of incertitude. Many talks were devoted to suggest new equations to model the studied phenomena and to forecast the impact of new structural issues: even apparently simple problems are still unsolved. We believe this should be a strong motivation for many mathematicians (from mathematical physics, analysis, numerics) to get interested in bridges modeling, experiments, and performances. Throughout the paper we suggest a number of open problems which, if solved, could be a good starting point to reach a deeper understanding of oscillations in bridges. \section{What has been observed in bridges}\label{story} A simplified picture of a suspension bridge can be sketched as in Figure \ref{67} \begin{figure} \caption{Suspension bridges without girder and with girder.} \label{67} \end{figure} where one sees the difference between the elastic structure of a bridge without girder and the more stiff structure of a bridge with girder.\par Although the first project of a suspension bridge is due to the Italian engineer Verantius around 1615, see \cite{veranzio} and \cite[p.7]{navier2} or \cite[p.16]{kawada2}, the first suspension bridges were built only about two centuries later in Great Britain. 
According to \cite{bender}, \begin{center} \begin{minipage}{162mm} {\em The invention of the suspension bridges by Sir Samuel Brown sprung from the sight of a spider's web hanging across the path of the inventor, observed on a morning's walk, when his mind was occupied with the idea of bridging the Tweed.} \end{minipage} \end{center} Samuel Brown (1776-1852) was an early pioneer of suspension bridge design and construction. He is best known for the Union Bridge of 1820, the first vehicular suspension bridge in Britain.\par An event deserving mention is certainly the inauguration of the Menai Straits Bridge, in 1826. The project of the bridge was due to Thomas Telford and the opening of the bridge is considered as the beginning of a new science nowadays known as ``Structural Engineering''. The construction of this bridge had a huge impact in the English society, a group of engineers founded the ``Institution of Civil Engineers'' and Telford was elected the first president of this association. In 1839 the Menai Bridge collapsed due to a hurricane. In that occasion, unexpected oscillations appeared; Provis \cite{provis} provided the following description: \begin{center} \begin{minipage}{162mm} {\em ... the character of the motion of the platform was not that of a simple undulation, as had been anticipated, but the movement of the undulatory wave was oblique, both with respect to the lines of the bearers, and to the general direction of the bridge.} \end{minipage} \end{center} Also the Broughton Suspension Bridge was built in 1826. It collapsed in 1831 due to mechanical resonance induced by troops marching over the bridge in step. A bolt in one of the stay-chains snapped, causing the bridge to collapse at one end, throwing about 40 men into the river. As a consequence of the incident, the British Army issued an order that troops should ``break step'' when crossing a bridge. 
These two pioneering bridges already show how the wind and/or traffic loads, both vehicles and pedestrians, play a crucial negative role in the bridge stability.\par A further event deserving to be mentioned is the collapse of the Brighton Chain Pier, built in 1823. It collapsed a first time in 1833, it was rebuilt and partially destroyed once again in 1836. Both the collapses are attributed to violent windstorms. For the second collapse a witness, William Reid, reported valuable observations and sketched a picture illustrating the destruction \cite[p.99]{reid}, see Figure \ref{brighton} \begin{figure} \caption{Destruction of the Brighton Chain Pier.} \label{brighton} \end{figure} which is taken from \cite{rocard}. This is the first reliable report on oscillations appearing in bridges, the most intriguing part of the report being \cite{reid,rocard}: \begin{center} \begin{minipage}{162mm} {\em For a considerable time, the undulations of all the spans seemed nearly equal ... but soon after midday the lateral oscillations of the third span increased to a degree to make it doubtful whether the work could withstand the storm; and soon afterwards the oscillating motion across the roadway, seemed to the eye to be lost in the undulating one, which in the third span was much greater than in the other three; the undulatory motion which was along the length of the road is that which is shown in the first sketch; but there was also an oscillating motion of the great chains across the work, though the one seemed to destroy the other ...} \end{minipage} \end{center} More comments about this collapse are due to Russell \cite{russell}; in particular, he claims that \begin{center} \begin{minipage}{162mm} {\em ... the remedies I have proposed, are those by which such destructive vibrations would have been rendered impossible.} \end{minipage} \end{center} These two comments may have several interpretations. 
However, what appears absolutely clear is that different kinds of oscillations appeared (undulations, lateral oscillations, oscillation motion of the great chains) and some of them were considered destructive. Further details on the Brighton Chain Pier collapse may be found in \cite[pp.4-5]{bleich}.\par Some decades earlier, at the end of the eighteenth century, the German physicist Ernst Chladni was touring Europe and showing, among other things, the nodal line patterns of vibrating plates, see Figure \ref{patterns}. \begin{figure} \caption{Chladni patterns in a vibrating plate.} \label{patterns} \end{figure} Chladni's technique, first published in \cite{chl}, consisted of creating vibrations in a square-shaped metal plate whose surface was covered with light sand. The plate was bowed until it reached resonance, when the vibration caused the sand to concentrate along the nodal lines of vibrations, see \cite{chladniexperiment} for the modern version of the experiment. This simple but very effective way to display the nodal lines of vibrations was seen by Navier \cite{navier} as \begin{center} \begin{minipage}{162mm} {\em Les curieuses exp\'eriences de M. Chladni sur les vibrations des plaques...} \end{minipage} \end{center} It appears quite clearly from Figure \ref{patterns} how complicated may be the vibrations of a thin plate and hence, see Section \ref{elasticity}, of a bridge. And, indeed, the just described events testify that, besides the somehow expected vertical oscillations, also different kinds of oscillations may appear. For instance, one may have ``an oblique undulatory wave'' or some kind of resonance or the interaction with other structural components such as the suspension chains. 
The description of different coexisting forms of oscillations is probably the most important open problem in suspension bridges.\par It is not among the scopes of this paper to give the complete story of bridges collapses for which we refer to \cite[Section 1.1]{bleich}, to \cite[Chapter IV]{rocard}, to \cite{aer,tac1,hayden,ward}, to the recent monographs \cite{akesson,kawada2}, and also to \cite{bridgefailure} for a complete database. Let us just mention that between 1818 and 1889, ten suspension bridges suffered major damages or collapsed in windstorms, see \cite[Table 1, p.13]{tac1}, which is commented by \begin{center} \begin{minipage}{162mm} {\em An examination of the British press for the 18 years between 1821 and 1839 shows it to be more replete with disastrous news of suspension bridges troubles than Table 1 reveals, since some of these structures suffered from the wind several times during this period and a number of other suspension bridges were damaged or destroyed as a result of overloading.} \end{minipage} \end{center} The story of bridges, suspended and not, contains many further dramatic events, an amazing amount of bridges had troubles for different reasons such as the wind, the traffic loads, or macroscopic mistakes in the project, see e.g.\ \cite{hao,pearson}. Among them, the most celebrated is certainly the Tacoma Narrows Bridge, collapsed in 1940 just a few months after its opening, both because of the impressive video \cite{tacoma} and because of the large number of studies that it has inspired starting from the reports \cite{Tacoma1,bleich,tac1,tac3,tac4,tac2,tac5}.\par Let us recall some observations made on the Tacoma collapse. Since we were unable to find the Federal Report \cite{Tacoma1} that we repeatedly quote below, we refer to it by trusting the valuable historical research by Scott \cite{wake} and by McKenna and coauthors, see in particular \cite{mck1,mckmonth,mck,mck4}. A good starting point to describe the Tacoma collapse is... 
the Golden Gate Bridge, inaugurated a few years earlier, in 1937. This bridge is usually classified as ``very flexible'' although it is strongly stiffened by a thick girder, see Figure \ref{69}. \begin{figure} \caption{Girder at the Golden Gate Bridge.} \label{69} \end{figure} The original roadway was heavy and made with concrete; the weight was reduced in 1986 when a new roadway was installed, see \cite{perks}. Nowadays, in spite of the girder, the bridge can swing more than an amazing 8 meters and flex about 3 meters under big loads, which explains why the bridge is classified as very flexible. The huge mass involved and these large distances from equilibrium explain why ${\cal LHL}$ certainly fails. Due to high winds around 120 kilometers per hour, the Golden Gate Bridge has been closed, without suffering structural damage, only three times: in 1951, 1982, 1983, always during the month of December. A further interesting phenomenon is the appearance of traveling waves in 1938: in \cite[Appendix IX]{Tacoma1} (see also \cite{mck4}), the chief engineer of the Golden Gate Bridge writes \begin{center} \begin{minipage}{162mm} {\em ... I observed that the suspended structure of the bridge was undulating vertically in a wavelike motion of considerable amplitude ...} \end{minipage} \end{center} see also the related detailed description in \cite[Section 1]{mck4}. Hence, one should also expect traveling waves in bridges, see the sketched representation in the first picture in Figure \ref{nuove}. \begin{figure} \caption{Traveling waves and torsional motion in bridges without girder.} \label{nuove} \end{figure} All this may occur also in apparently stiff structures. 
And in presence of extremely flexible structures, these traveling waves can generate further dangerous phenomena such as torsional oscillations, see the second picture in Figure \ref{nuove}.\par When comparing the structure of the Golden Gate Bridge with the one of the original Tacoma Narrows Bridge, one immediately sees a main macroscopic difference: the thick girder sustaining the bridge, compare Figures \ref{69} and \ref{tacoma12}. The girder gives more stiffness to the bridge; this is certainly the main reason why in the Golden Gate Bridge no torsional oscillation ever appeared. A further reason is that larger widths of the roadway seem to prevent torsional oscillations, see \eq{speedflutter} below; from \cite[p.186]{rocard} we quote \begin{center} \begin{minipage}{162mm} {\em ... a bridge twice as wide will have exactly double the critical speed wind.} \end{minipage} \end{center} The Tacoma Bridge was rebuilt with a thick girder acting as a strong stiffening structure, see \cite{tac4}: as mentioned by Scanlan \cite[p.840]{scanlan}, \begin{center} \begin{minipage}{162mm} {\em the original bridge was torsionally weak, while the replacement was torsionally stiff.} \end{minipage} \end{center} The replacement of the original bridge opened in 1950, see \cite{tac4} for some remarks on the project, and still stands today as the westbound lanes of the present-day twin bridge complex, the eastbound lanes opened in 2007. Figure \ref{tacoma12} - picture by Michael Goff, Oregon Department of Transportation, USA - shows the striking difference between the original Tacoma Bridge collapsed in 1940 and the twin bridges as they are today. \begin{figure} \caption{The collapsed Tacoma Bridge and the current twins Tacoma Bridges.} \label{tacoma12} \end{figure} Let us go back to the original Tacoma Bridge: even if it was extremely flexible, it is not clear why torsional oscillations appeared. 
According to Scanlan \cite[p.841]{scanlan}, \begin{center} \begin{minipage}{162mm} {\em ... some of the writings of von K\'arm\'an leave a trail of confusion on this point. ... it can clearly be shown that the rhythm of the failure (torsion) mode has nothing to do with the natural rhythm of shed vortices following the K\'arm\'an vortex street pattern. ... Others have added to the confusion. A recent mathematics text, for example, seeking an application for a developed theory of parametric resonance, attempts to explain the Tacoma Narrows failure through this phenomenon.} \end{minipage} \end{center} Hence, Scanlan discards the possibility of the appearance of von K\'arm\'an vortices and raises doubts on the appearance of resonance which, indeed, is by now also discarded. Of course, it is reasonable to expect resonance in presence of a single-mode solicitation, such as for the Broughton Bridge. But for the Tacoma Bridge, Lazer-McKenna \cite[Section 1]{mck1} raise the question \begin{center} \begin{minipage}{162mm} {\em ... the phenomenon of linear resonance is very precise. Could it really be that such precise conditions existed in the middle of the Tacoma Narrows, in an extremely powerful storm?} \end{minipage} \end{center} So, no plausible explanation is available nowadays. In a letter \cite{farq}, Prof.\ Farquharson claimed that \begin{center} \begin{minipage}{162mm} {\em ... a violent change in the motion was noted. This change appeared to take place without any intermediate stages and with such extreme violence ... The motion, which a moment before had involved nine or ten waves, had shifted to two.} \end{minipage} \end{center} All this happened under not extremely strong winds, about 80km/h, and under a relatively high frequency of oscillation, about 36cpm, see \cite[p.23]{tac1}. 
See \cite[Section 2.3]{mckmonth} for more details and for the conclusion that \begin{center} \begin{minipage}{162mm} {\em there is no consensus on what caused the sudden change to torsional motion.} \end{minipage} \end{center} This is confirmed by the following ambiguous comments taken from \cite[Appendix D]{bleich}: \begin{center} \begin{minipage}{162mm} {\em If vertical and torsional oscillations occur, they must be caused by vertical components of wind forces or by some structural action which derives vertical reactions from a horizontally acting wind.} \end{minipage} \end{center} This part is continued in \cite{bleich} by stating that there exist references to both alternatives and that \begin{center} \begin{minipage}{162mm} {\em A few instrumental measurements have been made ... which showed the wind varying up to 8 degrees from the horizontal. Such variation from the horizontal is not the only, and perhaps not the principal source of vertical wind force on a structure.} \end{minipage} \end{center} Besides the lack of consensus on the causes of the switch between vertical and torsional oscillations, all the above comments highlight a strong instability of the oscillation motion as if, after reaching some critical energy threshold, an impulse (a Dirac delta) generated a new unexpected motion. 
Refer to Section \ref{conclusions} for our own interpretation of this phenomenon which is described in \cite{Tacoma1,bleich} (see also \cite[pp.50-51]{wake}) as: \begin{center} \begin{minipage}{162mm} {\em large vertical oscillations can rapidly change, almost instantaneously, to a torsional oscillation.} \end{minipage} \end{center} We do not completely agree with this description since a careful look at \cite{tacoma} shows that vertical oscillations continue also after the appearance of torsional oscillations; in the video, one sees that at the beginning of the bridge the street-lamps oscillate in opposition of phase when compared with the street-lamps at the end of the bridge. So, the phenomenon which occurs may be better described as follows: \begin{center} \begin{minipage}{162mm} {\bf large vertical oscillations can rapidly create, almost instantaneously, additional torsional oscillations.} \end{minipage} \end{center} Roughly speaking, we believe that part of the energy responsible for vertical oscillations switches to another energy which generates torsional oscillations; the switch occurs without intermediate stages as if an impulse were responsible for it. Our own explanation of this fact is that \begin{center} \begin{minipage}{162mm} {\bf since vertical oscillations cannot be continued too far downwards below the equilibrium position due to the hangers, when the bridge reaches some limit horizontal position with large kinetic energy, part of the energy transforms into elastic energy and generates a crossing wave, namely a torsional oscillation.} \end{minipage} \end{center} We make this explanation more precise in Section \ref{energybalance}, after some further observations. In order to explain the ``switch of oscillations'' several mathematical models were suggested in the literature. 
In the next section we survey some of these models which are quite different from each other although they have some common features.\par The Deer Isle Bridge, see Figure \ref{deer}, \begin{figure} \caption{The Deer Isle Bridge (left) and the Bronx-Whitestone Bridge (right).} \label{deer} \end{figure} is a suspension bridge in the state of Maine (USA) which encountered wind stability problems similar to those of the original Tacoma Bridge. Before the bridge was finished, in 1939, the wind induced motion in the relatively lightweight roadway. Diagonal stays running from the sustaining cables to the stiffening girders on both towers were added to stabilize the bridge. Nevertheless, the oscillations of the roadway during some windstorms in 1942 caused extensive damage and destroyed some of the stays. At that time everybody had the collapse of the Tacoma Bridge in mind, so that stronger and more extensive longitudinal and transverse diagonal stays were added. In her report \cite{moran}, Barbara Moran wrote \begin{center} \begin{minipage}{162mm} {\em The Deer Isle Bridge was built at the same time as the Tacoma Narrows, and with virtually the same design. One difference: it still stands.} \end{minipage} \end{center} This shows strong instability: even if two bridges are considered similar they can react differently to external solicitations. Of course, much depends on what is meant by ``virtually similar''...\par The Bronx-Whitestone Bridge, displayed in Figure \ref{deer}, was built in New York in 1939 and has shown an intermittent tendency to mild vertical motion from the time the floor system was installed. The reported motions have never been very large, but were noticeable to the traveling public. Several successive steps were taken to stabilize the structure, see \cite{anon}. Midspan diagonal stays and friction dampers at the towers were first installed; these were later supplemented by diagonal stayropes from the tower tops to the roadway level. 
However, even these devices were not entirely adequate and in 1946 the roadway was stiffened by the addition of truss members mounted above the original plate girders, the latter becoming the lower chords of the trusses \cite{ammann,pavlo}. This is a typical example of bridge built without considering all the possible external effects, subsequently damped by means of several unnatural additional components. Our own criticism is that \begin{center} \begin{minipage}{162mm} {\bf instead of just solving the problem, one should understand the problem.} \end{minipage} \end{center} And precisely in order to understand the problem, we described above those events which displayed the pure elastic behavior of bridges. These were mostly suspension bridges without girders and were free to oscillate. This is a good reason why the Tacoma collapse should be further studied for deeper knowledge: it displays the pure motion without stiffening constraints which hide the elastic features of bridges.\par The Tacoma Bridge collapse is just the most celebrated and dramatic evidence of oscillating bridge but bridges oscillations are still not well understood nowadays. On May 2010, the Russian authorities closed the Volgograd Bridge to all motor traffic due to its strong vertical oscillations (traveling waves) caused by windy conditions, see \cite{volgograd} for the BBC report and video. Once more, these oscillations may appear surprising since the Volgograd Bridge is a concrete girder bridge and its stiffness should prevent oscillations. However, it seems that strong water currents in the Volga river loosened one of the bridge's vertical supports so that the stiffening effect due to the concrete support was lost and the behavior became more similar to that of a suspension bridge. The bridge remained closed while it was inspected for damage. As soon as the original effect was restored the bridge reopened for public access. 
In Figure \ref{volgabridge} the reader finds pictures of the bridge and of the damped sustaining support. \begin{figure} \caption{The Volgograd Bridge.} \label{volgabridge} \end{figure} These pictures are taken from \cite{volgobridge}, where one can also find full details on the damping system of the bridge. The Volgograd Bridge well shows how oscillation induced fatigue of the structural members of bridges is a major factor limiting the life of the bridge. In \cite{kawada} one may find a mathematical analysis and wind tunnel tests for examining oscillations which occur under ``constant low wind'', rather than under violent windstorms: \begin{center} \begin{minipage}{162mm} {\em Limited oscillation could even cause a collapse of light suspension bridges in a reasonably short time.} \end{minipage} \end{center} As already observed, the wind is not the only possible external source which generates bridges oscillations which also appear in pedestrian bridges where lateral swaying is the counterpart of torsional oscillation. In June 2000, the very same day when the London Millennium Bridge opened and the crowd streamed on it, the bridge started to sway from side to side, see \cite{london}. Many pedestrians fell spontaneously into step with the vibrations, thereby amplifying them. According to Sanderson \cite{sanderson}, the bridge wobble was due to the way people balanced themselves, rather than the timing of their steps. Therefore, the pedestrians acted as negative dampers, adding energy to the bridge's natural sway. Macdonald \cite[p.1056]{macdonald} explains this phenomenon by writing \begin{center} \begin{minipage}{162mm} {\em ... 
above a certain critical number of pedestrians, this negative damping overcomes the positive structural damping, causing the onset of exponentially increasing vibrations.} \end{minipage} \end{center} Although we have some doubts about the real meaning of ``exponentially increasing vibrations'' we have no doubts that this description corresponds to a superlinear behavior which has also been observed in several further pedestrian bridges, see \cite{franck} and \cite{zivanovic} from which we quote \begin{center} \begin{minipage}{162mm} {\em ... damping usually increases with increasing vibration magnitude due to engagement of additional damping mechanisms.} \end{minipage} \end{center} The Millennium Bridge was made secure by adding some stiffening trusses below the girder, see Figure \ref{LMB} (Photo $\copyright$ Peter Visontay). \begin{figure} \caption{The London Millennium Bridge.} \label{LMB} \end{figure} The mathematical explanation of this solution is that trusses lessen swaying and force the bridge to remain closer to its equilibrium position, that is, closer to a linear behavior as described by ${\cal LHL}$. Although trusses delay the appearance of the superlinear behavior, they do not solve completely the problem as one may wonder what would happen if 10.000 elephants would simultaneously walk through the Millennium Bridge... In this respect, let us quote from \cite[p.13]{tac1} a comment on suspension bridges strengthened by stiffening girders: \begin{center} \begin{minipage}{162mm} {\em That significant motions have not been recorded on most of these bridges is conceivably due to the fact that they have never been subjected to optimum winds for a sufficient period of time.} \end{minipage} \end{center} Another pedestrian bridge, the Assago Bridge in Milan (310m long), had a similar problem. 
In February 2011, just after a concert the public crossed the bridge and, suddenly, swaying became so violent that people could hardly stand, see \cite{fazzo} and \cite{assago}. Even worse was the subsequent panic effect when the crowd started running in order to escape from a possible collapse; this amplified swaying but, quite luckily, nobody was injured. In this case, the project did not take into account that a large number of people would go through the bridge just after the events; when swaying started there were about 1.200 pedestrians on the footbridge. This problem was solved by adding positive dampers, see \cite{stella}.\par According to \cite{bridgefailure}, around 400 recorded bridges failed for several different reasons and the ones that failed after the year 2000 are more than 70. Probably, some years after publication of this paper, these numbers will have increased considerably... The database \cite{bridgefailure} consists mainly of brief descriptions and statistics for each bridge failure: location, number of fatalities/injuries, etc. rather than in-depth analysis of the cause of the failure for which we refer to the nice book by Akesson \cite{akesson}.\par As we have seen, the reasons for failures are of different kinds. Firstly, strong and/or continued winds: these may cause wide vertical oscillations which may switch to different kinds of oscillations. Especially for suspension bridges the latter phenomenon appears quite evident, due to the many elastic components (cables, hangers, towers, etc.) which appear in it. A second cause is traffic loads, such as some precise resonance phenomenon, or some unpredictable synchronised behavior, or some unexpected huge load; these problems are quite common in many different kinds of bridges. 
Finally, a third cause are mistakes in the project; these are both theoretical, for instance assuming ${\cal LHL}$, and practical, such as wrong assumptions on the possible maximum external actions.\par After describing so many disasters, we suggest a joke which may sound as a provocation. Since many bridges projects did not forecast oscillations it could be more safe to build old-fashioned rock bridges, such as the Roman aqueduct built in Segovia (Spain) during the first century and still in perfect shape and in use. Of course, we are not suggesting here to replace the Golden Gate Bridge with a Roman-style bridge! But we do suggest to plan bridges by taking into account all possible kinds of solicitations. Moreover, we suggest not to hide unsolved problems with some unnatural solutions such as stiff and heavy girders or more extensive longitudinal and transverse diagonal stays, see Section \ref{howplan} for more suggestions.\par Throughout this section we listed a number of historical events about bridges. They taught us the following facts:\par 1. Self-excited oscillations appear in bridges. Often this is somehow unexpected since the project does not take into account several external strong and/or prolonged effects. And even if expected, oscillations can be much wider than estimated.\par 2. Oscillations can be extenuated by stiffening the structure or by adding positive (and heavy, and expensive) dampers to the structure. However, none of these solutions can completely prevent oscillations, especially in presence of highly unfavorable events such as strong and prolonged winds, not necessarily hurricanes, or heavy and synchronised traffic loads. Due to the unnatural stiffness of the structure, trusses and dampers may cause cracks, see \cite{crack} and references therein; but we leave this problem to engineers... see \cite{kawada2}.\par 3. The oscillations are amplified by an observable superlinear effect. 
The farther the bridge is from its equilibrium position, the more relevant the impact of external forces becomes. It is by now well understood that suspension bridges behave nonlinearly, see e.g.\ \cite{brown,lacarbonara2}.\par 4. In extremely flexible bridges, such as the Tacoma Bridge which had no stiffening truss, vertical oscillations can partially switch to torsional oscillations and even to more complicated oscillations, see the pictures in Figure \ref{kable} which are taken from \cite[p.143]{cable} \begin{figure} \caption{Combined oscillations of a bridge roadway.} \label{kable} \end{figure} and see also \cite[pp.94-95]{rocard} and Figure \ref{ghaffer} below. This occurs when vertical oscillations become too large and, in such a case, vertical and torsional oscillations coexist. Up to now, there is no convincing explanation of why the switch occurs. \section{How to model bridges}\label{howto} The amazing number of failures described in the previous section shows that the existing theories and models are not adequate to describe the statics and the dynamics of oscillating bridges. In this section we survey different points of view, different models, and we underline their main weaknesses. We also suggest how to modify them in order to fulfill the requirements of (GP). \subsection{A quick overview on elasticity: from linear to semilinear models}\label{elasticity} A quite natural way to describe the bridge roadway is to view it as a thin rectangular plate. This is also the opinion of Rocard \cite[p.150]{rocard}: \begin{center} \begin{minipage}{162mm} {\em The plate as a model is perfectly correct and corresponds mechanically to a vibrating suspension bridge...} \end{minipage} \end{center} In this case, a commonly adopted theory is the linear one by Kirchhoff-Love \cite{kirchhoff,love}, see also \cite[Section 1.1.2]{gazgruswe}, which we briefly recall. The bending energy of a plate involves curvatures of the surface. 
Let $\kappa_1$, $\kappa_2$ denote the principal curvatures of the graph of a smooth function $u$ representing the deformation of the plate, then a simple model for the bending energy of the deformed plate $\Omega$ is \neweq{curva} \mathbb{E}(u)=\int_\Omega\left(\frac{\kappa_1^2}{2}+\frac{\kappa_2^2}{2}+\sigma\kappa_1\kappa_2\right)\, dx_1dx_2 \end{equation} where $\sigma$ denotes the Poisson ratio defined by $\sigma=\frac{\lambda}{2\left(\lambda +\mu \right)}$ with the so-called Lam\'e constants $\lambda,\mu $ that depend on the material. For physical reasons it holds that $\mu >0$ and usually $\lambda \geq 0$ so that $0\le\sigma<\frac{1}{2}$. In the linear theory of elastic plates, for small deformations $u$ the terms in \eq{curva} are considered to be purely quadratic with respect to the second order derivatives of $u$. More precisely, for small deformations $u$, one has $$(\kappa_1+\kappa_2)^2\approx(\Delta u)^2\ ,\quad\kappa_1\kappa_2\approx\det(D^2u)=(u_{x_1x_1}u_{x_2x_2}-u_{x_1x_2}^{2})\ ,$$ and therefore $$\frac{\kappa_1^2}{2}+\frac{\kappa_2^2}{2}+\sigma\kappa_1\kappa_2\approx\frac{1}{2}(\Delta u)^2+(\sigma-1)\det(D^2u).$$ Then \eq{curva} yields \neweq{energy-gs} \mathbb{E}(u)=\int_{\Omega }\left(\frac{1}{2}\left( \Delta u\right) ^{2}+(\sigma-1)\det(D^2u)\right) \, dx_1dx_2\, . \end{equation} Note that for $-1<\sigma<1$ the functional $\mathbb{E}$ is coercive and convex. This modern variational formulation appears in \cite{Friedrichs}, while a discussion for a boundary value problem for a thin elastic plate in a somehow old fashioned notation is made by Kirchhoff \cite{kirchhoff}. And precisely the choice of the boundary conditions is quite delicate since it depends on the physical model considered.\par Destuynder-Salaun \cite[Section I.2]{destuyndersalaun} describe this modeling by \begin{center} \begin{minipage}{162mm} {\em ... 
Kirchhoff and Love have suggested to assimilate the plate to a collection of small pieces, each one being articulated with respect to the other and having a rigid-body behavior. It looks like these articulated wooden snakes that children have as toys. Hence the transverse shear strain remains zero, while the planar deformation is due to the articulation between small blocks. But this simplified description of a plate movement can be acceptable only if the components of the stress field can be considered to be negligible.} \end{minipage} \end{center} The above comment says that ${\cal LHL}$ should not be adopted if the components of the stress field are not negligible. An attempt to deal with large deflections for thin plates is made by Mansfield \cite[Chapters 8-9]{mansfield}. He first considers approximate methods, and then deals with three classes of asymptotic plate theories: membrane theory, tension field theory, inextensional theory. Roughly speaking, the three theories may be adopted according to the ratio between the thickness of the plate and the typical planar dimension: for the first two theories the ratio should be less than $10^{-3}$, whereas for the third theory it should be less than $10^{-2}$. Since a roadway has a length of the order of 1km and a width of the order of 10m, even for the less stringent inextensional theory the thickness of the roadway should be less than 10cm which, of course, appears unreasonable. Once more, this means that ${\cal LHL}$ should not be adopted in bridges. In any case, Mansfield \cite[p.183]{mansfield} writes \begin{center} \begin{minipage}{162mm} {\em The exact large-deflection analysis of plates generally presents considerable difficulties...} \end{minipage} \end{center} Destuynder-Salaun \cite[Section I.2]{destuyndersalaun} also revisit an alternative model due to Naghdi \cite{naghdi} by using a mixed variational formulation. 
They refer to \cite{mindlin,reissner1,reissner2} for further details and modifications, and conclude by saying that none between the Kirchhoff-Love model or one of these alternative models is always better than the others. Moreover, also the definition of the transverse shear energy is not universally accepted: from \cite[p.149]{destuyndersalaun}, we quote \begin{center} \begin{minipage}{162mm} {\em ... this discussion has been at the origin of a very large number of papers from both mathematicians and engineers. But to our best knowledge, a convincing justification concerning which one of the two expressions is the more suitable for numerical purpose, has never been formulated in a convincing manner. This question is nevertheless a fundamental one ...} \end{minipage} \end{center} It is clear that a crucial role is played by the word ``thin''. What is a thin plate? Which width is it allowed to have? If we assume that the width is zero, a quite unrealistic assumption for bridges, a celebrated two-dimensional equation was suggested by von K\'arm\'an \cite{karman}. This equation has been widely, and satisfactorily, studied from several mathematical points of view such as existence, regularity, eigenvalue problems, semilinear versions, see e.g.\ \cite{gazgruswe} for a survey of results. On the other hand, quite often several doubts have been raised on their physical soundness. For instance, Truesdell \cite[pp.601-602]{truesdell} writes \begin{center} \begin{minipage}{162mm} {\em Being unable to explain just why the von K\'arm\'an theory has always made me feel a little nauseated as well as very slow and stupid, I asked an expert, Mr. Antman, what was wrong with it. 
I can do no better than paraphrase what he told me: it relies upon\par 1) ``approximate geometry'', the validity of which is assessable only in terms of some other theory.\par 2) assumptions about the way the stress varies over a cross-section, assumptions that could be justified only in terms of some other theory.\par 3) commitment to some specific linear constitutive relation - linear, that is, in some special measure of strain, while such approximate linearity should be outcome, not the basis, of a theory.\par 4) neglect of some components of strain - again, something that should be proved mathematically from an overriding, self-consistent theory.\par 5) an apparent confusion of the referential and spatial descriptions - a confusion that is easily justified for the classical linearised elasticity but here is carried over unquestioned, in contrast with all recent studies of the elasticity of finite deformations.} \end{minipage} \end{center} Truesdell then concludes with a quite eloquent comment: \begin{center} \begin{minipage}{162mm} {\em These objections do not prove that anything is wrong with von K\'arm\'an strange theory. They merely suggest that it would be difficult to prove that there is anything right about it.} \end{minipage} \end{center} Let us invite the interested reader to have a careful look at the paper by Truesdell \cite{truesdell}; it contains several criticisms exposed in a highly ironic and exhilarating fashion and, hence, very effective.\par Classical books for elasticity theory are due to Love \cite{love}, Timoshenko \cite{timoshenko}, Ciarlet \cite{ciarletbook}, Villaggio \cite{villaggio}, see also \cite{nadai,naghdi,timoshenkoplate} for the theory of plates. Let us also point out a celebrated work by Ball \cite{ball} who was the first analyst to approach the real 3D boundary value problems for nonlinear elasticity. 
Further nice attempts to tackle nonlinear elasticity in particular situations were done by Antman \cite{antman1,antman2} who, however, appears quite skeptic on the possibility to have a general theory: \begin{center} \begin{minipage}{162mm} {\em ... general three-dimensional nonlinear theories have so far proved to be mathematically intractable.} \end{minipage} \end{center} Summarising, what we have seen suggests to conclude this short review about plate models by claiming that classical modeling of thin plates should be carefully revisited. This suggestion is absolutely not new. In this respect, let us quote a couple of sentences written by Gurtin \cite{gurtin} about nonlinear elasticity: \begin{center} \begin{minipage}{162mm} {\em Our discussion demonstrates why this theory is far more difficult than most nonlinear theories of mathematical physics. It is hoped that these notes will convince analysts that nonlinear elasticity is a fertile field in which to work.} \end{minipage} \end{center} Since the previously described Kirchhoff-Love model implicitly assumes ${\cal LHL}$, and since quasilinear equations appear too complicated in order to give useful information, we intend to add some nonlinearity only in the source $f$ in order to have a semilinear equation, something which appears to be a good compromise between too poor linear models and too complicated quasilinear models. This compromise is quite common in elasticity, see e.g.\ \cite[p.322]{ciarletbook} which describes the method of asymptotic expansions for the thickness $\varepsilon$ of a plate as a ``partial linearisation'' \begin{center} \begin{minipage}{162mm} {\em ... 
in that a system of quasilinear partial differential equations, i.e., with nonlinearities in the higher order terms, is replaced as $\varepsilon\to0$ by a system of semilinear partial differential equations, i.e., with nonlinearities only in the lower order terms.} \end{minipage} \end{center} In Section \ref{newmodel}, we suggest a new 2D mathematical model described by a semilinear fourth order wave equation. Before doing this, in next section we survey some existing models and we suggest some possible variants based on the observations listed in Section \ref{story}. \subsection{Equations modeling suspension bridges}\label{models} Although it is oversimplified in several respects, the celebrated report by Navier \cite{navier2} has been for more than one century the only mathematical treatise of suspension bridges. The second milestone contribution is certainly the monograph by Melan \cite{melan}. After the Tacoma collapse, the engineering communities felt the necessity to find accurate equations in order to attempt explanations of what had occurred. In this respect, a first source is certainly the work by Smith-Vincent \cite{tac2} which was written precisely {\em with special reference to the Tacoma Narrows Bridge}. The bridge is modeled as a one dimensional beam, say the interval $(0,L)$, and in order to obtain an autonomous equation, Smith-Vincent consider the function $\eta=\eta(x)$ representing the amplitude of the oscillation at the point $x\in(0,L)$. By linearising they obtain a fourth order linear ODE \cite[(4.2)]{tac2} which can be integrated explicitly. We will not write this equation because we prefer to deal with the function $v=v(x,t)$ representing the deflection at any point $x\in(0,L)$ and at time $t>0$; roughly speaking, $v(x,t)=\eta(x)\sin(\omega t)$ for some $\omega>0$. 
In this respect, a slightly better job was done in \cite{bleich} although this book was not very lucky since two of the authors (McCullogh and Bleich) passed away during its preparation. Equation \cite[(2.7)]{bleich} is precisely \cite[(4.2)]{tac2}; but \cite[(2.6)]{bleich} considers the deflection $v$ and reads \neweq{primissima} m\, v_{tt}+EI\, v_{xxxx}-H_w\, v_{xx}+\frac{w\, h}{H_w}=0\, ,\qquad x\in(0,L)\, ,\ t>0\, , \end{equation} where $E$ and $I$ are, respectively, the elastic modulus and the moment of inertia of the stiffening girder so that $EI$ is the stiffness of the girder; moreover, $m$ denotes the mass per unit length, $w=mg$ is the weight which produces a cable stress whose horizontal component is $H_w$, and $h$ is the increase of $H_w$ as a result of the additional deflection $v$. In particular, this means that $h$ depends on $v$ although \cite{bleich} does not emphasise this fact and considers $h$ as a constant.\par An excellent source to derive the equation of vertical oscillations in suspension bridges is \cite[Chapter IV]{rocard} where all the details are perfectly explained. The author, the French physicist Yves-Andr\'e Rocard (1903-1992), also helped to develop the atomic bomb for France. Consider again that a long span bridge roadway is a beam of length $L>0$ and that it is oscillating; let $v(x,t)$ denote the vertical component of the oscillation for $x\in(0,L)$ and $t>0$. The equation derived in \cite[p.132]{rocard} reads \neweq{flutter} m\, v_{tt}+EI\, v_{xxxx}-\big(H_w+\gamma v\big)\, v_{xx}+\frac{w\, \gamma}{H_w}v=f(x,t)\, ,\quad x\in(0,L)\, ,\ t>0, \end{equation} where $H_w$, $EI$ and $m$ are as in \eq{primissima}, $\gamma v$ is the variation $h$ of $H_w$ supposed to vary linearly with $v$, and $f$ is an external forcing term. Note that a nonlinearity appears here in the term $\gamma v v_{xx}$. 
In fact, \eq{flutter} is closely related to an equation suggested much earlier by Melan \cite[p.77]{melan} but it has not been subsequently attributed to him. \begin{problem} {\em Study oscillations and possible blow up in finite time for traveling waves to \eq{flutter} having velocity $c>0$, $v=v(x,t)=y(x-ct)$ for $x\in\mathbb{R}$ and $t>0$, in the cases where $f\equiv1$ is constant and where $f$ depends superlinearly on $v$. Putting $\tau=x-ct$ one is led to find solutions to the ODE $$ EI\, y''''(\tau)-\Big(\gamma y(\tau)+H_w-mc^2\Big)\, y''(\tau)+\frac{w\, \gamma}{H_w}y(\tau)=1\, ,\quad \tau\in\mathbb{R}\, . $$ By letting $w(\tau)=y(\tau)-\frac{H_w}{w\, \gamma}$ and normalising some constants, we arrive at \neweq{y4} w''''(\tau)-\Big(\alpha w(\tau)+\beta\Big)\, w''(\tau)+w(\tau)=0\, ,\quad \tau\in\mathbb{R}\, , \end{equation} for some $\alpha>0$ and $\beta\in\mathbb{R}$; we expect different behaviors depending on $\alpha$ and $\beta$. It would be interesting to see if local solutions to \eq{y4} blow up in finite time with wide oscillations. Moreover, one should also consider the more general problem $$w''''(\tau)-\Big(\alpha w(\tau)+\beta\Big)\, w''(\tau)+f(w(\tau))=0\, ,\quad \tau\in\mathbb{R}\, ,$$ with $f$ being superlinear, for instance $f(s)=s+\varepsilon s^3$ with $\varepsilon>0$ small. Incidentally, we note that such $f$ satisfies \eq{f} and \eq{fmono}-\eq{f2} below.} $\Box$\end{problem} Let us also mention that Rocard \cite[pp.166-167]{rocard} studies the possibility of simultaneous excitation of different bending and torsional modes and obtains a coupled system of linear equations of the kind of \eq{flutter}. 
With few variants, equations \eq{primissima} and \eq{flutter} seem nowadays to be well-accepted among engineers, see e.g.\ \cite[Section VII.4]{aer}; moreover, quite similar equations are derived to describe related phenomena in cable-stayed bridges \cite[(1)]{bruno} and in arch bridges traversed by high-speed trains \cite[(14)-(15)]{lacarbonara}.\par Let $v(x,t)$ and $\theta(x,t)$ denote respectively the vertical and torsional components of the oscillation of the bridge, then the following system is derived in \cite[(1)-(2)]{como} for the linearised equations of the elastic combined vertical-torsional oscillation motion: \renewcommand{\arraystretch}{2.8} \neweq{eqqq} \left\{\begin{array}{l} \displaystyle m\, v_{tt}+EI\, v_{xxxx}-H_w\, v_{xx}+\frac{w^2}{H_w^2}\, \frac{EA}{L}\int_0^L v(z,t)\, dz=f(x,t)\\ \displaystyle I_0\, \theta_{tt}+C_1\, \theta_{xxxx}-(C_2+H_w\ell^2)\, \theta_{xx}+\frac{\ell^2w^2}{H_w^2}\, \frac{EA}{L}\int_0^L\theta(z,t)\, dz=g(x,t)\\ x\in(0,L)\, ,\ t>0, \end{array}\right. \end{equation} \renewcommand{\arraystretch}{1.5}where $m$, $w$, $H_w$ are as in \eq{primissima}, $EI$, $C_1$, $C_2$, $EA$ are respectively the flexural, warping, torsional, extensional stiffness of the girder, $I_0$ the polar moment of inertia of the girder section, $2\ell$ the roadway width, $f(x,t)$ and $g(x,t)$ are the lift and the moment for unit girder length of the self-excited forces. The linearisation here consists in dropping the term $\gamma v v_{xx}$ but a preliminary linearisation was already present in \eq{flutter} in the zero order term. And the nonlocal linear term $\int_0^L v$, which replaces the zero order term in \eq{flutter}, is obtained by assuming ${\cal LHL}$. 
The nonlocal term in \eq{eqqq} represents the increment of energy due to the external wind during a period of time; this will be better explained in Section \ref{energies}.\par A special mention is deserved by an important paper by Abdel-Ghaffar \cite{abdel} where variational principles are used to obtain the combined equations of a suspension bridge motion in a fairly general nonlinear form. The effect of coupled vertical-torsional oscillations as well as cross-distortion of the stiffening structure is clarified by separating them into four different kinds of displacements: the vertical displacement $v$, the torsional angle $\theta$, the cross section distortional angle $\psi$, the warping displacement $u$, although $u$ can be expressed in terms of $\theta$ and $\psi$. These displacements are well described in Figure \ref{ghaffer} which is taken from \cite[Figure 2]{abdel}. \begin{figure} \caption{The four different kinds of displacements.} \label{ghaffer} \end{figure} A careful analysis of the energies involved is made, reaching up to fifth derivatives in the equations, see \cite[(15)]{abdel}. 
Higher order derivatives are then neglected and the following nonlinear system of three PDE's of fourth order in the three unknown displacements $v$, $\theta$, $\psi$ is obtained, see \cite[(28)-(29)-(30)]{abdel}: $$\left\{\begin{array}{l} \frac{w}{g}\, v_{tt}+EI\, v_{xxxx}-\Big(2H_w+H_1(t)+H_2(t)\Big)\, v_{xx}+\frac{b}{2}\, \Big(H_1(t)-H_2(t)\Big)\, (\theta_{xx}+\psi_{xx})\\ \ \ \ +\frac{w}{2H_w}\, \Big(H_1(t)+H_2(t)\Big)-\frac{w_s\, r^2}{g}\, \left(1+\frac{EI}{2G\mu r^2}\right)\, v_{xxtt}+\frac{w_s^2\, r^2}{4gG\mu}\, v_{tttt}=0\\ I_m\, \theta_{tt}+E\Gamma\, \theta_{xxxx}-GJ\, \theta_{xx}-\frac{H_w\, b^2}{2}\, (\theta_{xx}+\psi_{xx})-\frac{\gamma\, \Gamma}{g}\, \theta_{xxtt} -\frac{b^2}{4}\, \Big(H_1(t)+H_2(t)\Big)\, (\theta_{xx}+\psi_{xx})\\ \ \ \ +\frac{b}{2}\, \Big(H_1(t)-H_2(t)\Big)\, v_{xx}-\frac{\gamma\, \Lambda}{g} \psi_{xxtt}+\frac{b\, w}{4H_w}\, \Big(H_2(t)-H_1(t)\Big)+E\Lambda\, \psi_{xxxx}+\frac{w_c\, b^2}{4g}\, \psi_{tt}=0\\ \frac{w_c\, b^2}{4g}\, (\psi_{tt}+\theta_{tt})+\frac{EA\, b^2d^2}{4}\, \psi_{xxxx}-\frac{H_w\, b^2}{2}\, (\psi_{xx}+\theta_{xx})- \frac{\gamma Ab^2d^2}{4g}\, \psi_{xxtt}-\frac{\gamma\, \Lambda}{g}\theta_{xxtt}+E\Lambda\, \theta_{xxxx}\\ \ \ \ -\frac{b^2}{4}\, \Big(H_1(t)+H_2(t)\Big)\, (\theta_{xx}+\psi_{xx})+\frac{b}{2}\, \Big(H_1(t)-H_2(t)\Big)\, v_{xx} +\frac{w\, b}{4H_w}\, \Big(H_2(t)-H_1(t)\Big)=0\ . \end{array}\right.$$ We will not explain here what is the meaning of all the constants involved, it would take several pages... Some of the constants have a clear meaning, for the interpretation of the remaining ones, we refer to \cite{abdel}. Let us just mention that $H_1$ and $H_2$ represent the vibrational horizontal components of the cable tension and depend on $v$, $\theta$, $\psi$, and their first derivatives, see \cite[(3)]{abdel}. We wrote these equations in order to convince the reader that the behavior of the bridge is modeled by terribly complicated equations and by no means one should make use of ${\cal LHL}$. 
After making such a huge effort, Abdel-Ghaffar simplifies the problem by neglecting the cross section deformation, the shear deformation and rotatory inertia; he obtains a coupled nonlinear vertical-torsional system of two equations in the two unknown functions $v$ and $\theta$. These equations are finally linearised, by neglecting $H_1$ and $H_2$ which are considered small when compared with the initial tension $H_w$. Then the coupling effect disappears and equations \eq{eqqq} are recovered, see \cite[(34)-(35)]{abdel}. What a pity, an accurate modeling ended up with a linearisation! But there was no choice... how can one imagine to get any kind of information from the above system?\par Summarising, after the previously described pioneering models from \cite{bleich,melan,navier2,rocard,tac2} there has not been much work among engineers about alternative differential equations; the attention has turned to improving performances through design factors, see e.g.\ \cite{hhs}, or on how to solve structural problems rather than how to understand them more deeply. In this respect, from \cite[p.2]{mckmonth} we quote a personal discussion between McKenna and a distinguished civil engineer who said \begin{center} \begin{minipage}{162mm} {\em ... having found obvious and effective physical ways of avoiding the problem, engineers will not give too much attention to the mathematical solution of this fascinating puzzle ...} \end{minipage} \end{center} Only modeling modern footbridges has attracted some interest from a theoretical point of view. As already mentioned, pedestrian bridges are extremely flexible and display elastic behaviors similar to suspension bridges, although the oscillations are of different kind. In this respect, we would like to mention an interesting discussion with Diana \cite{diana}. 
He explained that when a suspension bridge is attacked by wind it starts oscillating, but soon afterwards the wind itself modifies its behavior according to the bridge oscillation; so, the wind amplifies the oscillations by blowing synchronously. A qualitative description of this phenomenon was already attempted by Rocard \cite[p.135]{rocard}: \begin{center} \begin{minipage}{162mm} {\em ... it is physically certain and confirmed by ordinary experience, although the effect is known only qualitatively, that a bridge vibrating with an appreciable amplitude completely imposes its own frequency on the vortices of its wake. It appears as if in some way the bridge itself discharges the vortices into the fluid with a constant phase relationship with its own oscillation... .} \end{minipage} \end{center} This recalls the above described behavior of footbridges where {\em pedestrians fall spontaneously into step with the vibrations}: in both cases, external forces synchronise their effect and amplify the oscillations of the bridge. This is one of the reasons why self-excited oscillations appear in suspension and pedestrian bridges.\par In \cite{bodgi} a simple 1D model was proposed in order to describe the crowd-flow phenomena occurring when pedestrians walk on a flexible footbridge. The resulting equation \cite[(2)]{bodgi} reads \neweq{pedestrian} \left(m_s(x)+m_p(x,t)\right)u_{tt}+\delta(x)u_t+\gamma(x)u_{xxxx}=g(x,t) \end{equation} where $x$ is the coordinate along the beam axis, $t$ the time, $u=u(x,t)$ the lateral displacement, $m_s(x)$ is the mass per unit length of the beam, $m_p(x,t)$ the linear mass of pedestrians, $\delta(x)$ the viscous damping coefficient, $\gamma(x)$ the stiffness per unit length, $g(x,t)$ the pedestrian lateral force per unit length. 
In view of the superlinear behavior for large displacements observed for the London Millennium Bridge, see Section \ref{story}, we wonder if instead of a linear model one should consider a lateral force also depending on the displacement, $g=g(x,t,u)$, being superlinear with respect to $u$. \begin{problem} {\em Study \eq{pedestrian} modified as follows $$ u_{tt}+\delta u_t+\gamma u_{xxxx}+f(u)=g(x,t)\qquad(x\in\mathbb{R}\, ,\ t>0) $$ where $\delta>0$, $\gamma>0$ and $f(s)=s+\varepsilon s^3$ for some $\varepsilon>0$ small. One could first consider the Cauchy problem $$u(x,0)=u_0(x)\ ,\quad u_t(x,0)=u_1(x)\quad(x\in\mathbb{R})$$ with $g\equiv0$. Then one could seek traveling waves such as $u(x,t)=w(x-ct)$ which solve the ODE $$ \gamma w''''(\tau)+c^2w''(\tau)+\delta c w'(\tau)+f(w(\tau))=0\qquad(x-ct=\tau\in\mathbb{R}). $$ Finally, one could also try to find properties of solutions in a bounded interval $x\in(0,L)$.} $\Box$\end{problem} Scanlan-Tomko \cite{scantom} introduce a model in which the torsional angle $\theta$ of the roadway section satisfies the equation \neweq{scann} I\, [\theta''(t)+2\zeta_\theta\omega_\theta \theta'(t)+\omega_\theta^2\theta(t)]=A\theta'(t)+B\theta(t)\ , \end{equation} where $I$, $\zeta_\theta$, $\omega_\theta$ are, respectively, associated inertia, damping ratio, and natural frequency. The r.h.s.\ of \eq{scann} represents the aerodynamic force and was postulated to depend linearly on both $\theta'$ and $\theta$ with the positive constants $A$ and $B$ depending on several parameters of the bridge. Since \eq{scann} may be seen as a two-variables first order linear system, it fails to fulfill both the requirements of (GP). Hence, \eq{scann} is not suitable to describe the disordered behavior of a bridge. 
And indeed, elementary calculus shows that if $A$ is sufficiently large, then solutions to \eq{scann} are positive exponentials times trigonometric functions which do not exhibit a sudden appearance of self-excited oscillations, they merely blow up in infinite time. In order to have a more reliable description of the bridge, in Section \ref{blup} we consider the fourth order nonlinear ODE $w''''+kw''+f(w)=0$ ($k\in\mathbb{R}$). We will see that solutions to this equation blow up in finite time with self-excited oscillations appearing suddenly, without any intermediate stage.\par That linearisation yields wrong models is also the opinion of McKenna \cite[p.4]{mckmonth} who comments \eq{scann} by writing \begin{center} \begin{minipage}{162mm} {\em This is the point at which the discussion of torsional oscillation starts in the engineering literature.} \end{minipage} \end{center} He claims that the problem is in fact nonlinear and that \eq{scann} is obtained after an incorrect linearisation. McKenna concludes by noticing that \begin{center} \begin{minipage}{162mm} {\em Even in recent engineering literature ... this same mistake is reproduced.} \end{minipage} \end{center} The mistake claimed by McKenna is that the equations are often linearised by taking $\sin\theta=\theta$ and $\cos\theta=1$ also for large amplitude torsional oscillations $\theta$. The corresponding equation then becomes linear and the main torsional phenomenon disappears. Avoiding this rude approximation, but considering the cables and hangers as linear springs obeying ${\cal LHL}$, McKenna reaches an uncoupled second order system for the functions representing the vertical displacement $y$ of the barycenter $B$ of the cross section of the roadway and the deflection from horizontal $\theta$, see Figure \ref{9}. Here, $2\ell$ denotes the width of the roadway whereas $C_1$ and $C_2$ denote the two lateral hangers which have opposite extension behaviors. 
\begin{figure} \caption{Vertical displacement and deflection of the cross section of the roadway.} \label{9} \end{figure} McKenna-Tuama \cite{mckO} suggest a slightly different model. They write: \begin{center} \begin{minipage}{162mm} {\em ... there should be some torsional forcing. Otherwise, there would be no input of energy to overcome the natural damping of the system ... we expect the bridge to behave like a stiff spring, with a restoring force that becomes somewhat superlinear.} \end{minipage} \end{center} We completely agree with this, see the conclusions in Section \ref{conclusions}. McKenna-Tuama end up with the following coupled second order system \neweq{coupled} \frac{m\ell^2}{3}\, \theta''=\ell\cos\theta\, \Big(f(y-\ell\sin\theta)-f(y+\ell\sin\theta)\Big)\ ,\quad m\, y''=-\Big(f(y-\ell\sin\theta)+f(y+\ell\sin\theta)\Big)\ , \end{equation} see again Figure \ref{9}. The delicate point is the choice of the superlinearity $f$ which \cite{mckO} take first as $f(s)=(s+1)^+-1$ and then as $f(s)=e^s-1$ in order to maintain the asymptotically linear behavior as $s\to0$. Using \eq{coupled}, \cite{mckmonth,mckO} were able to numerically replicate the phenomenon observed at the Tacoma Bridge, namely the sudden transition from vertical oscillations to torsional oscillations. They found that if the vertical motion was sufficiently large to induce brief slackening of the hangers, then numerical results highlighted a rapid transition to a torsional motion. Nevertheless, the physicists Green-Unruh \cite{green} believe that the hangers were not slack during the Tacoma Bridge oscillation. If this were true, then the piecewise linear forcing term $f$ becomes totally linear. Moreover, by commenting the results in \cite{mckmonth,mckO}, McKenna-Moore \cite[p.460]{mckmoore} write that \begin{center} \begin{minipage}{162mm} {\em ...the range of parameters over which the transition from vertical to torsional motion was observed was physically unreasonable ... 
the restoring force due to the cables was oversimplified ... it was necessary to impose small torsional forcing}. \end{minipage} \end{center} Summarising, \eq{coupled} seems to be the first model able to reproduce the behavior of the Tacoma Bridge but it appears to need some improvements. First, one should avoid the possibility of a linear behavior of the hangers, the nonlinearity should appear before possible slackening of the hangers. Second, the restoring force and the parameters involved should be chosen carefully. \begin{problem} {\em Try a doubly superlinear term $f$ in \eq{coupled}. For instance, take $f(s)=s+\varepsilon s^3$ with $\varepsilon>0$ small, so that \eq{coupled} becomes \neweq{mia} \frac{m\ell^2}{3}\, \theta''+2\ell^2\cos\theta\sin\theta\, \Big(1+3\varepsilon y^2+\varepsilon\ell^2\sin^2\theta\Big)=0\ ,\quad m\, y''+2\Big(1+3\varepsilon\ell^2\sin^2\theta\Big)y+2\varepsilon y^3=0\ . \end{equation} It appears challenging to determine some features of the solution $(y,\theta)$ to \eq{mia} and also to perform numerical experiments to see what kind of oscillations are displayed by the solutions.} $\Box$\end{problem} System \eq{coupled} is a $2\times2$ system which should be considered as a nonlinear fourth order model; therefore, it fulfills the necessary conditions of the general principle (GP). Another fourth order differential equation was suggested in \cite{lzmck,McKennaWalter,mck4} as a one-dimensional model for a suspension bridge, namely a beam of length $L$ suspended by hangers. When the hangers are stretched there is a restoring force which is proportional to the amount of stretching, according to ${\cal LHL}$. But when the beam moves in the opposite direction, there is no restoring force exerted on it. 
Under suitable boundary conditions, if $u(x,t)$ denotes the vertical displacement of the beam in the downward direction at position $x$ and time $t$, the following nonlinear beam equation is derived \neweq{beam} u_{tt}+u_{xxxx}+\gamma u^+=W(x,t)\, ,\qquad x\in(0,L)\, ,\quad t>0\, , \end{equation} where $u^+=\max\{u,0\}$, $\gamma u^+$ represents the force due to the cables and hangers which are considered as a linear spring with a one-sided restoring force, and $W$ represents the forcing term acting on the bridge, including its own weight per unit length, the wind, the traffic loads, or other external sources. After some normalisation, by seeking traveling waves $u(x,t)=1+w(x-ct)$ to \eq{beam} and putting $k=c^2>0$, McKenna-Walter \cite{mck4} reach the following ODE \neweq{maineq} w''''(\tau)+kw''(\tau)+f(w(\tau))=0\qquad(x-ct=\tau\in\mathbb{R}) \end{equation} where $k\in(0,2)$ and $f(s)=(s+1)^+-1$. Subsequently, in order to maintain the same behavior but with a smooth nonlinearity, Chen-McKenna \cite{chenmck} suggest to consider \eq{maineq} with $f(s)=e^s-1$. For later discussion, we notice that both these nonlinearities satisfy \neweq{f} f\in {\rm Lip}_{{\rm loc}}(\mathbb{R})\,,\quad f(s)\,s>0 \quad \forall s\in \mathbb{R}\setminus\{0\}. \end{equation} Hence, when $W\equiv0$, \eq{beam} is just a special case of the more general semilinear fourth order wave equation \neweq{beam2} u_{tt}+u_{xxxx}+f(u)=0\, ,\qquad x\in(0,L)\, ,\quad t>0\, , \end{equation} where the natural assumptions on $f$ are \eq{f} plus further conditions, according to the model considered. Traveling waves to \eq{beam2} solve \eq{maineq} with $k=c^2$ being the squared velocity of the wave. Recently, for $f(s)=(s+1)^+-1$ and its variants, Benci-Fortunato \cite{benci} proved the existence of special solutions to \eq{maineq} deduced by solitons of the beam equation \eq{beam2}. 
\begin{problem} {\em It could be interesting to insert into the wave-type equation \eq{beam2} the term corresponding to the beam elongation, that is, $$\int_0^L\Big(\sqrt{1+u_x(x,t)^2}-1\Big)\, dx.$$ This would lead to a quasilinear equation such as $$u_{tt}+u_{xxxx}-\left(\frac{u_x}{\sqrt{1+u_x^2}}\right)_x+f(u)=0$$ with $f$ satisfying \eq{f}. What can be said about this equation? Does it admit oscillating solutions in a suitable sense? One should first consider the case of an unbounded beam ($x\in\mathbb{R}$) and then the case of a bounded beam ($x\in(0,L)$) complemented with some boundary conditions.} $\Box$\end{problem} Motivated by the fact that it appears unnatural to ignore the motion of the main sustaining cable, a slightly more sophisticated and complicated string-beam model was suggested by Lazer-McKenna \cite{mck1}. They treat the cable as a vibrating string, coupled with the vibrating beam of the roadway by piecewise linear springs that have a given spring constant $k$ if expanded, but no restoring force if compressed. The sustaining cable is subject to some forcing term such as the wind or the motions in the towers. This leads to the system $$\left\{\begin{array}{ll} v_{tt}-c_1v_{xx}+\delta_1v_t-k_1(u-v)^+=f(x,t)\, ,\qquad x\in(0,L)\, ,\quad t>0\, ,\\ u_{tt}+c_2u_{xxxx}+\delta_2u_t+k_2(u-v)^+=W_0\, ,\qquad x\in(0,L)\, ,\quad t>0\, , \end{array}\right.$$ where $v$ is the displacement from equilibrium of the cable and $u$ is the displacement of the beam, both measured in the downwards direction. The constants $c_1$ and $c_2$ represent the relative strengths of the cables and roadway respectively, whereas $k_1$ and $k_2$ are the spring constants and satisfy $k_2\ll k_1$. The two damping terms can possibly be set to $0$, while $f$ and $W_0$ are the forcing terms. 
We also refer to \cite{ahmed} for a study of the same problem in a rigorous functional analytic setting.\par Since the Tacoma Bridge collapse was mainly due to a wide torsional motion of the bridge, see \cite{tacoma}, the bridge cannot be considered as a one dimensional beam. In this respect, Rocard \cite[p.148]{rocard} states \begin{center} \begin{minipage}{162mm} {\em Conventional suspension bridges are fundamentally unstable in the wind because the coupling effect introduced between bending and torsion by the aerodynamic forces of the lift.} \end{minipage} \end{center} Hence, if some model wishes to display instability of bridges, it should necessarily take into account more degrees of freedom than just a beam. In fact, to be exhaustive one should consider vertical oscillations $y$ of the roadway, its torsional angle $\theta$, and coupling with the two sustaining cables $u$ and $v$. This model was suggested by Matas-O\v cen\'a\v sek \cite{matas} who consider the hangers as linear springs and obtain a system of four equations; three of them are second order wave-type equations, the last one is again a fourth order equation such as $$m\, y_{tt}+k\, y_{xxxx}+\delta\, y_t+E_1(y-u-\ell\sin\theta)+E_2(y-v+\ell\sin\theta)=W(x)+f(x,t)\ ;$$ we refer to $(SB_4)$ in \cite{drabek} for an interpretation of the parameters involved.\par In our opinion, any model which describes the bridge as a one dimensional beam is too simplistic, unless the model takes somehow into account the possible appearance of a torsional motion. 
In \cite{gazpav} it was suggested to maintain the one dimensional model provided one also allows displacements below the equilibrium position and these displacements replace the deflection from horizontal of the roadway of the bridge; in other words, \renewcommand{\arraystretch}{1.1} \neweq{w0} \begin{array}{c} \mbox{the unknown function $w$ represents the upwards vertical displacement when $w>0$}\\ \mbox{and the deflection from horizontal, computed in a suitable unity measure, when $w<0$.} \end{array} \end{equation} \renewcommand{\arraystretch}{1.5} In this setting, instead of \eq{beam} one should consider the more general semilinear fourth order wave equation \eq{beam2} with $f$ satisfying \eq{f} plus further conditions which make $f(s)$ superlinear and unbounded when both $s\to\pm\infty$; hence, ${\cal LHL}$ is dropped by allowing $f$ to be as close as one may wish to a linear function but eventually superlinear for large displacements. The superlinearity assumption is justified both by the observations in Section \ref{story} and by the fact that the farther the position of the bridge is from the horizontal equilibrium position, the more relevant the action of the wind becomes, because the wind hits the roadway of the bridge transversally. If ever the bridge reached the limit vertical position, in case the roadway is torsionally rotated by a right angle, the wind would hit it orthogonally, that is, with full power.\par In this section we listed a number of attempts to model the mechanics of bridges by means of differential equations. The sources for this list are very heterogeneous. However, except for some possible small damping term, none of them contains odd derivatives. Moreover, none of them is acknowledged by the scientific community to perfectly describe the complex behavior of bridges. Some of them fail to satisfy the requirements of (GP) and, in our opinion, must be accordingly modified.
Some others seem to better describe the oscillating behavior of bridges but still need some improvements. \section{Blow up oscillating solutions to some fourth order differential equations}\label{blup} If the trivial solution to some dynamical system is unstable one may hope to magnify self-excitement phenomena through finite time blow up. In this section we survey and discuss several results about solutions to \eq{maineq} which blow up in finite time. Let us rewrite the equation with a different time variable, namely \neweq{maineq2} w''''(t)+kw''(t)+f(w(t))=0\qquad(t\in\mathbb{R})\ . \end{equation} We first recall the following results proved in \cite{bfgk}: \begin{theorem}\label{global} Let $k\in \mathbb{R}$ and assume that $f$ satisfies \eqref{f}.\par $(i)$ If a local solution $w$ to \eqref{maineq2} blows up at some finite $R\in\mathbb{R}$, then \neweq{pazzo} \liminf_{t\to R}w(t)=-\infty\qquad\mbox{and}\qquad\limsup_{t\to R}w(t)=+\infty\, . \end{equation} $(ii)$ If $f$ also satisfies \neweq{ff3} \limsup_{s\to+\infty}\frac{f(s)}{s}<+\infty\qquad\mbox{or}\qquad\limsup_{s\to-\infty}\frac{f(s)}{s}<+\infty, \end{equation} then any local solution to \eqref{maineq2} exists for all $t\in\mathbb{R}$. \end{theorem} If both the conditions in \eq{ff3} are satisfied then global existence follows from classical theory of ODE's; but \eq{ff3} merely requires that $f$ is ``one-sided at most linear'' so that statement $(ii)$ is far from being trivial and, as shown in \cite{gazpav}, it does not hold for equations of order at most 3. On the other hand, Theorem \ref{global} $(i)$ states that, under the sole assumption \eq{f}, the only way that finite time blow up can occur is with ``wide and thinning oscillations'' of the solution $w$; again, in \cite{gazpav} it was shown that this kind of blow up is a phenomenon typical of at least fourth order problems such as \eq{maineq2} since it does not occur in related lower order equations. 
Note that assumption \eq{ff3} includes, in particular, the cases where $f$ is either concave or convex.\par Theorem \ref{global} does not guarantee that the blow up described by \eq{pazzo} indeed occurs. For this reason, we assume further that \neweq{fmono} f\in {\rm Lip}_{{\rm loc}}(\mathbb{R})\cap C^2(\mathbb{R}\setminus\{0\})\ ,\quad f'(s)\ge0\quad\forall s\in\mathbb{R}\ ,\quad\liminf_{s\to\pm\infty}|f''(s)|>0 \end{equation} and the growth conditions \neweq{f2} \exists p>q\ge1,\ \alpha\ge0,\ 0<\rho\le \beta,\quad\mbox{s.t.}\quad\rho|s|^{p+1}\le f(s)s\le\alpha|s|^{q+1}+\beta|s|^{p+1}\quad\forall s\in\mathbb{R}\ . \end{equation} Notice that \eq{fmono}-\eq{f2} strengthen \eq{f}. In \cite{gazpav3} the following sufficient conditions for the finite time blow up of local solutions to \eq{maineq2} have been proved. \begin{theorem}\label{blowup} Let $k\le0$, $p>q\ge1$, $\alpha\ge0$, and assume that $f$ satisfies \eqref{fmono} and \eqref{f2}. Assume that $w=w(t)$ is a local solution to \eqref{maineq2} in a neighborhood of $t=0$ which satisfies \neweq{tech} w'(0)w''(0)-w(0)w'''(0)-kw(0)w'(0)>0\, . \end{equation} Then, $w$ blows up in finite time for $t>0$, that is, there exists $R\in(0,+\infty)$ such that \eqref{pazzo} holds. \end{theorem} Since self-excited oscillations such as \eq{pazzo} should be expected in any equation attempting to model suspension bridges, linear models should be avoided. Unfortunately, even if the solutions to \eq{maineq2} display these oscillations, they cannot be prevented since they arise suddenly after a long time of apparent calm. In Figure \ref{duemila}, we display the plot of a solution to \eq{maineq2}. \begin{figure} \caption{Solution to \eq{maineq2}} \label{duemila} \end{figure} It can be observed that the solution has oscillations with increasing amplitude and rapidly decreasing ``nonlinear frequency''; numerically, the blow up seems to occur at $t=8.164$. Even more impressive appears the plot in Figure \ref{mille}.
\begin{figure} \caption{Solution to \eq{maineq2}} \label{mille} \end{figure} Here the solution has ``almost regular'' oscillations between $-1$ and $+1$ for $t\in[0,80]$. Then the amplitude of oscillations nearly doubles in the interval $[80,93]$ and, suddenly, it violently amplifies after $t=96.5$ until the blow up which seems to occur only slightly later at $t=96.59$. We also refer to \cite{gazpav,gazpav2,gazpav3} for further plots.\par We refer to \cite{gazpav,gazpav3} for numerical results and plots of solutions to \eq{maineq2} with nonlinearities $f=f(s)$ having different growths as $s\to\pm\infty$. In such a case, the solution still blows up according to \eq{pazzo} but, although its ``limsup'' and ``liminf'' are respectively $+\infty$ and $-\infty$, the divergence occurs at different rates. We represent this qualitative behavior in Figure \ref{blow}. \begin{figure} \caption{Qualitative blow up for solutions to \eq{maineq2}} \label{blow} \end{figure} Traveling waves to \eq{beam2} which propagate at some velocity $c>0$, depending on the elasticity of the material of the beam, solve \eq{maineq2} with $k=c^2>0$. Further numerical results obtained in \cite{gazpav,gazpav3} suggest that a statement similar to Theorem \ref{blowup} also holds for $k>0$ and, as expected, that the blow up time $R$ is decreasing with respect to the initial height $w(0)$ and increasing with respect to $k$. Since $k=c^2$ and $c$ represents the velocity of the traveling wave, this means that the time of blow up is an increasing function of $k$. In turn, since the velocity of the traveling wave depends on the elasticity of the material used to construct the bridge (larger $c$ means less elastic), this tells us that the stiffer the bridge is, the longer it will survive exterior forces such as the wind and/or traffic loads. \begin{problem} {\em Prove Theorem \ref{blowup} when $k>0$. This would allow one to show that traveling waves to \eq{beam2} blow up in finite time.
Numerical results in \cite{gazpav,gazpav3} suggest that a result similar to Theorem \ref{blowup} also holds for $k>0$.} $\Box$\end{problem} \begin{problem} {\em Prove that the blow up time of solutions to \eq{maineq2} depends increasingly with respect to $k\in\mathbb{R}$. The interest of an analytical proof of this fact relies on the important role played by $k$ within the model.} $\Box$\end{problem} \begin{problem} {\em The blow up time $R$ of solutions to \eq{maineq2} is the expectation of life of the oscillating bridge. Provide an estimate of $R$ in terms of $f$ and of the initial data.} $\Box$\end{problem} \begin{problem} {\em Condition \eq{f2} is a superlinearity assumption which requires that $f$ is bounded both from above and below by the same power $p>1$. Prove Theorem \ref{blowup} for more general kinds of superlinear functions $f$.} $\Box$\end{problem} \begin{problem} {\em Can assumption \eq{tech} be relaxed? Of course, it cannot be completely removed since the trivial solution $w(t)\equiv0$ is globally defined, that is, $R=+\infty$. Numerical experiments in \cite{gazpav,gazpav3} could not detect any nontrivial global solution to \eq{maineq2}.} $\Box$\end{problem} \begin{problem} {\em Study \eq{maineq2} with a damping term: $w''''(t)+kw''(t)+\delta w'(t)+f(w(t))=0$ for some $\delta>0$. Study the competition between the damping term $\delta w'$ and the nonlinear self-exciting term $f(w)$.} $\Box$\end{problem} Note that Theorems \ref{global} and \ref{blowup} ensure that there exists an increasing sequence $\{z_j\}_{j\in\mathbb{N}}$ such that:\par $(i)$ $z_j\nearrow R$ as $j\to\infty$;\par $(ii)$ $w(z_j)=0$ and $w$ has constant sign in $(z_j,z_{j+1})$ for all $j\in\mathbb{N}$.\par It is also interesting to compare the rate of blow up of the displacement and of the acceleration on these intervals. By slightly modifying the proof of \cite[Theorem 3]{gazpav3} one can obtain the following result which holds for any $k\in\mathbb{R}$. 
\begin{theorem}\label{asymptotics} Let $k\in \mathbb{R}$, $p>q\ge1$, $\alpha\ge0$, and assume that $f$ satisfies \eqref{fmono} and \eqref{f2}. Assume that $w=w(t)$ is a local solution to $$ w''''(t)+kw''(t)+f(w(t))=0\qquad(t\in\mathbb{R}) $$ which blows up in finite time as $t\nearrow R<+\infty$. Denote by $\{z_j\}$ the increasing sequence of zeros of $w$ such that $z_j\nearrow R$ as $j\to+\infty$. Then \neweq{estimate} \int_{z_j}^{z_{j+1}}w(t)^2\, dt\ \ll\ \int_{z_j}^{z_{j+1}}w''(t)^2\, dt\ ,\qquad \int_{z_j}^{z_{j+1}}w'(t)^2\, dt\ \ll\ \int_{z_j}^{z_{j+1}}w''(t)^2\, dt \end{equation} as $j\to\infty$. Here, $g(j)\ll\psi(j)$ means that $g(j)/\psi(j)\to0$ as $j\to\infty$. \end{theorem} The estimate \eq{estimate}, clearly due to the superlinear term, has a simple interpretation in terms of comparison between blowing up energies, see Section \ref{energies}. \begin{remark}\label{pde} {\em Equation \eq{maineq2} also arises in several different contexts, see the book by Peletier-Troy \cite{pt} where one can find some other physical models, a survey of existing results, and further references. Moreover, besides \eq{beam2}, \eq{maineq2} may also be fruitfully used to study some other partial differential equations. For instance, one can consider nonlinear elliptic equations such as $$ \Delta^2u+e^u=\frac{1}{|x|^4}\qquad\mbox{in }\mathbb{R}^4\setminus\{0\}\ , $$ \neweq{critical} \Delta^2 u+|u|^{8/(n-4)}u=0\mbox{ in }\mathbb{R}^n\ (n\ge5),\qquad\Delta\Big(|x|^2\Delta u\Big)+|x|^2|u|^{8/(n-2)}u=0\mbox{ in }\mathbb{R}^n\ (n\ge3); \end{equation} it is known (see, e.g.\ \cite{gazgruswe}) that the Green function for some fourth order elliptic problems displays oscillations, differently from second order problems. Furthermore, one can also consider the semilinear parabolic equation $$u_t+\Delta ^2u=|u|^{p-1}u\mbox{ in }\mathbb{R}_{+}^{n+1}\ ,\qquad u(x,0)=u_0(x)\mbox{ in }\mathbb{R}^{n}$$ where $p>1+4/n$ and $u_0$ satisfies suitable assumptions. 
It is shown in \cite{fgg,gg2} that the linear biharmonic heat operator has an ``eventual local positivity'' property: for positive initial data $u_0$ the solution to the linear problem with no source is eventually positive on compact subsets of $\mathbb{R}^n$ but negativity can appear at any time far away from the origin. This phenomenon is due to the sign changing properties, with infinite oscillations, of the biharmonic heat kernels. We also refer to \cite{bfgk,gazpav3} for some results about the above equations and for the explanation of how they can be reduced to \eq{maineq2} and, hence, how they display self-excited oscillations. $\Box$} \end{remark} \begin{problem} {\em For any $q>0$ and parameters $a,b,k\in\mathbb{R}$, $c\ge0$, study the equation \neweq{subcrit} w''''(t)+aw'''(t)+kw''(t)+bw'(t)+cw(t)+|w(t)|^qw(t)=0\qquad(t\in\mathbb{R})\ . \end{equation} Any reader who is familiar with the second order Sobolev space $H^2$ recognises the critical exponent in the first equation in \eq{critical}. In view of Liouville-type results in \cite{ambrosio} when $q\le8/(n-4)$, it would be interesting to study the equation $\Delta^2 u+|u|^qu=0$ with the same technique. The radial form of this equation may be written as \eq{maineq2} only when $q=8/(n-4)$ since for other values of $q$ the transformation in \cite{GG} gives rise to the appearance of first and third order derivatives as in \eq{subcrit}: this motivates \eq{subcrit}. The values of the parameters corresponding to the equation $\Delta^2 u+|u|^qu=0$ can be found in \cite{GG}.} $\Box$\end{problem} Our target is now to reproduce the self-excited oscillations found in Theorem \ref{blowup} in a suitable second order system. Replace $\sin\theta\cong\theta$ and $\cos\theta\cong1$, and put $x=\ell\theta$. After these transformations, the McKenna system \eq{coupled} reads \neweq{truesystem} \left\{\begin{array}{ll} x''+\omega^2 f(y+x)-\omega^2 f(y-x)=0\\ y''+f(y+x)+f(y-x)=0\ . 
\end{array}\right.\end{equation} We further modify \eq{truesystem}; for suitable values of the parameters $\beta$ and $\delta$, we consider the system \neweq{miosystxy} \left\{\begin{array}{ll} x''-f(y-x)+\beta(y+x)=0\\ y''-f(y-x)+\delta(y+x)=0 \end{array}\right. \end{equation} which differs from \eq{truesystem} in two respects: the minus sign in front of $f(y-x)$ in the second equation and the other restoring force $f(y+x)$ being replaced by a linear term. To \eq{miosystxy} we associate the initial value problem \neweq{cauchy} x(0)=x_0\, ,\ x'(0)=x_1\, ,\ y(0)=y_0\, ,\ y'(0)=y_1\ . \end{equation} The following statement holds. \begin{theorem}\label{oscill} Assume that $\beta<\delta\le-\beta$ (so that $\beta<0$). Assume also that $f(s)=\sigma s+cs^2+ds^3$ with $d>0$ and $c^2\le2d\sigma$. Let $(x_0,y_0,x_1,y_1)\in\mathbb{R}^4$ satisfy \neweq{initial} (3\beta-\delta)x_0y_1+(3\delta-\beta)x_1y_0>(\beta+\delta)(x_0x_1+y_0y_1)\ . \end{equation} If $(x,y)$ is a local solution to \eqref{miosystxy}-\eqref{cauchy} in a neighborhood of $t=0$, then $(x,y)$ blows up in finite time for $t>0$ with self-excited oscillations, that is, there exists $R\in(0,+\infty)$ such that $$\liminf_{t\to R}x(t)=\liminf_{t\to R}y(t)=-\infty\qquad\mbox{and}\qquad\limsup_{t\to R}x(t)=\limsup_{t\to R}y(t)=+\infty\, .$$ \end{theorem} \begin{proof} After performing the change of variables \eq{change}, system \eq{miosystxy} becomes $$w''+(\delta-\beta)z=0\ ,\qquad z''-2f(w)+(\beta+\delta)z=0$$ which may be rewritten as a single fourth order equation \neweq{mia4} w''''(t)+(\beta+\delta)w''(t)+2(\delta-\beta)f(w(t))=0\ . \end{equation} Assumption \eq{initial} reads $$w'(0)w''(0)-w(0)w'''(0)-(\beta+\delta)w(0)w'(0)>0\, .$$ Furthermore, in view of the above assumptions, $f$ satisfies \eq{fmono}-\eq{f2} with $\rho=d/2$, $p=3$, $\alpha=2\sigma$, $q=1$, $\beta=3d$. 
Whence, Theorem \ref{blowup} states that $w$ blows up in finite time for $t>0$ and that there exists $R\in(0,+\infty)$ such that \neweq{puzzo} \liminf_{t\to R}w(t)=-\infty\qquad\mbox{and}\qquad\limsup_{t\to R}w(t)=+\infty\, . \end{equation} Next, we remark that \eq{mia4} admits a first integral, namely \begin{eqnarray} E(t) &:=& \frac{\beta+\delta}{2}\,w'(t)^2+w'(t)w'''(t)+2(\delta-\beta)F(w(t))-\frac{1}{2}\,w''(t)^2 \notag \\ &=& \frac{\beta+\delta}{2}\,w'(t)^2+(\beta-\delta)w'(t)z'(t)+2(\delta-\beta)F(w(t))-\frac{(\beta-\delta)^2}{2}\,z(t)^2\equiv\overline{E}\ , \label{E} \end{eqnarray} for some constant $\overline{E}$. By \eq{puzzo} there exists an increasing sequence $m_j\to R$ of local maxima of $w$ such that $$z(m_j)=\frac{w''(m_j)}{\beta-\delta}\ge0\ ,\quad w'(m_j)=0\ ,\quad w(m_j)\to+\infty\mbox{ as }j\to\infty\ .$$ By plugging $m_j$ into the first integral \eq{E} we obtain $$\overline{E}=E(m_j)=2(\delta-\beta)F(w(m_j))-\frac{(\beta-\delta)^2}{2}\,z(m_j)^2$$ which proves that $z(m_j)\to+\infty$ as $j\to+\infty$. We may proceed similarly in order to show that $z(\mu_j)\to-\infty$ on a sequence $\{\mu_j\}$ of local minima of $w$. Therefore, we have $$ \liminf_{t\to R}z(t)=-\infty\qquad\mbox{and}\qquad\limsup_{t\to R}z(t)=+\infty\, . $$ Assume for contradiction that there exists $K\in\mathbb{R}$ such that $x(t)\le K$ for all $t<R$. Then, recalling \eq{change}, on the above sequence $\{m_j\}$ of local maxima for $w$, we would have $y(m_j)-K\ge y(m_j)-x(m_j)=w(m_j)\to+\infty$ which is incompatible with \eq{E} since $$2(\delta-\beta)F(y(m_j)-x(m_j))-\frac{(\beta-\delta)^2}{2}\, (y(m_j)+x(m_j))^2\equiv\overline{E}$$ and $F$ has growth of order 4 with respect to its divergent argument. Similarly, by arguing on the sequence $\{\mu_j\}$, we rule out the possibility that there exists $K\in\mathbb{R}$ such that $x(t)\ge K$ for all $t<R$. Finally, by changing the role of $x$ and $y$ we find that also $y(t)$ is unbounded both from above and below as $t\to R$.
This completes the proof.\end{proof} \begin{remark} {\em Numerical results in \cite{gazpav3} suggest that the assumption $\delta\le-\beta$ is not necessary to obtain \eq{puzzo}. So, most probably, Theorem \ref{oscill} and the results of this section hold true also without this assumption. $\Box$}\end{remark} A special case of function $f$ satisfying the assumptions of Theorem \ref{oscill} is $f_\varepsilon(s)=s+\varepsilon s^3$ for any $\varepsilon>0$. We wish to study the situation when the problem tends to become linear, that is, when $\varepsilon\to0$. Plugging such $f_\varepsilon$ into \eq{miosystxy} gives the system \neweq{fe} \left\{\begin{array}{ll} x''+(\beta+1)x+(\beta-1)y+\varepsilon(x-y)^3=0\\ y''+(\delta+1)x+(\delta-1)y+\varepsilon(x-y)^3=0 \end{array}\right. \end{equation} so that the limit linear problem obtained for $\varepsilon=0$ reads \neweq{f0} \left\{\begin{array}{ll} x''+(\beta+1)x+(\beta-1)y=0\\ y''+(\delta+1)x+(\delta-1)y=0\ . \end{array}\right. \end{equation} The theory of linear systems tells us that the shape of the solutions to \eq{f0} depends on the signs of the parameters $$A=\beta+\delta\ ,\quad B=2(\delta-\beta)\ ,\quad \Delta=(\beta+\delta)^2+8(\beta-\delta)\ .$$ Under the same assumptions of Theorem \ref{oscill}, for \eq{f0} we have $A\le0$ and $B>0$ but the sign of $\Delta$ is not known a priori and three different cases may occur.\par $\bullet$ If $\Delta<0$ (a case including also $A=0$), then we have exponentials times trigonometric functions so either we have self-excited oscillations which increase amplitude as $t\to\infty$ or we have damped oscillations which tend to vanish as $t\to\infty$. 
Consider the case $\delta=-\beta=1$ and $(x_0,y_0,x_1,y_1)=(1,0,1,-1)$, then \eq{initial} is fulfilled and Theorem \ref{oscill} yields \begin{corollary} For any $\varepsilon>0$ there exists $R_\varepsilon>0$ such that the solution $(x^\varepsilon,y^\varepsilon)$ to the Cauchy problem \neweq{feps} \left\{\begin{array}{ll} x''-2y+\varepsilon(x-y)^3=0\\ y''+2x+\varepsilon(x-y)^3=0\\ x(0)=1,\ y(0)=0,\ x'(0)=1,\ y'(0)=-1 \end{array}\right. \end{equation} blows up as $t\to R_\varepsilon$ and satisfies $$ \liminf_{t\to R_\varepsilon}x^\varepsilon(t)=\liminf_{t\to R_\varepsilon}y^\varepsilon(t)=-\infty\qquad\mbox{and}\qquad\limsup_{t\to R_\varepsilon}x^\varepsilon(t)=\limsup_{t\to R_\varepsilon}y^\varepsilon(t)=+\infty\, . $$ \end{corollary} A natural conjecture, supported by numerical experiments, is that $R_\varepsilon\to\infty$ as $\varepsilon\to0$. For several $\varepsilon>0$, we plotted the solution to \eq{feps} and the pictures all looked like Figure \ref{plot1}. \begin{figure} \caption{The solution $x^\varepsilon$ (black) and $y^\varepsilon$ (green) to \eq{feps}} \label{plot1} \end{figure} When $\varepsilon=0.1$ the blow up seems to occur at $R_\varepsilon=4.041$. Notice that $x^\varepsilon$ and $y^\varepsilon$ ``tend to become the same''; in the third picture they are indistinguishable. After some time, when the wide oscillations amplify, $x^\varepsilon$ and $y^\varepsilon$ move almost synchronously. When $\varepsilon=0$, the solution to \eq{feps} is explicitly given by $x^0(t)=e^t\cos(t)$ and $y^0(t)=-e^t\sin(t)$, thereby displaying oscillations blowing up in infinite time similar to those visible in \eq{scann}.\par If we replace the Cauchy problem in \eq{feps} with $$x(0)=1,\ y(0)=0,\ x'(0)=-1,\ y'(0)=1$$ then \eq{initial} is not fulfilled. However, for any $\varepsilon>0$ that we tested, the corresponding numerical solutions looked like those in Figure \ref{plot1}.
In this case, the limit problem with $\varepsilon=0$ admits as solutions $x^0(t)=e^{-t}\cos(t)$ and $y^0(t)=e^{-t}\sin(t)$ which do exhibit oscillations but, now, strongly damped.\par Let us also consider the two remaining limit systems which, however, do not display oscillations.\par $\bullet$ If $\Delta=0$, since $A\le0$, there are no trigonometric functions in the limit case \eq{f0}.\par $\bullet$ If $\Delta>0$, then necessarily $A<0$ since $B>0$, and hence only exponential functions are involved: the solution to \eq{f0} may blow up in infinite time or vanish at infinity.\par The above results explain why we believe that \eq{scann} is not suitable to display self-excited oscillations as the ones which appeared for the TNB. Since it has only two degrees of freedom, it fails to consider both vertical and torsional oscillations which, on the contrary, are visible in the McKenna-type system \eq{miosystxy}. We have seen in Theorem \ref{oscill} that destructive self-excited oscillations may blow up in finite time, something very similar to what may be observed in \cite{tacoma}. Hence, \eq{miosystxy} shows more realistic self-excited oscillations than \eq{scann}.\par Although the blow up occurs at $t=4.04$, the solution plotted in Figure \ref{plot1} is relatively small until $t=3.98$. 
This, together with the behavior displayed in Figures \ref{duemila} and \ref{mille}, allows us to conclude that \begin{center} \mbox{\bf in nonlinear systems, self-excited oscillations appear suddenly, without any intermediate stage.} \end{center} The material presented in this section also enables us to conclude that \begin{center} \mbox{\bf the linear case cannot be seen as a limit situation of the nonlinear case} \end{center} since the behavior of the solution to \eq{f0} depends on $\beta$, $\delta$, and on the initial conditions, while nothing can be deduced from the sequence of solutions $(x^\varepsilon,y^\varepsilon)$ to problem \eq{fe} as $\varepsilon\to0$ because these solutions all behave similarly independently of $\beta$ and $\delta$. Furthermore, the solutions to the limit problem \eq{f0} may or may not exhibit oscillations and if they do, these oscillations may be both of increasing amplitude or of vanishing amplitude as $t\to+\infty$. All this shows that linearisation may give misleading and unpredictable answers.\par In this section we have seen that the blow up of solutions to \eq{maineq2} and to \eq{miosystxy} occurs with wide oscillations after a long time of apparent calm. Hence, the solution does not display any visible behavior which may warn some imminent danger. The reason is that second derivatives of the solution to \eq{maineq2} blow up at a higher rate, see Theorem \ref{asymptotics}, and second derivatives are not visible by simply looking at the graph. Wide oscillations after a long time of apparent calm suggest that some hidden energy is present in the system. Finally, we have seen that self-excited oscillating blow up also appears for a wide class of superlinear fourth order differential equations, including PDE's. \section{Affording an explanation in terms of energies}\label{afford} \subsection{Energies involved}\label{energies} The most important tools to describe any structure are the energies which appear. 
A precise description of all the energies involved would lead to perfect models and would give all the information to make correct projects. Unfortunately, bridges, as well as many other structures, do not allow simple characterisations of all the energies present in the structure and, maybe, not all possible existing energies have been detected up to nowadays. Hence, it appears impossible to make a precise list of all the energies involved in the complex behavior of a bridge.\par The kinetic energy is the simplest energy to be described. If $v(x,t)$ denotes the vertical displacement at $x\in\Omega$ and at $t>0$, then the total kinetic energy at time $t$ is given by $$\frac{m}{2}\int_\Omega v_t(x,t)^2\, dx$$ where $m$ is the mass and $\Omega$ can be either a segment (beam model) or a thin rectangle (plate model). This energy gives rise to the term $mv_{tt}$ in the corresponding Euler-Lagrange equation, see Section \ref{models}.\par Then one should consider potential energy, which is more complicated. From \cite[pp.75-76]{bleich}, we quote \begin{center} \begin{minipage}{162mm} {\em The potential energy is stored partly in the stiffening frame in the form of elastic energy due to bending and partly in the cable in the form of elastic stress-strain energy and in the form of an increased gravity potential.} \end{minipage} \end{center} Hence, an important role is played by stored energy. Part of the stored energy is potential energy which is quite simple to determine: in order to avoid confusion, in the sequel we call potential energy only the energy due to gravity which, in the case of a bridge, is computed in terms of the vertical displacement $v$. However, the dominating part of the stored energy in a bridge is its elastic energy.\par The distinction between elastic and potential stored energies, which in our opinion appears essential, is not highlighted with enough care in \cite{bleich} nor in any subsequent treatise of suspension bridges. 
A further criticism about \cite{bleich} is that it often makes use of ${\cal LHL}$, see \cite[p.214]{bleich}. Apart these two weak points, \cite{bleich} makes a quite careful quantitative analysis of the energies involved. In particular, concerning the elastic energy, the contribution of each component of the bridge is taken into account in \cite{bleich}: the chords (p.145), the diagonals (p.146), the cables (p.147), the towers (pp.164-168), as well as quantitative design factors (pp.98-103).\par A detailed energy method is also introduced at p.74, as a practical tool to determine the modes of vibrations and natural frequencies of suspension bridges: the energies considered are expressed in terms of the amplitude of the oscillation $\eta=\eta(x)$ and therefore, they do not depend on time. As already mentioned, the nonlocal term in \eq{eqqq} represents the increment of energy due to the external wind during a period of time. Recalling that $v(x,t)=\eta(x)\sin(\omega t)$, \cite[p.28]{bleich} represents the net energy input per cycle by \neweq{dissipation} A:=\frac{w^2}{H_w^2}\, \frac{EA}{L}\int_0^L \eta(z)\, dz-C\int_0^L \eta(z)^2\, dz \end{equation} where $L$ is the length of the beam and $C>0$ is a constant depending on the frequency of oscillation and on the damping coefficient, so that the second term is a quantum of energy being dissipated as heat: mechanical hysteresis, solid friction damping, aerodynamic damping, etc. It is explained in Figure 13 in \cite[p.33]{bleich} that \begin{center} \begin{minipage}{162mm} {\em the kinetic energy will continue to build up and therefore the amplitude will continue to increase until $A=0$.} \end{minipage} \end{center} Hence, the larger is the input of energy $\int_0^L \eta$ due to the wind, the larger needs to be the displacement $v$ before the kinetic energy will stop to build up. 
This is related to \cite[pp.241-242]{bleich}, where an attempt is made \begin{center} \begin{minipage}{162mm} {\em to approach by rational analysis the problem proper of self-excitation of vibrations in truss-stiffened suspension bridges. ... The theory discloses the peculiar mechanism of catastrophic self-excitation in such bridges...} \end{minipage} \end{center} The word ``self-excitation'' suggests behaviors similar to \eq{pazzo}. As shown in \cite{gazpav3}, the oscillating blow up of solutions described by \eq{pazzo} occurs in many fourth order differential equations, including PDE's, see also Remark \ref{pde}, whereas it does not occur in lower order equations. But these oscillations, and the energy generating them, are somehow hidden also in fourth order equations; let us explain qualitatively what we mean by this. Engineers usually say that {\em the wind feeds into the structure an increment of energy} (see \cite[p.28]{bleich}) and that {\em the bridge eats energy} but we think it is more appropriate to say that {\bf the bridge ruminates energy}. That is, first the bridge stores the energy due to prolonged external sources. Part of this stored energy is indeed dissipated (eaten) by the structural damping of the bridge. From \cite[p.211]{bleich}, we quote \begin{center} \begin{minipage}{162mm} {\em Damping is dissipation of energy imparted to a vibrating structure by an exciting force, whereby a portion of the external energy is transformed into molecular energy.} \end{minipage} \end{center} Every bridge has its own damping capacity defined as the ratio between the energy dissipated in one cycle of oscillation and the maximum energy of that cycle. The damping capacity of a bridge depends on several components such as elastic hysteresis of the structural material and friction between different components of the structure, see \cite[p.212]{bleich}. A second part of the stored energy becomes potential energy if the bridge is above its equilibrium position. 
The remaining part of the stored energy, namely the part exceeding the damping capacity plus the potential energy, is stored into inner elastic energy; only when this stored elastic energy reaches a critical threshold (saturation), the bridge starts ``ruminating'' energy and gives rise to torsional or more complicated oscillations.\par When \eq{pazzo} occurs, the estimate \eq{estimate} shows that $|w''(t)|$ blows up at a higher rate when compared to $|w(t)|$ and $|w'(t)|$. Although any student is able to see if a function or its first derivative are large just by looking at the graph, most people are unable to see if the second derivative is large. Roughly speaking, the term $\int w''(t)^2$ measures the elastic energy, the term $\int w'(t)^2$ measures the kinetic energy, whereas $\int w(t)^2$ is a measure of the potential energy due to gravity. Hence, \eq{estimate} states that the elastic energy has a higher rate of blow up when compared to the kinetic and potential energies; equivalently, we can say that both the potential energy, described by $|w|$, and the kinetic energy, described by $|w'|$, are negligible with respect to the elastic energy, described by $|w''|$. But since large $|w''(t)|$ cannot be easily detected, the bridge may have large elastic energy, and hence large total energy, without revealing it. Since $|w(t)|$ and $|w'(t)|$ blow up later than $|w''(t)|$, the total energy can be very large without being visible; this is what we mean by hidden elastic energy. This interpretation well agrees with the numerical results described in Section \ref{blup} which show that blow up in finite time for \eq{maineq2} occurs after a long waiting time of apparent calm and sudden wide oscillations. Since the apparent calm suggests low energy whereas wide oscillations suggest high energy, this means that some hidden energy is indeed present in the bridge. And the stored elastic energy, in all of its forms, seems the right candidate to be the hidden energy. 
Summarising, the large elastic energy $|w''(t)|$ hides the blow up of $|w(t)|$ for some time. Then, with some delay but suddenly, also $|w(t)|$ becomes large. If one could find a simple way to measure $|w''(t)|$ which is an approximation of the elastic energy or, even better, $|w''(t)|/\sqrt{1+w'(t)^2}$ which is the mean curvature, then one would have some time to prevent possible collapses.\par A flavor of what we call hidden energy was already present in \cite{bleich} where the energy storage capacity of a bridge is often discussed, see (p.34, p.104, p.160, p.164) for the storage capacity of the different vibrating components of the bridge. Moreover, the displayed comment just before \eq{coupled} shows that McKenna-Tuama \cite{mckO} also had the feeling that some energy could be hidden. \subsection{Energy balance}\label{energybalance} As far as we are aware, the first attempt for a precise quantitative energy balance in a beam representing a suspension bridge was made in \cite[Chapter VII]{tac2}. Although all the computations are performed with precise values of the constants, in our opinion the analysis there is not complete since it does not distinguish between different kinds of potential energies; what is called potential energy is just the energy stored in bending a differential length of the beam.\par A better attempt is made in \cite[p.107]{bleich} where the plot displays the behavior of the stored energies: the potential energy due to gravity and the elastic energies of the cables and of the stiffening frame. Moreover, the important notion of flutter speed is first used. Rocard \cite[p.185]{rocard} attributes to Bleich \cite{bleichsolo} \begin{center} \begin{minipage}{162mm} {\em ... to have pointed out the connection with the flutter speed of aircraft wings ... 
He distinguishes clearly between flutter and the effect of the staggered vortices and expresses the opinion that two degrees of freedom (bending and torsion) at least are necessary for oscillations of this kind.} \end{minipage} \end{center} A further comment on \cite{bleichsolo} is given at \cite[p.80]{wake}: \begin{center} \begin{minipage}{162mm} {\em ... Bleich's work ... ultimately opened up a whole new field of study. Wind tunnel tests on thin plates suggested that higher wind velocities increased the frequency of vertical oscillation while decreasing that of torsional oscillation.} \end{minipage} \end{center} The conclusion is that when the two frequencies corresponded, a flutter critical velocity was reached, as manifested in a potentially catastrophic coupled oscillation. In order to define the flutter speed, \cite[pp.246-247]{bleich} assumes that the bridge is subject to a natural steady state oscillating motion; the flutter speed is then defined by: \begin{center} \begin{minipage}{162mm} {\em With increasing wind speed the external force necessary to maintain the motion at first increases and then decreases until a point is reached where the air forces alone sustain a constant amplitude of the oscillation. The corresponding velocity is called the critical velocity or flutter speed.} \end{minipage} \end{center} The importance of the flutter speed is then described by \begin{center} \begin{minipage}{162mm} {\em Below the critical velocity $V_c$ an exciting force is necessary to maintain a steady-state motion; above the critical velocity the direction of the force must be reversed (damping force) to maintain the steady-state motion. 
In absence of such a damping force the slightest increase of the velocity above $V_c$ causes augmentation of the amplitude.} \end{minipage} \end{center} This means that self-excited oscillations appear as soon as the flutter speed is exceeded.\par Also Rocard devotes a large part of \cite[Chapter VI]{rocard} to \begin{center} \begin{minipage}{162mm} {\em ... predict and delimit the range of wind speeds that inevitably produce and maintain vibrations of restricted amplitudes.} \end{minipage} \end{center} This task is reached by a careful study of the natural frequencies of the structure. Moreover, Rocard aims to \begin{center} \begin{minipage}{162mm} {\em ... calculate the really critical speed of wind beyond which oscillatory instability is bound to arise and will always cause fracture.} \end{minipage} \end{center} The flutter speed $V_c$ for a bridge without damping is computed on \cite[p.163]{rocard} and reads \neweq{speedflutter} V_c^2=\frac{2r^2\ell^2}{2r^2+\ell^2}\, \frac{\omega_T^2-\omega_B^2}{\alpha} \end{equation} where $2\ell$ denotes the width of the roadway, see Figure \ref{9}, $r$ is the radius of gyration, $\omega_B$ and $\omega_T$ are the lowest modes circular frequencies of the bridge in bending and torsion respectively, $\alpha$ is the mass of air in a unit cube divided by the mass of steel and concrete assembled within the same unit length of the bridge; usually, $r\approx\ell/\sqrt{2}$ and $\alpha\approx0.02$. More complicated formulas for the flutter speed are obtained in presence of damping factors. 
Moreover, Rocard \cite[p.158]{rocard} shows that, for the original Tacoma Bridge, \eq{speedflutter} yields $V_c=47$mph while the bridge collapsed under the action of a wind whose speed was $V=42$mph; he concludes that his computations are quite reliable.\par In pedestrian bridges, the counterpart of the flutter speed is the {\em critical number of pedestrians}, see the quoted sentence by Macdonald \cite{macdonald} in Section \ref{story} and also \cite[Section 2.4]{franck}. For this reason, in the sequel we prefer to deal with energies rather than with velocities: the flutter speed $V_c$ corresponds to a {\bf critical energy threshold} $\overline{E}$ above which the bridge displays self-excited oscillations. We believe that \begin{center} \begin{minipage}{162mm} {\bf The critical energy threshold, generated by the flutter speed for suspension bridges and by the critical number of pedestrians for footbridges, is the threshold where the nonlinear behavior of the bridge really appears, due to sufficiently large displacements of the roadway from equilibrium.} \end{minipage} \end{center} The threshold $\overline{E}$ depends on the elasticity of the bridge, namely on the materials used for its construction. This is in accordance with the numerical results obtained in \cite{gazpav3} where it is shown that the blow up time for solutions to \eq{maineq2} depends increasingly on the parameter $k$. We refer to Section \ref{howto2} for a more precise definition of $\overline{E}$ and a possible way to determine it.\par In this section, we attempt a qualitative energy balance involving more kinds of energies. 
A special role is played by the elastic energy which should be distinguished from the potential energy that, as we repeatedly said, merely denotes the potential energy due to gravity; its level zero is taken in correspondence of the equilibrium position of the roadway.\par Let us first describe what we believe to happen in a single cross section $\Gamma$ of the bridge; let ${\cal E}$ denote its total energy. Let $A$, $B$, and $C$ denote the three positions of the endpoint $P$ of $\Gamma$, as described in Figure \ref{1} \begin{figure} \caption{Different positions for the bridge side.} \label{1} \end{figure} where the thick grey part denotes the roadway whereas the dotted line displays the behavior of the solution $w$ to \eq{maineq2} when $w<0$, namely when the deflection from horizontal appears, see \eq{w0}. In what follows, we denote by $A$, $B$, $C$, both the positions in Figure \ref{1} and the instants of time when they occur for $P$. When $P$ is in its highest position $A$, $\Gamma$ has maximal potential energy $E_p$ and zero kinetic energy $E_k$: $E_p(A)={\cal E}$, $E_k(A)=0$. In the interval of time $t$ when $P$ goes from position $A$ to position $B$ the potential and kinetic energies of $\Gamma$ exhibit the well-known behavior with constant sum, see the first picture in Figure \ref{2345} \begin{figure} \caption{Energy balance for different positions of the bridge.} \label{2345} \end{figure} where $E_\ell$ denotes the portion of the stored elastic energy exceeding the structural damping of the bridge: when it is $0$, it means that there is no stored elastic energy. When $P$ reaches position $B$, corresponding to the maximal elongation of the sustaining hangers, all the energy has been transformed into kinetic energy: $E_p(B^-)=0$, $E_k(B^-)={\cal E}$. 
In this position the falling of $\Gamma$ is violently stopped by the extended hangers by means of a Dirac delta impulse and the existing kinetic energy of $\Gamma$ is instantaneously stored into elastic energy $E_\ell$: $E_k(B^-)=E_\ell(B^+)$, see the second picture in Figure \ref{2345}. If the total energy ${\cal E}$ is smaller than the critical threshold $\overline{E}$, corresponding to the flutter speed, nothing seems to happen because the elastic energy is not visible; in this case, after a while it reaches position $C$ and thanks to a further impulse, the elastic energy transforms back to kinetic energy. Then $P$ starts raising up towards position $A$ and the sum of the potential and kinetic energies is again constant, see the third picture in Figure \ref{2345}. In the meanwhile, if the wind keeps blowing or traffic loads generate further negative damping, the total energy ${\cal E}$ increases. Hence, after a cycle, when $P$ is back in position $A$ the total energy ${\cal E}$ of $\Gamma$ may have become larger. In turn, $E_k(B^-)$ will also be larger and, after a certain number of cycles, if the wind velocity is larger than the flutter speed, the total energy exceeds the critical threshold: ${\cal E}>\overline{E}$. In turn, also $E_k(B^-)$ exceeds the critical threshold: $E_k(B^-)>\overline{E}$. When this occurs, the energy splits into two parts: the saturated elastic energy $E_\ell(B^+)=\overline{E}$ and a torsional elastic energy $E_t(B^+)={\cal E}-\overline{E}$ which immediately gives rise to torsional oscillations, see the fourth picture in Figure \ref{2345}. As long as ${\cal E}>\overline{E}$, when $P$ reaches position $B$ the torsional elastic energy becomes positive and $P$ ``virtually'' goes from $B$ to $C$ along the dotted line in Figure \ref{1}. In the interval of time when $P$ is between $B^+$ and $C^-$, the torsional elastic energy remains constant and equal to $E_t(C^+)$, see the fourth picture in Figure \ref{2345}. 
Only after a further impact, in position $B$, it may vary due to the new impulse. Finally, there exists a second critical threshold: if ${\cal E}-\overline{E}$ becomes too large, namely if the total energy ${\cal E}$ itself is too large, then the bridge collapses. \begin{remark} {\rm With some numerical results at hand, Lazer-McKenna \cite[p.565]{mck1} attempt to explain the Tacoma collapse with the following comment: \begin{center} \begin{minipage}{162mm} {\em An impact, due to either an unusual strong gust of wind, or to a minor structural failure, provided sufficient energy to send the bridge from one-dimensional to torsional orbits.}\end{minipage} \end{center} We believe that what they call {\em an unusual impact} is, in fact, a cyclic impulse for the transition between positions $B^-$ and $B^+$. $\Box$} \end{remark} \begin{problem} {\em The above energy balance should become quantitative. An exact way to compute all the energies involved should be determined. Of course, the potential and kinetic energy are straightforward. But the elastic energy needs deeper analysis.} $\Box$\end{problem} Let us now consider the entire bridge which we model as a rectangular plate $\Omega=(0,L)\times(-\ell,\ell)\subset\mathbb{R}^2$. For all $x_1\in(0,L)$ let ${\cal E}_{x_1}$ denote the total energy of the cross section $\Gamma_{x_1}=\{x_1\}\times(-\ell,\ell)$, computed following the above explained method. Then the total energy of the plate $\Omega$ is given by \neweq{allsections} {\cal E}_\Omega=\int_0^L{\cal E}_{x_1}\, dx_1\ . \end{equation} For simplicity we have here neglected the stretching energy which is a kind of ``interaction energy between cross sections''. If one wishes to consider also this energy, one usually assumes that the elastic force is proportional to the increase of surface. 
Then the stretching energy of the horizontal plate $\Omega$ whose vertical deflection is $u$ reads $${\cal E}_S(u)=\int_\Omega\left(\sqrt{1+|\nabla u|^2}-1\right) \, dx_1dx_2$$ and, after multiplication by a suitable coefficient, it should be added to ${\cal E}_\Omega$. For small deformations $u$ the asymptotic expansion leads to the usual Dirichlet integral $\frac12 \int_\Omega|\nabla u|^2$ and, in turn, to the appearance of the second order term $\Delta u$ in the corresponding Euler-Lagrange equation. Clearly, the bridge is better described with the addition of the stretching energy but, at least to have a description of qualitative behaviors, we may neglect it in a first simplified model.\par The just described elastic phenomenon may also be seen in a much simpler model. Imagine there is a ball at some given height above an horizontal plane $P$, see position $A$ in Figure \ref{10}. \begin{figure} \caption{A suspension bridge is like a bouncing ball.} \label{10} \end{figure} The ball is falling down until it reaches the position tangent to the plane as in position $B$. Then there is some positive time where the ball touches $P$; the reason is that it is squeezed and deformed, although probably less than illustrated in the $B\to C$ picture. But, of course, a very soft ball may have an important deformation. Just after the impact, the ball stores elastic energy which is hardly visible. After some time, the ball recovers its initial spherical shape and is ready to bounce up, see position $C$. When it is back in position $A$ it may store further energy, for instance with a hand pushing it downwards. For these reasons, we believe that there is some resemblance between bouncing balls and oscillating bridges. 
\subsection{Oscillating modes in suspension bridges: seeking the correct boundary conditions}\label{modes} Smith-Vincent \cite[Section I.2]{tac2} analyse the different forms of motion of a suspension bridge and write \begin{center} \begin{minipage}{162mm} {\em The natural modes of vibration of a suspension bridge can be classified as vertical and torsional. In pure vertical modes all points on a given cross section move vertically the same amount and in phase... The amount of this vertical motion varies along the longitudinal axis of the bridge as a modified sine curve.} \end{minipage} \end{center} Then, concerning torsional motions, they write \begin{center} \begin{minipage}{162mm} {\em In pure torsional modes each cross section rotates about an axis which is parallel to the longitudinal axis of the bridge and is in the same vertical plane as the centerline of the roadway. Corresponding points on opposite sides of the centerline of the roadway move equal distances but in opposite directions.} \end{minipage} \end{center} Moreover, Smith-Vincent also analyse small oscillations: \begin{center} \begin{minipage}{162mm} {\em For small torsional amplitudes the movement of any point is essentially vertical, and the wave form or variation of amplitude along a line parallel to the longitudinal centerline of the bridge ... is the same as for a corresponding pure vertical mode.} \end{minipage} \end{center} With these remarks at hand, in this section we try to set up a reliable eigenvalue problem. We consider the roadway bridge as a long narrow rectangular thin plate, simply supported on its short sides. So, let $\Omega=(0,L)\times(-\ell,\ell)\subset\mathbb{R}^2$ where $L$ is the length of the bridge and $2\ell$ is its width; a realistic assumption is that $2\ell\ll L$.\par As already mentioned in Section \ref{elasticity}, the choice of the boundary conditions is delicate since it depends on the physical model considered. 
We first recall that the boundary conditions $u=\Delta u=0$ are the so-called Navier boundary conditions, see Figure \ref{navierbc} which is taken from \cite[p.96]{navier}. \begin{figure} \caption{First appearance of Navier boundary conditions.} \label{navierbc} \end{figure} On flat parts of the boundary where no curvature is present, they describe simply supported plates, see e.g.\ \cite{gazgruswe}. When $x_1$ is fixed, either $x_1=0$ or $x_1=L$, these conditions reduce to $u=u_{x_1x_1}=0$. And precisely on these two sides, the roadway $\Omega$ is assumed to be simply supported; this is uniformly accepted in any of the models we met. The delicate point is the determination of the boundary conditions on the other sides.\par In order to get into the problem, we start by dealing with the linear Kirchhoff-Love theory described in Section \ref{elasticity}. In view of \eq{energy-gs}, the energy of the vertical deformation $u$ of a rectangular plate $\Omega=(0,L)\times(-\ell,\ell)$ subject to a load $f=f(x_1,x_2)$ is given by \neweq{energy-f} \mathbb{E}(u)=\int_{\Omega }\left(\frac{1}{2}\left( \Delta u\right) ^{2}+(\sigma-1)\det(D^2u)-f\, u\right) \, dx_1dx_2 \end{equation} and the corresponding Euler-Lagrange equation reads $\Delta^2u=f$ in $\Omega$. For a fully simply supported plate, that is $u=u_{x_1x_1}=0$ on the vertical sides and $u=u_{x_2x_2}=0$ on the horizontal sides, this problem has been solved by Navier \cite{navier} in 1823, see also \cite[Section 2.1]{mansfield}. But Cauchy \cite{cauchy} criticised the work by Navier by claiming that \begin{center} \begin{minipage}{162mm} {\em ... Navier ... avait consid\'er\'e deux esp\`eces de forces produites, les unes par la dilatation ou la contraction, les autres par la flexion de ce m\^eme plan. ... Il me parut que ces deux esp\`eces de forces pouvaient \^etre r\'eduites \`a une seule... .} \end{minipage} \end{center} Did Cauchy already have in mind the difference/analogy between bending, stretching, and torsion? 
In any case, since the bridge is not a fully simply supported plate, different boundary conditions should be considered on the horizontal sides. The load problem on the rectangle $\Omega$ with only the vertical sides being simply supported was considered by L\'evy \cite{levy}, Zanaboni \cite{zanaboni}, and Nadai \cite{nadai}, see also \cite[Section 2.2]{mansfield} for the analysis of different kinds of boundary conditions on the remaining two sides $x_2=\pm\ell$. Let us also mention the more recent monograph \cite[Chapter 3]{ventsel} for a very clear description of bending of rectangular plates.\par A first natural possibility is to consider the horizontal sides to be free. If no physical constraint is present on the horizontal sides, then the boundary conditions there become (see e.g.\ \cite[(2.40)]{ventsel}) $$ u_{x_2x_2}(x_1,\pm\ell)+\sigma u_{x_1x_1}(x_1,\pm\ell)=0\, ,\quad u_{x_2x_2x_2}(x_1,\pm\ell)+(2-\sigma)u_{x_1x_1x_2}(x_1,\pm\ell)=0\, ,\quad x_1\in(0,L)\, . $$ Unfortunately, some physical constraints are present on the two horizontal sides, both because of the action of the hangers and because the cross section of the bridge may be considered to be rigid. Our purpose is to describe the oscillating modes of the plate $\Omega$ under the most possible realistic boundary conditions; we suggest here some new conditions to be required on the horizontal sides $x_2=\pm\ell$. Hopefully, they should allow to emphasise both vertical and torsional oscillations.\par First of all, note that if the cross section of the roadway is rigid and behaves as in Figure \ref{9} then each cross section has constant deflection from horizontal, that is, it rotates around its barycenter $B$: Prof.\ Farquharson, the man escaping in \cite{tacoma}, ran following the middle line of the roadway precisely in order to avoid torsional oscillations. 
Denoting by $u=u(x_1,x_2)$ the vertical displacement of the roadway, this amounts to say that \neweq{small} u_{x_2}(x_1,x_2,t)=\Psi(x_1,t)\qquad(x\in\Omega\, ,\ t>0) \end{equation} for some $\Psi$. If we translate this constraint on the vertical sides $x_2=\pm\ell$ of the plate $\Omega$, we obtain $$\begin{array}{cc} u_{x_2x_2}(x_1,x_2)=0\quad x\in(0,L)\times\{-\ell,\ell\}\, ,\\ 2\ell u_{x_2}(x_1,-\ell)=2\ell u_{x_2}(x_1,\ell)=u(x_1,\ell)-u(x_1,-\ell)\quad x_1\in(0,L)\, . \end{array}$$ Indeed, by \eq{small} we have $u_{x_2x_2}\equiv0$ in $\Omega$, which justifies both these conditions. Taking all the above boundary conditions into account, we are led to consider the following eigenvalue problem \neweq{eigen1} \left\{\begin{array}{ll} \Delta^2 u=\lambda u\quad & x=(x_1,x_2)\in\Omega\, ,\\ u(x_1,x_2)=u_{x_1x_1}(x_1,x_2)=0\quad & x\in\{0,L\}\times(-\ell,\ell)\, ,\\ u_{x_2x_2}(x_1,x_2)=0\quad & x\in(0,L)\times\{-\ell,\ell\}\, ,\\ 2\ell u_{x_2}(x_1,-\ell)=2\ell u_{x_2}(x_1,\ell)=u(x_1,\ell)-u(x_1,-\ell)\quad & x_1\in(0,L)\, . \end{array}\right. \end{equation} This is a nonlocal problem which combines boundary conditions on different parts of $\partial\Omega$. The oscillating modes of $\Omega$ are the eigenfunctions to \eq{eigen1}. By separating variables, we find the two families of eigenfunctions \neweq{autofunzione} \sin\left(\frac{m\pi}{L}x_1\right)\ ,\qquad x_2\sin\left(\frac{m\pi}{L}x_1\right)\qquad(m\in\mathbb{N}\setminus\{0\})\ . \end{equation} The first family describes pure vertical oscillations whereas the second family describes pure torsional oscillations with no vertical displacement of the middle line of the roadway. 
For fixed $m$, both these eigenfunctions correspond to the eigenvalue $$\lambda_m=\frac{m^4\pi^4}{L^4}\ .$$ Although the ``interesting'' eigenfunctions to \eq{eigen1} are the ones in \eq{autofunzione}, further eigenfunctions might exist and are expected to have the form \neweq{formeigen} \psi_m(x_2)\, \sin\left(\frac{m\pi}{L}x_1\right)\qquad(m\in\mathbb{N}\setminus\{0\}) \end{equation} for some $\psi_m\in C^4(-\ell,\ell)$ satisfying a suitable linear fourth order ODE, see \cite{fergaz}. \begin{problem} {\em Determine all the eigenvalues and eigenfunctions to \eq{eigen1}. Is $\lambda=0$ an eigenvalue? Which subspace of $H^2(\Omega)$ is spanned by these eigenfunctions?} $\Box$\end{problem} It would be interesting to find out if the corresponding loaded plate problem has an equilibrium. \begin{problem} {\em For any $f\in L^2(\Omega)$ study existence and uniqueness of a function $u\in H^4(\Omega)$ satisfying $\Delta^2 u=f$ in $\Omega$ and \eq{eigen1}$_2$-\eq{eigen1}$_3$-\eq{eigen1}$_4$. Try first some particular forms of $f$ as in \cite[Sections 2.2, 2.2.2]{mansfield} and then general $f=f(x_1,x_2)$. Is there any reasonable weak formulation for this problem?} $\Box$\end{problem} Problem \eq{eigen1} may turn out to be quite complicated from a mathematical point of view: it is not a variational problem and standard elliptic regularity does not apply. So, let us suggest an alternative model which seems to fit slightly better in well-known frameworks and also admits \eq{autofunzione} as eigenfunctions. Consider the eigenvalue problem \neweq{eigen2} \left\{\begin{array}{ll} \Delta^2 u=\lambda u\quad & x=(x_1,x_2)\in\Omega\, ,\\ u(x_1,x_2)=u_{x_1x_1}(x_1,x_2)=0\quad & x\in\{0,L\}\times(-\ell,\ell)\, ,\\ u_{x_2x_2}(x_1,x_2)=u_{x_2x_2x_2}(x_1,x_2)=0\quad & x\in(0,L)\times\{-\ell,\ell\}\, . \end{array}\right. \end{equation} Here, the condition on the third normal derivative replaces \eq{eigen1}$_4$. 
This condition somehow ``forces $u_{x_2x_2}$ to remain zero'' which is precisely what happens in the bridge. It is straightforward to verify that \eq{autofunzione} are eigenfunctions to \eq{eigen2} so that similar problems arise. \begin{problem} {\em Determine all the eigenvalues and eigenfunctions to \eq{eigen2}. Which subspace of $H^2(\Omega)$ is spanned by these eigenfunctions? For any $f\in L^2(\Omega)$ study existence and uniqueness of a function $u\in H^4(\Omega)$ satisfying $\Delta^2 u=f$ in $\Omega$ and \eq{eigen2}$_2$-\eq{eigen2}$_3$.} $\Box$\end{problem} A further alternative eigenvalue problem is also of some interest. A possible additional simplification in the model would be to assume that \neweq{mezzeria} u(x_1,0,t)\simeq0\qquad\mbox{for all }x_1\in(0,L)\, ,\ t>0\, , \end{equation} namely that the center line of the roadway has small vertical oscillations. If on one hand this seems realistic in view of \cite{tacoma}, on the other hand this would preclude the appearance of wide vertical oscillations on the center line. In the whole, we believe that, qualitatively, the behavior of the plate will not change too much. By assuming \eq{small} and that equality holds in \eq{mezzeria}, for all $x_1\in(0,L)$ and $t>0$ we obtain $$u_{x_2}(x_1,-\ell,t)=u_{x_2}(x_1,\ell,t)=\frac{u(x_1,\ell,t)-u(x_1,-\ell,t)}{2\ell}\ ,\quad u(x_1,\ell,t)=-u(x_1,-\ell,t)\, .$$ By putting these together and decoupling equations on $x_2=\pm\ell$ we arrive at the Robin conditions $$\ell u_{x_2}(x_1,-\ell,t)+u(x_1,-\ell,t)=0\, ,\ \ell u_{x_2}(x_1,\ell,t)-u(x_1,\ell,t)=0\quad(x_1\in(0,L)\, ,\ t>0)\, .$$ We believe that these boundary conditions may help to obtain oscillation properties also of the solutions to equations derived without assuming \eq{mezzeria}. 
\begin{problem} {\em Determine the eigenvalues $\lambda$ and the properties of the eigenfunctions to the following problem $$\left\{\begin{array}{ll} \Delta^2 u=\lambda u\quad & x\in\Omega\, ,\\ u(x_1,x_2)=u_{x_1x_1}(x_1,x_2)=0\quad & (x_1,x_2)\in\{0,L\}\times(-\ell,\ell)\, ,\\ u_{x_2x_2}(x_1,x_2)=0\quad & (x_1,x_2)\in(0,L)\times\{-\ell,\ell\}\, ,\\ \ell u_{x_2}(x_1,-\ell)+u(x_1,-\ell)=0\quad & x_1\in(0,L)\, ,\\ \ell u_{x_2}(x_1,\ell)-u(x_1,\ell)=0\quad & x_1\in(0,L)\, .\\ \end{array}\right.$$ This would give an idea of what kind of oscillations should be expected in the below model equation \eq{truebeam}.} $\Box$\end{problem} In this section we set up several eigenvalue problems for thin rectangular plates simply supported on two opposite sides. There is no evidence on which could be the best boundary conditions on the remaining sides. Once these are determined, it could be of great interest to have both theoretical and numerical information on the behavior of eigenvalues and eigenfunctions. The conditions should be sought in order to perfectly describe the oscillating modes of suspension bridges. In a work in preparation \cite{fergaz} we tackle these problems. \subsection{Seeking the critical energy threshold}\label{howto2} The title of this section should not deceive. We will not give a precise method how to determine the energy threshold which gives rise to torsional oscillations in a plate. We do have an idea how to proceed but several steps are necessary before reaching the final goal.\par Consider the plate $\Omega=(0,L)\times(-\ell,\ell)$ and the incomplete eigenvalue problem \neweq{incomplete} \left\{\begin{array}{ll} \Delta^2 u=\lambda u\quad & x\in\Omega\, ,\\ u(x_1,x_2)=u_{x_1x_1}(x_1,x_2)=0\quad & (x_1,x_2)\in\{0,L\}\times(-\ell,\ell)\, . \end{array}\right. 
\end{equation} Problem \eq{incomplete} lacks conditions on the remaining sides $x_2=\pm\ell$ and, as mentioned in the previous section, it is not clear which boundary conditions should be added there.\par In order to explain which could be the method to determine the critical energy threshold, consider the simple case where the plate is square, $\Omega=(0,\pi)\times(-\frac{\pi}{2},\frac{\pi}{2})$, and let us complete \eq{incomplete} with the ``simplest'' boundary conditions, namely the Navier boundary conditions which represent a fully simply supported plate. As already mentioned these are certainly not the correct boundary conditions for a bridge but they are quite helpful to describe the method we are going to suggest. For different and more realistic boundary conditions, we refer to the paper in preparation \cite{fergaz}. So, consider the problem \neweq{irrational} \left\{\begin{array}{ll} \Delta^2 u=\lambda u\quad & x\in\Omega\, ,\\ u(x_1,x_2)=u_{x_1x_1}(x_1,x_2)=0\quad & (x_1,x_2)\in\{0,\pi\}\times(-\frac{\pi}{2},\frac{\pi}{2})\, ,\\ u(x_1,x_2)=u_{x_2x_2}(x_1,x_2)=0\quad & (x_1,x_2)\in(0,\pi)\times\{-\frac{\pi}{2},\frac{\pi}{2}\}\ . \end{array}\right. \end{equation} It is readily seen that, for instance, $\lambda=625$ is an eigenvalue for \eq{irrational} and that there are 4 linearly independent corresponding eigenfunctions \neweq{exxample} \{\sin(24x_1)\cos(7x_2),\, \sin(20x_1)\cos(15x_2),\, \sin(15x_1)\cos(20x_2),\, \sin(7x_1)\cos(24x_2)\}\ . \end{equation} It is well-known that similar facts hold for the second order eigenvalue problem $-\Delta u=\lambda u$ on the square, so what we are discussing is not surprising. What we want to emphasise here is that, associated to the same eigenvalue $\lambda=625$, we have 4 different kinds of vibrations in the $x_1$-direction and each one of these vibrations has its own counterpart in the $x_2$-direction corresponding to torsional oscillations. 
We believe that this will be true for any boundary conditions on $x_2=\pm\ell$ completing \eq{incomplete} and for any values of $L$ and $\ell$. We refer again to Figure \ref{patterns} for the patterns of some vibrating plates.\par Consider now a general plate $\Omega=(0,L)\times(-\ell,\ell)$ and let $f\in L^2(\Omega)$; in view of \cite[Section 2.2]{mansfield}, we expect the solution to the problem \neweq{withsource} \left\{\begin{array}{ll} \Delta^2 u=f\quad & x\in\Omega\, ,\\ u(x_1,x_2)=u_{x_1x_1}(x_1,x_2)=0\quad & (x_1,x_2)\in\{0,L\}\times(-\ell,\ell)\, ,\\ \mbox{other boundary conditions}\quad & (x_1,x_2)\in(0,L)\times\{-\ell,\ell\}\ , \end{array}\right. \end{equation} to be of the kind $$u(x_1,x_2)=\sum_{m=1}^\infty \psi_m(x_2)\sin\left(\frac{m\pi}{L}x_1\right)\qquad(x_1,x_2)\in\Omega$$ for some functions $\psi_m$ depending on the Fourier coefficients of $f$. Since we have in mind small $\ell$, we can formally expand $\psi_m$ in Taylor polynomials and obtain $$\psi_m(x_2)=\psi_m(0)+\psi_m'(0)x_2+o(x_2)\qquad\mbox{as }x_2\to0\ .$$ Hence, $u$ may approximately be written as a combination of the functions in \eq{autofunzione}: $$u(x_1,x_2)\approx\sum_{m=1}^\infty[a_m+b_mx_2]\sin\left(\frac{m\pi}{L}x_1\right)\qquad(x_1,x_2)\in\Omega$$ where $a_m=\psi_m(0)$ and $b_m=\psi'_m(0)$. If instead of a stationary problem such as \eq{withsource}, $u=u(x_1,x_2,t)$ satisfies an evolution problem with the same boundary conditions, then also its coefficients depend on time: \neweq{fourier} u(x_1,x_2,t)\approx\sum_{m=1}^\infty \Big(a_m(t)+b_m(t)x_2\Big)\sin\left(\frac{m\pi}{L}x_1\right)\qquad(x_1,x_2)\in\Omega\, ,\ t>0\ . \end{equation} Let now ${\cal E}(t)$ denote the instantaneous total energy of the bridge, as determined in \eq{allsections}. What follows is not precise, it is a qualitative attempt to describe combined vertical and torsional oscillations. 
In particular, due to the restoring cables and hangers, the sine functions in \eq{fourier} should be modified in order to display different behaviors for positive and negative arguments. Moreover, we call ``small'' any quantity which is less than unity and ``almost zero'' (in symbols $\cong0$) any quantity which has a smaller order of magnitude when compared with small quantities. Finally, in order to avoid delicate sign arguments, we will often refer to $a_m^2$ and $b_m^2$ instead of $a_m$ and $b_m$.\par $\bullet$ {\bf Small energy.} As long as ${\cal E}(t)$ is small one may not even see oscillations, but if somebody stands on the bridge he might be able to feel oscillations. For instance, standing on the sidewalk of a bridge, one can feel the oscillations created by a car going through the roadway but the oscillations will not be visible to somebody watching the roadway from a point outside the bridge. For small energies ${\cal E}(t)$ only small oscillations appear and the corresponding solution \eq{fourier} has small coefficients $a_m(t)$ while $b_m(t)\cong0$. More precisely, \neweq{ambm} \left\{\begin{array}{ll} \forall\varepsilon>0\ \exists\delta>0\quad\mbox{such that}\quad{\cal E}(t)<\delta\ \Longrightarrow\ a_m(t)^2<\varepsilon\ \forall m\, ,\\ \exists\gamma>0\quad\mbox{such that}\quad{\cal E}(t)<\gamma\ \Longrightarrow\ b_m(t)\cong0\ \forall m\, . \end{array}\right. \end{equation} The reason of the second of \eq{ambm} is that even small variations of the $b_m$'s correspond to a large variation of the total energy ${\cal E}$ because the huge masses of the cross sections would rotate along the large length $L$ of the roadway. 
On the other hand, the first of \eq{ambm} may be strengthened by assuming that also some of the $a_m$'s are almost zero for small ${\cal E}$; in particular, we expect that this happens for large $m$ since these coefficients correspond to higher eigenvalues \neweq{vanishlargea} \forall\overline{m}\in\mathbb{N}\setminus\{0\}\quad \exists{\cal E}_{\overline{m}}>0 \quad\mbox{such that}\quad{\cal E}(t)<{\cal E}_{\overline{m}}\ \Longrightarrow\ a_m(t)\cong0\quad \forall m>\overline{m}\ . \end{equation} To better understand this point, let us compute the elongation $\Gamma_m$ due to the $m$-th mode: \neweq{elongation} \Gamma_m(t):=\int_0^L\bigg(\sqrt{1+\frac{m^2\pi^2}{L^2}\, a_m(t)^2\, \cos^2\left(\frac{m\pi}{L}x_1\right)}\, -\, 1\bigg)\, dx_1\ ; \end{equation} this describes the stretching elastic energy since it is the difference between the length of the roadway deformed by one single mode and the length of the roadway at rest. Due to the coefficient $\frac{m^2\pi^2}{L^2}$, it is clear that if $a_m^2\equiv a_{m+1}^2$ then $\Gamma_m(t)<\Gamma_{m+1}(t)$. This is the reason why \eq{vanishlargea} holds.\par $\bullet$ {\bf Increasing energy.} According to \eq{vanishlargea}, as long as ${\cal E}(t)<{\cal E}_{\overline{m}}$ one has $a_m(t)\cong0$ for all $m>\overline{m}$. If ${\cal E}(t)$ increases but remains smaller than ${\cal E}_{\overline{m}}$, then the coefficients $a_m(t)^2$ for $m=1,...,\overline{m}$ also increase. But they cannot increase to infinity since \eq{elongation} implies that the length of the roadway would also increase to infinity. So, when the total energy ${\cal E}(t)$ reaches the threshold ${\cal E}_{\overline{m}}$ the superlinear elastic structure of the bridge forces the solution \eq{fourier} to add one mode, so that $a_{\overline{m}+1}(t)\not\cong0$. 
Hence, the number of modes $\not\cong0$ is a nondecreasing function of ${\cal E}$.\par $\bullet$ {\bf Critical energy threshold.} What is described above is purely theoretical, but the bridge has several physical constraints. Of course, it cannot be stretched to infinity, it will break down much before. In particular, the number of active modes cannot increase to infinity. The elastic properties of the bridge determine a critical (maximal) number of possible active modes, say $\mu$. If the energy is distributed on the $\mu$ coefficients $a_1$,...,$a_\mu$, and if it increases up to ${\cal E}_\mu$, once again the superlinear elastic structure of the bridge forces the solution \eq{fourier} to change mode, but this time from the $a_m$ to the $b_m$; due to \eq{elongation}, further stretching of the roadway would require much more energy than switching oscillations on torsional modes. The switch is due to an impulse caused by the instantaneous stopping of the falling roadway imposed by the sustaining cables and the elongated hangers. And which torsional modes will be activated depends on which coupled modes have the same eigenvalue; as an example, consider \eq{exxample} which, roughly speaking, says that the motion may change from $24$ to $7$ oscillations in the $x_1$-direction with a consequent change of oscillation also in the $x_2$-direction.\par $\bullet$ {\bf Summarising...} Let $u$ in \eq{fourier} describe the vertical displacement of the roadway. 
The bridge has several characteristic values which depend on its elastic structure.\par $\diamondsuit$\quad An integer number $\mu\in\mathbb{N}$ such that $a_m(t)\cong0$ and $b_m(t)\cong0$ for all $m>\mu$, independently of the value of ${\cal E}(t)$.\par $\diamondsuit$\quad $\mu$ different energy ``increasing modes thresholds'' $E_1,...,E_\mu$.\par $\diamondsuit$\quad The critical energy threshold $\overline{E}=E_\mu$.\par Assume that ${\cal E}(0)=0$, in which case $u(x_1,x_2,0)=0$, and that $t\mapsto{\cal E}(t)$ is increasing. As long as ${\cal E}(t)\le E_1$ we have $a_m\cong0$ for all $m\ge2$ and $b_m\cong0$ for all $m\ge1$; moreover, $t\mapsto a_1(t)^2$ is increasing. When ${\cal E}(t)$ reaches and exceeds $E_1$ there is a first switch: the function $a_2^2$ starts being positive while, as long as ${\cal E}(t)\le E_2$, we still have $a_m\cong0$ for all $m\ge3$ and $b_m\cong0$ for all $m\ge1$. And so on, until ${\cal E}(t)=E_\mu=\overline{E}$. At this point, also because of an impulse, the energy forces the solution to have a nonzero coefficient $b_1$ rather than a nonzero coefficient $a_{\mu+1}$. The impulse forces $u$ to lower the number of modes for which $a_m\not\cong0$. For instance, the observation by Farquharson ({\em The motion, which a moment before had involved nine or ten waves, had shifted to two}) quoted in Section \ref{story} shows that, for the Tacoma Bridge, there was a change such as \neweq{change} \Big(a_m\cong0\ \forall m\ge11,\ b_m\cong0\ \forall m\ge1\Big)\ \longrightarrow\ \Big(a_m\cong0\ \forall m\ge3,\ b_m\cong0\ \forall m\ge2\Big)\ . \end{equation} In order to complete the material in this section, two major problems are still to be solved.\par - Find the correct boundary conditions on $x_2=\pm\ell$.\par - Find at which energy levels the ``transfer of energy between modes'' occurs, see \eq{change}.\par Both these problems are addressed in a forthcoming paper \cite{fergaz}. 
\section{Conclusions and future perspectives}\label{conclusions} So far, we observed phenomena displayed by real structures, we discussed models, and we recalled some theoretical results. In this section we take advantage from all this work, we summarise all the phenomena and remarks previously discussed, and we reach several conclusions. \subsection{A new mathematical model for suspension bridges}\label{newmodel} We suggest here a new mathematical model for the description of oscillations in suspension bridges. We expect the solution to the corresponding equation to display both self-excited oscillations and instantaneous switch between vertical and torsional oscillations. Moreover, the critical energy threshold (corresponding to the flutter speed) appears in the equation.\par Let $\Omega=(0,L)\times(-\ell,\ell)\subset\mathbb{R}^2$ where $L$ represents the length of the bridge and $2\ell$ represents the width of the roadway. Assume that $2\ell\ll L$ and consider the initial-boundary value problem \neweq{truebeam} \left\{\begin{array}{ll} u_{tt}+\Delta^2 u+\delta u_t+f(u)=\varphi(x,t)\ & x=(x_1,x_2)\in\Omega,\ t>0,\\ u(x_1,x_2,t)=u_{x_1x_1}(x_1,x_2,t)=0\ & x\in\{0,L\}\times(-\ell,\ell),\ t>0,\\ u_{x_2x_2}(x_1,x_2,t)=0\ & x\in(0,L)\times\{-\ell,\ell\},\ t>0,\\ u_{x_2}(x_1,-\ell,t)=u_{x_2}(x_1,\ell,t)\ & x_1\in(0,L),\ t>0,\\ u_t(x_1,-\ell,t)+u(x_1,-\ell,t)=E(t)\, [u_t(x_1,\ell,t)+u(x_1,\ell,t)]\ & x_1\in(0,L),\ t>0,\\ u(x,0)=u_0(x)\ & x\in\Omega,\\ u_t(x,0)=u_1(x)\ & x\in\Omega. \end{array}\right. \end{equation} Here, $u=u(x,t)$ represents the vertical displacement of the plate, $u_0(x)$ is its initial position while $u_1(x)$ is its initial vertical velocity. Before discussing the other terms and conditions, let us remark that if one wishes to have smooth solutions, a compatibility condition between boundary and initial conditions is needed: \neweq{cc} u_1(x_1,-\ell)+u_0(x_1,-\ell)=E(0)\, [u_1(x_1,\ell)+u_0(x_1,\ell)]\qquad\forall x_1\in(0,L)\ . 
\end{equation} The function $\varphi$ represents an external source, such as the wind, which is responsible for the variation of the total energy ${\cal E}(t)$ inserted in the structure which, in turn, can be determined by \neweq{gust} {\cal E}(t)=\int_\Omega \varphi(x,t)^2\, dx\ . \end{equation} Of course, it is much simpler to compute ${\cal E}(t)$ in terms of the known source $\varphi$ rather than in terms of the unknown solution $u$ and its derivatives. The function $E$ is then defined by \neweq{EE} E(t)=\left\{\begin{array}{ll} 1\quad & \mbox{if }{\cal E}(t)\le\overline{E}\\ -1\quad & \mbox{if }{\cal E}(t)>\overline{E} \end{array}\right. \end{equation} where $\overline{E}>0$ is the critical energy threshold defined in Section \ref{energybalance}; hence, the function $E(t)$ is a discontinuous nonlocal term which switches to $\pm1$ according to whether the total energy is smaller/larger than the critical threshold. So, \eq{truebeam}$_5$ is a dynamic boundary condition involving the total energy of the bridge. When ${\cal E}(t)\le\overline{E}$ there is no torsional energy $E_t$ and the motion tends to become of pure vertical-type, that is, with $u_{x_2}\cong0$: to see this, note that in this case \eq{truebeam}$_5$ may be written as $$\frac{\partial}{\partial t}\Big\{[u(x_1,\ell,t)-u(x_1,-\ell,t)]e^t\Big\}=0\qquad\forall x_1\in(0,L)\ .$$ This means that as long as ${\cal E}(t)\le\overline{E}$, the map $t\mapsto|u(x_1,\ell,t)-u(x_1,-\ell,t)|$ decreases so that the two opposite endpoints of any cross section tend to have the same vertical displacement and to move synchronously as in a pure vertical motion. 
When ${\cal E}(t)>\overline{E}$ condition \eq{truebeam}$_5$ may be written as $$\frac{\partial}{\partial t}\Big\{[u(x_1,\ell,t)+u(x_1,-\ell,t)]e^t\Big\}=0\qquad\forall x_1\in(0,L)\ .$$ This means that as long as ${\cal E}(t)>\overline{E}$, the map $t\mapsto|u(x_1,\ell,t)+u(x_1,-\ell,t)|$ decreases so that the two opposite endpoints of any cross section tend to have zero average and to move asynchronously as in a pure torsional motion, that is, with $u(x_1,0,t)\cong\frac12 [u(x_1,\ell,t)+u(x_1,-\ell,t)]\cong0$.\par Note that in \eq{truebeam} the jump of $E(t)$ from/to $\pm1$ occurs simultaneously and instantaneously along all the points located on the sides of the roadway; hence, either all or none of the cross sections have some torsional motion, in agreement with what has been observed for the Tacoma Bridge, see \cite{Tacoma1} and also \cite[pp.50-51]{wake}. The form \eq{gust} and the switching criterion for $E(t)$ in \eq{EE} mean that problem \eq{truebeam} models a situation where if a gust of wind is sufficiently strong then, instantaneously, a torsional motion appears. One could also consider the case where $${\cal E}(t)\simeq\int_0^t\int_\Omega \varphi(x,\tau)^2\, dx\, d\tau\ $$ which would model a situation where if the wind blows for too long then at some critical time, instantaneously, a torsional motion appears. However, the problem with \eq{gust} is much simpler because it is local in time.\par The differential operator in \eq{truebeam} is derived according to the linear Kirchhoff-Love model for a thin plate, see Section \ref{elasticity}. We have neglected a strong distinction between the bending and stretching elastic energies which are, however, quite different in long narrow plates, see \cite[Section 8.3]{mansfield} and previous work by Cox \cite{cox}; if one wishes to make some corrections, one should add a further nonlinear term $g(\nabla u,D^2u)$ and the equation would become quasilinear, see Problem \ref{gDu}. 
But, as already mentioned in Section \ref{elasticity}, we follow here a compromise and merely consider a semilinear problem. Concerning the nonlinearity $f(u)$, some superlinearity should be required. For instance, $f(u)=u+\varepsilon u^3$ with $\varepsilon>0$ small could be a possible choice; alternatively, one could take $f(u)=a(e^{bu}-1)$ as in \cite{mckO} for some $a,b>0$. In the first case the hangers are seen as ideal springs and gravity is somehow neglected, in the second case more relevance is given to gravity and to the possibility of slackening hangers. Finally, $\delta u_t$ is a damping term which represents the positive structural damping of the structure; its role should be to weaken the effect of the nonlinear term $f(u)$, see Problem \ref{competition}.\par As far as we are aware, there is no standard theory for problems like \eq{truebeam}. It is a nonlocal problem since it links behaviors on different parts of the boundary and involves the function $E(t)$ in \eq{EE}. It also has dynamic boundary conditions which are usually delicate to handle. \begin{problem} {\em Prove that if $\varphi(x,t)\equiv0$ then \eq{truebeam} only admits the trivial solution $u\equiv0$. The standard trick of multiplying the equation in \eq{truebeam} by $u$ or $u_t$ and integrating over $\Omega$ does not allow one to get rid of all the boundary terms. Note that, in this case, $E(t)\equiv1$.} $\Box$\end{problem} \begin{problem}\label{ill} {\em Study existence and uniqueness results for \eq{truebeam}; prove continuous dependence results with respect to the data $\varphi$, $u_0$, $u_1$, and with respect to possible perturbations of $f$. Of course, the delicate conditions to deal with are \eq{truebeam}$_4$ and \eq{truebeam}$_5$. 
If problem \eq{truebeam} were ill-posed, what must be changed in order to have a well-posed problem?} $\Box$\end{problem} \begin{problem}\label{competition} {\em Study \eq{truebeam} with no damping, that is, $\delta=0$: does the solution display oscillations such as \eq{pazzo} when $t$ tends to some finite blow up instant? Then study the competition between the damping term $\delta u_t$ and the self-exciting term $f(u)$: for a given $f$ is it true that if $\delta$ is sufficiently large then the solution $u$ is global in time? We believe that the answer is negative and that the only effect of the damping term is to delay the blow up time.} $\Box$\end{problem} \begin{problem} {\em Determine the regularity of the solutions $u$ to \eq{truebeam} and study the importance of the compatibility condition \eq{cc}.} $\Box$\end{problem} \begin{problem}\label{gDu} {\em Insert into the equation \eq{truebeam} a correction term for the elastic energies, something like $$g(\nabla u,D^2u)=-\left(\frac{u_{x_1}}{\sqrt{1+u_{x_1}^2}}\right)_{x_1}-\gamma\left(\frac{u_{x_1}}{\sqrt{1+u_{x_1}^2}}\right)_{x_2}$$ with $\gamma>0$ small. Then prove existence, uniqueness and continuous dependence results.} $\Box$\end{problem} An important tool to study \eq{truebeam} would be the eigenvalues and eigenfunctions of the corresponding stationary problem. In view of the dynamic boundary conditions \eq{truebeam}$_5$, a slightly simpler model could be considered, see \eq{eigen1} or \eq{eigen2} and subsequent discussion in Section \ref{modes}. We have no feeling on what could be the better choice... \subsection{A possible explanation of the Tacoma collapse}\label{possibleTacoma} Hopefully, this paper sheds some further light on oscillating bridges. We have emphasised the necessity of models fulfilling the requirements of (GP) since, otherwise, the solution will not display the phenomena visible in real bridges. 
In particular, any equation aiming to model the complex behavior of bridges should contain information on at least two possible kinds of oscillations: this target may be achieved either by considering a PDE, or by considering coupled systems of ODE's, or by linking the two oscillations within a unique function solving a suitable ODE. A further contribution of this paper is the remark that there might be some hidden elastic energy in the bridge and that there is no simple way to detect it. Not only this energy is larger than the kinetic and potential energy but also it may give rise, almost instantaneously, to self-excited oscillations.\par We now put together all these observations in order to afford an explanation of the Tacoma collapse. As we shall see, our explanation turns out to agree with all of them.\par \noindent \textsf{On November 7, 1940, for some time before 10:00 AM, the bridge was oscillating as it did many other times before. The wind was apparently more violent than usual and, moreover, it continued for a long time. The oscillations of the bridge were completely similar to those displayed in the interval $(0,80)$ of the plot in Figure \ref{mille}. Since the energy involved was quite large, also the oscillations were quite large. The roadway was far from its equilibrium position and, consequently, the restoring force due to the sustaining cables and to the hangers did not obey ${\cal LHL}$. The oscillations were governed by a fairly complicated differential equation such as \eq{truebeam} which, however, may be approximated by \eq{maineq2} after assuming \eq{w0}, since this equation possesses the main features of many fourth order differential equations, both ODE's and PDE's. 
It is not clear which superlinear function $f$ would better describe the restoring nonlinear elastic forces, but any function $f=f(s)$ asymptotically linear as $s\to0$ and superlinear as $|s|\to\infty$ generates the same qualitative behavior of solutions, see Theorem \ref{blowup}. As the wind was keeping on blowing, the total energy ${\cal E}$ in the bridge was increasing; the bridge started ruminating energy and, in particular, its stored elastic energy $E_\ell$ was also increasing. Unfortunately, nobody knew how to measure $E_\ell$ because, otherwise, \eq{estimate} would have taught in some advance that sudden wider vertical oscillations and subsequent torsional oscillations would have appeared. After each cycle, when the cross section of the bridge violently reached position $B$ in Figure \ref{1}, a delta Dirac mass increased the internal elastic energy $E_\ell$. As the wind was continuously blowing, after some cycles the elastic energy became larger than the critical energy threshold $\overline{E}$ of the bridge, see Section \ref{energybalance}. This threshold may be computed provided one knows the elastic structure of the bridge, see Section \ref{howto2}. As soon as $E_\ell>\overline{E}$, the function $E(t)$ in \eq{EE} switched from $+1$ to $-1$, a torsional elastic energy $E_t$ appeared and gave rise, almost instantaneously, to a torsional motion. As described in \eq{change}, due to the impulse, the energy switched to the first torsional mode $b_1$ rather than to a further vertical mode $a_{11}$; so, the impulse forced $u$ to lower the number of modes for which $a_m\not\cong0$ and the motion, which a moment before had involved nine or ten waves, shifted to two. At that moment, the graph of the function $w=w(t)$, describing the bridge according to \eq{w0}, reached time $t=95$ in the plot in Figure \ref{mille}. 
Oscillations immediately went out of control and after some more oscillations the bridge collapsed.}\par One should compare this description with the original one from the Report \cite{Tacoma1}, see also \cite[pp.26--29]{tac1} and \cite[Chapter 4]{wake}. \subsection{What about future bridges?}\label{howplan} Equation \eq{maineq2} is a simple prototype equation for the description of self-excited oscillations. None of the previously existing mathematical models ever displayed this phenomenon which is also visible in oscillating bridges. The reason is not that the behavior of the bridge is too complicated to be described by a differential equation but mainly because they fail to satisfy (GP); this prevents the appearance of oscillations and therefore the projects based on the corresponding equation may contain huge errors. In order to avoid bad surprises as in the past, many projects nowadays include stiffening trusses or strong dampers. This has the great advantage of maintaining the bridge much closer to its equilibrium position and of justifying ${\cal LHL}$. But this also has disadvantages, see \cite{kawada2}. First of all, they create an artificial stiffness which can give rise to the appearance of cracks in the more elastic structure of the bridge. Second, damping effects and stiffening trusses significantly increase the weight and the cost of the whole structure. Moreover, in extreme conditions, they may become useless: under huge external solicitations the bridge would again be too far from its equilibrium position and would violate ${\cal LHL}$. So, hopefully, one should find alternative solutions, see again \cite{kawada2}.\par One can act both on the structure and on the model. In order to increase the flutter speed, some suggestions on how to modify the design were made by Rocard \cite[pp.169--173]{rocard}: he suggests how to modify the components of the bridge in order to raise the right hand side of \eq{speedflutter}. 
More recently, some attempts to improve bridges performances can be found in \cite{hhs} where, in particular, a careful analysis of the role played by the hangers is made. But much work has still to be done; from \cite[p.1624]{hhs}, we quote \begin{center} \begin{minipage}{162mm} {\em Research on the robustness of suspension bridges is at the very beginning.} \end{minipage} \end{center} From a theoretical point of view, one should first determine a suitable equation satisfying (GP). Our own suggestion is to consider \eq{truebeam} where one should choose a reliable nonlinearity $f$ and add coefficients to the other terms, according to the expected technical features of the final version of the bridge: its length, its width, its weight, the materials used for the construction, the expected external solicitations, the structural damping... Since we believe that the solution to this equation may display blow up, which means a strong instability, a crucial role is played by all the constants which appear in the equation. Hence, a careful measurement of these parameters is necessary. Moreover, a sensitivity analysis for the continuous dependence of the solution on the parameters should be performed. Once the most reliable nonlinearity and parameters are chosen, the so obtained equation should be tested numerically to see if the solution displays dangerous phenomena. In particular, one should try to estimate, at least numerically, the critical energy threshold and the possible blow up time. Also purely theoretical estimates are of great interest; in general, these are difficult to obtain but even if they are not very precise they can be of some help.\par In a ``perfect model'' for a suspension bridge, one should also take into account the role of the sustaining cables and of the towers. Each cable links all the hangers on the same side of the bridge, its shape is a catenary of given length and the hangers cannot all elongate at the same time. 
The towers link the two cables and, in turn, all the hangers on both sides of the roadway. Cables and towers are further elastic components of the structure which, of course, modify considerably the model, its oscillating modes, its total elastic energy, etc. In this paper we have not introduced these components but, in the next future, this would be desirable.\par An analysis of the more flexible parts of the roadway should also be performed; basically, this consists in measuring the ``instantaneous local energy'' defined by \neweq{localenergy} {\bf E}(u(t),\omega)=\int_\omega\left[\left(\frac{|\Delta u(t)|^2}{2}+(\sigma-1)\det(D^2u(t))\right)+\frac{u_t(t)^2}{2}+F(u(t))\right]\, dx_1dx_2 \end{equation} for solutions $u=u(t)$ to \eq{truebeam}, for any $t\in(0,T)$, and for any subregion $\omega\subset\Omega$ of given area. In \eq{localenergy} we recognize the first term to be as in the Kirchhoff-Love model, see \eq{energy-gs}; moreover, $F(s)=\int_0^s f(\sigma)d\sigma$. \begin{problem} {\em Let $\Omega=(0,L)\times(-\ell,\ell)$, consider problem \eq{truebeam} and let $u=u(t)$ denote its solution provided it exists and is unique, see Problem \ref{ill}. For given lengths $a<L$ and $b<2\ell$ consider the set $\mathbb{R}e$ of rectangles entirely contained in $\Omega$ and whose sides have lengths $a$ (horizontal) and $b$ (vertical). Let ${\bf E}$ be as in \eq{localenergy} and consider the maximisation problem $$\max_{\omega\in\mathbb{R}e}\ {\bf E}(u(t),\omega)\ .$$ Using standard tools from calculus of variations, prove that there exists an optimal rectangle and study its dependence on $t\in(0,T)$; the natural conjecture is that, at least as $t\to T$, it is the ``middle rectangle'' $(\frac{L-a}{2},\frac{L+a}{2})\times(-\frac{b}{2},\frac{b}{2})$. Then one should find out if there exists some optimal ratio $a/b$. 
Finally, it would be extremely useful to find the dependence of the energy ${\bf E}(u(t),\omega)$ on the measure $ab$ of the rectangle $\omega$; this would allow to minimise costs for reinforcing the plate. We do not expect analytical tools to be able to locate optimal rectangles nor to give exact answers to the above problems so that a good numerical procedure could be of great help.} $\Box$\end{problem} In \cite[Chapter IV]{tac2} an attempt to estimate the impact of stiffening trusses is made, although only one kind of truss design is considered. In order to determine the best way to display the truss, one should solve the following simplified problems from calculus of variations. A first step is to consider the linear model. \begin{problem}\label{pisa} {\em Assume that the rectangular plate $\Omega=(0,L)\times(-\ell,\ell)$ is simply supported on all its sides and that it is submitted to a constant load $f\equiv1$. In the linear theory by Kirchhoff-Love model, see \eq{energy-f}, its elastic energy is given by $$E_0(\Omega)=\ -\min_{u\in H^2\cap H^1_0(\Omega)}\ \int_{\Omega }\left(\frac{1}{2}\left(\Delta u\right)^{2}+(\sigma-1)\det(D^2u)-u\right)\, dx_1dx_2\ .$$ Here, $H^2\cap H^1_0(\Omega)$ denotes the usual Hilbertian Sobolev space which, since we are in the plane, is embedded into $C^{0,\alpha}(\overline{\Omega})$. The unique minimiser $u$ solves the corresponding Euler-Lagrange equation which reads $$\Delta^2u=1\mbox{ in }\Omega\, ,\quad u=\Delta u=0\mbox{ on }\partial\Omega$$ and which may be reduced to a system involving the torsional rigidity of $\Omega$: $$-\Delta u=v\, ,\ -\Delta v=1\mbox{ in }\Omega\, ,\quad u=v=0\mbox{ on }\partial\Omega\, .$$ Let $\lambda>0$ and denote by $\Gamma_\lambda$ the set of connected curves $\gamma$ contained in $\overline{\Omega}$, such that $\gamma\cap\partial\Omega\neq\emptyset$, and whose length is $\lambda$: the curves $\gamma$ represent the stiffening truss to be put below the roadway. 
For any $\gamma\in\Gamma_\lambda$ the elastic energy of the reinforced plate $\Omega\setminus\gamma$ is given by $$E_\gamma(\Omega)=\ -\min_{u\in H^2\cap H^1_0(\Omega\setminus\gamma)}\ \int_{\Omega }\left(\frac{1}{2}\left(\Delta u\right)^{2}+(\sigma-1)\det(D^2u)-u\right)\, dx_1dx_2\ ,$$ and this energy should be minimised among all possible $\gamma\in\Gamma_\lambda$: $$\min_{\gamma\in\Gamma_\lambda}\ E_\gamma(\Omega)\ .$$ Is there an optimal $\gamma_\lambda$ for any $\lambda>0$? When we asked this question to Buttazzo \cite{buttazzo} and Santambrogio \cite{filippo}, we received positive answers; their optimism is justified by the connectedness assumption and by their previous work \cite{oudet,tilli} which also gives hints on how the line $\gamma_\lambda$ should look like. In fact, for a realistic model, one should further require that all the endpoints of $\gamma_\lambda$ lie on the boundary $\partial\Omega$. Finally, since the stiffening truss has a cost $C>0$ per unit length, one should also solve the minimisation problem $$\min_{\lambda\ge0}\ \left\{C\lambda+\min_{\gamma\in\Gamma_\lambda}\ E_\gamma(\Omega)\right\}\, ;$$ if $C$ is sufficiently small, we believe that the minimum exists.} $\Box$\end{problem} Problem \ref{pisa} is just a simplified version of the ``true problem'' which... should be completed! \begin{problem}\label{upsilon1} {\em Let $\Omega=(0,L)\times(-\ell,\ell)$ and fix some $\lambda>0$. Denote by $\Gamma_\lambda$ the set of connected curves contained in $\overline{\Omega}$ whose length is $\lambda$ and whose endpoints belong to $\partial\Omega$. Study the minimisation problem $$\min_{\gamma\in\Gamma_\lambda}\ \left|\min_{u\in{\mathcal H}(\Omega\setminus\gamma)}\ \int_{\Omega}\left(\frac{1}{2}\left(\Delta u\right)^{2}+(\sigma-1)\det(D^2u)-u\right)\, dx_1dx_2\right|\ ,$$ where $$ {\mathcal H}(\Omega\setminus\gamma)=\{u\in H^2(\Omega\setminus\gamma);\ \mbox{\eq{eigen2}$_2$ holds}, \ \mbox{+ something on }x_2=\pm\ell\mbox{ and on }\gamma\}\ . 
$$ First of all, instead of ``something'', one should find the correct conditions on $x_2=\pm\ell$ and on $\gamma$. This could also suggest to modify the energy function to be minimised with an additional boundary integral. The questions are similar. Is there an optimal $\gamma_\lambda$ for any $\lambda>0$? Is there an optimal $\lambda>0$ if one also takes into account the cost?} $\Box$\end{problem} Then one should try to solve the same problems with variable loads. \begin{problem} {\em Solve Problems \ref{pisa} and \ref{upsilon1} with nonconstant loads $f\in L^2(\Omega)$, so that the minimum problem becomes $$\min_{\gamma\in\Gamma_\lambda}\ \left|\min_{u\in{\mathcal H}(\Omega\setminus\gamma)}\ \int_{\Omega}\left(\frac{1}{2}\left(\Delta u\right)^{2}+(\sigma-1)\det(D^2u)-\, fu\right)\, dx_1dx_2\right|\ .$$ What happens if $f\not\in L^2(\Omega)$? For instance, if $f$ is a delta Dirac mass concentrated at some point $x_0\in\Omega$.} $\Box$\end{problem} \par \noindent {\bf Acknowledgment.} The author is grateful to his colleague Pier Giorgio Malerba, a structural engineer at the Politecnico of Milan, for several interesting and stimulating discussions. \end{document}
\begin{document} \runningtitle{Parity of Latin squares} \title{There are asymptotically the same number of Latin squares of each parity} \author[1]{Nicholas J. Cavenagh} \address[1]{Department of Mathematics, University of Waikato, Private Bag 3105, Hamilton, New Zealand. \email{[email protected]} } \cauthor \author[2]{Ian M. Wanless} \address[2]{School of Mathematical Sciences, Monash University, Vic 3800, Australia. \email{[email protected]} } \authorheadline{N. J. Cavenagh and I. M. Wanless} \support{Research supported by ARC grant FT110100065.} \begin{abstract} A Latin square is reduced if its first row and column are in natural order. For Latin squares of a particular order $n$ there are four possible different parities. We confirm a conjecture of Stones and Wanless by showing asymptotic equality between the numbers of reduced Latin squares of each possible parity as the order $n\rightarrow\infty$. \end{abstract} \classification{05B15} \keywords{Latin square; parity; Alon-Tarsi conjecture; row cycle} \maketitle \section{Introduction}\label{s:intro} The parity of permutations plays a fundamental role in group theory. Latin squares can be thought of as two dimensional permutations and they also have a notion of parity. A Latin square has three attributes each of which can be even or odd, although any two of these attributes determines the third. There are thus four different parities that Latin squares of a given order may have. These parities account, for example, for the fragmentation of switching graphs \cite{KMOW14,Wan04} and the failure of certain topological biembeddings \cite{LDGG09}. They can also assist in diagnosing symmetries of Latin squares \cite{Kot12}. Unlike what happens for permutation groups, there can be different numbers of Latin squares of each parity. This difference is central to a famous conjecture by Alon-Tarsi~\cite{AT92} which has ramifications well beyond its apparent scope \cite{HR94,KL15}. 
Nevertheless, numerical evidence \cite{KMOW14,SW12,Wan04} suggests that within several natural classes of Latin squares there are very close to the same number of each parity. The present note and \cite{Alp16} are the first to prove parities are asymptotically equinumerous (although \cite{SW12} did show a weaker result in this direction). An advantage of the present work over \cite{Alp16} is that we prove a non-trivial result for all orders, whilst \cite{Alp16} only applies to even orders. A \emph{Latin square} of order $n$ is an $n \times n$ array of $n$ symbols such that each symbol occurs exactly once in each row and exactly once in each column. We will take the symbol set to be $[n]:=\{1,2,\dots,n\}$, matching the row and column indices. A Latin square is \emph{normalised} if the first row is $(1,2,\dots,n)$. A Latin square is \emph{reduced} if the first row is $(1,2,\dots,n)$ and the first column is $(1,2,\dots,n)^T$. A Latin square $L=(l_{ij})$ is \emph{unipotent} if $l_{11}=l_{22}=\dots=l_{nn}$. Suppose $P$ is a property of Latin squares of order $n$. Let $L^P_n$, $R^P_n$ and $U^P_n$ be the numbers respectively of Latin squares, reduced Latin squares and normalised unipotent Latin squares of order $n$ with property $P$. If $P$ is omitted we count the whole class. Let $\mathcal{S}_n$ denote the permutations of $[n]$ and $\zeta:\mathcal{S}_n\to\mathbb{Z}_2$ the usual sign homomorphism with kernel the alternating group. Given a Latin square $L=(l_{ij})$ of order $n$, we can identify the following $3n$ permutations in $\mathcal{S}_n$. For all $i\in[n]$ define $\rowperm{i}$ by $\rowperm{i}(j)=l_{ij}$. For all $j \in [n]$ define $\colperm{j}$ by $\colperm{j}(i)=l_{ij}$. For all $\ell \in [n]$ define $\symperm{\ell}$ such that $\symperm{\ell}(i)$ is equal to the $j$ for which $l_{ij}=\ell$.
We call $\pi_{\textrm{row}}:=\sum_i \zeta(\rowperm{i})$, $\,\pi_{\textrm{col}}:=\sum_j \zeta(\colperm{j})$ and $\pi_{\textrm{sym}}:=\sum_{\ell} \zeta(\symperm{\ell})$ the \emph{row-parity}, \emph{column-parity} and \emph{symbol-parity} of $L$, respectively. A Latin square is called \emph{even} or \emph{odd} if $\pi_{\textrm{row}}+\pi_{\textrm{col}}\equiv 0$ or $1$ mod $2$, respectively. A Latin square is called \emph{row-even} or \emph{row-odd} if $\pi_{\textrm{row}}\equiv0$ or $1$, respectively. A Latin square is called \emph{column-even} or \emph{column-odd} if $\pi_{\textrm{col}}\equiv0$ or $1$, respectively. A Latin square is called \emph{symbol-even} or \emph{symbol-odd} if $\pi_{\textrm{sym}}\equiv0$ or $1$, respectively. We define the properties: \begin{multicols}{2}{ \begin{itemize} \item[$\els$]= ``is an even Latin square'' \item[$\ols$]= ``is an odd Latin square'' \item[$\rels$]= ``is a row-even Latin square'' \item[$\rols$]= ``is a row-odd Latin square'' \item[$\cels$]= ``is a column-even Latin square'' \item[$\cols$]= ``is a column-odd Latin square'' \item[$\sels$]= ``is a symbol-even Latin square'' \item[$\sols$]= ``is a symbol-odd Latin square'' \end{itemize} } \end{multicols} We define the \emph{parity} of a Latin square $L$ to be the ordered triple $\pi=\pi_{\textrm{row}} \pi_{\textrm{col}} \pi_{\textrm{sym}}$. Writing $\pi$ as a superscript denotes that we are restricting to Latin squares with parity $\pi$. Some of the basic relationships that are proved in \cite{SW12} are summarised in \Tref{T:relat}.
\begin{table} \begin{center} \begin{tabular}{|l|} \hline{\vrule height 2.75ex width 0ex depth0ex} If $n \equiv 0$ or $1 \pmod 4$ {\vrule height 0ex width 0ex depth1.2ex}\\ \hline{\vrule height 2.75ex width 0ex depth0ex} $R^{\els}_n=R^{\sels}_n=R^{000}_n+R^{110}_n$ {\vrule height 0ex width 0ex depth1.2ex}\\ $R^{\ols}_n=R^{\sols}_n=R^{011}_n+R^{101}_n$ {\vrule height 0ex width 0ex depth1.2ex}\\ \hline{\vrule height 2.75ex width 0ex depth0ex} $U^{\els}_n=R^{\cels}_n=R^{000}_n+R^{101}_n$ {\vrule height 0ex width 0ex depth1.2ex}\\ $U^{\ols}_n=R^{\cols}_n=R^{011}_n+R^{110}_n$ {\vrule height 0ex width 0ex depth1.2ex}\\ \hline{\vrule height 2.75ex width 0ex depth0ex} $R^{\rels}_n=R^{000}_n+R^{011}_n=U^{\els}_n$ {\vrule height 0ex width 0ex depth1.2ex}\\ $R^{\rols}_n=R^{101}_n+R^{110}_n=U^{\ols}_n$ {\vrule height 0ex width 0ex depth1.2ex}\\ \hline {\vrule height 2.75ex width 0ex depth0ex}{\vrule height 2.75ex width 0ex depth0ex} $R_n^{111}=R_n^{100}=R_n^{010}=R_n^{001}=0$ {\vrule height 0ex width 0ex depth1.2ex}\\ $R_n^{011}=R_n^{101}$ {\vrule height 0ex width 0ex depth1.2ex}\\ $R_n^{011}=R_n^{101}=R_n^{110}$ when $n$ is even {\vrule height 0ex width 0ex depth1.2ex}\\ \hline \end{tabular} \quad \begin{tabular}{|l|} \hline{\vrule height 2.75ex width 0ex depth0ex} If $n \equiv 2$ or $3 \pmod 4$ {\vrule height 0ex width 0ex depth1.2ex}\\ \hline{\vrule height 2.75ex width 0ex depth0ex} $R^{\els}_n=R^{\sols}_n=R^{111}_n+R^{001}_n$ {\vrule height 0ex width 0ex depth1.2ex}\\ $R^{\ols}_n=R^{\sels}_n=R^{100}_n+R^{010}_n$ {\vrule height 0ex width 0ex depth1.2ex}\\ \hline{\vrule height 2.75ex width 0ex depth0ex} $U^{\els}_n=R^{\cols}_n=R^{111}_n+R^{010}_n$ {\vrule height 0ex width 0ex depth1.2ex}\\ $U^{\ols}_n=R^{\cels}_n=R^{100}_n+R^{001}_n$ {\vrule height 0ex width 0ex depth1.2ex}\\ \hline{\vrule height 2.75ex width 0ex depth0ex} $R^{\rols}_n=R^{111}_n+R^{100}_n=U^{\els}_n$ {\vrule height 0ex width 0ex depth1.2ex}\\ $R^{\rels}_n=R^{010}_n+R^{001}_n=U^{\ols}_n$ {\vrule height 0ex 
width 0ex depth1.2ex}\\ \hline {\vrule height 2.75ex width 0ex depth0ex}{\vrule height 2.75ex width 0ex depth0ex} $R_n^{000}=R_n^{011}=R_n^{101}=R_n^{110}=0$ {\vrule height 0ex width 0ex depth1.2ex}\\ $R_n^{100}=R_n^{010}$ {\vrule height 0ex width 0ex depth1.2ex}\\ $R_n^{100}=R_n^{010}=R_n^{001}$ when $n$ is even {\vrule height 0ex width 0ex depth1.2ex}\\ \hline \end{tabular} \caption{\label{T:relat}Table of identities.} \end{center} \end{table} We use standard `$\sim$', `little-$o$', `big-$O$' and `big-$\Omega$' asymptotic notation, always with the order of our Latin squares $n\rightarrow\infty$. The aim of this note is to confirm a conjecture from \cite{SW12} by proving the following result. \begin{thrm}\label{t:asymequal} As $n\rightarrow\infty$, \begin{align*} \begin{rcases} \;L_n^{000}\sim L_n^{011}\sim L_n^{101}\sim L_n^{110} \sim \dfrac14 L_n\\ R_n^{000}\sim R_n^{011}\sim R_n^{101}\sim R_n^{110} \sim \dfrac14 R_n\\ U_n^{000}\sim U_n^{011}\sim U_n^{101}\sim U_n^{110} \sim \dfrac14 U_n\\ \end{rcases}&\hbox{\rm for }n\equiv0,1\pmod 4,\\ \begin{rcases} \;L_n^{111}\sim L_n^{100}\sim L_n^{010}\sim L_n^{001} \sim \dfrac14 L_n\\ R_n^{111}\sim R_n^{100}\sim R_n^{010}\sim R_n^{001} \sim \dfrac14 R_n\\ U_n^{111}\sim U_n^{100}\sim U_n^{010}\sim U_n^{001} \sim \dfrac14 U_n\\ \end{rcases}&\hbox{\rm for }n\equiv2,3\pmod 4,\\ L_n^{\els}\sim L_n^{\ols}\sim L_n^{\rels}\sim L_n^{\rols}\sim L_n^{\cels}\sim L_n^{\cols}&\sim L_n^{\sels}\sim L_n^{\sols}\sim \dfrac12 L_n,\\ R_n^{\els}\sim R_n^{\ols}\sim R_n^{\rels}\sim R_n^{\rols}\sim R_n^{\cels}\sim R_n^{\cols}&\sim R_n^{\sels}\sim R_n^{\sols}\sim \dfrac12 R_n,\\ U_n^{\els}\sim U_n^{\ols}\sim U_n^{\rels}\sim U_n^{\rols}\sim U_n^{\cels}\sim U_n^{\cols}&\sim U_n^{\sels}\sim U_n^{\sols}\sim \dfrac12 U_n. \end{align*} \end{thrm} In contrast, the Alon-Tarsi conjecture \cite{AT92} asserts that $L_n^{\els}\ne L_n^{\ols}$ for even $n$. 
Two distinct generalisations of this are by Zappa~\cite{Zap97}, who suggests that $U_n^{\els}\ne U_n^{\ols}$ for all $n$ and Stones and Wanless~\cite{SW12} who suggest that $R_n^{\els}\ne R_n^{\ols}$ for all $n$. These conjectures are only known to be true in some very special cases (see \cite{Kot12b,Sto12,SW12} for details). There is a natural action of $\mathcal{S}_n\times\mathcal{S}_n\times\mathcal{S}_n$ on Latin squares called {\em isotopism}. Its orbits are called {\em isotopism classes}. In essence, the reason that the Alon-Tarsi conjecture is restricted to even orders is that parity is an isotopism class invariant for even orders but not for odd orders. Since it is known that asymptotically almost all Latin squares have trivial stabiliser in the group of isotopisms \cite{MW05}, a corollary of \tref{t:asymequal} is that for even $n\rightarrow\infty$ there are asymptotically equal numbers of isotopism classes of Latin squares of each of the possible parities. \section{Parities are equinumerous} Whenever we use the word ``random'' it will be implicit that we are referring to the discrete uniform distribution (technically, actually a sequence of such distributions as $n\rightarrow\infty$). A {\em row cycle of length $\ell$} is a minimal (in the sense of containment) non-empty $2\times\ell$ submatrix of a Latin square such that each row of the submatrix contains the same symbols. We say that a row cycle is {\em even} or {\em odd} depending on whether its length $\ell$ is even or odd, respectively. The two rows within a row cycle can be switched to give a slightly different Latin square. By switching an odd row cycle we change the column parity and the symbol parity, while leaving the row parity unchanged \cite{Wan04}. This simple observation will be the key to our result. Our aim is to find an odd row cycle that does not meet the first row or first column. We want to show that a random reduced Latin square can be expected to have such a cycle. 
However, we do this by first showing that a random Latin square also has such a cycle. This allows us to employ techniques that may move beyond the set of reduced Latin squares. The techniques in question were developed in \cite{CGW08} to study row cycle lengths in a random Latin square. It will suit us to adapt the results of \cite{CGW08}, which dealt with the first two rows, to the last two rows instead. Similarly, \cite{CGW08} allowed conditioning on the contents of a set $F$ of columns. In that paper $F$ was the last $n-m$ columns, however it will suit us to use a variable set of columns which includes the first column. The results from \cite{CGW08} apply unchanged, given the symmetry between different columns, and between different rows. We will consider random Latin squares of order $n$ as $n\rightarrow\infty$. Soon we will want to consider probabilities that are conditional on our Latin square including a set $F$ of $n-m$ columns that includes the first column. A prerequisite for the methods of \cite{CGW08} is that $F$ must contain entire row cycles in its last two rows. We impose the extra condition that $F$ contains a single row cycle in its last two rows. With this assumption it turns out that $F$ is unlikely to be too big: \begin{lemma}\label{l:nolongcyc} With probability $1-o(1)$ a random Latin square of order $n$ has no cycle longer than $n-\log{n}$ within the last two rows. \end{lemma} \begin{proof} Let $p$ be the probability that a random permutation in $\mathcal{S}_n$ has a cycle of length at least $n-\log{n}$. Then \begin{equation}\label{e:randpermlongcyc} p=\frac{1}{n!}\sum_{i=\lceil n-\log n\rceil}^n\binom{n}{ i}(i-1)!(n-i)! =\sum_{i=\lceil n-\log n\rceil}^n\frac{1}{i} =O\bigg(\frac{\log n}{n}\bigg). \end{equation} Let $\xi$ be the multiset of the lengths of the row cycles in the last two rows of a random Latin square of order $n$. If $\xi$ has an element of size at least $n-\log{n}$, then $\xi$ has at most $(\log{n})/2+1$ elements. 
Hence by \eref{e:randpermlongcyc} and \cite[Cor.\,4.5]{CGW08}, the probability that $\xi$ has an element of size at least $n-\log{n}$ is at most $n^{1/3}2^{(\log{n})/2+1}p=o(n^{-0.3})$. \end{proof} As foreshadowed, we now wish to condition on a random Latin square $L$ containing a set $F$ of entries consisting of entire columns (including the first), where in the last two rows the entries of $F$ form a single row cycle. This framework is consistent with \cite{CGW08}. Let $m$ be the number of columns that are not in $F$. Let $\rho$ be the partition of $m$ formed by the lengths of the row cycles in the last two rows that are not in $F$. We will consider $m$ and $\rho$ to be discrete random variables in the resulting probability space. Our results will be phrased in terms of $m$ and $\rho$ but are otherwise independent of $F$. Note that with high probability $m\rightarrow\infty$ as $n\rightarrow\infty$, by \lref{l:nolongcyc}. From \cite[Thm~4.9]{CGW08}, we have: \begin{lemma}\label{l:fewcycs} There exists a constant $c$ with $0<c<1$ such that $\rho$ has fewer than $9\sqrt{m}$ parts with probability $1-o(c^m)$. \end{lemma} Let $P(m)$ denote the partitions of $m$ into parts of size at least $2$. Let \begin{equation}\label{e:gamma} \gamma(\lambda)=\frac{m!}{\displaystyle\prod_{i=2}^m \lambda_i!\,i^{\lambda_i}} \end{equation} be the number of derangements with cycle structure $\lambda=(2^{\lambda_2},3^{\lambda_3},\dots,m^{\lambda_m})\in P(m)$. Here and henceforth, $i^{\lambda_i}$ denotes $\lambda_i$ parts of size $i$, where we allow the possibility that $\lambda_i=0$. Let $S(\lambda,F)$ denote the set of Latin squares that contain $F$ and have $\rho=\lambda$. \begin{lemma}\label{l:mulam} Let $m$ be even and suppose that $\lambda=(2^{\lambda_2},4^{\lambda_4},\dots,m^{\lambda_m})\in P(m)$ has only even parts, including one of size $z$ where $z\rightarrow\infty$ as $n\rightarrow\infty$. 
Let $M$ be the set of $\mu\in P(m)$ such that $\mu$ is obtained from $\lambda$ by splitting a part of size $z$ into two parts of odd size. Then $$\sum_{\mu\in M}|S(\mu,F)|=|S(\lambda,F)|\,\Omega(\log{z}).$$ \end{lemma} \begin{proof} Let $\mu=(2^{\mu_2},3^{\mu_3},\dots,m^{\mu_m})\in M$ be such that $\mu$ is obtained from $\lambda$ by splitting one part of size $z$ into parts of size $a$ and $z-a$, where $a$ is odd (and thus $z-a$ is too) and $a<z-a$. Since $\lambda$ has only even parts, $\lambda_a=\lambda_{z-a}=0$ and $\mu_a=\mu_{z-a}=1$. Moreover $\lambda_{z}=\mu_z+1\geqslant 1$. By \eref{e:gamma}, $$\frac{\gamma(\mu)}{\gamma(\lambda)}=\frac{z\lambda_z}{a(z-a)}.$$ By \cite[Lem~3.13]{CGW08}, this implies that $${|S(\mu,F)|}\geqslant \frac{2z\lambda_z|S(\lambda,F)|}{3a(z-a)} \geqslant \frac{2z|S(\lambda,F)|}{3a(z-a)}.$$ Thus, $$\sum_{\mu\in M}|S(\mu,F)| \geqslant \frac{2}{3}|S(\lambda,F)|z\sum_{a=1}^{w} \frac{1}{(2a+1)(z-2a-1)},$$ where $w=\lfloor(z-3)/4\rfloor$ is the largest integer satisfying $2w+1<z-2w-1$. However, $1/((2x+1)(z-2x-1))$ is a decreasing function of $x$ for $1\leqslant x\leqslant w$ so \begin{align*} \sum_{a=1}^{w} \frac{1}{(2a+1)(z-(2a+1))} & \geqslant \int_{1}^{w} \frac{dx}{(2x+1)(z-2x-1)} \\ & = \frac{1}{2z}\log\frac{(2w+1)(z-3)}{3(z-2w-1)} = \Omega\left(\frac{\log{z}}{z}\right), \end{align*} from which the result follows. \end{proof} We next show that with high probability there is an odd cycle that does not meet the first row or column (assuming $n>2$). We deduce this first for general Latin squares, then infer it for reduced Latin squares. \begin{thrm} With probability $1-o(1)$ there is a part of odd size in $\rho$. \end{thrm} \begin{proof} By \lref{l:nolongcyc} we know that $m\geqslant\log n$ with probability $1-o(1)$. By \lref{l:fewcycs}, there asymptotically almost surely are at most $9\sqrt{m}$ parts in $\rho$, so there is some part of size at least $\sqrt{m}/9$.
By \lref{l:mulam}, the probability of $\rho$ having no odd parts is at most $O(1/\log m)=o(1)$, as claimed. \end{proof} \begin{corol}\label{cy:oddred} With probability $1-o(1)$, in the last two rows of a random reduced Latin square of order $n$ there is a cycle of odd length that does not include the first column. \end{corol} \begin{proof} We can reduce a Latin square by permuting the symbols so that the first column is in order, then permuting the columns so that the first row is in order. These operations do not affect whether the last two rows contain a cycle of odd length that does not include the first column (note that the first column does not move). Also, each reduced Latin square is produced the same number of times, namely $n!(n-1)!$ times, when the above reduction is applied to all Latin squares. So reduced Latin squares have the same probability of having the property of interest as general Latin squares do. \end{proof} We are now in a position to prove our main result. As already noted, by switching an odd row cycle we change the column parity and the symbol parity. Hence \cyref{cy:oddred} provides us with an involution, which acts on all but a negligible fraction of reduced Latin squares, and which reverses column parity and reverses symbol parity. It follows that $R^{\cels}_n\sim R^{\cols}_n$ and $R^{\sels}_n\sim R^{\sols}_n$. \Tref{T:relat} then tells us that, \begin{align*} &R_n^{000}\sim R_n^{011}\sim R_n^{101}\sim R_n^{110} \sim \dfrac14 R_n\quad\hbox{\rm for }n\equiv0,1\pmod 4,\\ &R_n^{111}\sim R_n^{100}\sim R_n^{010}\sim R_n^{001} \sim \dfrac14 R_n\quad \hbox{\rm for }n\equiv2,3\pmod 4, \end{align*} as $n\rightarrow\infty$. The remainder of \tref{t:asymequal} can then be easily deduced from \Tref{T:relat} and the following additional observations. Replacing each row of a Latin square by its inverse (when considered as a permutation) converts reduced Latin squares into normalised unipotent Latin squares and vice versa. 
Hence $R^{abc}_n=U^{acb}_n$ for all parities $\pi=abc$ and all $n$. We also know two more facts from \cite{SW12}. Firstly, for even $n$, \begin{equation*} L^{P}_n = n!\, (n-1)!\, R^{P}_n = n!\, (n-1)!\, U^{P}_n, \end{equation*} whenever $P \in \{\els,\ols,\rels,\rols,\cels,\cols,\sels,\sols\}$ or $P$ is any parity. Secondly, for odd $n\geqslant3$, \begin{equation*} L_n^{000}=L_n^{011}=L_n^{101}=L_n^{110}\hbox{ and } L_n^{111}=L_n^{100}=L_n^{010}=L_n^{001}. \end{equation*} \section{Concluding comments} We have confirmed a conjecture from \cite{SW12} and explained why the large components have comparable size in the switching graphs studied in \cite{Wan04}. Our results do not explain why the components in the switching graphs in \cite{KMOW14} have comparable size. At this stage we have no tools to study the lengths of cycles in random {\em symmetric} Latin squares. A much stronger result than \tref{t:asymequal} seems very likely to be true. By Wilf~\cite[p.209]{Wil94}, the proportion of permutations in $\mathcal{S}_n$ that have no odd cycles is $2^{-n}n!/(n/2)!^2\sim \sqrt{2/(\pi n)}$. It follows immediately that the proportion of derangements with no odd cycles is also $O(n^{-1/2})$. Hence the proportion of derangements with at most one odd cycle is $O(n^{-1/2})$ if $n$ is even and no more than \begin{equation*} \frac{(n{-}1)!}{n!} + \frac{O(1)}{n!} \sum_{i=0}^{(n-3)/2}{n\choose 2i+1} \frac{(2i)!(n-2i-1)!}{(n-2i-1)^{1/2}} = \frac{1}{n} + O(1)\!\!\sum_{i=0}^{(n-3)/2}\frac{1}{(2i+1)(n-2i-1)^{1/2}} \end{equation*} if $n$ is odd. Approximating the sum by an integral, we find that for all $n$ the proportion of derangements with at most one odd cycle is $O(n^{-1/2}\log n)$. If \cite[Conj.\,6.1]{CGW08} holds then a similar statement would be true about the cycles in the last two rows of a Latin square: namely there would be at least two odd cycles with probability $1-O(n^{-1/2}\log n)$. 
At least one of these cycles is switchable in the sense that it does not hit the first column. Amongst the squares with no switchable odd cycle in the last two rows, we can look for a switchable odd cycle in rows $n-3,n-2$, then in rows $n-5,n-4$ and so on up to, but not including, the first row. Switching the first switchable cycle that we find in this way would give us an involution, because switching cycles in rows $x$ and $y$ never affects the cycle lengths between rows other than $x$ and $y$. The domain of the involution includes all reduced Latin squares that have any switchable cycle in an appropriate pair of rows. It seems plausible that each pair of rows would have a switchable cycle with something close to an independent probability $1-O(n^{-1/2}\log n)$, meaning the proportion of reduced Latin squares outside the domain of our involution would be $O(n^{-cn})$ for some constant $c>0$. Hence for each given $n$ the numbers of reduced Latin squares with each of the four possible parities are probably very much closer to each other than our work has demonstrated. \let\oldthebibliography=\thebibliography \let\endoldthebibliography=\endthebibliography \renewenvironment{thebibliography}[1]{ \begin{oldthebibliography}{#1} \setlength{\parskip}{0.4ex plus 0.1ex minus 0.1ex} \setlength{\itemsep}{0.4ex plus 0.1ex minus 0.1ex} } { \end{oldthebibliography} } \end{document}
\begin{document} \global\long\def\operatorname{div}{\operatorname{div}} \global\long\def\operatorname{supp}{\operatorname{supp}} \global\long\def\operatorname{curl}{\operatorname{curl}} \global\long\def\nabla{\nabla} \global\long\def\partial{\partial} \global\long\def\Ig#1#2{I_{#2}^{g;#1}} \global\long\def\mathbb{T}^{N}{\mathbb{T}^{N}} \setlength{\parskip}{1ex} \title[Epochs of regularity for wild H\"older solutions]{Epochs of regularity for wild Hölder-continuous solutions of the Hypodissipative Navier-Stokes System} \author{Aynur Bulut} \address{Louisiana State University, 303 Lockett Hall, Baton Rouge, LA 70803} \email{[email protected]} \author{Manh Khang Huynh} \address{Institute for Advanced Study, 1 Einstein Dr, Princeton, NJ 08540} \email{[email protected]} \author{Stan Palasek} \address{UCLA Department of Mathematics} \email{[email protected]} \begin{abstract} We consider the hypodissipative Navier-Stokes equations on $[0,T]\times\mathbb{T}^{d}$ and seek to construct non-unique, H\"older-continuous solutions with epochs of regularity (smooth almost everywhere outside a small singular set in time), using convex integration techniques. In particular, we give quantitative relationships between the power of the fractional Laplacian, the dimension of the singular set, and the regularity of the solution. In addition, we also generalize the usual vector calculus arguments to higher dimensions with Lagrangian coordinates. \end{abstract} \maketitle \section{Introduction} Fix $d\geq 3$.
We consider the hypodissipative Navier-Stokes equations \begin{equation} \begin{cases} \partial_{t}v+(-\Delta)^{\gamma}v+\operatorname{div}\left(v\otimes v\right)+\nabla p=0\\ \operatorname{div} v=0 \end{cases}\label{eq:hypo_NS} \end{equation} on the periodic domain $\mathbb{T}^{d}$, where $0<\gamma<1$ denotes the strength of the fractional dissipation, $v:[0,T]\times\mathbb{T}^{d}\to\mathbb{R}^{d}$ is the velocity field and $p:[0,T]\times\mathbb{T}^{d}\to\mathbb{R}$ is the pressure. Recently, in the study of hydrodynamic turbulence, significant attention has been directed towards problems such as Onsager's conjecture, which roughly states that the kinetic energy of an ideal fluid may fail to be conserved when the regularity is less than $\frac{1}{3}$. The starting point for much of this work in recent years is a nonuniqueness result, using ideas from convex integration, due to De Lellis and Sz\'ekelyhidi Jr \cite{delellisEulerEquationsDifferential2007}. A sequence of results, e.g. in \cite{contiHPrincipleRigidityAlpha2009, DaneriS, BDLIS, IsettOh, isettProofOnsagerConjecture2018, buckmasterOnsagerConjectureAdmissible2017, buckmasterNonuniquenessWeakSolutions2018,buckmasterConvexIntegrationPhenomenologies2019}, and the references cited in these works, developed these ideas to tackle Onsager's conjecture. In \cite{isettProofOnsagerConjecture2018}, Isett reached the conjectured threshold of $\frac{1}{3}-$ for the three-dimensional Euler equation on the torus, using Mikado flows and a delicate gluing technique. Further developments include Buckmaster--De Lellis--Sz\'ekelyhidi, Jr.--Vicol \cite{buckmasterOnsagerConjectureAdmissible2017}, which forms the main basis for this work; we will refer to the strategy in \cite{buckmasterOnsagerConjectureAdmissible2017} as the {\it Onsager scheme}. The scheme produces a weak solution that can attain any arbitrary energy profile (this is sometimes referred to as {\it energy profile control}). 
After this recent progress, the main techniques of convex integration have also been used to construct various kinds of ``wild'' solutions (nonunique, or failing to conserve energy) for the Euler equations, the Navier-Stokes equations, as well as the fractional Navier-Stokes equations \cite{buckmasterNonuniquenessWeakSolutions2018,colomboIllposednessLeraySolutions2018,derosaInfinitelyManyLeray2019,cheskidovSharpNonuniquenessNavierStokes2020}. For the Navier-Stokes equations, the dissipation term $(-\Delta)v$ can dominate the nonlinear term $\operatorname{div}\left(v\otimes v\right)$, and this presents a difficult obstruction to convex integration. At present, this issue can be avoided by either using spatial intermittency (at the cost of non-uniform control on the solution) or considering the fractional Laplacian $(-\Delta)^{\gamma}$ instead. For an explanation of intermittency, as well as more history and references, we refer the interested readers to \cite{buckmasterConvexIntegrationPhenomenologies2019}. One direction of research has looked into the construction of wild solutions with {\it epochs of regularity} (that is, solutions that are smooth almost everywhere outside a temporal set of small dimension); this was carried out for the hyperdissipative Navier-Stokes equations (using intermittency) in \cite{buckmasterWildSolutionsNavierStokes2020}, the Navier-Stokes equations (using intermittency) in \cite{cheskidovSharpNonuniquenessNavierStokes2020}, and then for the Euler equations (not using intermittency) in \cite{derosaDimensionSingularSet2021}. We note that this goal stands in contradiction to the desire to have energy profile control, since whenever the solution is smooth the energy cannot increase. These approaches make use of the Onsager scheme, with several refinements to the gluing approach of Isett \cite{isettProofOnsagerConjecture2018}, combined with estimates on the overlapping (glued) regions. 
Because energy correction is no longer required, the scheme is also simplified. In this paper, we look at the case of the hypodissipative Navier-Stokes equations without using spatial intermittency, and try to determine for which values of $\gamma$ in $(-\Delta)^{\gamma}$ one can construct spatially Hölder-continuous solutions with epochs of regularity. In addition, we also extend the arguments involving the Biot-Savart operator and vector calculus (cf. the treatment in \cite{buckmasterOnsagerConjectureAdmissible2017}) to higher dimensions. We now state our main theorem. \begin{thm} \label{thm:thm1}Fix $d\geq3$. Let $V_{1}$ and $V_{2}$ be smooth solutions to (\ref{eq:hypo_NS}) such that $\int_{\mathbb{T}^{d}}\left(V_{1}-V_{2}\right)(t)=0$ for all $t$. For every positive $\beta,\gamma$ such that $\beta<\frac{1}{3}$ and $\beta+2\gamma<1$, there exist \[ \begin{cases} v\in C_{t}^{0}C_{x}^{\beta-}\cap L_{t}^{1}C_{x}^{\beta_{1}}\\ \mathcal{B}\subset[0,T]\text{ closed} \end{cases} \] where \begin{itemize} \item $v$ is a nonunique weak solution to (\ref{eq:hypo_NS}) given initial data $V_{1}\left(0\right)$. \item $v$ agrees with $V_{1}$ near $t=0$, and agrees with $V_{2}$ near $t=T$ \item $\beta_{1}=\left(\frac{1-\beta}{2}\right)^{-}$, $\dim_{\mathrm{Hausdorff}}(\mathcal{B})\leq\left(\frac{1+\beta}{2(1-\beta)}\right)^{+}$ \item $v\big|_{\mathcal{B}^{c}\times\mathbb{T}^{d}}$ is smooth. \end{itemize} \end{thm} In particular, Theorem \ref{thm:thm1} implies that, with what we currently know about the Onsager scheme, the best fractional Laplacian we can handle (using only temporal intermittency) is $(-\Delta)^{\frac{1}{2}-}$, which is quite a distance away from the full Navier-Stokes equation. This confirms the heuristic that without spatial intermittency, we want the dissipation term $(-\Delta)^{\gamma}v$ to be dominated by the nonlinear term $\operatorname{div}\left(v\otimes v\right)$. 
In addition, because $L_{t}^{\infty}C_{x}^{\beta}$ is supercritical for the $\gamma$-hypodissipative Navier-Stokes equations when $\beta+2\gamma<1$, we expect that this constraint is sharp. The proof of Theorem \ref{thm:thm1} makes use of the strategy of the Onsager scheme, and in particular follows from an iterative proposition based on the local existence theory, combined with a modification of Isett's gluing technique to preserve the ``good'' temporal regions. The main difficulty is to optimize the length of the overlapping regions (where the cutoff functions meet). The iterative proposition is presented in Section \ref{sec:prelim}, where it is shown to imply Theorem \ref{thm:thm1}. The proof of the iterative proposition itself is deferred to Section \ref{sec:Proof-of-iteration}, where, after a brief mollification argument, we reduce the issue to a series of technical estimates (first, a collection of estimates for the gluing construction, which we then treat in Section \ref{sec:Proof-of-gluing}; and then a perturbation result arising from convex integration, which we treat in Section \ref{sec:Convex-integration-and}). \begin{rem} As usual (see, e.g. \cite{derosaInfinitelyManyLeray2019,buckmasterOnsagerConjectureAdmissible2017}), any $C_{t}^{0}C_{x}^{\alpha}$ solution with $\alpha\in\left(0,\frac{1}{3}\right)$ is automatically a $C_{t,x}^{\alpha-}$ solution. For any given $\beta\in\left(0,\frac{1}{3}\right)$, we can construct a wild $v\in C_{t,x}^{\beta}$. 
For $\gamma<\frac{1}{3}$ and $\varepsilon\in\left(0,\frac{1}{3}\right)$, by interpolation, this leads to the construction of wild solutions in $C_{t}^{0}C_{x}^{\left(\frac{1}{3}-\varepsilon\right)^{-}}\cap L_{t}^{1}C_{x}^{\left(\frac{1}{3}+\frac{\varepsilon}{2}\right)^{-}}\cap L_{t}^{\frac{3}{2}-}C_{x}^{\frac{1}{3}}$, with the singular set having dimension less than $1$, and to construction of wild solutions in $C_{t}C_{x}^{0+}\cap L_{t}^{1}C_{x}^{\frac{1}{2}-}$, with the dimension of the singular set bounded by $\frac{1}{2}+$. On the other hand, in the range $\frac{1}{3}\leq\gamma<\frac{1}{2},$ for each $\beta<1-2\gamma$, the dimension of the singular set is bounded by \[ \left(\frac{1+\beta}{2(1-\beta)}\right)^{+}<\frac{1-\gamma}{2\gamma}. \] \end{rem} \begin{figure} \caption{Regularity and dimension parameters. Given any $p\in\left(1,\infty\right)$ and $\widetilde{\beta}$ \dots} \end{figure} \subsection*{Further comments and open questions} The arguments we use to prove Theorem \ref{thm:thm1} immediately lead to an analogous result for the Euler equations, since we treated $(-\Delta)^{\gamma}v$ as an error term. In particular, in the proof of Theorem \ref{thm:thm1}, we show nonuniqueness for $C_{t}^{0}C_{x}^{\frac{1}{3}-}\cap L_{t}^{\frac{3}{2}-}C_{x}^{\frac{1}{3}}$ solutions. In the Euler context, this can be compared to the nonuniqueness of $L_{t}^{\frac{3}{2}-}C_{x}^{\frac{1}{3}}$ solutions in \cite[Theorem 1.10]{cheskidovSharpNonuniquenessNavierStokes2020}. In \cite{cheskidovSharpNonuniquenessNavierStokes2020}, rather than using the Onsager scheme, the authors use spatial intermittency. As a consequence, the solution they construct is not spacetime continuous; their singular set $\mathcal{B}$ can have arbitrarily small Hausdorff dimension, and their scheme also works in two dimensions. Two open questions remain.
The first is to ask whether we can further minimize the dimension of the singular set $\mathcal{B}$, as suggested in \cite{derosaDimensionSingularSet2021}. The second question of interest is to determine whether the construction can be adapted to construct solutions that obey some form of energy inequality. Both questions lead to natural problems that we hope to consider in future works. \subsection*{Outline of the Paper} In Section \ref{sec:prelim}, we specify our notational conventions and introduce the main iterative scheme underlying the proof of Theorem \ref{thm:thm1}. The iterative step is formulated in Proposition \ref{prop:iterative}, which is then used to prove Theorem \ref{thm:thm1}. The proof of Proposition \ref{prop:iterative} is the subject of Section \ref{sec:Proof-of-iteration}. The proof is reduced to two technical lemmas (a collection of gluing estimates, and a perturbation argument) which are treated in Section \ref{sec:Proof-of-gluing} and Section \ref{sec:Convex-integration-and}, respectively. A short appendix recalls several geometric preliminaries used throughout the paper. \section{\label{sec:prelim}Preliminaries and the iteration scheme} We begin by establishing some notational conventions. We will write $A\lesssim_{x,\neg y}B$ for $A\leq CB$, where $C$ is a positive constant depending on $x$ and not $y$. Similarly, $A\sim_{x,\neg y}B$ means $A\lesssim_{x,\neg y}B$ and $B\lesssim_{x,\neg y}A$. We will omit the explicit dependence when it is either not essential or obvious by context. For any real number $x$, we write $x+$ or $x^{+}$ to denote some $y\in\left(x,x+\varepsilon\right)$ where $\varepsilon$ is some arbitrarily small constant. Similarly we write $x-$ or $x^{-}$ for some $y\in\left(x-\varepsilon,x\right)$. 
For any $N\in\mathbb{N}_{0}$ and $\alpha\in\left(0,1\right)$, we write \begin{align*} \left\Vert f\right\Vert _{N} & =\left\Vert f\right\Vert _{C^{N}},\quad\left[f\right]_{N}=\left\Vert \nabla^{N}f\right\Vert _{0},\quad\left[f\right]_{N+\alpha}=\left[\nabla^{N}f\right]_{C^{0,\alpha}}, \end{align*} and \begin{align*} \left\Vert f\right\Vert _{N+\alpha} & =\left\Vert f\right\Vert _{C^{N,\alpha}}:=\left\Vert f\right\Vert _{N}+\left[f\right]_{N+\alpha}, \end{align*} where $\left[\,\cdot\,\right]_{C^{0,\alpha}}$ denotes the H\"older seminorm. We will often make use of the following elementary inequality, \[ \left\Vert fg\right\Vert _{r}\lesssim\left\Vert f\right\Vert _{0}\left[g\right]_{r}+\left[f\right]_{r}\left\Vert g\right\Vert _{0}, \] which holds for any $r>0$. \begin{defn} For any $T>0$, $\nu>0$, vector field $v$ and $(2,0)$-tensor $R$ on $[0,T]\times\mathbb{T}^{d}$, we say $(v,R)$ solves the $(\nu,\gamma,T)$-fNSR equations (fractional Navier-Stokes-Reynolds) if there is a smooth pressure $p$ such that \begin{equation} \begin{cases} \partial_{t}v+\nu(-\Delta)^{\gamma}v+\operatorname{div} v\otimes v+\nabla p=\operatorname{div} R\\ \operatorname{div} v=0, \end{cases}\label{eq:hypo_NSR} \end{equation} When $R=0$, we also say $v$ solves the $(\nu,\gamma,T)$-fNS equations. \end{defn} \subsection{Formulation of the iterative argument} As we described in the introduction, the proof of Theorem \ref{thm:thm1} is based on an iterative argument. We now outline the main setup of the iteration, and establish notation that will be used throughout the remainder of the paper. We begin by fixing $\gamma\in (0,1)$ and $\beta<\frac{1}{3}$ with $\beta+2\gamma<1$. For any natural number $q\in\mathbb{N}_{0}$, we set \begin{align} \lambda_{q} & :=\left\lceil a^{(b^{q})}\right\rceil \\ \delta_{q} & :=\lambda_{q}^{-2\beta} \end{align} with $a\gg1$, $0<b-1\ll1$ (to be chosen later). 
We remark that $\lambda_{q}$ will be the frequency parameter (made an integer for phase functions), while $\delta_{q}$ will be the pointwise size of the Reynolds stress. With $\alpha>0$ sufficiently small, and $\sigma>0$ (to be chosen later), we set \begin{align} \epsilon_{q} & :=\lambda_{q}^{-\sigma}\quad\\ \tau_{q} & :=C_{q}\delta_{q}^{-\frac{1}{2}}\lambda_{q}^{-1-3\alpha}\label{eq:tau_q} \end{align} where $C_{q}\sim1$ is an inessential constant such that $\epsilon_{q-1}\tau_{q-1}\tau_{q}^{-1}\in\mathbb{N}_{1}$ (for gluing purposes). For convenience, from this point on, we will not write out $C_{q}$ explicitly. The parameter $\tau_{q}$ will be the time of local existence for regular solutions, while the quantity $\epsilon_{q}\tau_{q}$ will be the length of the overlapping region between two temporal cutoffs. We now formulate the main inductive hypothesis on which the construction is based. Let $T\geq 1$ and $\nu \in (0,1]$ be arbitrary constants. For the first step of the induction, we pick any positive $\epsilon_{-1},\tau_{-1}$ such that $5\epsilon_{-1}\tau_{-1}=\frac{T}{3}$. 
For every $q\in\mathbb{N}_{0}$, we assume that there exist $v_{q}$ and $R_{q}$ smooth such that, \begin{enumerate} \item[(i)] $(v_q,R_q)$ solves the $(\nu,\gamma,T)$-fNSR equations in (\ref{eq:hypo_NSR}), \item[(ii)] we have the estimates \begin{align} \|v_{q}\|_{L^{\infty}} & \leq1-\delta_{q}^{1/2},\label{eq:vq_sup}\\ \|\nabla v_{q}\|_{L^{\infty}} & \leq M\delta_{q}^{1/2}\lambda_{q},\label{eq:grad_vq_sup}\\ \|R_{q}\|_{L^{\infty}} & \leq\epsilon_{q}\delta_{q+1}\lambda_{q}^{-3\alpha},\label{eq:Rq_sup} \end{align} where $M$ is a universal geometric constant (depending on $d$), and \item[(iii)] letting $\mathcal{B}_{q}=\bigcup_{i}I_{i}^{b,q}$ denote the current ``bad'' set consisting of disjoint closed intervals of length $5\epsilon_{q-1}\tau_{q-1}$, and letting $$\mathcal{G}_{q}=[0,T]\setminus\mathcal{B}_{q}=\bigcup_{i}I_{i}^{g,q}$$ denote the current ``good'' set consisting of disjoint open intervals, we have \begin{equation} R_{q}|_{\mathcal{G}_{q}+B(0,\epsilon_{q-1}\tau_{q-1})}\equiv0,\label{eq:Rq_zerowhere} \end{equation} where $\mathcal{G}_{q}+B(0,\epsilon_{q-1}\tau_{q-1})$ denotes the $\epsilon_{q-1}\tau_{q-1}$-neighborhood of $\mathcal{G}_{q}$, and within this neighborhood we have the improved bounds \begin{align} \|v_{q}\|_{N+1} & \lesssim_{N,\neg q}\delta_{q-1}^{1/2}\lambda_{q-1}\ell_{q-1}^{-N},\quad\textrm{for all }N\geq0.\label{eq:goodbounds} \end{align} \end{enumerate} We note the presence of $\epsilon_{q}$ in (\ref{eq:Rq_sup}), which serves to compensate for the sharp time cutoffs in our gluing construction. The main iterative proposition is given by the following statement. 
\begin{prop}[Iteration for the $(\nu,\gamma,T)$-fNSR equations] \label{prop:iterative} We fix \begin{gather} 0<b-1 \ll_{\beta,\gamma}1,\label{eq:smallness_b}\\ 0<\sigma <\frac{(b-1)(1-\beta-2b\beta)}{b+1},\label{eq:sigma_bound}\\ 0<\alpha \ll_{\sigma,b,\beta,\gamma}1,\label{eq:alpha_bound}\\ a \gg_{\alpha,\sigma,b,\beta,\gamma}1,\nonumber \end{gather} and suppose that $v_{q}$ and $R_{q}$ are smooth functions which satisfy the properties (i)--(iii) above. Then there exist $v_{q+1}$ and $R_{q+1}$ satisfying those same properties but with $q$ replaced by $q+1$. Moreover, we have \begin{align} \|v_{q}-v_{q+1}\|_{0}+\lambda{}_{q+1}^{-1}\|v_{q}-v_{q+1}\|_{1} & \leq M\delta_{q+1}^{1/2}\label{eq:iter_prop_est} \end{align} and $v_{q+1}=v_{q}$ on $\mathcal{G}_{q}\times\mathbb{T}^{d}$, $\mathcal{G}_{q}\subset\mathcal{G}_{q+1},\left|\mathcal{B}_{q+1}\right|\leq\epsilon_{q}\left|\mathcal{B}_{q}\right|$. \end{prop} \begin{rem} \label{rem:crucial_rem}We crucially remark that the parameters $b,\sigma,\alpha$, and $a$ only depend on $\beta,\gamma$ and $d$. In particular, they do not depend on $q,T$ or $\nu$ (as long as $\nu\leq1$ and $T\geq1$). \end{rem} The proof of \Propref{iterative} is given in \Secref{Proof-of-iteration} below. In the remainder of this section, we use this result to prove Theorem \ref{thm:thm1}. \begin{proof}[Proof of Theorem \ref{thm:thm1} via \Propref{iterative}] Let $\eta$ be a smooth temporal cutoff on $[0,T]$ such that $\mathbf{1}_{[0,\frac{2}{5}T]}\geq\eta\geq\mathbf{1}_{[0,\frac{1}{3}T]}$, and set \[ v_{0}=\eta V_{1}+\left(1-\eta\right)V_{2}. 
\] Since the dissipative terms are linear and $\int_{\mathbb{T}^{d}}\left(V_{1}-V_{2}\right)=0$, if we set \[ R_{0}=\partial_{t}\eta\mathcal{R}\left(V_{1}-V_{2}\right)-\eta\left(1-\eta\right)\left(V_{1}-V_{2}\right)\otimes\left(V_{1}-V_{2}\right) \] where $\mathcal{R}$ is the antidivergence operator defined in \Appref{Geometric-preliminaries}, then $\left(v_{0},R_{0}\right)$ solves the $(1,\gamma,T)$-fNSR equations from (\ref{eq:hypo_NSR}). We now aim to apply \Propref{iterative}. To do this, we rescale in time by a positive parameter $\zeta$, i.e. \[ v_{0}^{\zeta}\left(t,x\right)=\zeta v_{0}\left(\zeta t,x\right),\quad R_{0}^{\zeta}=\zeta^{2}R_{0}\left(\zeta t,x\right) \] Then $(v_0^\zeta,R_0^\zeta)$ solves the $(\zeta,\gamma,\zeta^{-1}T)$-fNSR equations. We now recall that we are allowed to make $\zeta$ arbitrarily small because of Remark \ref{rem:crucial_rem}. For $\zeta=\zeta(T,V_{1},V_{2},a,b,\alpha,\sigma,\beta)$ small enough, the conditions (\ref{eq:vq_sup})-(\ref{eq:Rq_sup}) of (ii) in the inductive hypothesis for \Propref{iterative} are satisfied for the case $q=0$, and we also have $$\zeta^{-1}T > 1 > \zeta.$$ In addition, (iii) is satisfied by letting $\mathcal{B}_{0}^{\zeta}=\left[\frac{T}{3\zeta},\frac{2T}{3\zeta}\right]$. Repeatedly applying \Propref{iterative} for the $(\zeta,\gamma,\zeta^{-1} T)$-fNSR equations, we get a sequence $\left(v_{q}^{\zeta},R_{q}^{\zeta},\mathcal{B}_{q}^{\zeta}\right)$ such that \renewcommand{\arabic{enumi}.}{\roman{enumi}.} \begin{enumerate} \item[(a)] $\left(v_{q}^{\zeta}\right)_{q\in\mathbb{N}_{0}}$ converges in $C_{t}^{0}C_{x}^{\beta-}$ to some $v^{\zeta}$. \item[(b)] $\lVert R_{q}^{\zeta}\rVert_{C_{t,x}^0}\rightarrow 0$ as $q\rightarrow\infty$, and \item[(c)] $\mathcal{B}_{q+1}^{\zeta}\subset\mathcal{B}_{q}^{\zeta}$ and $v^{\zeta}=v_{q}^{\zeta}$ on $\mathcal{G}_{q}^{\zeta}\times\mathbb{T}^{d}$. 
\end{enumerate} \renewcommand{\arabic{enumi}.}{\arabic{enumi}.} As a consequence of (c), $v^\zeta$ is smooth on each set $\mathcal{G}_q^\zeta \times\mathbb{T}^d$. Moreover, $v^{\zeta}$ is a weak solution of the $(\zeta,\gamma,\zeta^{-1} T)$-fNS equations. To conclude, we note that the transformations \begin{align*} v_{q}\left(t,x\right) & :=\zeta^{-1}v_{q}^{\zeta}\left(\zeta^{-1}t,x\right)\\ v\left(t,x\right) & :=\zeta^{-1}v^{\zeta}\left(\zeta^{-1}t,x\right)\\ \mathcal{B}_{q} & :=\zeta^{-1}\mathcal{B}_{q}^{\zeta} \end{align*} invert the time-rescaling. The bad set is then \[ \mathcal{B}:=\bigcap_{q}\mathcal{B}_{q}. \] Moreover, noting that the choice of $V_2$ was arbitrary, the solution $v$ is nonunique. We now verify that $\mathcal{B}$ has the desired Hausdorff dimension. Note that the set $\mathcal{B}_{q}$ consists of $\sim\tau_{q}^{-1}\prod_{i=1}^{q-1}\epsilon_{i}$ intervals of length $\sim_{\zeta}\epsilon_{q}\tau_{q}$. It therefore follows that \begin{align*} \dim_{\mathrm{Hausdorff}}(\mathcal{B}) \leq\mathrm{dim}_{\mathrm{box}}(\mathcal{B})&\leq\lim_{q\to\infty}\frac{\ln\Big(\tau_{q}^{-1}\prod_{i=1}^{q-1}\epsilon_{i}\Big)}{\ln\left(\epsilon_{q}^{-1}\tau_{q}^{-1}\right)}\\ &=\lim_{q\to\infty}\frac{\ln(a)b^{q}\left(1+3\alpha-\beta-\frac{\sigma}{b-1}\right)}{\ln(a)b^{q}\left(1+3\alpha+\sigma-\beta\right)}\\ & =1-\frac{\sigma b}{\left(b-1\right)\left(1+3\alpha+\sigma-\beta\right)}. \end{align*} Choosing $\alpha$ sufficiently small, and then choosing $\sigma$ sufficiently close to $\frac{(b-1)\left(1-\beta-2b\beta\right)}{b+1}$ and $b>1$ sufficiently close to $1$, we get the bound \[ \dim(\mathcal{B})\leq\left(\frac{1+\beta}{2(1-\beta)}\right)^{+} \] as desired. 
It remains to choose $\beta_{1}$ to ensure that the solution lies in $C_{t}^{0}C_{x}^{\beta-}\cap L_{t}^{1}C_{x}^{\beta_{1}}.$ For this, note that since $\left|\mathcal{B}_{q+1}\right|\lesssim_{\zeta}\prod_{i=1}^{q}\epsilon_{i}$, we have \[ \left\Vert v_{q+1}-v_{q}\right\Vert _{L_{t}^{1}C_{x}^{\beta_{1}}}\lesssim_{\zeta}\delta_{q+1}^{1/2}\lambda_{q+1}^{\beta_{1}}\left(\prod_{i=1}^{q}\epsilon_{i}\right) \] The right-hand side is then summable in $q$, provided that \[ -\beta+\beta_{1}-\frac{\sigma}{b-1}<0. \] We may therefore choose $\beta_{1}<\beta+\frac{\sigma}{b-1}<\frac{1-b\beta}{b+1}<\frac{1-\beta}{2}$, which completes the proof. \end{proof} \section{\label{sec:Proof-of-iteration}Proof of \Propref{iterative}} In this section, we give the proof of the main iterative result, \Propref{iterative}, which was used to prove Theorem \ref{thm:thm1} in the previous section. As we described in the introduction, the argument makes use of three steps -- a mollification procedure, a gluing construction, and a perturbation result arising from convex integration. To simplify the exposition, we discuss each step below, and after isolating a few technical lemmas whose proofs are deferred to \Secref{Proof-of-gluing} and \Secref{Convex-integration-and}, we give the proof of \Propref{iterative}. We define the length scale of mollification \begin{align} \ell_{q} & :=\frac{\delta_{q+1}^{\frac{1}{2}}}{\lambda_{q}^{1+\frac{\sigma}{2}+\frac{3\alpha}{2}}\delta_{q}^{\frac{1}{2}}}.\label{eq:length_scale} \end{align} To simplify notation, we will often abbreviate $\ell_{q}$ as $\ell$ (unless otherwise indicated). For technical convenience, we record several useful parameter inequalities. 
The first set of these are essential conversions, \begin{gather} \epsilon_{q}^{\frac{1}{2}}\tau_{q}\delta_{q+1}^{\frac{1}{2}}\ell_{q}^{-1} \ll1\label{eq:time-length}\\ \lambda_{q}\ll\ell_{q}^{-1} \ll\lambda_{q+1}\ll\lambda_{q}^{\frac{3}{2}}\label{eq:length_Freq}\\ \delta_{q-1}^{\frac{1}{2}}\lambda_{q-1}\ell_{q}^{2-2\alpha} \ll\epsilon_{q}\tau_{q}\delta_{q+1}.\label{eq:good_bad1} \end{gather} Indeed, the bound (\ref{eq:time-length}) comes from $\alpha>0$, while the bound $\ell_{q}^{-1}\ll\lambda_{q+1}$ in (\ref{eq:length_Freq}) follows by recalling that $\alpha$ can be made arbitrarily small by (\ref{eq:alpha_bound}), so that (\ref{eq:sigma_bound}) implies $\sigma<2\left(b-1\right)\left(1-\beta\right)$, and thus \begin{equation} -\beta b+\beta-1-\frac{\sigma}{2}+b>0.\label{eq:intermed_para} \end{equation} Similarly, by neglecting $\alpha$, (\ref{eq:good_bad1}) comes from $\left(b-1\right)\left(1-\beta\right)>0$, which is obvious. In order to partition the time intervals for gluing, we also need the bound \begin{equation} \epsilon_{q}\tau_{q}\ll\tau_{q}\ll\epsilon_{q-1}\tau_{q-1}\label{eq:time_interval} \end{equation} which comes from the inequality $\sigma<\left(1-\beta\right)(b-1)$, a consequence of (\ref{eq:sigma_bound}). We also have the special case \[ \tau_{0}\ll\frac{1}{15}\leq\frac{T}{15}=\epsilon_{-1}\tau_{-1} \] because $T\geq1$. This allows $a,b,\sigma,\alpha$ to be independent of $T$, and we use this crucial fact in the proof of Theorem \ref{thm:thm1}. To control the dissipative term in the gluing construction, we will also find it useful to observe the bound \begin{align} \tau_{q}\ell_{q}^{-\alpha-2\gamma} & \lesssim1,\label{eq:diss_1} \end{align} which, since $\alpha$ is negligible by (\ref{eq:alpha_bound}), comes from the inequality $\sigma<\left(1-\beta\right)\left(\frac{1-2\gamma}{\gamma}\right)-2b\beta$. Because of (\ref{eq:smallness_b}) and $\beta+2\gamma<1$, this is implied by (\ref{eq:sigma_bound}). 
Next, to control the stress size for the induction step, we note that \begin{align} \epsilon_{q}^{-1}\delta_{q+1}^{\frac{1}{2}}\delta_{q}^{\frac{1}{2}}\lambda_{q+1}^{-1+10\alpha}\lambda_{q}^{1+10\alpha} & \lesssim\epsilon_{q+1}\delta_{q+2}\lambda_{q+1}^{-4\alpha}\label{eq:stress_size_ind1}, \end{align} which, after neglecting $\alpha$, comes from \[ -b\beta-\beta-b+1+\sigma\leq-b\sigma-b^{2}\left(2\beta\right), \] which is precisely (\ref{eq:sigma_bound}). Lastly, for the dissipative error in the final stress, we observe that \begin{align} \delta_{q+1}^{1/2}\lambda_{q+1}^{-1+2\gamma+10\alpha} & \lesssim\epsilon_{q+1}\delta_{q+2}\lambda_{q+1}^{-4\alpha},\label{eq:stress_size_ind3} \end{align} which comes from \[ -\beta-1+2\gamma<b\left(-2\beta\right)-\sigma \] which in view of (\ref{eq:smallness_b}) and $\beta+2\gamma<1$, is a consequence of (\ref{eq:sigma_bound}). \subsection{The mollification step} With $\ell$ as defined in (\ref{eq:length_scale}) and $\psi_{\ell}$ a smooth standard radial mollifier in space of length $\ell$, we set \begin{align*} v_{\ell} & :=\psi_{\ell}*v_{q}. \end{align*} By standard mollification estimates and (\ref{eq:grad_vq_sup}) we have \begin{align} \left\Vert v_{\ell}-v_{q}\right\Vert _{0} & \lesssim\delta_{q}^{1/2}\lambda_{q}\ell=\epsilon_{q}^{\frac{1}{2}}\delta_{q+1}^{\frac{1}{2}}\lambda_{q}^{-\frac{3\alpha}{2}}\label{eq:lq_0}\\ \|\nabla^{N}v_{\ell}\|_{L^{\infty}} & \lesssim_{N}\delta_{q}^{1/2}\lambda_{q}\ell^{-N+1}\label{eq:grad_v_l} \end{align} for any $N\in\mathbb{N}_{1}$. Moreover, by setting \begin{align*} R_{\ell} & :=\psi_{\ell}*R_{q}+v_{\ell}\otimes v_{\ell}-\psi_{\ell}*(v_{q}\otimes v_{q}) \end{align*} the pair $(v_{\ell},R_{\ell})$ solves the $(\nu,\gamma,T)$-fNSR equations. 
Moreover, by using (\ref{eq:vq_sup}), (\ref{eq:grad_vq_sup}), (\ref{eq:Rq_sup}), (\ref{eq:length_scale}), (\ref{eq:length_Freq}) and the usual commutator estimate \[ \left\Vert \left(f*\psi_{l}\right)\left(g*\psi_{l}\right)-\left(fg\right)*\psi_{l}\right\Vert _{C^{r}}\lesssim_{r}l^{2-r}\left\Vert f\right\Vert _{C^{1}}\left\Vert g\right\Vert _{C^{1}} \] for $f,g\in C^{\infty}\left(\mathbb{T}^{d}\right)$ and $l>0,r\geq0$ (see, e.g. \cite[Proposition A.2]{buckmasterOnsagerConjectureAdmissible2017}), we obtain \begin{align} \|R_{\ell}\|_{N+\alpha} & \lesssim_{N}\ell^{-N-\alpha}\left\Vert R_{q}\right\Vert _{C^{0}}+\ell^{2-N-\alpha}\left\Vert v_{q}\right\Vert _{C^{1}}^{2}\nonumber \\ & \lesssim\ell^{-N-\alpha}\delta_{q+1}\epsilon_{q}\lambda_{q}^{-3\alpha}\lesssim\epsilon_{q}\delta_{q+1}\ell^{-N+\alpha}\label{eq:Rl_estimate} \end{align} for all $N\in\mathbb{N}_{0}$. \subsection{\label{subsec:The-gluing-step}The gluing step} Recalling that $\tau_{q}$ was defined in (\ref{eq:tau_q}), we set \[ t_{j}:=j\tau_{q}. \] Let $\mathcal{J}$ be the set of indices $j$ such that \[ \left[t_{j}-2\epsilon_{q}\tau_{q},t_{j}+3\epsilon_{q}\tau_{q}\right]\subset\mathcal{B}_{q}. \] These are the ``bad'' indices that will be part of $\mathcal{B}_{q+1}$ and we have $\#(\mathcal{J})\sim\tau_{q}^{-1}\prod_{p=1}^{q-1}\epsilon_{p}$. Then we define $\mathcal{J}^{*}=\{j\in\mathcal{J}|j+1\in\mathcal{J}\}$. These are the indices where we will apply the following local wellposedness result from \cite{derosaInfinitelyManyLeray2019}. 
\begin{lem}[Proposition 3.5 in \cite{derosaInfinitelyManyLeray2019}] \label{lem:local_existence}Given $\alpha\in\left(0,1\right)$, $\nu\in (0,1]$, any divergence-free vector field $u_{0}\in C^{\infty}(\mathbb T^d)$ and $T\lesssim_{\alpha}\left\Vert u_{0}\right\Vert _{1+\alpha}^{-1}$, there exists a unique solution $u$ to the $(\nu,\gamma,T)$-fNS equations on $[0,T]\times\mathbb{T}^{d}$ such that $u\left(0,\cdot\right)=u_{0}$ and \[ \left\Vert u\right\Vert _{N+\alpha}\lesssim_{N,\alpha}\left\Vert u_{0}\right\Vert _{N+\alpha}\quad\textrm{for all}\quad N\in\mathbb{N}_{1}. \] \end{lem} Using this lemma, for any $j\in\mathcal{J}^{*}$, we define $v_{j}$ to be the solution of the hypodissipative Navier-Stokes equations \begin{align*} \partial_{t}v_{j}+\nu (-\Delta)^{\gamma}v_{j}+\operatorname{div} v_{j}\otimes v_{j}+\nabla p_{j} & =0\\ \operatorname{div} v_{j} & =0\\ v_{j}(t_{j}) & =v_{\ell}(t_{j}) \end{align*} on $[t_{j},t_{j+2}]\times\mathbb{T}^{d}$. This is possible as \begin{align} \tau_{q} & \lesssim\frac{\ell^{2\alpha}}{\delta_{q}^{1/2}\lambda_{q}}\label{eq:tau_l}\\ & \ll\frac{\ell^{\alpha}}{\delta_{q}^{1/2}\lambda_{q}}\nonumber \\ &\lesssim\frac{1}{\|v_{\ell}(t_{j})\|_{1+\alpha}},\nonumber \end{align} where we have implicitly used (\ref{eq:length_Freq}) and (\ref{eq:grad_v_l}). We then have the bounds \begin{align} \|v_{j}\|_{L_{t}^{\infty}C_{x}^{N+\alpha}([t_{j},t_{{j+2}}]\times\mathbb{T}^{d})} & \lesssim_{N}\|v_{\ell}(t_{j})\|{}_{C_{x}^{N+\alpha}}\label{eq:badeulerbounds}\\ & \lesssim_{N}\delta_{q}^{1/2}\lambda_{q}\ell^{-N+1-\alpha}, \end{align} for $N\in\mathbb{N}_{1}$. \begin{figure} \caption{Gluing scheme.} \end{figure} Recall that $\mathcal{B}_{q}=\bigcup_{i}I_{i}^{b,q}$ is closed and $\mathcal{G}_{q}=[0,T]\setminus\mathcal{B}_{q}=\bigcup_{i}I_{i}^{g,q}$ is open. 
Let $\{\chi_{j}^{b}\}_{j}\cup\{\chi_{i}^{g}\}_{i}$ be a partition of unity of $[0,T]$ such that \begin{itemize} \item $\operatorname{supp}\chi_{j}^{b}\subset[t_{j},t_{j+1}+\epsilon_{q}\tau_{q}]$ for $j\in\mathcal{J}^{*}$, \item $\chi_{j}^{b}\equiv1$ in $[t_{j}+\epsilon_{q}\tau_{q},t_{j+1}]$ for $j\in\mathcal{J}^{*}$, \item $\operatorname{supp}\chi_{i}^{g}\subset I_{i}^{g,q}+B\left(0,\tau_{q}+\epsilon_{q}\tau_{q}\right)$, and \item for $N\in\mathbb{N}_{0}$, \begin{equation} \|\partial_{t}^{N}\chi_{i}^{g}\|_{L^{\infty}}+\|\partial_{t}^{N}\chi_{j}^{b}\|_{L^{\infty}}\lesssim_{N}(\epsilon_{q}\tau_{q})^{-N}.\label{eq:cutoff_time_deri} \end{equation} \end{itemize} Note that because of (\ref{eq:Rq_zerowhere}) and (\ref{eq:time_interval}), we have $R_{q}=0$ on $\operatorname{supp}\chi_{i}^{g}$. We now define the glued solution \begin{equation} \overline{v}_{q}:=\sum_{i}\chi_{i}^{g}v_{q}+\sum_{j\in\mathcal{J^{*}}}\chi_{j}^{b}v_{j}.\label{eq:vbar_q} \end{equation} We also define $\mathcal{B}_{q+1}$ as the union of the intervals $\left[t_{j}-2\epsilon_{q}\tau_{q},t_{j}+3\epsilon_{q}\tau_{q}\right]$ which lie in $\mathcal{B}_{q}$. We will show in \Secref{Proof-of-gluing} that there exists a smooth $\overline{R}_{q}$ such that $\left(\overline{v}_{q},\overline{R}_{q}\right)$ is a solution to (\ref{eq:hypo_NSR}). For convenient notation, we define the material derivatives \begin{align*} D_{t,\ell} & :=\partial_{t}+v_{\ell}\cdot\nabla\\ D_{t,q} & :=\partial_{t}+\overline{v}_{q}\cdot\nabla \end{align*} We will then obtain the following estimates, which will be used to prove Proposition \ref{prop:iterative}. 
\begin{prop}[Gluing estimates] \label{prop:glue_est} For any $N\in\mathbb{N}_{0}$, we have \begin{align} \left\Vert \overline{v}_{q}-v_{\ell}\right\Vert _{N+\alpha} & \lesssim_{N}\epsilon_{q}\tau_{q}\delta_{q+1}\ell^{-N-1+\alpha}\label{eq:glue1}\\ \left\Vert \overline{v}_{q}\right\Vert _{N+1} & \lesssim_{N}\delta_{q}^{\frac{1}{2}}\lambda_{q}\ell^{-N}\label{eq:glue2}\\ \|\overline{R}_{q}\|_{N+\alpha} & \lesssim_{N}\delta_{q+1}\ell^{-N+\alpha}\label{eq:glue3}\\ \|D_{t,q}\overline{R}_{q}\|_{N+\alpha} & \lesssim_{N}(\epsilon_{q}\tau_{q})^{-1}\delta_{q+1}\ell^{-N+\alpha}\label{eq:glue4} \end{align} \end{prop} We will prove \Propref{glue_est} in \Secref{Proof-of-gluing} below. We remark that the estimate (\ref{eq:glue1}) of \Propref{glue_est}, when combined with (\ref{eq:time-length}), implies in particular \begin{equation} \left\Vert \overline{v}_{q}-v_{\ell}\right\Vert _{\alpha}\lesssim\epsilon_{q}\tau_{q}\delta_{q+1}\ell^{-1+\alpha}\lesssim\delta_{q+1}^{\frac{1}{2}}\ell^{\alpha}\label{eq:glue5}. \end{equation} We also note that, because future modifications of the solution from this point on will only happen in the temporal regions $\left[t_{j}-\epsilon_{q}\tau_{q},t_{j}+2\epsilon_{q}\tau_{q}\right]$ (where $j\in\mathcal{J}$), we will later have $v_{q+1}=\overline{v}_{q}$ and $\overline{R}_{q}=0$ outside those temporal regions. Furthermore, (\ref{eq:Rq_zerowhere}) and (\ref{eq:goodbounds}) will hold with $q$ changed to $q+1$. \subsection{Perturbation step} The third key step in the proof of Proposition \ref{prop:iterative} is a perturbation lemma arising from the convex integration framework. We state this result in the next proposition. 
\begin{prop}[Convex integration] \label{prop:convex_int} There is a smooth solution $\left(v_{q+1},R_{q+1}\right)$ to (\ref{eq:hypo_NSR}) which satisfies $v_{q+1}=\overline{v}_{q}$ outside the temporal regions $\left[t_{j}-\epsilon_{q}\tau_{q},t_{j}+2\epsilon_{q}\tau_{q}\right]$ ($j\in\mathcal{J}$), along with the estimates \begin{align} \left\Vert v_{q+1}-\overline{v}_{q}\right\Vert _{0}+\frac{1}{\lambda_{q+1}}\left\Vert v_{q+1}-\overline{v}_{q}\right\Vert _{1} & \leq\frac{M}{2}\delta_{q+1}^{\frac{1}{2}},\label{eq:perturb_1} \end{align} and \begin{align} \left\Vert R_{q+1}\right\Vert _{0} & \lesssim\epsilon_{q+1}\delta_{q+2}\lambda_{q+1}^{-4\alpha},\label{eq:finalR_=00007Bq+1=00007D} \end{align} where $M>0$ is a universal geometric constant (depending on $d$). \end{prop} The proof of this proposition will be given in \Secref{Convex-integration-and} below. \subsection{Proof of the main iterative proposition} With the above tools in hand, we are now ready to prove Proposition \ref{prop:iterative}, making use of Proposition \ref{prop:glue_est} and Proposition \ref{prop:convex_int}, which are proved in Sections \ref{sec:Proof-of-gluing} and \ref{sec:Convex-integration-and}, respectively. \begin{proof}[Proof of Proposition \ref{prop:iterative}] We first observe that \begin{align*} \|v_{q}-v_{q+1}\|_{0} & \leq\|v_{q}-v_{\ell}\|_{0}+\|v_{\ell}-\overline{v}_{q}\|_{0}+\|\overline{v}_{q}-v_{q+1}\|_{0}\\ & \leq C\epsilon_{q}^{\frac{1}{2}}\delta_{q+1}^{\frac{1}{2}}\lambda_{q}^{-\frac{3\alpha}{2}}+C\delta_{q+1}^{\frac{1}{2}}\ell^{\alpha}+\frac{M}{2}\delta_{q+1}^{\frac{1}{2}}\leq M\delta_{q+1}^{\frac{1}{2}} \end{align*} where $C$ is shorthand for the implied constants of (\ref{eq:lq_0}) and (\ref{eq:glue5}). Since $$\max\{\epsilon_{q}^{\frac{1}{2}}\lambda_{q}^{-\frac{3\alpha}{2}},\ell^{\alpha}\}\rightarrow 0$$ as $a\rightarrow\infty$, the last inequality is true provided that $a$ is chosen sufficiently large. 
Similarly, for large $a$, because of (\ref{eq:grad_vq_sup}), (\ref{eq:glue2}) and (\ref{eq:perturb_1}), we have \begin{align*} \|v_{q}-v_{q+1}\|_{1} & \leq\underbrace{\left\Vert v_{q}\right\Vert _{1}+\|\overline{v}_{q}\|_{1}}_{C\delta_{q}^{\frac{1}{2}}\lambda_{q}}+\underbrace{\|\overline{v}_{q}-v_{q+1}\|_{1}}_{\frac{M}{2}\delta_{q+1}^{\frac{1}{2}}\lambda_{q+1}}\leq M\delta_{q+1}^{\frac{1}{2}}\lambda_{q+1}. \end{align*} We have thus shown (\ref{eq:iter_prop_est}), which in turn implies (\ref{eq:vq_sup}) and (\ref{eq:grad_vq_sup}) with $q$ replaced by $q+1$. On the other hand, (\ref{eq:finalR_=00007Bq+1=00007D}) yields the next iteration of (\ref{eq:Rq_sup}) (for large enough $a$). Recalling that all the desired properties regarding $\mathcal{B}_{q+1}$ were established in \Subsecref{The-gluing-step}, this completes the proof of the proposition. \end{proof} \section{\label{sec:Proof-of-gluing}Gluing estimates} In this section, we construct $\overline{R}_q$ and prove the gluing estimate results in \Propref{glue_est}, which played a key role in the proof of Proposition \ref{prop:iterative} in the previous section. We recall that $\overline{v}_{q}$ was defined in (\ref{eq:vbar_q}). We first note that (\ref{eq:glue2}) follows immediately from (\ref{eq:badeulerbounds}) and (\ref{eq:goodbounds}). On the other hand, (\ref{eq:glue3}) and (\ref{eq:glue4}) hold automatically outside the overlapping temporal regions $\left[t_{j},t_{j}+\epsilon_{q}\tau_{q}\right]$ (where $j\in\mathcal{J}$), since $\overline{v}_{q}$ is an exact solution and the stress is therefore zero in this regime. We now consider what happens near the overlapping regions. \subsection{Bad-bad interface} Consider any index $j\in\mathcal{J}^{*}$ such that $j+1\in\mathcal{J}^{*}$. 
Then $\operatorname{supp}(\chi_{j}^{b}\chi_{j+1}^{b})$ lies in an interval of length $\epsilon_{q}\tau_{q}$ where $\overline{v}_{q}$ satisfies \[ \partial_{t}\overline{v}_{q}+\nu(-\Delta)^{\gamma}\overline{v}_{q}+\operatorname{div}\overline{v}_{q}\otimes\overline{v}_{q}+\nabla\overline{p}_{q}=\operatorname{div}\overline{R}_{q}, \] where \begin{equation} \overline{R}_{q}=\partial_{t}\chi_{j}^{b}\mathcal{R}(v_{j}-v_{j+1})-\chi_{j}^{b}(1-\chi_{j}^{b})(v_{j}-v_{j+1})\otimes(v_{j}-v_{j+1}).\label{eq:glued_stress} \end{equation} and $\mathcal{R}$ is as defined in \Appref{Geometric-preliminaries}. To treat the fractional Laplacian term, we recall the following lemma from \cite{derosaInfinitelyManyLeray2019}. \begin{lem}[Theorem B.1 in \cite{derosaInfinitelyManyLeray2019}] For any $\gamma,\epsilon>0$ and $\beta\geq0$ such that $\beta+2\gamma+\epsilon\leq1$, we have \[ \left\Vert \left(-\Delta\right)^{\gamma}f\right\Vert _{\beta}\lesssim_{\epsilon}\left\Vert f\right\Vert _{\beta+2\gamma+\epsilon}\quad\forall f\in C^{\beta+2\gamma+\epsilon}. \] \end{lem} As usual, we decompose $v_{j}-v_{j+1}=(v_{j}-v_{\ell})-(v_{j+1}-v_{\ell})$. By symmetry, we only need to prove estimates for $v_{j}-v_{\ell}$. 
\begin{prop} \label{prop:vglue}For $N\in\mathbb{N}_{0}$ and $t\in\left(t_{j},t_{j}+2\tau_{q}\right)$, we have \begin{align} \|v_{j}-v_{\ell}\|_{N+\alpha} & \lesssim_{N}\epsilon_{q}\tau_{q}\delta_{q+1}\ell^{-N-1+\alpha}\label{eq:vestimate-1}\\ \|(\partial_{t}+v_{\ell}\cdot\nabla+\nu\left(-\Delta\right)^{\gamma})\left(v_{j}-v_{\ell}\right)\|_{N+\alpha} & \lesssim_{N}\epsilon_{q}\delta_{q+1}\ell^{-N-1+\alpha}\label{eq:vlestimate}\\ \|D_{t,\ell}\left(v_{j}-v_{\ell}\right)\|_{N+\alpha} & \lesssim_{N}\epsilon_{q}\delta_{q+1}\ell^{-N-1+\alpha}\label{eq:vtransportestimate-1} \end{align} \end{prop} \begin{proof} We observe that \begin{align} \left(\partial_{t}+v_{\ell}\cdot\nabla+\nu\left(-\Delta\right)^{\gamma}\right)\left(v_{\ell}-v_{j}\right) & =-\left(v_{\ell}-v_{j}\right)\cdot\nabla v_{j}-\nabla\left(p_{\ell}-p_{j}\right)+\Div R_{\ell},\label{eq:vl_vj_subtract} \end{align} and \begin{align} \nabla\left(p_{\ell}-p_{j}\right) & =\mathcal{P}_{1}\left(-\left(v_{\ell}-v_{j}\right)\cdot\nabla v_{\ell}-\left(v_{\ell}-v_{j}\right)\cdot\nabla v_{j}+\Div R_{\ell}\right),\label{eq:vl_vj_subtract2} \end{align} where $\mathcal{P}_{1}$ is as defined in \Appref{Geometric-preliminaries}, and (\ref{eq:ident_P1}) was implicitly used. Then, as usual, (\ref{eq:vestimate-1}) and (\ref{eq:vlestimate}) follow from Gronwall and modified transport estimates exactly as in \cite[Proposition 5.3]{derosaInfinitelyManyLeray2019} (which in turn mirrors \cite[Proposition 3.3]{buckmasterOnsagerConjectureAdmissible2017}). To derive (\ref{eq:vtransportestimate-1}) from (\ref{eq:vlestimate}), we observe that \begin{align} \|(-\Delta)^{\gamma}(v_{j}-v_{\ell})\|_{N+\alpha} & \lesssim\|v_{j}-v_{\ell}\|_{N+2\alpha+2\gamma}\nonumber \\ & \lesssim\epsilon_{q}\tau_{q}\delta_{q+1}\ell^{-1-2\gamma-N}\nonumber \\ & \lesssim\epsilon_{q}\delta_{q+1}\ell^{-N-1+\alpha}\label{eq:new_in_glue} \end{align} where the last inequality comes from (\ref{eq:diss_1}). 
\end{proof} We have proven (\ref{eq:glue1}) for any $t\in\left(t_{j},t_{j}+2\tau_{q}\right)$. Now we define the potentials $z_{j}:=\mathcal{B}v_{j}$, $z_{\ell}:=\mathcal{B}v_{\ell}$, where $\mathcal{B}$ is as defined in \Appref{Geometric-preliminaries}. \begin{prop} \label{prop:zglue}For $N\in\mathbb{N}_{0}$ and $t\in\left(t_{j},t_{j}+2\tau_{q}\right)$: \begin{align} \|z_{j}-z_{\ell}\|_{N+\alpha} & \lesssim_{N}\epsilon_{q}\tau_{q}\delta_{q+1}\ell^{-N+\alpha}\label{eq:zestimate}\\ \|(\partial_{t}+v_{\ell}\cdot\nabla+\nu\left(-\Delta\right)^{\gamma})\left(z_{j}-z_{\ell}\right)\|_{N+\alpha} & \lesssim_{N}\epsilon_{q}\delta_{q+1}\ell^{-N+\alpha}\label{eq:ztransport_lambda_est}\\ \|D_{t,\ell}\left(z_{j}-z_{\ell}\right)\|_{N+\alpha} & \lesssim_{N}\epsilon_{q}\delta_{q+1}\ell^{-N+\alpha}\label{eq:ztransportestimate} \end{align} \end{prop} \begin{proof} First, we note that for any divergence-free vector field $X$ and 2-form $\omega$, we have \begin{align*} X^{i}\partial_{i}\partial^{j}\omega_{jk} & =\partial^{j}\left(X^{i}\partial_{i}\omega_{jk}\right)-\partial_{i}\left(\partial^{j}X^{i}\omega_{jk}\right)\\ \left(\partial^{j}\omega_{jk}\right)\partial^{k}X^{i} & =\partial^{j}\left(\omega_{jk}\partial^{k}X^{i}\right)\\ \left[d,\nabla_{X}\right]\omega & =dx^{i}\wedge\partial_{i}\nabla_{X}\omega-\nabla_{X}\left(dx^{i}\wedge\partial_{i}\omega\right)\\ & =dx^{i}\wedge\left(\partial_{i}X^{j}\right)\left(\partial_{j}\omega\right)=\partial_{j}\left(\left(\partial_{i}X^{j}\right)dx^{i}\wedge\omega\right). \end{align*} Because we only care about estimates instead of how the indices contract, we can write in schematic notation (neglecting indices and linear combinations): \begin{align*} \nabla_{X}\delta\omega & =\delta\left(\nabla_{X}\omega\right)+\Div\left(\nabla X*\omega\right)\\ \left(\delta\omega\right)\cdot\nabla X & =\Div\left(\nabla X*\omega\right)\\ \left[d,\nabla_{X}\right]\omega & =\Div\left(\nabla X*\omega\right) \end{align*} Define $\widetilde{z}:=z_{\ell}-z_{j}$. 
Then we have $d\widetilde{z}=0$ and $\sharp\delta\widetilde{z}=v_{\ell}-v_{j}$. From (\ref{eq:vl_vj_subtract}) and the schematic identities above, we have, \begin{align*} \delta\left(\partial_{t}\widetilde{z}+\nabla_{v_{\ell}}\widetilde{z}+\nu \left(-\Delta\right)^{\gamma}\widetilde{z}\right) & =\Div\left(\nabla v_{j,\ell}*\widetilde{z}\right)-d\left(p_{\ell}-p_{j}\right)+\Div R_{\ell}\\ d\left(\partial_{t}\widetilde{z}+\nabla_{v_{\ell}}\widetilde{z}+\nu \left(-\Delta\right)^{\gamma}\widetilde{z}\right) & =\Div\left(\nabla v_{\ell}*\widetilde{z}\right), \end{align*} and thus \begin{align*} \partial_{t}\widetilde{z}+\nabla_{v_{\ell}}\widetilde{z}+\nu \left(-\Delta\right)^{\gamma}\widetilde{z} & =\left(-\Delta\right)^{-1}d\circ\Div\left(\nabla v_{j,\ell}*\widetilde{z}+R_{\ell}\right)+\left(-\Delta\right)^{-1}\delta\circ\Div\left(\nabla v_{\ell}*\widetilde{z}\right), \end{align*} where $v_{j,\ell}$ could be $v_{j}$ or $v_{\ell}$ (they obey the same estimates by \Lemref{local_existence}). As $\left(-\Delta\right)^{-1}d\circ\Div$ and $\left(-\Delta\right)^{-1}\delta\circ\Div$ are Calderón-Zygmund operators, we have \begin{align} &\left\Vert \left(D_{t,\ell}+\nu\left(-\Delta\right)^{\gamma}\right)\widetilde{z}\right\Vert _{N+\alpha}\nonumber \\ &\hspace{0.2in} \lesssim\left\Vert \nabla v_{j,\ell}\right\Vert _{N+\alpha}\left\Vert \widetilde{z}\right\Vert _{\alpha}+\left\Vert \nabla v_{j,\ell}\right\Vert _{\alpha}\left\Vert \widetilde{z}\right\Vert _{N+\alpha}+\left\Vert R_{\ell}\right\Vert _{N+\alpha}\nonumber \\ &\hspace{0.2in} \lesssim\ell^{-N-\alpha}\lambda_{q}\delta_{q}^{\frac{1}{2}}\left\Vert \widetilde{z}\right\Vert _{\alpha}+\ell^{-\alpha}\lambda_{q}\delta_{q}^{\frac{1}{2}}\left\Vert \widetilde{z}\right\Vert _{N+\alpha}+\ell^{-N+\alpha}\epsilon_{q}\delta_{q+1}\nonumber \\ &\hspace{0.2in} \lesssim\ell^{-N+\alpha}\tau_{q}^{-1}\left\Vert \widetilde{z}\right\Vert _{\alpha}+\ell^{\alpha}\tau_{q}^{-1}\left\Vert \widetilde{z}\right\Vert 
_{N+\alpha}+\ell^{-N+\alpha}\epsilon_{q}\delta_{q+1}\label{eq:Lztilde} \end{align} where we have used (\ref{eq:tau_l}) to pass to the last line. By the modified transport estimate in \cite[Proposition 3.3]{derosaInfinitelyManyLeray2019}, we also have \begin{align} \left\Vert \widetilde{z}\left(t\right)\right\Vert _{\alpha} & \lesssim\int_{t_{j}}^{t}\left\Vert \left(D_{t,\ell}+\nu\left(-\Delta\right)^{\gamma}\right)\widetilde{z}\left(s\right)\right\Vert _{\alpha}\;\mathrm{d}s\label{eq:modified_transpot}\\ & \lesssim\ell^{\alpha}\tau_{q}^{-1}\int_{t_{j}}^{t}\left\Vert \widetilde{z}(s)\right\Vert _{\alpha}\;\mathrm{d}s+\epsilon_{q}^{2}\tau_{q}\delta_{q+1}\ell^{\alpha}\nonumber \end{align} By Gronwall, we obtain (\ref{eq:zestimate}) for $N=0$. For $N\geq1,$ we observe that \begin{align*} \left\Vert z_{j}-z_{\ell}\right\Vert _{N+\alpha}&\lesssim\left\Vert \nabla\left(z_{j}-z_{\ell}\right)\right\Vert _{N-1+\alpha}\\ &=\left\Vert \nabla\mathcal{B}\left(v_{j}-v_{\ell}\right)\right\Vert _{N-1+\alpha}\\ &\lesssim\left\Vert v_{j}-v_{\ell}\right\Vert _{N-1+\alpha}, \end{align*} where we have implicitly used the facts that $\nabla\mathcal{B}$ is Calderón-Zygmund, and that $\left\Vert f\right\Vert _{L^{\infty}}\lesssim\left\Vert \nabla f\right\Vert _{L^{\infty}}$ for any mean-zero $f\in C^{1}\left(\mathbb{T}^{d}\right)$ (Poincaré inequality). Then by (\ref{eq:vestimate-1}), we obtain (\ref{eq:zestimate}). From here, we note that (\ref{eq:Lztilde}) and (\ref{eq:zestimate}) imply (\ref{eq:ztransport_lambda_est}). It remains to show (\ref{eq:ztransportestimate}). For this, we argue as in (\ref{eq:new_in_glue}) and use (\ref{eq:diss_1}) to write \begin{align*} \|(-\Delta)^{\gamma}(z_{j}-z_{\ell})\|_{N+\alpha} & \lesssim\|z_{j}-z_{\ell}\|_{N+2\alpha+2\gamma}\\ &\lesssim\epsilon_{q}\tau_{q}\delta_{q+1}\ell^{-2\gamma-N}\\ &\lesssim\epsilon_{q}\delta_{q+1}\ell^{-N+\alpha}, \end{align*} as desired. 
\end{proof} Combining (\ref{eq:cutoff_time_deri}), (\ref{eq:zestimate}) and (\ref{eq:vestimate-1}), as well as the boundedness of the Calderón-Zygmund operator $\mathcal{R}\delta$, we obtain \begin{align} \|\partial_{t}\chi_{j}^{b}\mathcal{R}(v_{j}-v_{j+1})\|_{N+\alpha}=\|\partial_{t}\chi_{j}^{b}\mathcal{R}\delta(z_{j}-z_{j+1})\|_{N+\alpha} & \lesssim_{N}\delta_{q+1}\ell^{-N+\alpha}\label{eq:1stRv}, \end{align} and \begin{align} \|\chi_{j}^{b}(1-\chi_{j}^{b})(v_{j}-v_{j+1})\otimes(v_{j}-v_{j+1})\|_{N+\alpha} & \lesssim_{N}(\epsilon_{q}\tau_{q}\delta_{q+1}\ell^{-1+\alpha})^{2}\ell^{-N},\label{eq:2ndRV} \end{align} for $N\in\mathbb{N}_{0}$ and $t\in\left(t_{j+1},t_{j+1}+\epsilon_{q}\tau_{q}\right)$. Before we proceed, we will need a usual singular-integral commutator estimate from \cite{buckmasterOnsagerConjectureAdmissible2017} to handle the Calderón-Zygmund operator $\mathcal{R}\operatorname{curl}$. \begin{lem}[Proposition D.1 in \cite{buckmasterOnsagerConjectureAdmissible2017}] \label{lem:singular_comm}Let $\alpha\in\left(0,1\right),N\in\mathbb{N}_{0}$, $T$ be a Calderón-Zygmund operator and $b\in C^{N+1,\alpha}$ be a divergence-free vector field on $\mathbb{T}^{d}$. Then for any $f\in C^{N+\alpha}\left(\mathbb{T}^{d}\right)$, we have \[ \left\Vert \left[T,b\cdot\nabla\right]f\right\Vert _{N+\alpha}\lesssim_{N,\alpha,T}\left\Vert b\right\Vert _{1+\alpha}\left\Vert f\right\Vert _{N+\alpha}+\left\Vert b\right\Vert _{N+1+\alpha}\left\Vert f\right\Vert _{\alpha} \] \end{lem} We are now able to establish the relevant estimates for $\overline{R}_q$. 
\begin{prop} \label{prop:glued_stress_est}$\overline{R}_{q}$ in (\ref{eq:glued_stress}) admits the bounds \begin{align} \|\overline{R}_{q}\|_{N+\alpha} & \lesssim_{N}\delta_{q+1}\ell^{-N+\alpha}\label{eq:gluedR}\\ \|(\partial_{t}+\overline{v}_{q}\cdot\nabla)\overline{R}_{q}\|_{N+\alpha} & \lesssim_{N}(\epsilon_{q}\tau_{q})^{-1}\delta_{q+1}\ell^{-N+\alpha}\label{eq:gluedRtransport} \end{align} for $N\in\mathbb{N}_{0}$ and $t\in\left(t_{j+1},t_{j+1}+\epsilon_{q}\tau_{q}\right)$. \end{prop} \begin{proof} We observe that (\ref{eq:1stRv}) and (\ref{eq:2ndRV}) imply \[ \|\overline{R}_{q}\|_{N+\alpha}\lesssim_{N}\delta_{q+1}\ell^{-N+\alpha}(1+\epsilon_{q}\tau_{q}\delta_{q+1}\ell^{-1+\alpha})^{2}, \] and then (\ref{eq:gluedR}) follows from (\ref{eq:time-length}). On the other hand, we have \begin{align*} \left\Vert \left(\partial_{t}+\nabla_{\overline{v}_{q}}\right)\overline{R}_{q}\right\Vert _{N+\alpha} & \leq\left\Vert D_{t,\ell}\overline{R}_{q}\right\Vert _{N+\alpha}+\left\Vert \nabla_{\overline{v}_{q}-v_{\ell}}\overline{R}_{q}\right\Vert _{N+\alpha} \end{align*} where \begin{align*} D_{t,\ell}\overline{R}_{q} & =\left(\partial_{t}^{2}\chi_{j}^{b}\right)\mathcal{R}\delta\left(z_{j}-z_{j+1}\right)\\ & +\left(\partial_{t}\chi_{j}^{b}\right)\mathcal{R}\delta D_{t,\ell}\left(z_{j}-z_{j+1}\right)+\left(\partial_{t}\chi_{j}^{b}\right)\left[v_{\ell}\cdot\nabla,\mathcal{R}\delta\right]\left(z_{j}-z_{j+1}\right)\\ & +\partial_{t}\left(\left(\chi_{j}^{b}\right)^{2}-\chi_{j}^{b}\right)\left(v_{j}-v_{j+1}\right)\otimes\left(v_{j}-v_{j+1}\right)\\ & +\left(\left(\chi_{j}^{b}\right)^{2}-\chi_{j}^{b}\right)\left(D_{t,\ell}\left(v_{j}-v_{j+1}\right)\otimes\left(v_{j}-v_{j+1}\right)\right.\\ &\hspace{1.8in}\left.+\left(v_{j}-v_{j+1}\right)\otimes D_{t,\ell}\left(v_{j}-v_{j+1}\right)\right) \end{align*} The term involving $\left[v_{\ell}\cdot\nabla,\mathcal{R}\delta\right]$ can be handled by \Lemref{singular_comm}. 
Then by (\ref{eq:cutoff_time_deri}), (\ref{eq:gluedR}), \Propref[s]{vglue} and \ref{prop:zglue}, we conclude \begin{align*} \|(\partial_{t}+\overline{v}_{q}\cdot\nabla)\overline{R}_{q}\|_{N+\alpha} & \lesssim_{N}\left(\epsilon_{q}\tau_{q}\right)^{-1}\delta_{q+1}\ell^{-N+\alpha}\\ &\hspace{0.4in}+\tau_{q}^{-1}\delta_{q+1}\ell^{-N+\alpha}\\ &\hspace{0.4in}+\epsilon_{q}\tau_{q}\delta_{q+1}^{2}\ell^{-2-N+2\alpha} \end{align*} which then yields (\ref{eq:gluedRtransport}) because of (\ref{eq:time-length}). \end{proof} \subsection{Good-bad interface} Next we consider any pair of indices $i$ and $j$ such that $\chi_{i}^{g}\chi_{j}^{b}\not\equiv0$. By construction, we observe that $\operatorname{supp}(\chi_{i}^{g}\chi_{j}^{b})$ lies in an interval of length $\sim\epsilon_{q}\tau_{q}$, where $R_{q}$ is 0. Without loss of generality (i.e., depending on whether $\chi_{i}^{g}$ or $\chi_{j}^{b}$ comes first in time), in this interval $\overline{v}_{q}$ satisfies \[ \partial_{t}\overline{v}_{q}+\nu(-\Delta)^{\gamma}\overline{v}_{q}+\operatorname{div}\overline{v}_{q}\otimes\overline{v}_{q}+\nabla\overline{p}_{q}=\operatorname{div}\overline{R}_{q} \] where \begin{equation} \overline{R}_{q}=\partial_{t}\chi_{i}^{g}\mathcal{R}(v_{q}-v_{j})-\chi_{i}^{g}(1-\chi_{i}^{g})(v_{q}-v_{j})\otimes(v_{q}-v_{j})\label{eq:glued_stress-1} \end{equation} which is a perfect analogue of (\ref{eq:glued_stress}). As before, we decompose \[ v_{q}-v_{j}=(v_{q}-v_{\ell})-(v_{j}-v_{\ell}) \] The estimates for $v_{j}-v_{\ell}$ are exactly as above. Turning to $v_{q}-v_{\ell}$, the relevant estimates are given by the following result. 
\begin{prop} \label{prop:vglue-1}For $N\in\mathbb{N}_{0}$ and $t\in\mathcal{G}_{q}+B\left(0,\tau_{q}+\epsilon_{q}\tau_{q}\right)$: \begin{align} \|v_{q}-v_{\ell}\|_{N+\alpha} & \lesssim_{N}\epsilon_{q}\tau_{q}\delta_{q+1}\ell^{-N-1+\alpha}\label{eq:vestimate-1-1}\\ \|(\partial_{t}+v_{\ell}\cdot\nabla+\nu \left(-\Delta\right)^{\gamma})\left(v_{q}-v_{\ell}\right)\|_{N+\alpha} & \lesssim_{N}\epsilon_{q}\delta_{q+1}\ell^{-N-1+\alpha}\label{eq:vlestimate-1}\\ \|D_{t,\ell}\left(v_{q}-v_{\ell}\right)\|_{N+\alpha} & \lesssim_{N}\epsilon_{q}\delta_{q+1}\ell^{-N-1+\alpha}\label{eq:vtransportestimate-1-1} \end{align} \end{prop} \begin{proof} By standard mollification estimates (cf. \cite[Lemma 2.1]{contiHPrincipleRigidityAlpha2009}), we have \begin{align*} \|v_{q}-v_{\ell}\|_{N+\alpha} & \lesssim_{N}\ell_{q}\|v_{q}\|_{N+1+\alpha}\\ & \lesssim_{N}\delta_{q-1}^{\frac{1}{2}}\lambda_{q-1}\ell_{q}\ell_{q-1}^{-N-\alpha}\ll\delta_{q-1}^{\frac{1}{2}}\lambda_{q-1}\ell_{q}^{1-N-\alpha}\\ & \lesssim\epsilon_{q}\tau_{q}\delta_{q+1}\ell_{q}^{-N-1+\alpha} \end{align*} where we used (\ref{eq:goodbounds}) to pass to the second line, and (\ref{eq:good_bad1}) to pass to the last line. Thus (\ref{eq:vestimate-1-1}) is proven. 
Then as $R_{q}=0$ on this temporal region, we have an analogue of (\ref{eq:vl_vj_subtract}) and (\ref{eq:vl_vj_subtract2}), namely \begin{align} \left(\partial_{t}+v_{\ell}\cdot\nabla+\nu\left(-\Delta\right)^{\gamma}\right)\left(v_{\ell}-v_{q}\right) & =-\left(v_{\ell}-v_{q}\right)\cdot\nabla v_{q}-\nabla\left(p_{\ell}-p_{q}\right)+\Div R_{\ell}\label{eq:vl_vj_subtract-1} \end{align} and \begin{align} \nabla\left(p_{\ell}-p_{q}\right) & =\mathcal{P}_{1}\left(-\left(v_{\ell}-v_{q}\right)\cdot\nabla v_{\ell}-\left(v_{\ell}-v_{q}\right)\cdot\nabla v_{q}+\Div R_{\ell}\right) \end{align} Thus we can estimate $\left\Vert \nabla\left(p_{\ell}-p_{q}\right)\right\Vert _{N+\alpha}$ and then $$\|(\partial_{t}+v_{\ell}\cdot\nabla+\nu\left(-\Delta\right)^{\gamma})\left(v_{q}-v_{\ell}\right)\|_{N+\alpha}$$ to obtain (\ref{eq:vlestimate-1}). We then argue as in (\ref{eq:new_in_glue}) (replacing $v_{j}$ by $v_{q}$) to obtain (\ref{eq:vtransportestimate-1-1}). \end{proof} Note that we have fully proven (\ref{eq:glue1}). To proceed, we define the potentials $z_{q}:=\mathcal{B}v_{q},z_{\ell}:=\mathcal{B}v_{\ell}$. By observing that \Propref{vglue-1} plays the exact same role as \Propref{vglue}, and by arguing exactly as in \Propref{zglue} (replacing $v_{j}$ with $v_{q}$, and $z_{j}$ with $z_{q}$) we obtain \begin{align} \|z_{q}-z_{\ell}\|_{N+\alpha} & \lesssim_{N}\epsilon_{q}\tau_{q}\delta_{q+1}\ell^{-N+\alpha}\label{eq:zestimate-1}\\ \|(\partial_{t}+v_{\ell}\cdot\nabla+\nu\left(-\Delta\right)^{\gamma})\left(z_{q}-z_{\ell}\right)\|_{N+\alpha} & \lesssim_{N}\epsilon_{q}\delta_{q+1}\ell^{-N+\alpha}\label{eq:ztransport_lambda_est-1}\\ \|D_{t,\ell}\left(z_{q}-z_{\ell}\right)\|_{N+\alpha} & \lesssim_{N}\epsilon_{q}\delta_{q+1}\ell^{-N+\alpha}\label{eq:ztransportestimate-1} \end{align} for any $N\in\mathbb{N}_{0}$ and $t\in\mathcal{G}_{q}+B\left(0,\tau_{q}+\epsilon_{q}\tau_{q}\right)$. 
Then, as with (\ref{eq:1stRv}) and (\ref{eq:2ndRV}), we have \begin{align} \|\partial_{t}\chi_{i}^{g}\mathcal{R}(v_{q}-v_{j})\|_{N+\alpha} & \lesssim_{N}\delta_{q+1}\ell^{-N+\alpha}\label{eq:1stRv-1}\\ \|\chi_{i}^{g}(1-\chi_{i}^{g})(v_{q}-v_{j})\otimes(v_{q}-v_{j})\|_{N+\alpha} & \lesssim_{N}(\epsilon_{q}\tau_{q}\delta_{q+1}\ell^{-1+\alpha})^{2}\ell^{-N}\label{eq:2ndRV-1} \end{align} for any $N\in\mathbb{N}_{0}$ and $t\in\operatorname{supp}(\chi_{i}^{g}\chi_{j}^{b})$. We also have the analogue of \Propref{glued_stress_est}. By making the obvious replacements ($v_{j}-v_{j+1}$ with $v_{q}-v_{j}$, $z_{j}-z_{j+1}$ with $z_{q}-z_{j}$, and $\chi_{j}^{b}$ with $\chi_{i}^{g}$), we have \begin{align} \|\overline{R}_{q}\|_{N+\alpha} & \lesssim_{N}\delta_{q+1}\ell^{-N+\alpha}\label{eq:gluedR-1}\\ \|(\partial_{t}+\overline{v}_{q}\cdot\nabla)\overline{R}_{q}\|_{N+\alpha} & \lesssim_{N}(\epsilon_{q}\tau_{q})^{-1}\delta_{q+1}\ell^{-N+\alpha}\label{eq:gluedRtransport-1} \end{align} for any $N\in\mathbb{N}_{0}$ and $t\in\operatorname{supp}(\chi_{i}^{g}\chi_{j}^{b})$. \section{\label{sec:Convex-integration-and}Perturbation estimates} In this section, we prove \Propref{convex_int}, the perturbation result which was used in the proof of Proposition \ref{prop:iterative} (in Section \ref{sec:Proof-of-iteration}). We begin by recalling the definition of the Mikado flows from \cite[Lemma 5.1]{buckmasterOnsagerConjectureAdmissible2017}, which is valid for any dimension $d\geq3$ (see also \cite[Section 4.1]{cheskidovSharpNonuniquenessNavierStokes2020}). 
For any compact subset $\mathcal{N}\subset\subset\mathcal{S}_{+}^{d\times d}$, there is a smooth vector field $W:\mathcal{N}\times\mathbb{T}^{d}\to\mathbb{R}^{d}$ such that \begin{align} \Div_{\xi}W(R,\xi)\otimes W(R,\xi) & =0\label{eq:divf1}\\ \Div_{\xi}W(R,\xi) & =0\label{eq:divf2}\\ \Xint-_{\mathbb{T}^{d}}W\left(R,\xi\right)\;\mathrm{d}\xi & =0\\ \Xint-_{\mathbb{T}^{d}}W(R,\xi)\otimes W(R,\xi)\;\mathrm{d}\xi & =R \end{align} Unless otherwise noted, we set $\mathcal{N}=\overline{B_{1/2}(\textrm{Id})}$. By Fourier decomposition we have \begin{align*} W\left(R,\xi\right) & =\sum_{k\in\mathbb{Z}^{d}\backslash\{0\}}a_{k}\left(R\right)e^{i2\pi\left\langle k,\xi\right\rangle }, \end{align*} and \begin{align*} W(R,\xi)\otimes W(R,\xi) & =R+\sum_{k\in\mathbb{Z}^{d}\backslash\{0\}}C_{k}\left(R\right)e^{i2\pi\left\langle k,\xi\right\rangle }, \end{align*} where $a_{k}\left(R\right)$ and $C_{k}\left(R\right)$ are smooth in $R$, and with derivatives rapidly decaying in $k$. Furthermore, (\ref{eq:divf1}) and (\ref{eq:divf2}) imply \begin{align} k\cdot a_{k}\left(R\right) & =0\label{eq:k_ak} \end{align} and \begin{align} k^{\flat}\lrcorner C_{k}\left(R\right) & =0.\label{eq:kflat_c} \end{align} Now we recall the identity \[ v\lrcorner\left(\alpha\wedge\beta\right)=\left(v\lrcorner\alpha\right)\wedge\beta-\alpha\wedge\left(v\lrcorner\beta\right) \] for any vector field $v$, $1$-form $\alpha$, and differential form $\beta$. 
This implies \begin{align} \Div{}_{\xi}\left(\frac{k\wedge a_{k}}{i2\pi\left|k\right|^{2}}e^{i2\pi\left\langle k,\xi\right\rangle }\right) & =-\sharp\delta_{\xi}\left(\frac{k^{\flat}\wedge a_{k}^{\flat}}{i2\pi\left|k\right|^{2}}e^{i2\pi\left\langle k,\xi\right\rangle }\right)\label{eq:freeze_codiff}\\ & =\sharp\left(e^{i2\pi\left\langle k,\xi\right\rangle }i2\pi k\right)\lrcorner\left(\frac{k^{\flat}\wedge a_{k}^{\flat}}{i2\pi\left|k\right|^{2}}\right)=a_{k}e^{i2\pi\left\langle k,\xi\right\rangle }\label{eq:freeze_2} \end{align} where $k\wedge a_{k}$ is an alternating $(2,0)$-tensor dual to $k^{\flat}\wedge a_{k}^{\flat}$. Note that we implicitly used (\ref{eq:k_ak}). To handle the transport error later and generalize the ``vector calculus'' to higher dimensions, we also introduce a local-time version of Lagrangian coordinates.\footnote{The formalism is discussed in Tao's lecture notes, which can be found at \url{https://terrytao.wordpress.com/2019/01/08/255b-notes-2-onsagers-conjecture/}} \begin{defn}[Lagrangian coordinates] We define the backwards transport flow $\Phi_{i}$ as the solution to \begin{align*} \left(\partial_{t}+\overline{v}_{q}\cdot\nabla\right)\Phi_{i} & =0\\ \Phi_{i}\left(t_{i},\cdot\right) & =\mathrm{Id}_{\mathbb{T}^{d}} \end{align*} Then as in \cite[Proposition 3.1]{buckmasterOnsagerConjectureAdmissible2017}, for any $N\geq2$ and $\left|t-t_{i}\right|\lesssim\tau_{q}$: \begin{align} \left\Vert \nabla\Phi_{i}\left(t\right)-\mathrm{Id}\right\Vert _{0} & \lesssim\left|t-t_{i}\right|\left\Vert \nabla\overline{v}_{q}\right\Vert _{0}\lesssim\tau_{q}\delta_{q}^{\frac{1}{2}}\lambda_{q}=\lambda_{q}^{-3\alpha}\ll1\label{eq:phi_id}\\ \left\Vert \nabla^{N}\Phi_{i}\left(t\right)\right\Vert _{0} & \lesssim\left|t-t_{i}\right|\left\Vert \nabla^{N}\overline{v}_{q}\right\Vert _{0}\lesssim\lambda_{q}^{-3\alpha}\ell^{-N+1}\quad\label{eq:phi_id_2} \end{align} We also define the forward characteristic flow $X_{i}$ as the flow generated by $\overline{v}_{q}$: 
\begin{align*} \partial_{\underline{t}}X_{i}\left(\underline{t},\underline{x}\right) & =\overline{v}_{q}\left(\underline{t},X_{i}\left(\underline{t},\underline{x}\right)\right)\\ X_{i}\left(t_{i},\cdot\right) & =\mathrm{Id}_{\mathbb{T}^{d}} \end{align*} Then $\partial_{\underline{t}}\left(\Phi_{i}\left(\underline{t},X_{i}\left(\underline{t},\underline{x}\right)\right)\right)=0$. By defining their spacetime versions \begin{align*} \mathbf{\Phi}_{i}\left(t,x\right) & :=\left(t,\Phi_{i}\left(t,x\right)\right)\\ \mathbf{X}_{i}\left(\underline{t},\underline{x}\right) & :=\left(\underline{t},X_{i}\left(\underline{t},\underline{x}\right)\right) \end{align*} we can conclude $\mathbf{X}_{i}=\left(\mathbf{\Phi}_{i}\right)^{-1}$, and that $\mathbf{X}_{i}$ maps from the Lagrangian spacetime $\left(\underline{t},\underline{x}\right)$ to the Eulerian spacetime $\left(t,x\right)$. Let $\vol$ be the standard volume form of the torus. Then $X_{i}\left(\underline{t}\right)^{*}\vol=\vol$ (volume-preserving)\footnote{$\partial_{\underline{t}}\left(X_{i}\left(\underline{t}\right)^{*}\vol\right)=X_{i}\left(\underline{t}\right)^{*}\left(\mathcal{L}_{\overline{v}_{q}\left(\underline{t}\right)}\vol\right)=X_{i}\left(\underline{t}\right)^{*}\left(\Div\overline{v}_{q}\left(\underline{t}\right)\vol\right)=0.$ See also (\ref{eq:pullback_lie}).} and \begin{equation} X_{i}\left(\underline{t}\right)^{*}\left(\Div u\right)=\Div\left(X_{i}\left(\underline{t}\right)^{*}u\right)\label{eq:Div_commute} \end{equation} for any vector field $u$.\footnote{$X_{i}\left(\underline{t}\right)^{*}\left(\Div u\right)\vol=X_{i}\left(\underline{t}\right)^{*}\left(\mathcal{L}_{u}\vol\right)=\mathcal{L}_{X_{i}\left(\underline{t}\right)^{*}u}X_{i}\left(\underline{t}\right)^{*}\vol=\Div\left(X_{i}\left(\underline{t}\right)^{*}u\right)\vol$} \end{defn} \subsection{Constructing the perturbation} We now specify the key terms used to define our perturbation. 
Set \[ \underline{R_{i}}:=\mathbf{X}_{i}^{*}\left(\textrm{Id}-\frac{\overline{R}_{q}}{\delta_{q+1}}\right) \] where we treat $\overline{R}_{q}$ as a $(2,0)$-tensor. Indeed, we can write this more explicitly as \begin{equation} \underline{R_{i}}\circ\mathbf{\Phi}_{i}=\nabla\Phi_{i}\left(\textrm{Id}-\frac{\overline{R}_{q}}{\delta_{q+1}}\right)\nabla\Phi_{i}^{T}\label{eq:R_phiii} \end{equation} Note that, for $\left|t-t_{i}\right|\lesssim\tau_{q}$, we have \[ \underline{R_{i}}\circ\mathbf{\Phi}_{i}\in B_{1/2}(\textrm{Id}), \] because $\nabla\Phi_{i}$ is close to $\textrm{Id}$ and $\left\Vert \frac{\overline{R}_{q}}{\delta_{q+1}}\right\Vert _{0}\lesssim\ell^{\alpha}$ by (\ref{eq:glue3}). For each $i$ let $\rho_{i}$ be a smooth cutoff such that $\mathbf{1}_{\left[t_{i},t_{i}+\epsilon_{q}\tau_{q}\right]}\leq\rho_{i}\leq\mathbf{1}_{\left[t_{i}-\epsilon_{q}\tau_{q},t_{i}+2\epsilon_{q}\tau_{q}\right]}$ and satisfying the estimate \[ \left\Vert \partial_{t}^{N}\rho_{i}\right\Vert _{0}\lesssim\left(\epsilon_{q}\tau_{q}\right)^{-N}\;\forall N\in\mathbb{N}_{0} \] We now define the perturbation \begin{align*} w^{(o)} & :=\sum_{i}\delta_{q+1}^{1/2}\rho_{i}(t)\nabla\Phi_{i}^{-1}W(\underline{R_{i}}\circ\mathbf{\Phi}_{i},\lambda_{q+1}\Phi_{i}). 
\end{align*} For $t\in\left[t_{i}-\epsilon_{q}\tau_{q},t_{i}+2\epsilon_{q}\tau_{q}\right]$, in local-time Lagrangian coordinates with $$\underline{w^{(o)}}:=\mathbf{X}_{i}^{*}w^{(o)},$$ we have \begin{align*} \underline{w^{(o)}} & =\delta_{q+1}^{1/2}\rho_{i}(\underline{t})W(\underline{R_{i}},\lambda_{q+1}\underline{x})\\ & =\sum_{k\neq0}\underbrace{\delta_{q+1}^{1/2}\rho_{i}(\underline{t})a_{k}(\underline{R_{i}})}_{:=\underline{b_{i,k}}}e^{i2\pi\left\langle \lambda_{q+1}k,\underline{x}\right\rangle }=\sum_{k\neq0}\underline{b_{i,k}}e^{i2\pi\left\langle \lambda_{q+1}k,\underline{x}\right\rangle } \end{align*} and therefore, by defining $b_{i,k}:=\mathbf{\Phi}_{i}^{*}\underline{b_{i,k}}$ (zero-extended outside $\operatorname{supp}\rho_{i}$), we have \[ w^{\left(o\right)}=\sum_{i}\sum_{k\neq0}b_{i,k}e^{i2\pi\left\langle \lambda_{q+1}k,\Phi_{i}\right\rangle } \] Now, for $t\in\left[t_{i}-\epsilon_{q}\tau_{q},t_{i}+2\epsilon_{q}\tau_{q}\right]$, in local-time Lagrangian coordinates, we define the incompressibility corrector \[ \underline{w^{\left(c\right)}}:=\sum_{k\neq0}\underbrace{\delta_{q+1}^{1/2}\rho_{i}(\underline{t})\Div_{\underline{x}}\left(\frac{k\wedge a_{k}\left(\underline{R_{i}}\right)}{i2\pi\lambda_{q+1}\left|k\right|^{2}}\right)}_{:=\underline{c_{i,k}}}e^{i2\pi\left\langle \lambda_{q+1}k,\underline{x}\right\rangle }=\sum_{k\neq0}\underline{c_{i,k}}e^{i2\pi\left\langle \lambda_{q+1}k,\underline{x}\right\rangle }. 
\] Because of (\ref{eq:freeze_2}) and the identity $$\Div\left(fv\right)=\nabla f\lrcorner v^{\flat}+f\Div v,$$ which holds for any smooth function $f$ and vector field $v$, we have \begin{align*} \underline{w^{(o)}}+\underline{w^{\left(c\right)}} & =\delta_{q+1}^{1/2}\rho_{i}(\underline{t})\sum_{k\neq0}e^{i2\pi\left\langle \lambda_{q+1}k,\underline{x}\right\rangle }\left(a_{k}(\underline{R_{i}})+\Div_{\underline{x}}\left(\frac{k\wedge a_{k}\left(\underline{R_{i}}\right)}{i2\pi\lambda_{q+1}\left|k\right|^{2}}\right)\right)\\ & =\delta_{q+1}^{1/2}\rho_{i}(\underline{t})\sum_{k\neq0}\Div_{\underline{x}}\left(\frac{k\wedge a_{k}\left(\underline{R_{i}}\right)}{i2\pi\lambda_{q+1}\left|k\right|^{2}}e^{i2\pi\left\langle \lambda_{q+1}k,\underline{x}\right\rangle }\right) \end{align*} which is divergence-free, since $\Div\Div T=0$ for any alternating $(2,0)$-tensor on the flat torus. In Eulerian coordinates, we define $c_{i,k}:=\mathbf{\Phi}_{i}^{*}\underline{c_{i,k}}$ (zero-extended outside $\operatorname{supp}\rho_{i}$), as well as \begin{align*} w^{(c)} & :=\sum_{i}\sum_{k\neq0}c_{i,k}e^{i2\pi\left\langle \lambda_{q+1}k,\Phi_{i}\right\rangle } \end{align*} to obtain $\underline{w^{(c)}}=\mathbf{X}_{i}^{*}w^{(c)}$ for $t\in\left[t_{i}-\epsilon_{q}\tau_{q},t_{i}+2\epsilon_{q}\tau_{q}\right]$. Because of (\ref{eq:Div_commute}), the full perturbation \[ w_{q+1}:=w^{(o)}+w^{(c)} \] is divergence-free. 
With these ingredients in place, we now define \[ v_{q+1}:=\overline{v}_{q}+w_{q+1} \] and observe that \begin{align*} & \partial_{t}v_{q+1}+\Div\left(v_{q+1}\otimes v_{q+1}\right)+\nu\left(-\Delta\right)^{\gamma}v_{q+1}\\ =\; & \left(\partial_{t}\overline{v}_{q}+\Div\left(\overline{v}_{q}\otimes\overline{v}_{q}\right)+\nu\left(-\Delta\right)^{\gamma}\overline{v}_{q}\right)+\Div\left(w_{q+1}\otimes w_{q+1}\right)\\ & \quad+\partial_{t}w_{q+1}+\Div\left(\overline{v}_{q}\otimes w_{q+1}\right)+\Div\left(w_{q+1}\otimes\overline{v}_{q}\right)+\nu\left(-\Delta\right)^{\gamma}w_{q+1}\\ =\; & -\nabla\overline{p}_{q}+\Div\left(\overline{R}_{q}+w_{q+1}\otimes w_{q+1}\right)\\ & \quad+D_{t,q}w_{q+1}+w_{q+1}\cdot\nabla\overline{v}_{q}+\nu\left(-\Delta\right)^{\gamma}w_{q+1} \end{align*} We can then define the final stress as \begin{align*} R_{q+1} & :=R_{\mathrm{osc}}+R_{\mathrm{trans}}+R_{\mathrm{Nash}}+R_{\mathrm{dis}}\\ R_{\mathrm{osc}} & :=\mathcal{R}\Div\left(\overline{R}_{q}+w_{q+1}\otimes w_{q+1}\right)\\ R_{\mathrm{trans}} & :=\mathcal{R}D_{t,q}w_{q+1}\\ R_{\mathrm{Nash}} & :=\mathcal{R}\left(w_{q+1}\cdot\nabla\overline{v}_{q}\right)\\ R_{\mathrm{dis}} & :=\nu\mathcal{R}\left(-\Delta\right)^{\gamma}w_{q+1} \end{align*} \subsection{Perturbation estimates} To establish \Propref{convex_int}, we now have to estimate the perturbation constructed in the previous subsection. The desired bounds are established in the following series of results. \begin{prop} \label{prop:pertub_est} Suppose $t\in\left[t_{i}-\epsilon_{q}\tau_{q},t_{i}+2\epsilon_{q}\tau_{q}\right]$ and $N\in\mathbb{N}_{0}$. 
Then we have the following estimates, \begin{align} \left\Vert \nabla\Phi_{i}\right\Vert _{N}+\left\Vert \nabla\Phi_{i}^{-1}\right\Vert _{N} & \lesssim_{N}\ell^{-N}\label{eq:per_est1}\\ \left\Vert \underline{R_{i}}\circ\mathbf{\Phi}_{i}\right\Vert _{N} & \lesssim_{N}\ell^{-N}\label{eq:per_est2}\\ \left\Vert b_{i,k}\right\Vert _{N} & \lesssim_{N}\delta_{q+1}^{\frac{1}{2}}\ell^{-N}\left|k\right|^{-2d}\label{eq:per_est3}\\ \left\Vert c_{i,k}\right\Vert _{N} & \lesssim_{N}\delta_{q+1}^{\frac{1}{2}}\lambda_{q+1}^{-1}\ell^{-N-1}\left|k\right|^{-2d}\label{eq:per_est4} \end{align} along with their material derivative analogues, \begin{align} \left\Vert D_{t,q}\left(\nabla\Phi_{i}\right)\right\Vert _{N} & \lesssim_{N}\delta_{q}^{\frac{1}{2}}\lambda_{q}\ell^{-N}\label{eq:per_est5}\\ \left\Vert D_{t,q}\left(\underline{R_{i}}\circ\mathbf{\Phi}_{i}\right)\right\Vert _{N} & \lesssim_{N}\left(\epsilon_{q}\tau_{q}\right)^{-1}\ell^{-N+\alpha}\label{eq:per_est6}\\ \left\Vert D_{t,q}b_{i,k}\right\Vert _{N} & \lesssim_{N}\left(\epsilon_{q}\tau_{q}\right)^{-1}\delta_{q+1}^{1/2}\ell^{-N}\left|k\right|^{-2d}\label{eq:per_est7}\\ \left\Vert D_{t,q}c_{i,k}\right\Vert _{N} & \lesssim_{N}\left(\epsilon_{q}\tau_{q}\right)^{-1}\delta_{q+1}^{1/2}\lambda_{q+1}^{-1}\ell^{-N-1}\left|k\right|^{-2d}\label{eq:per_est8} \end{align} \end{prop} \begin{proof} We first observe that (\ref{eq:phi_id}) and (\ref{eq:phi_id_2}) imply $\left\Vert \nabla\Phi_{i}\right\Vert _{N}\lesssim\ell^{-N}$. Then the fact that $\nabla\Phi_{i}$ is close to $\mathrm{Id}$, and the elementary identity $d\left(A^{-1}\right)=-A^{-1}\left(dA\right)A^{-1}$ (for any invertible matrix $A$) imply (\ref{eq:per_est1}). 
Next, we observe that (\ref{eq:per_est1}) and (\ref{eq:glue3}) imply (\ref{eq:per_est2}), via the bounds \[ \left\Vert \underline{R_{i}}\circ\mathbf{\Phi}_{i}\right\Vert _{N}\lesssim_{N}\left\Vert \nabla\Phi_{i}\right\Vert _{0}^{2}\left\Vert \textrm{Id}-\frac{\overline{R}_{q}}{\delta_{q+1}}\right\Vert _{N}+\left\Vert \nabla\Phi_{i}\right\Vert _{N}\left\Vert \nabla\Phi_{i}\right\Vert _{0}\left\Vert \textrm{Id}-\frac{\overline{R}_{q}}{\delta_{q+1}}\right\Vert _{0}\lesssim\ell^{-N} \] Then, because of (\ref{eq:per_est2}), and the fact that the derivatives of $a_{k}$ rapidly decay in $k$, we obtain \begin{align*} \left\Vert b_{i,k}\right\Vert _{N} & =\left\Vert \delta_{q+1}^{1/2}\rho_{i}(t)\nabla\Phi_{i}^{-1}a_{k}\left(\underline{R_{i}}\circ\mathbf{\Phi}_{i}\right)\right\Vert _{N}\\ & \lesssim\delta_{q+1}^{1/2}\left(\left\Vert \nabla\Phi_{i}^{-1}\right\Vert _{N}\left\Vert a_{k}\left(\underline{R_{i}}\circ\mathbf{\Phi}_{i}\right)\right\Vert _{0}+\left\Vert \nabla\Phi_{i}^{-1}\right\Vert _{0}\left\Vert a_{k}\left(\underline{R_{i}}\circ\mathbf{\Phi}_{i}\right)\right\Vert _{N}\right)\\ & \lesssim\delta_{q+1}^{1/2}\ell^{-N}\left|k\right|^{-2d}, \end{align*} which establishes (\ref{eq:per_est3}). 
Similarly we obtain (\ref{eq:per_est4}) by writing, \begin{align*} \left\Vert c_{i,k}\right\Vert _{N} & =\left\Vert \delta_{q+1}^{1/2}\rho_{i}(t)\nabla\Phi_{i}^{-1}\Div_{\underline{x}}\left(\frac{k\wedge a_{k}\left(\underline{R_{i}}\right)}{i2\pi\lambda_{q+1}\left|k\right|^{2}}\right)\circ\mathbf{\Phi}_{i}\right\Vert _{N}\\ & \lesssim\delta_{q+1}^{1/2}\left|k\right|^{-1}\lambda_{q+1}^{-1}\left(\left\Vert \nabla\Phi_{i}^{-1}\right\Vert _{N}\left\Vert \nabla\left(a_{k}\left(\underline{R_{i}}\right)\right)\circ\mathbf{\Phi}_{i}\right\Vert _{0}\right.\\ &\hspace{1.6in}\left.+\left\Vert \nabla\Phi_{i}^{-1}\right\Vert _{0}\left\Vert \nabla\left(a_{k}\left(\underline{R_{i}}\right)\right)\circ\mathbf{\Phi}_{i}\right\Vert _{N}\right)\\ & \lesssim\delta_{q+1}^{1/2}\left|k\right|^{-2d}\lambda_{q+1}^{-1}\ell^{-N-1}, \end{align*} where we have implicitly used the chain rule \begin{equation} \nabla\left(a_{k}\left(\underline{R_{i}}\right)\right)\circ\mathbf{\Phi}_{i}=\nabla\left(a_{k}\left(\underline{R_{i}}\circ\mathbf{\Phi}_{i}\right)\right)\left(\nabla\Phi_{i}\right)^{-1}\label{eq:chain_r} \end{equation} in passing to the last line. We now turn to (\ref{eq:per_est5}), writing \begin{align*} \left\Vert D_{t,q}\nabla\Phi_{i}\right\Vert _{N} & =\left\Vert \nabla_{\overline{v}_{q}}\left(\nabla\Phi_{i}\right)+\nabla\partial_{t}\Phi_{i}\right\Vert _{N}\\ &=\left\Vert \left[\nabla_{\overline{v}_{q}},\nabla\right]\Phi_{i}\right\Vert _{N}\\ &\lesssim\left\Vert \nabla\overline{v}_{q}\right\Vert _{N}\left\Vert \nabla\Phi_{i}\right\Vert _{0}+\left\Vert \nabla\overline{v}_{q}\right\Vert _{0}\left\Vert \nabla\Phi_{i}\right\Vert _{N}\\ &\lesssim\delta_{q}^{\frac{1}{2}}\lambda_{q}\ell^{-N}. 
\end{align*} Next, we note that (\ref{eq:per_est5}), (\ref{eq:R_phiii}), (\ref{eq:glue3}) and (\ref{eq:glue4}) imply \begin{align*} &\left\Vert D_{t,q}\left(\underline{R_{i}}\circ\mathbf{\Phi}_{i}\right)\right\Vert _{N} \\ & \hspace{0.2in}\lesssim\left\Vert D_{t,q}\left(\nabla\Phi_{i}\right)\left(\textrm{Id}-\frac{\overline{R}_{q}}{\delta_{q+1}}\right)\nabla\Phi_{i}^{T}+\nabla\Phi_{i}\left(\textrm{Id}-\frac{\overline{R}_{q}}{\delta_{q+1}}\right)D_{t,q}\nabla\Phi_{i}^{T}\right\Vert _{N}\\ & \hspace{0.4in}+\delta_{q+1}^{-1}\left\Vert \nabla\Phi_{i}\left(D_{t,q}\overline{R}_{q}\right)\nabla\Phi_{i}^{T}\right\Vert _{N}\\ & \hspace{0.2in}\lesssim\delta_{q}^{\frac{1}{2}}\lambda_{q}\ell^{-N}+\left(\epsilon_{q}\tau_{q}\right)^{-1}\ell^{-N+\alpha}\\ &\hspace{0.2in}\lesssim\left(\epsilon_{q}\tau_{q}\right)^{-1}\ell^{-N+\alpha}, \end{align*} establishing (\ref{eq:per_est6}), where in passing to the last inequality, we have implicitly used $\delta_{q}^{\frac{1}{2}}\lambda_{q}\ll\epsilon_{q}^{-1}\tau_{q}^{-1}\ell^{\alpha}$, which comes from $\epsilon_{q}\ll1$ (after $\alpha$ is neglected). Turning to (\ref{eq:per_est7}), we recall the identity $\partial_{\underline{t}}\left(w\circ\mathbf{X}_{i}\right)=\left(D_{t,q}w\right)\circ\mathbf{X}_{i}$ (for any tensor $w$). We then use (\ref{eq:per_est6}), (\ref{eq:per_est2}), and (\ref{eq:per_est1}) to write \begin{align*} \left\Vert D_{t,q}b_{i,k}\right\Vert _{N}&=\left\Vert \partial_{\underline{t}}\left(\mathbf{\Phi}_{i}^{*}\underline{b_{i,k}}\circ\mathbf{X}_{i}\right)\circ\mathbf{\Phi}_{i}\right\Vert _{N}\\ &\hspace{0.2in}=\left\Vert \partial_{\underline{t}}\left(\left(\nabla X_{i}\right)\underline{b_{i,k}}\right)\circ\mathbf{\Phi}_{i}\right\Vert _{N}\\ &\hspace{0.2in}=\delta_{q+1}^{1/2}\left\Vert \partial_{\underline{t}}\left(\left(\nabla X_{i}\right)\rho_{i}\left(\underline{t}\right)a_{k}\left(\underline{R_{i}}\right)\right)\circ\mathbf{\Phi}_{i}\right\Vert _{N}. 
\end{align*} The right-hand side of the above is now bounded by \begin{align*} &\lesssim\delta_{q+1}^{1/2}\left(\epsilon_{q}\tau_{q}\right)^{-1}\left\Vert \left(\left(\nabla X_{i}\right)a_{k}\left(\underline{R_{i}}\right)\right)\circ\mathbf{\Phi}_{i}\right\Vert _{N}\\ &\hspace{0.2in}+\delta_{q+1}^{1/2}\left\Vert \partial_{\underline{t}}\left(\left(\nabla X_{i}\right)a_{k}\left(\underline{R_{i}}\right)\right)\circ\mathbf{\Phi}_{i}\right\Vert _{N}\\ &\lesssim\delta_{q+1}^{1/2}\left(\epsilon_{q}\tau_{q}\right)^{-1}\left\Vert \left(\nabla\Phi_{i}\right)^{-1}a_{k}\left(\underline{R_{i}}\circ\mathbf{\Phi}_{i}\right)\right\Vert _{N}\\ &\hspace{0.2in}+\delta_{q+1}^{1/2}\left\Vert \left(\nabla\left(\overline{v}_{q}\circ\mathbf{X}_{i}\right)a_{k}\left(\underline{R_{i}}\right)\right)\circ\mathbf{\Phi}_{i}\right\Vert _{N}\\ &\hspace{0.2in}+\delta_{q+1}^{1/2}\left\Vert \left(\nabla X_{i}\right)\left(\nabla a_{k}\left(\underline{R_{i}}\right)\partial_{\underline{t}}\left(\underline{R_{i}}\right)\right)\circ\mathbf{\Phi}_{i}\right\Vert _{N}\\ &\lesssim\left(\epsilon_{q}\tau_{q}\right)^{-1}\delta_{q+1}^{1/2}\ell^{-N}\left|k\right|^{-2d}\\ &\hspace{0.2in}+\delta_{q+1}^{1/2}\left\Vert \left(\nabla\overline{v}_{q}\right)\left(\nabla\Phi_{i}\right)^{-1}a_{k}\left(\underline{R_{i}}\circ\mathbf{\Phi}_{i}\right)\right\Vert _{N}\\ &\hspace{0.2in}+\delta_{q+1}^{1/2}\left\Vert \left(\nabla\Phi_{i}\right)^{-1}\left(\nabla a_{k}\left(\underline{R_{i}}\circ\mathbf{\Phi}_{i}\right)D_{t,q}\left(\underline{R_{i}}\circ\mathbf{\Phi}_{i}\right)\right)\right\Vert _{N}. 
\end{align*} This leads to the bound \begin{align*} &\left(\epsilon_{q}\tau_{q}\right)^{-1}\delta_{q+1}^{1/2}\ell^{-N}\left|k\right|^{-2d}+\delta_{q+1}^{1/2}\delta_{q}^{\frac{1}{2}}\lambda_{q}\ell^{-N}\left|k\right|^{-2d}+\left(\epsilon_{q}\tau_{q}\right)^{-1}\delta_{q+1}^{1/2}\ell^{-N+\alpha}\left|k\right|^{-2d}\\ &\hspace{0.2in}\lesssim\left(\epsilon_{q}\tau_{q}\right)^{-1}\delta_{q+1}^{1/2}\ell^{-N}\left|k\right|^{-2d} \end{align*} which completes the proof of (\ref{eq:per_est7}). It remains to show (\ref{eq:per_est8}). For this, we again use (\ref{eq:chain_r}), and write, using schematic notation, \begin{align*} & \left\Vert D_{t,q}c_{i,k}\right\Vert _{N}\\ &\hspace{0.2in}\sim\delta_{q+1}^{1/2}\lambda_{q+1}^{-1}\left|k\right|^{-1}\left\Vert \partial_{\underline{t}}\left(\rho_{i}(\underline{t})\left(\nabla X_{i}\right)*\nabla\left(a_{k}\left(\underline{R_{i}}\right)\right)\right)\circ\mathbf{\Phi}_{i}\right\Vert _{N}, \end{align*} which leads to the bound \begin{align*} &\delta_{q+1}^{1/2}\left(\epsilon_{q}\tau_{q}\right)^{-1}\lambda_{q+1}^{-1}\left|k\right|^{-1}\left\Vert \left(\nabla\Phi_{i}\right)^{-1}*\nabla\left(a_{k}\left(\underline{R_{i}}\circ\mathbf{\Phi}_{i}\right)\right)*\nabla\Phi_{i}^{-1}\right\Vert _{N}\\ &\hspace{0.2in} +\delta_{q+1}^{1/2}\lambda_{q+1}^{-1}\left|k\right|^{-1}\left\Vert \nabla\overline{v}_{q}*\nabla\Phi_{i}^{-1}*\nabla\left(a_{k}\left(\underline{R_{i}}\circ\mathbf{\Phi}_{i}\right)\right)*\nabla\Phi_{i}^{-1}\right\Vert _{N}\\ &\hspace{0.2in} +\delta_{q+1}^{1/2}\lambda_{q+1}^{-1}\left|k\right|^{-1}\left\Vert \left(\nabla\Phi_{i}\right)^{-1}*\nabla\left(\nabla a_{k}\left(\underline{R_{i}}\circ\mathbf{\Phi}_{i}\right)D_{t,q}\left(\underline{R_{i}}\circ\mathbf{\Phi}_{i}\right)\right)*\nabla\Phi_{i}^{-1}\right\Vert _{N}. 
\end{align*} This expression is in turn bounded by \begin{align*} &\left(\epsilon_{q}\tau_{q}\right)^{-1}\delta_{q+1}^{1/2}\lambda_{q+1}^{-1}\ell^{-N-1}\left|k\right|^{-2d}+\delta_{q+1}^{1/2}\lambda_{q+1}^{-1}\delta_{q}^{\frac{1}{2}}\lambda_{q}\ell^{-N-1}\left|k\right|^{-2d}\\ &\hspace{0.2in}\lesssim \left(\epsilon_{q}\tau_{q}\right)^{-1}\delta_{q+1}^{1/2}\lambda_{q+1}^{-1}\ell^{-N-1}\left|k\right|^{-2d}, \end{align*} as desired. This completes the proof of the stated estimates. \end{proof} We now record a useful corollary which will imply (\ref{eq:perturb_1}). \begin{cor} \label{cor:There-is-} There is $M=M(d)$ (independent of $q$) such that \begin{align} \|w^{(c)}\|_{0}+\lambda_{q+1}^{-1}\|\nabla w^{(c)}\|_{0} & \lesssim\delta_{q+1}^{1/2}\lambda_{q+1}^{-1}\ell^{-1}\label{eq:cor1}\\ \|w^{(o)}\|_{0}+\lambda_{q+1}^{-1}\|\nabla w^{(o)}\|_{0} & \leq\frac{M}{4}\delta_{q+1}^{1/2}\label{eq:cor2}\\ \|w_{q+1}\|_{0}+\lambda_{q+1}^{-1}\|\nabla w_{q+1}\|_{0} & \leq\frac{M}{2}\delta_{q+1}^{1/2}\label{eq:cor3} \end{align} \end{cor} \begin{proof} Without loss of generality, we may assume that $a$ is large enough to ensure $\left\Vert \nabla\Phi_{i}\right\Vert _{0}\leq2$. Recall that $t\in\left[t_{i}-\epsilon_{q}\tau_{q},t_{i}+2\epsilon_{q}\tau_{q}\right]$ so that $$\|w^{(c)}\|_{1}=\left\Vert \sum_{k\neq0}c_{i,k}e^{i2\pi\left\langle \lambda_{q+1}k,\Phi_{i}\right\rangle }\right\Vert _{1},$$ and (\ref{eq:cor1}) follows immediately from (\ref{eq:per_est4}) and (\ref{eq:length_Freq}). From the proof of (\ref{eq:per_est3}), there is $C=C(d)$ (independent of $q$) such that \[ \left\Vert b_{i,k}\right\Vert _{0}\leq C\delta_{q+1}^{\frac{1}{2}}\left|k\right|^{-2d} \] Then (\ref{eq:cor2}) and (\ref{eq:cor3}) follow immediately from (\ref{eq:per_est3}), (\ref{eq:length_Freq}) and (\ref{eq:cor1}). \end{proof} \subsection{Stress error estimates} Suppose $t\in\left[t_{i}-\epsilon_{q}\tau_{q},t_{i}+2\epsilon_{q}\tau_{q}\right]$. 
To complete the proof of \Propref{convex_int} it remains to prove \begin{equation} \left\Vert R_{q+1}\right\Vert _{\alpha}\lesssim\epsilon_{q+1}\delta_{q+2}\lambda_{q+1}^{-4\alpha}.\label{eq:fin_stress_er} \end{equation} We will often need to use an important antidivergence estimate from \cite{buckmasterOnsagerConjectureAdmissible2017}, stated in the following lemma. \begin{lem}[Proposition C.2 in \cite{buckmasterOnsagerConjectureAdmissible2017}] For any $N\in\mathbb{N}_{1}$, $u\in\mathfrak{X}\left(\mathbb{T}^{d}\right)$, and $\phi\in C^{\infty}\left(\mathbb{T}^{d}\to\mathbb{T}^{d}\right)$ such that $\frac{1}{2}\leq\left|\nabla\phi\right|\leq2$, we have \begin{align} &\left\Vert \mathcal{R}\left(u(x)e^{i2\pi\left\langle k,\phi\right\rangle }\right)\right\Vert _{\alpha}\nonumber\\ &\hspace{0.2in}\lesssim_{N}\left|k\right|^{\alpha-1}\left\Vert u\right\Vert _{0}+\left|k\right|^{\alpha-N}\left(\left\Vert u\right\Vert _{0}\left\Vert \phi\right\Vert _{N+\alpha}+\left\Vert u\right\Vert _{N+\alpha}\right).\label{eq:antidiv_est} \end{align} \end{lem} Another fact we will use often is that when $N$ is chosen large enough (independent of $q$), we have \begin{equation} \ell_{q}^{N+10\alpha}\lambda_{q+1}^{N-1-10\alpha}>1\label{eq:trick2} \end{equation} This comes from \[ -\beta b+\beta-1-\frac{\sigma}{2}+b\left(\frac{N-1-10\alpha}{N+10\alpha}\right)>0 \] which is implied by (\ref{eq:intermed_para}) when $N=N\left(b,\beta,\sigma,\alpha\right)$ is large enough. Unless otherwise noted, we will be using this choice of $N$. 
\subsubsection{Nash error} By using (\ref{eq:antidiv_est}) and \Propref{pertub_est}, we have \begin{align*} \left\Vert \mathcal{R}\left(w^{(o)}\cdot\nabla\overline{v}_{q}\right)\right\Vert _{\alpha} & \lesssim\sum_{k\neq0}\left\Vert \mathcal{R}\left(b_{i,k}\cdot\nabla\overline{v}_{q}e^{i2\pi\left\langle \lambda_{q+1}k,\Phi_{i}\right\rangle }\right)\right\Vert _{\alpha}\\ & \lesssim_{N}\sum_{k\neq0}\left|\lambda_{q+1}k\right|^{\alpha-1}\left|k\right|^{-2d}\delta_{q+1}^{\frac{1}{2}}\delta_{q}^{\frac{1}{2}}\lambda_{q}\\ & \quad+\left|\lambda_{q+1}k\right|^{\alpha-N}\left|k\right|^{-2d}\left(\delta_{q+1}^{\frac{1}{2}}\delta_{q}^{\frac{1}{2}}\lambda_{q}\ell^{-N-2\alpha}\right)\\ & \lesssim\delta_{q+1}^{\frac{1}{2}}\delta_{q}^{\frac{1}{2}}\lambda_{q+1}^{\alpha-1}\lambda_{q}\lesssim\epsilon_{q+1}\delta_{q+2}\lambda_{q+1}^{-4\alpha} \end{align*} where we used (\ref{eq:trick2}) to pass to the last line, and (\ref{eq:stress_size_ind1}) in the last inequality. Similarly, \begin{align*} \left\Vert \mathcal{R}\left(w^{(c)}\cdot\nabla\overline{v}_{q}\right)\right\Vert _{\alpha} & \lesssim\sum_{k\neq0}\left\Vert \mathcal{R}\left(c_{i,k}\cdot\nabla\overline{v}_{q}e^{i2\pi\left\langle \lambda_{q+1}k,\Phi_{i}\right\rangle }\right)\right\Vert _{\alpha}\\ & \lesssim_{N}\sum_{k\neq0}\left|\lambda_{q+1}k\right|^{\alpha-1}\left|k\right|^{-2d}\delta_{q+1}^{\frac{1}{2}}\delta_{q}^{\frac{1}{2}}\lambda_{q}\left(\lambda_{q+1}\ell\right)^{-1}\\ & \quad+\left|\lambda_{q+1}k\right|^{\alpha-N}\left|k\right|^{-2d}\left(\delta_{q+1}^{\frac{1}{2}}\delta_{q}^{\frac{1}{2}}\lambda_{q}\ell^{-N-2\alpha}\right)\left(\lambda_{q+1}\ell\right)^{-1}\\ & \lesssim\delta_{q+1}^{\frac{1}{2}}\lambda_{q+1}^{\alpha-1}\delta_{q}^{\frac{1}{2}}\lambda_{q}\lesssim\epsilon_{q+1}\delta_{q+2}\lambda_{q+1}^{-4\alpha} \end{align*} The only difference is the term $\left(\lambda_{q+1}\ell\right)^{-1}$ which is less than $1$ by (\ref{eq:length_Freq}). 
Thus we have \[ \left\Vert R_{\mathrm{Nash}}\right\Vert _{\alpha}\lesssim\epsilon_{q+1}\delta_{q+2}\lambda_{q+1}^{-4\alpha} \] \subsubsection{Transport error} The important observation here is that $D_{t,q}\left(e^{i2\pi\left\langle \lambda_{q+1}k,\Phi_{i}\right\rangle }\right)=0$, which helps avoid an extra factor $\lambda_{q+1}$. Arguing as above, we have \begin{align*} \left\Vert \mathcal{R}D_{t,q}w^{(o)}\right\Vert _{\alpha} & \lesssim\sum_{k\neq0}\left\Vert \mathcal{R}\left(D_{t,q}b_{i,k}e^{i2\pi\left\langle \lambda_{q+1}k,\Phi_{i}\right\rangle }\right)\right\Vert _{\alpha}\\ & \lesssim_{N}\sum_{k\neq0}\left|\lambda_{q+1}k\right|^{\alpha-1}\left|k\right|^{-2d}\delta_{q+1}^{\frac{1}{2}}\left(\epsilon_{q}\tau_{q}\right)^{-1}\\ & \quad+\left|\lambda_{q+1}k\right|^{\alpha-N}\left|k\right|^{-2d}\left(\delta_{q+1}^{\frac{1}{2}}\ell^{-N-\alpha}\right)\left(\epsilon_{q}\tau_{q}\right)^{-1}\\ & \lesssim\delta_{q+1}^{\frac{1}{2}}\lambda_{q+1}^{\alpha-1}\left(\epsilon_{q}\tau_{q}\right)^{-1}=\epsilon_{q}^{-1}\delta_{q+1}^{\frac{1}{2}}\delta_{q}^{\frac{1}{2}}\lambda_{q+1}^{\alpha-1}\lambda_{q}^{1+3\alpha}\\ & \lesssim\epsilon_{q+1}\delta_{q+2}\lambda_{q+1}^{-4\alpha} \end{align*} and \begin{align*} \left\Vert \mathcal{R}D_{t,q}w^{(c)}\right\Vert _{\alpha} & \lesssim\sum_{k\neq0}\left\Vert \mathcal{R}\left(D_{t,q}c_{i,k}e^{i2\pi\left\langle \lambda_{q+1}k,\Phi_{i}\right\rangle }\right)\right\Vert _{\alpha}\\ & \lesssim_{N}\sum_{k\neq0}\left|\lambda_{q+1}k\right|^{\alpha-1}\left|k\right|^{-2d}\delta_{q+1}^{\frac{1}{2}}\left(\epsilon_{q}\tau_{q}\right)^{-1}\left(\lambda_{q+1}\ell\right)^{-1}\\ & \quad+\left|\lambda_{q+1}k\right|^{\alpha-N}\left|k\right|^{-2d}\left(\delta_{q+1}^{\frac{1}{2}}\ell^{-N-\alpha}\right)\left(\epsilon_{q}\tau_{q}\right)^{-1}\left(\lambda_{q+1}\ell\right)^{-1}\\ & 
\lesssim\delta_{q+1}^{\frac{1}{2}}\lambda_{q+1}^{\alpha-1}\left(\epsilon_{q}\tau_{q}\right)^{-1}=\epsilon_{q}^{-1}\delta_{q+1}^{\frac{1}{2}}\delta_{q}^{\frac{1}{2}}\lambda_{q+1}^{\alpha-1}\lambda_{q}^{1+3\alpha}\\ & \lesssim\epsilon_{q+1}\delta_{q+2}\lambda_{q+1}^{-4\alpha} \end{align*} Thus we have $\left\Vert R_{\mathrm{trans}}\right\Vert _{\alpha}\lesssim\epsilon_{q+1}\delta_{q+2}\lambda_{q+1}^{-4\alpha}$. \subsubsection{Oscillation error} We observe that \begin{align*} R_{\mathrm{osc}} & :=\mathcal{R}\Div\left(\overline{R}_{q}+w_{q+1}\otimes w_{q+1}\right)\\ & =\underbrace{\mathcal{R}\Div\left(\overline{R}_{q}+w^{(o)}\otimes w^{(o)}\right)}_{:=\mathcal{O}_{1}}\\ &\hspace{0.2in}+\underbrace{\mathcal{R}\Div\left(w^{(c)}\otimes w^{(o)}+w^{(o)}\otimes w^{(c)}+w^{(c)}\otimes w^{(c)}\right)}_{:=\mathcal{O}_{2}} \end{align*} Then, using \Corref{There-is-}, and the fact that $\mathcal{R}\Div$ is a Calderón-Zygmund operator, we obtain \begin{align*} \left\Vert \mathcal{O}_{2}\right\Vert _{\alpha} & \lesssim\left\Vert w^{(c)}\right\Vert _{\alpha}\left\Vert w^{(o)}\right\Vert _{\alpha}+\left\Vert w^{(c)}\right\Vert _{\alpha}^{2}\lesssim\delta_{q+1}\left(\ell\lambda_{q+1}\right)^{-1}\\ & =\epsilon_{q}^{-\frac{1}{2}}\delta_{q+1}^{\frac{1}{2}}\delta_{q}^{\frac{1}{2}}\lambda_{q+1}^{-1}\lambda_{q}^{1+\frac{3\alpha}{2}}\lesssim\epsilon_{q+1}\delta_{q+2}\lambda_{q+1}^{-4\alpha} \end{align*} where we have once again used (\ref{eq:stress_size_ind1}). 
On the other hand, because $\rho_{i}^{2}=1$ on $\operatorname{supp}\overline{R}_{q}$, we have \begin{align*} \mathcal{O}_{1} & =\mathcal{R}\Div\left(\overline{R}_{q}+\delta_{q+1}\rho_{i}^{2}\mathbf{\Phi}_{i}^{*}\left(W(\underline{R_{i}},\lambda_{q+1}\cdot)\otimes W(\underline{R_{i}},\lambda_{q+1}\cdot)\right)\right)\\ & =\mathcal{R}\Div\bigg(\overline{R}_{q}+\delta_{q+1}\rho_{i}^{2}\bigg(\textrm{Id}-\frac{\overline{R}_{q}}{\delta_{q+1}}\bigg)\\ &\hspace{1.2in}+\delta_{q+1}\rho_{i}^{2}\mathbf{\Phi}_{i}^{*}\bigg(\sum_{k\in\mathbb{Z}^{d}\backslash\{0\}}C_{k}\left(\underline{R_{i}}\right)e^{i2\pi\left\langle \lambda_{q+1}k,\cdot\right\rangle }\bigg)\bigg)\\ & =\sum_{k\in\mathbb{Z}^{d}\backslash\{0\}}\delta_{q+1}\rho_{i}^{2}\mathcal{R}\Div\left(\mathbf{\Phi}_{i}^{*}\left(C_{k}\left(\underline{R_{i}}\right)\right)e^{i2\pi\left\langle \lambda_{q+1}k,\Phi_{i}\right\rangle }\right)\\ & =\sum_{k\in\mathbb{Z}^{d}\backslash\{0\}}\delta_{q+1}\rho_{i}^{2}\mathcal{R}\left(\Div\left(\mathbf{\Phi}_{i}^{*}\left(C_{k}\left(\underline{R_{i}}\right)\right)\right)e^{i2\pi\left\langle \lambda_{q+1}k,\Phi_{i}\right\rangle }\right)\\ &\hspace{0.8in}+\delta_{q+1}\rho_{i}^{2}\mathcal{R}\underbrace{\left(d_{x}\left(e^{i2\pi\left\langle \lambda_{q+1}k,\Phi_{i}\right\rangle }\right)\lrcorner\mathbf{\Phi}_{i}^{*}\left(C_{k}\left(\underline{R_{i}}\right)\right)\right)}_{\mathcal{O}_{3}} \end{align*} We note that \begin{align*} \mathcal{O}_{3} & =d_{x}\left(\mathbf{\Phi}_{i}^{*}e^{i2\pi\left\langle \lambda_{q+1}k,\cdot\right\rangle }\right)\lrcorner\mathbf{\Phi}_{i}^{*}\left(C_{k}\left(\underline{R_{i}}\right)\right)=\mathbf{\Phi}_{i}^{*}\left(\left(d_{\underline{x}}e^{i2\pi\left\langle \lambda_{q+1}k,\cdot\right\rangle }\right)\lrcorner C_{k}\left(\underline{R_{i}}\right)\right)=0 \end{align*} because of (\ref{eq:kflat_c}). 
Then because of (\ref{eq:antidiv_est}) and (\ref{eq:stress_size_ind1}): \begin{align*} \left\Vert \mathcal{O}_{1}\right\Vert _{\alpha} & \lesssim\sum_{k\in\mathbb{Z}^{d}\backslash\{0\}}\left\Vert \delta_{q+1}\mathcal{R}\left(\Div\left(\nabla\Phi_{i}^{-1}C_{k}\left(\underline{R_{i}}\circ\mathbf{\Phi}_{i}\right)\nabla\Phi_{i}^{-T}\right)e^{i2\pi\left\langle \lambda_{q+1}k,\Phi_{i}\right\rangle }\right)\right\Vert _{\alpha}\\ & \lesssim_{N}\sum_{k\neq0}\left|\lambda_{q+1}k\right|^{\alpha-1}\left|k\right|^{-2d}\delta_{q+1}\ell^{-1}+\left|\lambda_{q+1}k\right|^{\alpha-N}\left|k\right|^{-2d}\left(\delta_{q+1}\ell^{-N-3\alpha}\right)\ell^{-1}\\ & \lesssim\lambda_{q+1}^{\alpha-1}\delta_{q+1}\ell^{-1}=\epsilon_{q}^{-\frac{1}{2}}\delta_{q+1}^{\frac{1}{2}}\delta_{q}^{\frac{1}{2}}\lambda_{q+1}^{\alpha-1}\lambda_{q}^{1+\frac{3\alpha}{2}}\lesssim\epsilon_{q+1}\delta_{q+2}\lambda_{q+1}^{-4\alpha} \end{align*} Therefore $\left\Vert R_{\mathrm{osc}}\right\Vert _{\alpha}\lesssim\epsilon_{q+1}\delta_{q+2}\lambda_{q+1}^{-4\alpha}$. \subsubsection{Dissipative error} Without loss of generality, we may assume $2\alpha+2\gamma<1$ (by choosing $\alpha$ sufficiently small). Because $\mathcal{R}$ and $\left(-\Delta\right)^{\gamma}$ commute, and because $\left(-\Delta\right)^{\gamma}$ is a bounded map from $C^{2\gamma+2\alpha}$ to $C^{\alpha}$ (\cite[Theorem B.1]{derosaInfinitelyManyLeray2019}), we have \begin{align*} \left\Vert R_{\mathrm{dis}}\right\Vert _{\alpha} & \lesssim\left\Vert \mathcal{R}w_{q+1}\right\Vert _{2\alpha+2\gamma}\lesssim\left\Vert \mathcal{R}w_{q+1}\right\Vert _{1}^{2\gamma+2\alpha}\left\Vert \mathcal{R}w_{q+1}\right\Vert _{0}^{1-2\gamma-2\alpha}. 
\end{align*} Then because $\nabla\mathcal{R}$ is a Calderón-Zygmund operator, and because of \Corref{There-is-}: \[ \left\Vert \mathcal{R}w_{q+1}\right\Vert _{1}\lesssim\left\Vert \nabla\mathcal{R}w_{q+1}\right\Vert _{0}\leq\left\Vert \nabla\mathcal{R}w_{q+1}\right\Vert _{\alpha}\lesssim\left\Vert w_{q+1}\right\Vert _{\alpha}\lesssim\delta_{q+1}^{\frac{1}{2}}\lambda_{q+1}^{\alpha} \] Meanwhile, because of (\ref{eq:antidiv_est}): \begin{align*} \left\Vert \mathcal{R}w_{q+1}\right\Vert _{\alpha} & =\sum_{k\neq0}\left\Vert \mathcal{R}\left(\left(b_{i,k}+c_{i,k}\right)e^{i2\pi\left\langle \lambda_{q+1}k,\Phi_{i}\right\rangle }\right)\right\Vert _{\alpha}\\ & \lesssim_{N}\sum_{k\neq0}\left|\lambda_{q+1}k\right|^{\alpha-1}\left|k\right|^{-2d}\delta_{q+1}^{\frac{1}{2}}+\left|\lambda_{q+1}k\right|^{\alpha-N}\left|k\right|^{-2d}\delta_{q+1}^{\frac{1}{2}}\ell^{-N-\alpha}\\ & \lesssim\delta_{q+1}^{\frac{1}{2}}\lambda_{q+1}^{\alpha-1} \end{align*} Therefore: \[ \left\Vert R_{\mathrm{dis}}\right\Vert _{\alpha}\lesssim\delta_{q+1}^{\frac{1}{2}}\lambda_{q+1}^{\alpha\left(2\gamma+2\alpha\right)+\left(\alpha-1\right)\left(1-2\gamma-2\alpha\right)}\lesssim\epsilon_{q+1}\delta_{q+2}\lambda_{q+1}^{-4\alpha} \] because of (\ref{eq:stress_size_ind3}), when $\alpha=\alpha\left(\sigma,b,\beta,\gamma\right)$ is small enough. This completes the proof of (\ref{eq:fin_stress_er}), and therefore of Proposition \ref{prop:convex_int}. \appendix \section{\label{app:Geometric-preliminaries}Geometric preliminaries} We recall the Hodge decomposition \[ \mathrm{Id}=\mathcal{P}_{1}+\mathcal{P}_{2}+\mathcal{P}_{3} \] where $\mathcal{P}_{1}:=d\left(-\Delta\right)^{-1}\delta$ and $\mathcal{P}_{2}:=\delta\left(-\Delta\right)^{-1}d$ and $\mathcal{P}_{3}$ maps to harmonic forms (cf. \cite[Section 5.8]{Taylor_PDE1}). We observe that $\mathcal{P}_{1},\mathcal{P}_{2}$ are Calderón-Zygmund operators. 
We also recall that $\delta=-\Div$, where $\left(\Div T\right)^{i_{1}...i_{k}}=\nabla_{j}T^{ji_{1}...i_{k}}$ for any tensor $T$. Due to the musical isomorphism, the Hodge projections $\mathcal{P}_{i}$ are also defined on vector fields, and we also write $\sharp\mathcal{P}_{i}\flat$ as $\mathcal{P}_{i}$ for convenience (unless ambiguity arises). Because the torus is flat, we have the identities \begin{align} \delta\flat\left(X\cdot\nabla Y\right) & =\delta\flat\left(Y\cdot\nabla X\right)\nonumber \\ \mathcal{P}_{1}\left(X\cdot\nabla Y\right) & =\mathcal{P}_{1}\left(Y\cdot\nabla X\right)\label{eq:ident_P1} \end{align} for any divergence-free vector fields $X,Y$. On the torus, harmonic 1-forms (or vector fields) are precisely those which have mean zero. \begin{defn}[Time-dependent Lie derivative] For any smooth family of diffeomorphisms $\left(F_{t}\right)$ and differential forms $\left(\alpha_{t}\right)$ we have \[ \partial_{t}\left(F_{t}^{*}\alpha_{t}\right)=F_{t}^{*}\left(\mathcal{L}_{X_{t}}\alpha_{t}+\partial_{t}\alpha_{t}\right) \] where $\left(X_{t}\right)$ is a time-dependent vector field defined by $\partial_{t}F_{t}=X_{t}\circ F_{t}$. \end{defn} \begin{lem} For any diffeomorphism $\Phi$, vector field $u$ and differential form $\alpha$, we recall the pullback identity: \begin{align} \Phi^{*}\left(\mathcal{L}_{u}\alpha\right) & =\mathcal{L}_{\Phi^{*}u}\Phi^{*}\alpha\label{eq:pullback_lie} \end{align} \end{lem} \begin{rem} The pullback of a 1-form has a different meaning from the pullback of a vector field, and we do not have $\Phi^{*}\flat X=\flat\Phi^{*}X$ unless $\Phi$ is an isometry. \end{rem} We conclude this appendix by introducing several operators that play a key role in our analysis. 
In particular, we will make use of the antidivergence operator \[\mathcal{R}:C^{\infty}\left(\mathbb{T}^{d},\mathbb{R}^{d}\right)\to C^{\infty}\left(\mathbb{T}^{d},\mathcal{S}_{0}^{d\times d}\right),\] given by \begin{align} \left(\mathcal{R}v\right)_{ij} & =\mathcal{R}_{ijk}v^{k},\label{eq:antidiv} \end{align} with \begin{align} \mathcal{R}_{ijk} & :=-\frac{d-2}{d-1}\Delta^{-2}\partial_{i}\partial_{j}\partial_{k}-\frac{1}{d-1}\Delta^{-1}\partial_{k}\delta_{ij}+\Delta^{-1}\partial_{i}\delta_{jk}+\Delta^{-1}\partial_{j}\delta_{ik}.\nonumber \end{align} Note that $\Div\mathcal{R}v=v-\Xint-_{\mathbb{T}^{d}}v=\left(1-\mathcal{P}_{3}\right)v$ for any vector field $v$. Moreover, using the musical isomorphism, the operator $\mathcal{R}$ can also be defined on 1-forms, and we will often write $\mathcal{R}\sharp$ as $\mathcal{R}$ to simplify notation. We also define the higher-dimensional analogue of the Biot-Savart operator as $\mathcal{B}:=\left(-\Delta\right)^{-1}d\flat$, mapping from vector fields to 2-forms. We then have \[ \sharp\delta\mathcal{B}=\mathcal{P}_{2} \] which implies $\sharp\delta\mathcal{B}v=v-\Xint-_{\mathbb{T}^{d}}v=\mathcal{P}_{2}v$ for any divergence-free vector field $v$. \end{document}
\begin{document} \title{Stochastic homogenization of convolution type operators} \author{ A.~Piatnitski$^{\circ,\sharp}$, E.~Zhizhina$^\sharp$} \date{} \maketitle \parskip 0.04 truein \begin{center} $^\sharp$ Institute for Information Transmission Problems RAS\\ Bolshoi Karetny per., 19, Moscow, 127051, Russia \end{center} \begin{center} $^\circ$ Arctic University of Norway, UiT, campus Narvik,\\ Postbox 385, 8505 Narvik, Norway\\ \end{center} \begin{abstract} This paper deals with the homogenization problem for convolution type non-local operators in random statistically homogeneous ergodic media. Assuming that the convolution kernel has a finite second moment and satisfies the uniform ellipticity and certain symmetry conditions, we prove the almost sure homogenization result and show that the limit operator is a second order elliptic differential operator with constant deterministic coefficients. \end{abstract} \noindent {\bf Keywords}: \ stochastic homogenization, non-local random operators, convolution type kernels \\ \noindent {\bf AMS Subject Classification}: \ 35B27, 45E10, 60H25, 47B25 \section{Introduction} The paper deals with homogenization problem for integral operators of convolution type in $\mathbb R^d$ with dispersal kernels that have random statistically homogeneous ergodic coefficients. For such operators, under natural integrability, moment and uniform ellipticity conditions as well as the symmetry condition we prove the homogenization result and study the properties of the limit operator. The integral operators with a kernel of convolution type are of great interest both from the mathematical point of view and due to various important applications in other fields. Among such applications are models of population dynamics and ecological models, see \cite{OFetal}, \cite{DEE} and references therein, non-local diffusion problems, see \cite{AMRT, BCF}, continuous particle systems, see \cite{ FKK, KPZ}, image processing algorithms, see \cite{GiOs}. 
In the cited works only the case of homogeneous environments has been considered. In this case the corresponding dispersal kernel depends only on the displacement $y-x$. However, many applications deal with non-homogeneous environments. Such environments are described in terms of integral operators whose dispersal kernels depend not only on the displacement $x-y$ but also on the starting and the ending positions $x, y$. When studying the large-time behaviour of evolution processes in these environments it is natural to make the diffusive scaling in the corresponding integral operators and to consider the homogenization problem for the obtained family of operators with a small positive parameter. In what follows we call this parameter $\varepsilon$. The case of environments with periodic characteristics has been studied in the recent work \cite{PiZhi17}. It has been shown that under natural moment and symmetry conditions on the kernel the family of rescaled operators admits homogenization, and that for the corresponding jump Markov process the Central Limit Theorem and the Invariance Principle hold. Interesting homogenization problems for periodic operators containing both second order elliptic operator and nonlocal L\'evy type operator have been considered in \cite{Arisawa} and \cite{Sandric2016}. In the present paper we consider the more realistic case of environments with random statistically homogeneous characteristics. 
More precisely, we assume that the dispersal kernel of the studied operators has the form $\Lambda(x,y)a(x-y)$, $x,\,y\in\mathbb R^d$, where $a(z)$ is a deterministic even function that belongs to $L^1(\mathbb R^d)\cap L^2_{\rm loc}(\mathbb R^d)$ and has finite second moments, while $\Lambda(x,y)=\Lambda(x,y,\omega)$ is a statistically homogeneous symmetric ergodic random field that satisfies the uniform ellipticity conditions $0<\Lambda^-\leq \Lambda(x,y)\leq \Lambda^+$.\\ Making a diffusive scaling we obtain the family of operators \begin{equation}\label{L_u_biseps} (L^\varepsilon u)(x) \ = \ \varepsilon^{-d-2} \int\limits_{\mathbb R^d} a\Big(\frac{x-y}{\varepsilon}\Big) \Lambda\Big(\frac{x}{\varepsilon},\frac{y}{\varepsilon}\Big) (u(y) - u(x)) dy, \end{equation} where a positive scaling factor $\varepsilon$ is a parameter. For the presentation simplicity we assume in this paper that $\Lambda(x,y)=\mu(x)\mu(y)$ with a statistically homogeneous ergodic field $\mu$. However, all our results remain valid for the generic statistically homogeneous symmetric random fields $\Lambda(x,y)$ that satisfy the above ellipticity conditions. The main goal of this work is to investigate the limit behaviour of $L^\varepsilon$ as $\varepsilon\to 0$. We are going to show that the family $L^\varepsilon$ converges almost surely to a second order elliptic operator with constant deterministic coefficient in the so-called $G$-topology, that is for any $m>0$ the family of operators $(-L^\varepsilon+m)^{-1}$ almost surely converges strongly in $L^2(\mathbb R^d)$ to the operator $(-L^0+m)^{-1}$ where $L^0=\Theta^{ij}\frac{\partial^2}{\partial x^i\partial x^j}$, and $\Theta$ is a positive definite constant matrix. There is a vast existing literature devoted to homogenization theory of differential operators, at present it is a well-developed area, see for instance monographs \cite{BLP} and \cite{JKO}. 
The first homogenization results for divergence form differential operators with random coefficients were obtained in pioneer works \cite{Ko78} and \cite{PaVa79}. In these works it was shown that the generic divergence form second order elliptic operator with random statistically homogeneous coefficients admits homogenization. Moreover, the limit operator has constant coefficients, in the ergodic case these coefficients are deterministic. Later on a number of important homogenization results have been obtained for various elliptic and parabolic differential equations and system of equations in random stationary media. The reader can find many references in the book \cite{JKO}. Homogenization of elliptic difference schemes and discrete operators in statistically homogeneous media has been performed in \cite{Ko87}, \cite{Ko86}. Also, in \cite{Ko86} several limit theorems have been proved for random walks in stationary discrete random media that possess different types of symmetry. To our best knowledge in the existing literature there are no results on stochastic homogenization of convolution type integral operators with a dispersal kernel that has stationary rapidly oscillating coefficients. In the one-dimensional case a homogenization problem for the operators that have both local and non-local parts has been considered in the work \cite{Rho_Var2008}. This work deals with scaling limits of the solutions to stochastic differential equations in dimension one with stationary coefficients driven by Poisson random measures and Brownian motions. The annealed convergence theorem is proved, in which the limit exhibits a diffusive or superdiffusive behavior, depending on whether the Poisson random measure has a finite second moment or not. It is important in this paper that the diffusion coefficient does not degenerate. Our approach relies on asymptotic expansion techniques and using the so-called corrector. 
As often happens in the case of random environments we cannot claim the existence of a stationary corrector. Instead, we construct a corrector which is a random field in $\mathbb R^d$ with stationary increments and almost surely has a sublinear growth in $L^2(\mathbb R^d)$. \\ When substituting two leading terms of the expansion for the solution of the original equation, we obtain the discrepancies being oscillating functions with zero average. Some of these functions are not stationary. In order to show that the contributions of these discrepancies are asymptotically negligible we add to the expansion two extra terms. The necessity of constructing these terms is essentially related to the fact that, in contrast with the case of elliptic differential equations, the resolvent of the studied operator is not locally compact in $L^2(\mathbb R^d)$. The paper is organized as follows: In Section \ref{s_pbmset} we provide the detailed setting of the problem and formulate the main result of this work. The leading terms of the ansatz for a solution of equation $(L^\varepsilon-m)u^\varepsilon=f$ with $f\in C_0^\infty(\mathbb R^d)$ are introduced in Section \ref{s_asyexp}. Also in this section we outline the main steps of the proof of our homogenization theorem. Then in Section \ref{s_corr} we construct the principal corrector in the asymptotic expansion and study the properties of this corrector. Section \ref{s_addterms} is devoted to constructing two additional terms of the expansion of $u^\varepsilon$. Then we introduce the effective matrix and prove its positive definiteness. Estimates for the remainder in the asymptotic expansion are obtained in Section \ref{s_estrem}. Finally, in Section \ref{s_proofmain} we complete the proof of the homogenization theorem. 
\section{Problem setup and main result}\label{s_pbmset} \noindent We consider a homogenization problem for a random convolution type operator of the form \begin{equation}\label{L_u} (L_\omega u)(x) \ = \ \mu(x,\omega) \int\limits_{\mathbb R^d} a(x-y) \mu(y,\omega) (u(y) - u(x)) dy. \end{equation} For the function $a(z)$ we assume the following: \begin{equation}\label{A1} a(z) \in L^{1}(\mathbb R^d) \cap L^{2}_{\rm loc}(\mathbb R^d), \quad a(z) \ge 0; \quad a(-z) = a(z), \end{equation} and \begin{equation}\label{M2} \| a \|_{L^1(\mathbb R^d)} = \int\limits_{\mathbb R^d} a(z) \ dz = a_1 < \infty; \quad \sigma^2 = \int\limits_{\mathbb R^d} |z|^2 a(z) \ dz < \infty. \end{equation} We also assume that \begin{equation}\label{add} \mbox{there exists a constant} \; c_0>0 \; \mbox{ and a cube } \; {\bf B} \subset \mathbb R^d, \; \mbox{ such that } \; a(z) \ge c_0 \quad \mbox{for all } \; z \in {\bf B}. \end{equation} This additional condition on $a(z)$ is naturally satisfied for regular kernels, and we introduced \eqref{add} for a presentation simplicity. Assumption \eqref{add} essentially simplifies derivation of inequality \eqref{L2B}, on which the proof of the smallness of the first corrector is based, see Proposition \ref{1corrector} below. We notice that inequality \eqref{L2B} can also be derived without assumption \eqref{add}, however in this case additional arguments of measure theory are required. \\[5pt] Let $(\Omega,\mathcal{F}, \mathbb P)$ be a standard probability space. 
We assume that the random field $ \mu(x,\omega)= {\bm\mu} (T_x \omega) $ is stationary and bounded from above and from below: \begin{equation}\label{lm} 0< \alpha_1 \le \mu(x,\omega) \le \alpha_2 < \infty; \end{equation} here ${\bm\mu} (\omega) $ is a random variable, and $T_x$, $x\in \mathbb R^d$, is an ergodic group of measurable transformations acting in $\omega$-space $\Omega$, $T_x:\Omega \to\Omega$, and possessing the following properties: \begin{itemize} \item $T_{x+y}=T_x\circ T_y\quad\hbox{for all }x,\,y\in\mathbb R^d,\quad T_0={\rm Id}$, \item $\mathbb P(A)=\mathbb P(T_xA)$ for any $A\in\mathcal{F}$ and any $x\in\mathbb R^d$, \item $T_x$ is a measurable map from $\mathbb R^d\times \Omega$ to $\Omega$, where $\mathbb R^d$ is equipped with the Borel $\sigma$-algebra. \end{itemize} Let us consider a family of the following operators \begin{equation}\label{L_eps} (L^{\varepsilon}_\omega u)(x) \ = \ \frac{1}{\varepsilon^{d+2}} \int\limits_{\mathbb R^d} a \Big( \frac{x-y}{\varepsilon} \Big) \mu \Big( \frac{x}{\varepsilon},\omega \Big) \mu \Big( \frac{y}{\varepsilon},\omega \Big) \Big( u(y) - u(x) \Big) dy. \end{equation} We are interested in the limit behavior of the operators $L^{\varepsilon}_\omega$ as $\varepsilon \to 0$. We are going to show that for a.e. $\omega$ the operators $L^{\varepsilon}_\omega$ converge to a differential operator with constant coefficients in the topology of the resolvent convergence. Let us fix $m>0$, any $f \in L^2(\mathbb R^d)$, and define $u^{\varepsilon}$ as the solution of equation: \begin{equation}\label{u_eps} (L^{\varepsilon}_\omega - m) u^{\varepsilon} \ = \ f, \quad \mbox{ i.e. } \; u^{\varepsilon} \ = \ (L^{\varepsilon}_\omega - m)^{-1} f \end{equation} with $f \in L^2(\mathbb R^d)$. 
Denote by $\hat L$ the following operator in $L^2(\mathbb R^d)$: \begin{equation}\label{L_hat} \hat L u \ = \ \sum_{i,j = 1}^d \Theta_{i j} \frac{\partial^2 u}{\partial x_i \ \partial x_j}, \quad {\cal D}(\hat L) = H^2(\mathbb R^d) \end{equation} with a positive definite matrix $\Theta = \{ \Theta_{i j} \}, \ i,j = 1, \ldots, d,$ defined below, see (\ref{Positive}). Let $u_0(x)$ be the solution of equation \begin{equation}\label{u_0} \sum_{i,j = 1}^d \Theta_{i j} \frac{\partial^2 u_0}{\partial x_i \ \partial x_j} - m u_0 = f, \quad \mbox{ i.e. } \; u_0 \ = \ (\hat L - m)^{-1} f \end{equation} with the same right-hand side $f$ as in (\ref{u_eps}). \begin{theorem}\label{T1} Almost surely for any $f \in L^2(\mathbb R^d)$ and any $m>0$ the convergence holds: \begin{equation}\label{t1} \| (L^{\varepsilon}_\omega - m)^{-1} f - (\hat L - m)^{-1} f \|_{L^2(\mathbb R^d)} \ \to 0 \quad \mbox{ as } \; \varepsilon \to 0. \end{equation} \end{theorem} The statement of Theorem \ref{T1} remains valid in the case of non-symmetric operators $L^\varepsilon$ of the form \begin{equation}\label{L_eps_ns} (L^{\varepsilon,{\rm ns}}_\omega u)(x) \ = \ \frac{1}{\varepsilon^{d+2}} \int\limits_{\mathbb R^d} a \Big( \frac{x-y}{\varepsilon} \Big) \lambda \Big( \frac{x}{\varepsilon},\omega \Big) \mu \Big( \frac{y}{\varepsilon},\omega \Big) \Big( u(y) - u(x) \Big) dy \end{equation} with $\lambda(z,\omega)=\bm{\lambda}(T_z\omega)$ such that $0< \alpha_1 \le \lambda(x,\omega) \le \alpha_2 < \infty$. In this case the equation \eqref{u_eps} reads \begin{equation}\label{u_eps_nssss} (L^{\varepsilon,{\rm ns}}_\omega - m) u^{\varepsilon} \ = \ f. \end{equation} \begin{corollary}\label{cor_main} Let $\lambda(z,\omega)$ and $\mu(z,\omega)$ satisfy condition \eqref{lm}. Then a.s. 
for any $f\in L^2(\mathbb R^d)$ and any $m>0$ the limit relation in \eqref{t1} holds true with $\hat L^{\rm ns} u \ = \ \sum_{i,j = 1}^d \Theta^{\rm ns}_{i j} \frac{\partial^2 u}{\partial x_i \ \partial x_j}$, \ $\Theta^{\rm ns}=\big(\mathbb E \big\{\frac{\bm\mu}{\bm\lambda}\big\}\big)^{-1} \Theta$, and $\Theta$ defined in \eqref{Positive}. \end{corollary} \section{Asymptotic expansion for $u^\varepsilon$ }\label{s_asyexp} We begin this section by introducing a set of functions $f \in C_0^\infty(\mathbb R^d)$ such that $u_0 \ = \ (\hat L - m)^{-1} f\in C_0^\infty(\mathbb R^d)$. We denote this set by $ {\cal S}_0(\mathbb R^d)$. Observe that this set is dense in $L^2(\mathbb R^d)$. Indeed, if we take $\varphi(x)\in C^\infty(\mathbb R)$ such that $0\leq\varphi \leq 1$, $\varphi=1$ for $x\leq 0$ and $\varphi=0$ for $x\geq 1$, then letting $f_n=(\hat L-m)\big(\varphi(|x|-n)(\hat L-m)^{-1}f(x)\big)$ one can easily check that $f_n\in C_0^\infty(\mathbb R^d)$ and $\|f_n-f\|_{L^2(\mathbb R^d)}\to0$, as $n\to\infty$.\\ We consider first the case when $f \in {\cal S}_0(\mathbb R^d)$ and denote by $Q$ a cube centered at the origin and such that $\mathrm{supp}(u_0)\subset Q$. We want to prove the convergence \begin{equation}\label{convergence1} \| u^{\varepsilon} - u_0 \|_{L^2(\mathbb R^d)} \ \to 0, \quad \mbox{ as } \ \varepsilon \to 0, \end{equation} where the functions $u^\varepsilon$ and $u_0$ are defined in (\ref{u_eps}) and (\ref{u_0}), respectively. To this end we approximate the function $ u^\varepsilon (x, \omega)$ by means of the following ansatz \begin{equation}\label{v_eps} w^{\varepsilon}(x, \omega) \ = \ v^\varepsilon (x, \omega) + u_2^\varepsilon (x, \omega) + u_3^\varepsilon(x, \omega), \quad \mbox{ with } \; v^{\varepsilon}(x, \omega) \ = \ u_0(x)+ \varepsilon \theta \big(\frac{x}{\varepsilon}, \omega\big) \nabla u_0(x), \end{equation} where $\theta \big(z, \omega\big) $ is a vector function which is often called a corrector. 
It will be introduced later on as a solution of an auxiliary problem that does not depend on $\varepsilon$, see \eqref{korrkappa1}. A solution of this problem, $\theta(z,\omega)$ say, is defined up to an additive constant vector. \\ We set \begin{equation}\label{hi} \chi^\varepsilon (z,\omega) = \theta (z,\omega)+ c^\varepsilon (\omega), \quad c^\varepsilon (\omega) = - \frac{1}{|Q|} \int\limits_Q \theta \big( \frac{x}{\varepsilon},\omega \big) dx. \end{equation} Observe that under such a choice of the vector $c^\varepsilon$ the function $\chi^\varepsilon \big(\frac x\varepsilon,\omega\big)$ has zero average in $Q$. We show in Proposition \ref{1corrector} that $\varepsilon c^\varepsilon\to 0$ a.s. It should be emphasized that $\theta (y, \omega)$ need not be a stationary field, that is we do not claim that $\theta(y, \omega) = {\bm\theta} (T_y \omega)$ for some random vector ${\bm\theta}(\omega)$. Two other functions, $u_2^\varepsilon$ and $u_3^\varepsilon$, that appear in the ansatz in \eqref{v_eps} will be introduced in \eqref{corr-u2}, \eqref{u3}, respectively. After substituting $v^\varepsilon$ for $u$ in (\ref{L_eps}) we get $$ (L^{\varepsilon} v^{\varepsilon})(x) \ = \ \frac{1}{\varepsilon^{d+2}} \int\limits_{\mathbb R^d} a \big( \frac{x-y}{\varepsilon} \big) \mu \big( \frac{x}{\varepsilon} \big) \mu \big( \frac{y}{\varepsilon} \big) \Big( u_0(y)+ \varepsilon \theta \big(\frac{y}{\varepsilon}\big) \nabla u_0(y) - u_0(x)-\varepsilon \theta \big(\frac{x}{\varepsilon} \big) \nabla u_0(x) \Big) dy; $$ here and in what follows we drop the argument $\omega$ in the random fields $\mu(y,\omega)$, $\theta(y,\omega)$, etc., if it does not lead to ambiguity. 
After change of variables $\frac{x-y}{\varepsilon}=z$ we get \begin{equation}\label{ml_1} (L^{\varepsilon} v^{\varepsilon})(x) \ = \ \frac{1}{\varepsilon^{2}} \int\limits_{\mathbb R^d} dz \ a (z) \mu \big( \frac{x}{\varepsilon} \big) \mu \big( \frac{x}{\varepsilon} -z \big) \Big( u_0(x-\varepsilon z) - u_0(x) + \varepsilon \theta \big(\frac{x}{\varepsilon}-z \big) \nabla u_0 (x-\varepsilon z) -\varepsilon \theta \big( \frac{x}{\varepsilon} \big) \nabla u_0(x) \Big). \end{equation} The Taylor expansion of a function $u(y)$ with a remainder in the integral form reads $$ \begin{array}{c} u(y) \ = \ u(x) + \int_0^1 \nabla u (x + (y-x)t) \cdot (y-x) \ dt \\[3pt] = \ u(x) + \nabla u(x) \cdot (y-x) + \int_0^1 \nabla \nabla u(x+(y-x)t) (y-x) (y-x) (1-t) \ dt \end{array} $$ and is valid for any $x, y \in \mathbb R^d$. Thus we can rewrite (\ref{ml_1}) as follows \begin{eqnarray} (L^{\varepsilon} v^{\varepsilon})(x) \hskip -1.7cm &&\nonumber\\[1.6mm] \label{K2_1} &&\!\!\!\!\!=\, \frac{1}{\varepsilon} \mu \Big( \frac{x}{\varepsilon}, \omega \Big)\nabla u_0(x)\! \cdot\! \int\limits_{\mathbb R^d} \Big[ -z + \theta \Big(\frac{x}{\varepsilon}-z, \omega \Big) - \theta \Big(\frac{x}{\varepsilon}, \omega \Big) \Big] a (z) \mu \Big( \frac{x}{\varepsilon} -z, \omega \Big) \, dz \\[1mm] \nonumber &&\!\!\!\!\! +\,\mu \Big(\! \frac{x}{\varepsilon}, \omega \Big) \nabla \nabla u_0 (x)\!\cdot\! \int\limits_{\mathbb R^d}\! \Big[ \frac12 z\!\otimes\!z\! - z \!\otimes\!\theta \Big(\frac{x}{\varepsilon}\!-\!z,\omega \Big) \Big] a (z) \mu \Big( \frac{x}{\varepsilon}\! -\!z, \omega \Big) \, dz +\, \ \phi_\varepsilon (x) \\ \nonumber &&=: \frac{1}{\varepsilon} I^\varepsilon_{-1} + \varepsilon^0 I^\varepsilon_0 + \phi_\varepsilon \end{eqnarray} with \begin{equation}\label{14} \begin{array}{rl} \displaystyle \!\!\!\!&\hbox{ }\!\!\!\!\!\!\!\!\!\!\!\!\phi_\varepsilon (x, \omega) =\\[3mm] & \!\!\!\!\!\!\!\!\displaystyle \!\! \int\limits_{\mathbb R^d}\! 
a (z) \mu \Big( \frac{x}{\varepsilon},\omega \Big) \mu \Big( \frac{x}{\varepsilon}\! -\!z,\omega \Big) \bigg(\int\limits_0^{1} \nabla \nabla u_0(x-\varepsilon z t) \!\cdot\! z\!\otimes\!z \,(1-t) \ dt - \frac{1}{2} \nabla \nabla u_0(x)\!\cdot\! z\!\otimes\!z \bigg) \, dz \\[4mm] &\!\!\!\!\!\!\!\!\! \displaystyle +\, \frac{1}{\varepsilon} \mu \Big( \frac{x}{\varepsilon},\omega \Big) \int\limits_{\mathbb R^d} \ a (z) \mu \Big( \frac{x}{\varepsilon} -z, \omega \Big) \theta \Big(\frac{x}{\varepsilon}\!-\!z,\omega \Big)\! \Big(\nabla u_0(x- \varepsilon z) - \nabla u_0(x) \Big)\, dz \\[4mm] &\!\!\!\!\!\!\!\!\! \displaystyle + \mu \Big( \frac{x}{\varepsilon},\omega \Big) \nabla \nabla u_0(x) \int\limits_{\mathbb R^d} \ a (z) \mu \Big( \frac{x}{\varepsilon} -z, \omega \Big) z \otimes \theta \Big(\frac{x}{\varepsilon}\!-\!z,\omega \Big)\, dz. \end{array} \end{equation} Here and in what follows $z\otimes z$ stands for the matrix $\{z_iz_j\}_{i,j=1}^d$. Let us outline the main steps of the proof of relation \eqref{convergence1}. In order to make the term $I^\varepsilon_{-1}$ in \eqref{K2_1} equal to zero, we should construct a random field $\theta \big(z, \omega\big)$ that satisfies the following equation \begin{equation}\label{korr1} \int\limits_{\mathbb R^d} \Big( -z + \theta \big(\frac{x}{\varepsilon}-z, \omega \big) - \theta \big(\frac{x}{\varepsilon}, \omega\big) \Big) \, a (z) \mu \big( \frac{x}{\varepsilon} -z,\omega \big) \ dz \ = \ 0. \end{equation} The goal of the first step is to construct such a random field $\theta(z,\omega)$. Next we show that the second term $I^\varepsilon_0$ can be represented as a sum $$ I^\varepsilon_0 = \hat L u_0 + S\Big(\frac x\varepsilon,\omega\Big)\nabla\nabla u_0 + f_2^\varepsilon (x,\omega), $$ where $S(z,\omega)$ is a stationary matrix-field with zero average, and $f_2^\varepsilon (x,\omega)$ is a non-stationary term; both of them are introduced below. 
We define $u_2^\varepsilon$ and $u_3^\varepsilon$ by $$ (L^\varepsilon - m) u_2^\varepsilon = - S\Big(\frac x\varepsilon,\omega\Big)\nabla\nabla u_0, \quad (L^\varepsilon - m) u_3^\varepsilon = - f_2^\varepsilon (x,\omega), $$ and prove that $\| u_2^\varepsilon \|_{L^2(\mathbb R^d)} \to 0$, $\| u_3^\varepsilon \|_{L^2(\mathbb R^d)} \to 0$. Then considering the properties of the corrector $\theta$, see Theorem \ref{t_corrector}, we derive the limit relation $\|\varepsilon \theta\big(\frac x\varepsilon\big) \nabla u_0(x) \|_{L^2(\mathbb R^d)} \to 0$, as $\varepsilon \to 0$. This yields $\| w^\varepsilon - u_0 \| \to 0$. With this choice of $\theta$, $u_2^\varepsilon$ and $u_3^\varepsilon$ the expression $(L^\varepsilon - m) w^\varepsilon$ can be rearranged as follows: $$ (L^\varepsilon - m) w^\varepsilon = (L^\varepsilon - m) v^\varepsilon + (L^\varepsilon - m) (u_2^\varepsilon + u_3^\varepsilon) = (\hat L - m) u_0 + \phi_\varepsilon - m \varepsilon \theta \nabla u_0 $$ $$ = f + \phi_\varepsilon - m \varepsilon \theta \nabla u_0 = (L^\varepsilon - m) u^\varepsilon + \phi_\varepsilon - m \varepsilon \theta \nabla u_0. $$ We prove below in Lemma \ref{reminder} that $\|\phi_\varepsilon\|\big._{L^2(\mathbb R^d)}$ is vanishing as $\varepsilon \to 0$. This implies the convergence $\| w^\varepsilon - u^\varepsilon \|\big._{L^2(\mathbb R^d)} \to 0$ and, by the triangle inequality, the required relation in \eqref{convergence1}. \section{First corrector}\label{s_corr} In this Section we construct a solution of equation \eqref{korr1}. Denote \begin{equation}\label{fkorr1} r \big(\frac{x}{\varepsilon}, \omega\big) = \int\limits_{\mathbb R^d} z \, a (z) \, \mu \big( \frac{x}{\varepsilon} -z,\omega \big) \ dz, \end{equation} then $r(\xi, \omega) = \mathbf{r}(T_\xi \omega), \; \xi = \frac{x}{\varepsilon},$ is a stationary field. 
Moreover, since $\mathbb{E} \mu ( \xi -z,\omega )= \mathbb{E}{\bm\mu}(T_{\xi-z} \omega) = const$ for all $z$, then $$ \mathbb{E} r (\xi, \omega) = \int\limits_{\mathbb R^d} z \, a (z) \, \mathbb{E}\mu ( \xi -z,\omega ) \ dz \ = \ 0. $$ Equation \eqref{korr1} takes the form \begin{equation}\label{korrkappa1} r (\xi, \omega) \ = \ \int\limits_{\mathbb R^d} a (z) \mu ( \xi -z,\omega ) \, \big( \theta (\xi-z, \omega ) - \theta (\xi, \omega) \big) \ dz. \end{equation} We are going to show now that equation \eqref{korrkappa1} has a solution that possesses the following properties: \\[1.5mm] {\bf A}) the increments $\zeta_z(\xi, \omega) = \theta (z+\xi, \omega ) - \theta (\xi, \omega)$ are stationary for any given $z$, i.e. $$\zeta_z(\xi, \omega) = \zeta_z(0, T_\xi \omega);$$ {\bf B}) $\varepsilon \theta\big(\frac x\varepsilon,\omega\big) $ is a function of sub-linear growth in $L_{\rm loc}^2(\mathbb R^d)$: for any bounded Lipschitz domain $Q\subset \mathbb R^d$ $$ \Big\| \varepsilon \, \theta \big(\frac{x}{\varepsilon}, \omega \big) \Big\|_{L^2(Q)} \to 0 \quad \mbox{a.s.} \; \omega \in \Omega. $$ Here and in the sequel for presentation simplicity we write for the $L^2$ norm of a vector-function just $L^2(Q)$ instead of $L^2(Q\,;\,\mathbb R^d)$. \begin{theorem}\label{t_corrector} There exists a unique (up to an additive constant vector) solution $\theta\in L^2_{\rm loc}(\mathbb R^d)$ of equation \eqref{korrkappa1} that satisfies conditions {\bf A}{\rm )} -- {\bf B}{\rm )}. \end{theorem} \begin{proof}[Proof of Theorem \ref{t_corrector}] We divide the proof into several steps.\\ {\sl Step 1.} Consider the following operator acting in $L^2(\Omega)$: \begin{equation}\label{A-omega} (A \varphi)(\omega) = \int\limits_{\mathbb R^d} a(z) {\bm\mu}(T_z \omega) \big( \varphi (T_z \omega) - \varphi(\omega) \big) dz \end{equation} \begin{proposition}\label{spectrA} The spectrum $\sigma(A) \subset (-\infty, 0]$. 
\end{proposition} \begin{proof} It is straightforward to check that the operator $A$ is bounded and symmetric in the weighted space $L^2(\Omega, P_\mu) = L^2_\mu(\Omega)$ with $d P_\mu(\omega) = {\bm\mu}(\omega) d P(\omega)$. Denoting $\tilde \omega = T_z \omega, \ s=-z$, using stationarity of $\mu$ and considering the relation $a(-z) = a(z)$ we get \begin{equation}\label{PropA1} \begin{array}{c} \displaystyle \int\limits_\Omega \int\limits_{\mathbb R^d} a(z){\bm\mu}(T_z \omega){\bm\mu}(\omega) \varphi^2(T_z \omega) \, dz \, dP(\omega)= \int\limits_\Omega \int\limits_{\mathbb R^d} a(z) {\bm\mu}(\tilde \omega) {\bm\mu}(T_{-z} \tilde\omega) \varphi^2(\tilde\omega) \, dz \, dP(\tilde\omega) \\[3pt] \displaystyle = \int\limits_\Omega \int\limits_{\mathbb R^d} a(s){\bm\mu}( \omega){\bm\mu}(T_s \omega) \varphi^2(\omega)\, ds \, dP(\omega). \end{array} \end{equation} Thus \begin{equation}\label{PropA1bis} \begin{array}{c} \displaystyle \big( A\varphi, \varphi \big)_{L^2_\mu} = \int\limits_\Omega \int\limits_{\mathbb R^d} a(z) {\bm\mu}(T_z \omega) \big( \varphi(T_z \omega) - \varphi(\omega) \big) \varphi(\omega) {\bm\mu}(\omega) dz dP(\omega) \\ \displaystyle = -\frac12 \int\limits_\Omega \int\limits_{\mathbb R^d} a(z) {\bm\mu}(T_z \omega) {\bm\mu}(\omega) \big( \varphi(T_z \omega) - \varphi(\omega) \big)^2 dz dP(\omega)\le 0. \end{array} \end{equation} Since the norms in $L^2(\Omega)$ and $L^2_\mu(\Omega)$ are equivalent, the desired statement follows. \end{proof} Let us consider for any $\delta>0$ the equation \begin{equation}\label{A-delta} \delta \varphi(\omega) - \int\limits_{\mathbb R^d} a (z) {\bm\mu} ( T_z \omega ) ( \varphi (T_z \omega ) - \varphi ( \omega) ) \ dz = r(\omega), \quad r(\omega) = \int\limits_{\mathbb R^d} z a (z) {\bm\mu} (T_z \omega ) \ dz. \end{equation} By Proposition \ref{spectrA} the operator $(\delta I - A)^{-1}$ is bounded, hence there exists a unique solution $\varkappa^\delta (\omega) = -(\delta I - A)^{-1} r (\omega)$ of \eqref{A-delta}. 
For any given $z \in \mathbb R^d$ we set $$ u^\delta(z,\omega) = \varkappa^\delta(T_z \omega) - \varkappa^\delta(\omega). $$ Then \begin{equation}\label{u-delta} u^\delta(z_1 + z_2,\omega) = u^\delta(z_2,\omega) + u^\delta(z_1, T_{z_2} \omega) \quad \forall \ z_1, z_2 \in \mathbb R^d. \end{equation} For any $\xi \in\mathbb R^d$ as an immediate consequence of \eqref{A-delta} we have \begin{equation}\label{A-delta-xi} \delta \varkappa^\delta (T_\xi \omega) - \int\limits_{\mathbb R^d} a (z) {\bm\mu} ( T_{\xi+z} \omega ) ( \varkappa^\delta (T_{\xi+z} \omega ) - \varkappa^\delta ( T_\xi \omega) ) \ dz = \int\limits_{\mathbb R^d} z a (z) {\bm\mu} (T_{\xi+z} \omega ) \ dz. \end{equation} Next we obtain a priori estimates for $\| \varkappa^\delta (T_z \omega) - \varkappa^\delta (\omega)\|_{L^2_M}$ with $dM(z, \omega) = a(z) dz dP(\omega)$. \begin{proposition}\label{boundM} The following estimate holds: \begin{equation}\label{AB} \| u^\delta(z,\omega) \|_{L^2_M} = \| \varkappa^\delta (T_z \omega) - \varkappa^\delta (\omega) \|_{L^2_M} \ \le \ C \end{equation} with a constant $C$ that does not depend on $\delta$. \end{proposition} \begin{proof} Multiplying equation \eqref{A-delta} by $\varphi(\omega)={\bm\mu}(\omega)\varkappa^\delta(\omega)$ and integrating the resulting relation over $\Omega$ yields \begin{equation}\label{Prop2} \begin{array}{c} \displaystyle \delta \int\limits_\Omega \big(\varkappa^\delta(\omega)\big)^2{\bm\mu}(\omega)\, dP(\omega) - \int\limits_{\mathbb R^d} \int\limits_\Omega a (z) {\bm\mu} ( T_z \omega ) \big( \varkappa^\delta (T_z \omega ) - \varkappa^\delta ( \omega) \big) \varkappa^\delta(\omega){\bm\mu}(\omega) \, dz \, dP(\omega) \\ \displaystyle = \int\limits_{\mathbb R^d} \int\limits_\Omega z a(z) \varkappa^\delta(\omega) {\bm\mu}(T_z \omega) {\bm\mu}(\omega) \, dz \, dP(\omega). 
\end{array} \end{equation} The same change of variables as in \eqref{PropA1} results in the relation \begin{equation}\label{Prop2_eq} \int\limits_{\mathbb R^d} \int\limits_\Omega z a(z) \varkappa^\delta (\omega) {\bm\mu}(T_z \omega) {\bm\mu}(\omega) \, dz \, dP(\omega)= - \int\limits_{\mathbb R^d} \int\limits_\Omega z a(z) \varkappa^\delta (T_z \omega) {\bm\mu}(\omega) {\bm\mu}(T_z \omega)\, dz \, dP(\omega), \end{equation} therefore, the right-hand side of \eqref{Prop2} takes the form \begin{equation}\label{RHS} \!\int\limits_{\mathbb R^d}\! \int\limits_\Omega z a(z) \varkappa^\delta(\omega) {\bm\mu}(T_z \omega) {\bm\mu}(\omega) dz dP(\omega)= -\frac12 \int\limits_{\mathbb R^d}\! \int\limits_\Omega z a(z) \big( \varkappa^\delta(T_z \omega) - \varkappa^\delta(\omega) \big) {\bm\mu}(T_z \omega) {\bm\mu}(\omega) dz dP(\omega). \end{equation} Equality \eqref{PropA1bis} implies that the second term on the left-hand side of \eqref{Prop2} can be rearranged in the following way \begin{equation}\label{LHS2} \begin{array}{c} \displaystyle - \int\limits_{\mathbb R^d} \int\limits_\Omega a (z) {\bm\mu} ( T_z \omega ) \big( \varkappa^\delta (T_z \omega ) - \varkappa^\delta ( \omega) \big) \varkappa^\delta(\omega){\bm\mu}(\omega) \, dz \, dP(\omega) \\ \displaystyle = \frac12 \int\limits_{\mathbb R^d} \int\limits_\Omega a(z) {\bm\mu}(T_z \omega) {\bm\mu}(\omega) \big( \varkappa^\delta( T_z \omega) - \varkappa^\delta (\omega) \big)^2 dz \, dP(\omega). 
\end{array} \end{equation} Let us denote $$ J^\delta = \int\limits_{\mathbb R^d} \int\limits_\Omega {\bm\mu}(T_z \omega) {\bm\mu}(\omega) \big( \varkappa^\delta( T_z \omega) - \varkappa^\delta (\omega) \big)^2 a(z) dz \, dP(\omega) = \int\limits_{\mathbb R^d} \int\limits_\Omega {\bm\mu}(T_z \omega) {\bm\mu}(\omega) (u^\delta (z,\omega))^2 dM(z,\omega) $$ and $$ \int\limits_{\mathbb R^d} \int\limits_\Omega \big( \varkappa^\delta( T_z \omega) - \varkappa^\delta (\omega) \big)^2 a(z) dz \, dP(\omega) = \int\limits_{\mathbb R^d} \int\limits_\Omega (u^\delta (z,\omega))^2 dM(z,\omega) = \| u^\delta \|^2_{L^2_M}, $$ where $dM(z, \omega) = a(z) dz dP(\omega)$. Then \begin{equation}\label{B1} J^\delta = \int\limits_{\mathbb R^d} \int\limits_\Omega {\bm\mu}(T_z \omega) {\bm\mu}(\omega) (u^\delta (z,\omega))^2 dM(z,\omega) \ge \alpha_1^2 \| u^\delta \|^2_{L^2_M} \end{equation} and on the other hand, relations \eqref{Prop2} - \eqref{LHS2} imply the following upper bound on $J^\delta$: \begin{equation}\label{B2} J^\delta = \int\limits_{\mathbb R^d} \int\limits_\Omega {\bm\mu}(T_z \omega) {\bm\mu}(\omega) (u^\delta (z,\omega))^2 dM(z,\omega) \le \frac12 \alpha_2^2 \sigma \| u^\delta \|_{L^2_M}. \end{equation} Bounds \eqref{B1} - \eqref{B2} together yield $$ \alpha_1^2 \| u^\delta \|^2_{L^2_M} \le J^\delta \le \frac12 \alpha_2^2 \sigma \| u^\delta \|_{L^2_M}. $$ Consequently we obtain the estimate \eqref{AB} with $C = \frac{\alpha_2^2}{2 \alpha_1^2} \sigma$, and this estimate is uniform in $\delta$. \end{proof} \begin{corollary} For any $\delta>0$ the following upper bound holds: \begin{equation}\label{u-norm} \sqrt{\delta} \, \| \varkappa^\delta \|_{L^2_\mu} \le C. 
\end{equation} \end{corollary} \begin{proof} From \eqref{Prop2} we have \begin{equation}\label{Prop2-norm} \begin{array}{c} \displaystyle \delta \int\limits_\Omega \big(\varkappa^\delta(\omega)\big)^2{\bm\mu}(\omega)\, dP(\omega) =\int\limits_{\mathbb R^d} \int\limits_\Omega a (z) {\bm\mu} ( T_z \omega ) \big( \varkappa^\delta (T_z \omega ) - \varkappa^\delta ( \omega) \big) \varkappa^\delta(\omega){\bm\mu}(\omega) \, dz \, dP(\omega) \\ \displaystyle +\int\limits_{\mathbb R^d} \int\limits_\Omega z a(z) \varkappa^\delta(\omega) {\bm\mu}(T_z \omega) {\bm\mu}(\omega) \, dz \, dP(\omega). \end{array} \end{equation} Then using \eqref{RHS}, \eqref{LHS2}, \eqref{B2} together with the Cauchy-Schwarz inequality and bound \eqref{AB}, we obtain that the expression on the right-hand side of \eqref{Prop2-norm} is uniformly bounded in $\delta$. \end{proof} Proposition \ref{boundM} implies that the family $\{ u^\delta(z, \omega) \}_{\delta>0}$ is bounded in $L^2_M$. Consequently there exists a subsequence $u_j (z, \omega) = u^{\delta_j} (z, \omega)$, $j=1,2, \ldots,$ that converges in the weak topology of $L^2_M$ as $\delta_j \to 0$. We denote this limit by $\theta(z,\omega)$: \begin{equation}\label{theta} w\,\mbox{-}\!\!\lim_{j \to \infty} u_j (z,\omega) = w\,\mbox{-}\!\!\lim_{\delta_j \to 0} \big( \varkappa^{\delta_j}(T_z \omega) - \varkappa^{\delta_j}(\omega) \big) = \theta(z,\omega). \end{equation} Clearly, $\theta(z,\omega) \in L^2_M$, i.e. \begin{equation}\label{thetaLM} \int\limits_{\mathbb R^d} \int\limits_\Omega \theta^2 (z,\omega) a(z) dz dP(\omega) < \infty, \end{equation} and by the Fubini theorem $\theta (z, \omega) \in L^2 (\Omega)$ for almost all $z$ from the support of the function $a(z)$. In addition $\theta(0,\omega) \equiv 0$ and for any $z$ \begin{equation}\label{Etheta} \mathbb{E} \theta(z,\omega) = \lim_{\delta_j \to 0} \Big( \mathbb{E} \varkappa^{\delta_j} (T_z \omega) - \mathbb{E} \varkappa^{\delta_j}(\omega) \Big) = 0. 
\end{equation} \noindent {\sl Step 2.} {\sl Property A}. The function $\theta(z,\omega)$ introduced in \eqref{theta} is not originally defined on the set $\{z\in\mathbb R^d\,:\,a(z)=0\}$. \begin{proposition}\label{statincrements} The function $\theta(z, \omega)$, given by \eqref{theta}, can be extended to $\mathbb R^d\times\Omega$ in such a way that $\theta(z, \omega)$ satisfies relation \eqref{u-delta}, i.e. $\theta(z, \omega)$ has stationary increments: \begin{equation}\label{thetaVIP} \theta(z+\xi,\omega) - \theta (\xi,\omega) = \theta(z, T_\xi \omega) = \theta(z, T_\xi \omega) - \theta(0, T_\xi \omega). \end{equation} \end{proposition} \begin{proof} Applying Mazur's theorem \cite[Section V.1]{Yo65} we conclude that $\theta(z, \omega) = s\,\hbox{-}\!\lim\limits_{n \to \infty} w_n$ is the strong limit of a sequence $w_n$ of convex combinations of elements $u_j(z,\omega) = u^{\delta_j} (z,\omega)$. The strong convergence implies that there exists a subsequence of $\{w_n \}$ that converges a.s. to the same limit $\theta(z, \omega)$: $$ \lim\limits_{n_k \to \infty} w_{n_k} (z, \omega) = \theta(z, \omega) \quad \mbox{for a.e. } \; z \; \mbox{ and a.e.} \; \omega. $$ Since equality \eqref{u-delta} holds for all $u_j$, it also holds for any convex linear combination $w_n$ of $u_j$: \begin{equation}\label{wn} w_n (z_1 + z_2,\omega) = w_n(z_2,\omega) + w_n (z_1, T_{z_2} \omega) \quad \forall \ n. \end{equation} Thus taking the subsequence $\{w_{n_k} \}$ in equality \eqref{wn} and passing to the point-wise limit $n_k \to \infty$ in any term of this equality we obtain \eqref{thetaVIP} first only for such $z_1, z_2$ that $z_1, z_2, z_1+ z_2$ belong to $\mathrm{supp}(a)$. Then we extend function $\theta(z, \omega)$ to a.e. $z \in \mathbb R^d$ using relation \eqref{thetaVIP}: \begin{equation}\label{lim_sh_inv} \theta(z_1 + z_2, \omega) = \theta(z_2, \omega) + \theta(z_1, T_{z_2} \omega). 
\end{equation} Observe that this extension is well-defined because relation \eqref{thetaVIP} holds on the support of $a$.\\[1.5mm] Let us show that $\theta(z,\omega)$ is defined for all $z\in\mathbb R^d$. To this end we observe that, due to the properties of the dynamical system $T_z$, the function $\theta(z_1,T_{z_2}\omega)$ is a well-defined measurable function of $z_1$ and $\omega$ for all $z_2\in\mathbb R^d$. The function $\theta(z_1+z_2,\omega)$ possesses the same property due to its particular structure. Then according to \eqref{lim_sh_inv} the function $\theta(z_2, \omega)$ is defined for all $z\in\mathbb R^d$. \end{proof} Denote $\zeta_z (\xi, \omega)= \theta(z+\xi,\omega) - \theta (\xi,\omega) $, then for $z\in\mathbb R^d$ relation \eqref{thetaVIP} yields \begin{equation}\label{thetaVIPbis} \zeta_z (\xi, \omega) = \zeta_z(0, T_\xi \omega) , \end{equation} i.e. for all $z\in\mathbb R^d$ the field $\zeta_z(\xi,\omega)$ is statistically homogeneous in $\xi$, and \begin{equation}\label{zetatheta} \zeta_z(0, \omega) = \theta(z, \omega). \end{equation} Thus by \eqref{theta}, \eqref{thetaVIP} -- \eqref{thetaVIPbis} the random function $\theta(z,\omega)$ is not stationary, but its increments $\zeta_z(\xi, \omega) = \theta (z+\xi, \omega ) - \theta (\xi, \omega)$ form a stationary field for any given $z$. \noindent {\sl Step 3.} At this step we show that $\theta$ satisfies equation \eqref{korrkappa1}.\\ Let us prove now that $\theta(z,\omega)$ defined by \eqref{theta} is a solution of equation \eqref{korr1} (or \eqref{korrkappa1}). To this end for an arbitrary function $\psi(\omega) \in L^2(\Omega)$ we multiply equality \eqref{A-delta-xi} by a function $\psi(\omega){\bm\mu}(\omega)$ and integrate the resulting relation over $\Omega$, then we have \begin{equation}\label{Solution} \begin{array}{c} \displaystyle \delta \int\limits_\Omega \varkappa^\delta(T_\xi \omega) \psi(\omega) {\bm\mu}(\omega)\, dP(\omega) \!=\! 
\int\limits_{\mathbb R^d} \int\limits_\Omega a (z) {\bm\mu} ( T_{\xi+z} \omega ) \big( \varkappa^\delta (T_{\xi+z} \omega ) - \varkappa^\delta (T_\xi \omega) \big) dz \psi(\omega) {\bm\mu}(\omega) dP(\omega) \\ \displaystyle +\int\limits_{\mathbb R^d} \int\limits_\Omega z a(z) {\bm\mu}(T_{\xi+z} \omega) dz \, \psi(\omega) {\bm\mu}(\omega) \, dP(\omega). \end{array} \end{equation} By estimate \eqref{u-norm} and the Cauchy-Schwarz inequality for any $\psi \in L^2(\Omega)$ we get \begin{equation}\label{ud-norm} \delta \int\limits_\Omega \varkappa^\delta(T_\xi \omega) \psi(\omega) {\bm\mu}(\omega)\, dP(\omega) \to 0 \quad \mbox{as } \; \delta \to 0. \end{equation} Passing to the limit $\delta \to 0$ in equation \eqref{Solution} and taking into account \eqref{theta} and \eqref{ud-norm}, we obtain that for a.e. $\omega$ the function $\theta(z,T_\xi \omega)$ satisfies the equation \begin{equation*}\label{A-delta-xibis} \int\limits_{\mathbb R^d} a (z) {\bm\mu} ( T_{\xi+z} \omega ) \theta(z, T_\xi \omega) \ dz = - \int\limits_{\mathbb R^d} z a (z) {\bm\mu} (T_{\xi+z} \omega ) \ dz. \end{equation*} Using \eqref{thetaVIP} we get after the change of variables $z \to -z$ \begin{equation}\label{theta-xi-z} -\int\limits_{\mathbb R^d} a (z) {\bm\mu} ( T_{\xi-z} \omega ) ( \theta (\xi-z, \omega ) - \theta ( \xi, \omega) ) \ dz + \int\limits_{\mathbb R^d} z a (z) {\bm\mu} (T_{\xi-z} \omega ) \ dz =0, \end{equation} and it is the same as \eqref{korr1}. Thus we have proved that $\theta(z,\omega)$ is a solution of \eqref{korrkappa1}. \noindent {\sl Step 4}. Property B. Assumption \eqref{add} and inequality \eqref{thetaLM} imply that $$ c_0 \int\limits_{{\bf B}} \int\limits_\Omega \theta^2 (z,\omega) dz dP(\omega) < \int\limits_{\mathbb R^d} \int\limits_\Omega \theta^2 (z,\omega) a(z) dz dP(\omega) < \infty, $$ and by the Fubini theorem we conclude that a.s. \begin{equation}\label{L2B} \int\limits_{{\bf B}} \theta^2 (z,\omega) dz < \infty. 
\end{equation} Thus $\theta(z,\omega) \in L^2({\bf B})$ with $\| \theta (z, \omega) \|_{L^2({\bf B})} = K(\omega)$ for a.e. $\omega$, and ${\mathbb E} (K(\omega))^2< \infty$. \begin{proposition} [Sublinear growth of $\varepsilon\theta(\frac x\varepsilon) $ in $L_{\rm loc}^2(\mathbb R^d)$] \label{1corrector} Denote $\varphi_\varepsilon (z, \omega) = \varepsilon\, \theta \big(\frac z\varepsilon, \omega\big)$. Then a.s. \begin{equation}\label{1corrsmall} \| \varphi_\varepsilon (\cdot, \omega) \|_{L^2(\mathcal{Q})} \ \to \ 0 \quad \mbox{ as } \; \varepsilon \to 0 \end{equation} for any bounded Lipschitz domain $\mathcal{Q}\subset\mathbb R^d$. \end{proposition} \begin{proof} We use in the proof inequality \eqref{L2B} and assume in what follows without loss of generality that ${\bf B}=[0,1]^d$. \begin{lemma}\label{LemmaC} The family of functions $\varphi_\varepsilon (z, \omega) = \varepsilon\, \theta \big(\frac z\varepsilon, \omega\big)$ is bounded and compact in $L^2(Q)$. \end{lemma} \begin{proof} Using change of variables $\frac z\varepsilon = y$ we have $$ \|\varphi_\varepsilon \|^2_{L^2(Q)} = \| \varepsilon \, \theta \big(\frac z\varepsilon, \omega\big) \|^2_{L^2(Q)} = \int\limits_Q \varepsilon^2 \, \theta^2 \big(\frac z\varepsilon, \omega\big) dz = \int\limits_{\varepsilon^{-1} Q} \varepsilon^{d+2} \, \theta^2 (y, \omega) dy $$ $$ = \varepsilon^{d+2} \sum\limits_{j \in \mathbb{Z}_{ Q/\varepsilon}} \ \int\limits_{B_j} \, \theta^2 (y, \omega) dy = \varepsilon^{d+2} \sum\limits_{j \in \mathbb{Z}_{Q/\varepsilon}} \ \int\limits_{B_j} \, (\theta (y, \omega) - \theta(j,\omega) + \theta(j,\omega))^2 dy $$ \begin{equation}\label{L-1} \le {2}\varepsilon^{d+2} \sum\limits_{j \in \mathbb{Z}_{ Q/\varepsilon}} \ \int\limits_{B_j} (\theta (y, \omega) - \theta(j,\omega))^2 dy \ + \ {2}\varepsilon^{d+2} \sum\limits_{j \in \mathbb{Z}_{ Q/\varepsilon}} \theta^2 (j,\omega) \, |B_j|. 
\end{equation} Here $j \in \mathbb{Z}^d \cap \frac1\varepsilon Q = \mathbb{Z}_{ Q/\varepsilon}$, $B_j=j+[0,1)^d$. Then if $y \in B_j$, then $y = j+z, \; z \in {\bf B} = [0,1)^d$, and we can rewrite the first term on the right-hand side of \eqref{L-1} as follows $$ {2}\,\varepsilon^{d+2} \sum\limits_{j \in \mathbb{Z}_{ Q/\varepsilon}} \ \int\limits_{{\bf B}} (\theta (j + z, \omega) - \theta(j,\omega))^2 dz = {2}\,\varepsilon^{d+2} \sum\limits_{j \in \mathbb{Z}_{Q/\varepsilon}} \ \int\limits_{{\bf B}} \theta^2 (z, T_j \omega) dz. $$ Using the fact that $ \theta_B(j,\omega):=\int\limits_{{\bf B}} \theta^2 (z, T_j \omega) dz$ is a stationary field and $\theta(z,\omega) \in L^2({\bf B})$, by the Birkhoff ergodic theorem we obtain that $$ {2}\,\varepsilon^{d} \sum\limits_{j \in \mathbb{Z}_{Q/\varepsilon}} \ \int\limits_{{\bf B}} \theta^2 (z, T_j \omega) dz \ \to \ 2 |Q| \ \mathbb{E} \int\limits_{{\bf B}} \theta^2 (z, \omega) dz<\infty. $$ Consequently, the first term in \eqref{L-1} is vanishing as $\varepsilon \to 0$: \begin{equation}\label{L-2} {2}\varepsilon^{d+2} \sum\limits_{j \in \mathbb{Z}_{Q/\varepsilon}} \ \int\limits_{{\bf B}} \theta^2 (z, T_j \omega) dz \ \to \ 0. \end{equation} Let us prove now that a.s. the second term in \eqref{L-1} is bounded. Denoting $$ \widehat \varphi_\varepsilon (z) =\varepsilon \, \widehat \theta \big(\frac z\varepsilon, \omega\big), $$ where $\widehat \theta$ is a piecewise constant function: $\widehat \theta \big(\frac z\varepsilon,\omega\big) = \theta \big([\frac z\varepsilon],\omega\big) = \theta (j,\omega)$ as $z \in \varepsilon B_j$, the second term in \eqref{L-1} equals to \begin{equation}\label{L-3} {2}\,\varepsilon^{d+2} \sum\limits_{j \in \mathbb{Z}_{Q/\varepsilon}} \theta^2 (j,\omega) = 2 \, \| \varepsilon \, \widehat \theta \big(\frac z\varepsilon, \omega\big) \|^2_{L^2(Q)} =2\|\widehat \varphi_\varepsilon(z)\|^2_{L^2(Q)}. 
\end{equation} Let us estimate the difference gradient of $ \widehat \varphi_\varepsilon$: $$ \| {\rm grad} \, \widehat \varphi_\varepsilon\|^2_{(L^2(Q))^d} = \varepsilon^2 \int\limits_Q \sum_{k=1}^d \frac{\big( \theta\big([\frac1\varepsilon(z+\varepsilon e_k)], \omega\big) - \theta\big([\frac z\varepsilon],\omega\big) \big)^2}{\varepsilon^2} \, dz $$ $$ = \int\limits_Q \sum_{k=1}^d \big(\theta\big(\big[\frac z\varepsilon\big] + e_k, \omega\big) - \theta\big(\big[\frac z\varepsilon\big],\omega\big) \big)^2 \, dz = \varepsilon^d \sum_{k=1}^d \sum\limits_{j \in \mathbb{Z}_{Q/\varepsilon}} \big(\theta(j+ e_k, \omega) - \theta(j,\omega) \big)^2. $$ But $\theta(j+ e_k, \omega) - \theta(j,\omega) = \theta(e_k, T_j \omega)$ is stationary for any given $e_k$, thus \begin{equation}\label{L-4} \| {\rm grad} \, \widehat \varphi_\varepsilon\|^2_{(L^2(Q))^d} = \varepsilon^d \sum_{k=1}^d \sum\limits_{j \in \mathbb{Z}_{Q/\varepsilon}} \big(\theta(j+ e_k, \omega) - \theta(j,\omega) \big)^2 \ \to \ |Q| \sum_{k=1}^d C_k, \end{equation} where $C_k = \mathbb{E} \theta^2 (e_k, \omega)$. Next we prove that a.s. the following estimate holds: \begin{equation}\label{L-5} \bar \theta_\varepsilon (\omega) = \int\limits_Q \widehat \varphi_\varepsilon (z, \omega) dz = \varepsilon^d \sum\limits_{j \in \mathbb{Z}_{ Q/\varepsilon}} \varepsilon \, \theta(j,\omega) \le \widetilde C(\omega). \end{equation} We apply the induction and start with $d=1$. 
Using stationarity of $\theta(j+1,\omega) - \theta(j,\omega)$ we have by the ergodic theorem $$ \varepsilon^2 \, \Big| \sum\limits_{j \in \mathbb{Z}_{Q/\varepsilon}} \theta(j,\omega) \Big| \le \varepsilon^2 \, \sum\limits_{j \in \mathbb{Z}_{Q/\varepsilon}} \sum_{k=0}^{j-1} |\theta(k+1,\omega) - \theta(k,\omega) | $$ $$ \le \varepsilon^2 \, \sum\limits_{j \in \mathbb{Z}_{Q/\varepsilon}} \sum\limits_{k \in \mathbb{Z}_{Q/\varepsilon}} |\theta(k+1,\omega) - \theta(k,\omega) | = \varepsilon^2\frac{|Q|}\varepsilon \sum\limits_{k \in \mathbb{Z}_{Q/\varepsilon}} |\theta(e_1, T_k\omega) | \ \to \ |Q|^2 \mathbb{E} |\theta (e_1, \omega)| = \bar C_1. $$ Thus $$ \overline{\lim\limits_{\varepsilon \to 0}}\ \varepsilon^2 \, \Big| \sum\limits_{j \in \mathbb{Z}_{Q/\varepsilon}} \theta(j,\omega) \Big| \le \bar C_1, $$ and this implies that for a.e. $\omega$ \begin{equation}\label{L-5A} \sup_\varepsilon \Big| \varepsilon^2 \, \sum\limits_{j \in \mathbb{Z}_{Q/\varepsilon}} \theta(j,\omega) \Big| \le \widetilde C_1(\omega), \end{equation} where the constant $\widetilde C_1 (\omega)$ depends only on $\omega$. Let us show how to derive the required upper bound in the dimension $d=2$ using \eqref{L-5A}. In this case $j~\in~\mathbb{Z}_{Q/\varepsilon}, \ j=(j_1, j_2)$, and we assume without loss of generality that $Q \subset [-q, q]^2$. Then $$ \theta ((j_1, j_2), \omega) = \sum_{k=0}^{j_2 -1} \big( \theta ((j_1, k+1), \omega) - \theta ((j_1, k), \omega) \big) \ + \ \theta ((j_1, 0), \omega), $$ and for any $j=(j_1, j_2) \in \mathbb{Z}_{Q/\varepsilon}$ we get $$ | \theta ((j_1, j_2), \omega)| \le \sum_{k= - q/\varepsilon}^{q/\varepsilon} \big| \theta ((j_1, k+1), \omega) - \theta ((j_1, k), \omega) \big| \ + \ |\theta ((j_1, 0), \omega)|. 
$$ Using \eqref{L-5A} and the ergodic property of the field $| \theta (e_2, T_j\omega)|$ we obtain the following upper bound $$ \varepsilon^3 \, \Big| \sum\limits_{(j_1, j_2) \in \mathbb{Z}_{Q/\varepsilon}} \theta ((j_1, j_2), \omega) \Big| \le \varepsilon^3 \sum_{j_1= - q/\varepsilon}^{q/\varepsilon} \frac{2q}\varepsilon \sum_{k= - q/\varepsilon}^{q/\varepsilon} | \theta (e_2, T_{(j_1, k)} \omega)| \ + \ \varepsilon^3 \sum_{j_1=- q/\varepsilon}^{q/\varepsilon} \frac{2q}\varepsilon |\theta ((j_1, 0), \omega)| $$ $$ = 2q\varepsilon^2 \sum\limits_{(j_1, k) \in \mathbb{Z}_{Q/\varepsilon}} | \theta (e_2, T_{(j_1, k)} \omega)| + 2q\varepsilon^2 \sum_{j_1=- q/\varepsilon}^{q/\varepsilon} |\theta ((j_1, 0), \omega)| \le \widetilde C_2(\omega) + 2q \widetilde C_1(\omega), $$ where $2q$ is the 1-d volume of slices of $Q$ that are orthogonal to $e_1$. The case of $d>2$ is considered in the same way. Applying the standard discrete Poincar\'e inequality or the Poincar\'e inequality for piece-wise linear approximations of discrete functions we obtain from \eqref{L-4} - \eqref{L-5} that a.s. \begin{equation}\label{L-6} \| \widehat \varphi_\varepsilon \|^2_{L^2(Q)} \le g_1 \Big(\int\limits_Q \widehat \varphi_\varepsilon (z, \omega) dz \Big)^2 + g_2 \| {\rm grad} \, \widehat \varphi_\varepsilon\|^2_{(L^2(Q))^d} \le K(\omega), \end{equation} where the constants $g_1, \; g_2$, and $K(\omega)$ do not depend on $\varepsilon$. Thus using the same piece-wise linear approximations and considering the compactness of embedding of $H^1(Q)$ into $L^2(Q)$ we derive from \eqref{L-4} and \eqref{L-6} that the set of functions $\{ \widehat \varphi_\varepsilon \}$ is compact in $L^2(Q)$. 
As follows from \eqref{L-1} -- \eqref{L-2} $$ \varphi_\varepsilon = \widehat \varphi_\varepsilon + \breve{ \varphi}_\varepsilon, \quad \mbox{where } \; \breve{ \varphi}_\varepsilon(x) = \varepsilon \big(\theta\big(\frac x\varepsilon\big) - \widehat \theta\big(\frac x\varepsilon\big)\big), \quad \| \breve{ \varphi_\varepsilon} \|_{L^2(Q)} \to 0 \; (\varepsilon \to 0). $$ This together with compactness of $\{ \widehat \varphi_\varepsilon \}$ implies the compactness of the family $\{ \varphi_\varepsilon \}$. Lemma is proved. \end{proof} Next we show that any limit point of the family $\{\varphi_\varepsilon\}$ as $\varepsilon\to0$ is a constant function. \begin{lemma}\label{Prop_constfun} Let $\{ \varphi_\varepsilon \}$ converge for a subsequence to $\varphi_0$ in $L^2(Q)$. Then $\varphi_0=const$. \end{lemma} \begin{proof} According to \cite{LadSol} the set $\{\mathrm{div}\phi\,:\,\phi\in (C_0^\infty(Q))^d\}$ is dense in the subspace of functions from $L^2(Q)$ with zero average. It suffices to show that \begin{equation}\label{ortog_con} \int\limits_Q \mathrm{div}\phi(x) \varphi_\varepsilon(x)\,dx\longrightarrow 0, \ \ \hbox{as }\varepsilon\to0, \end{equation} for any $\phi=(\phi^1,\,\phi^2,\ldots,\phi^d)\in (C_0^\infty(Q))^d$. Clearly, $$ \frac 1\varepsilon(\phi^j(x+\varepsilon e_j)-\phi^j(x))=\partial_{x_j}\phi^j(x)+\varepsilon\upsilon_\varepsilon, $$ where $\|\upsilon_\varepsilon\|_{L^\infty(Q)}\leq C$. Then, for sufficiently small $\varepsilon$, we have $$ \int\limits_Q \mathrm{div}\phi(x) \varphi_\varepsilon(x)\,dx=\int\limits_Q (\phi^j(x+\varepsilon e_j)-\phi^j(x)) \theta\big(\frac x\varepsilon,\omega\big)\,dx\,+\,o(1) $$ $$ =\int\limits_Q \phi^j(x)\big(\theta\big(\frac x\varepsilon-e_j,\omega\big)-\theta\big(\frac x\varepsilon,\omega\big)\big)\,dx\,+\,o(1), $$ where $o(1)$ tends to zero as $\varepsilon\to0$ by Lemma \ref{LemmaC}. 
Since $\theta(z-e_j,\omega)-\theta(z,\omega)$ is a stationary function, by the Birkhoff ergodic theorem the integral on the right-hand side converges to zero a.s. as $\varepsilon\to 0$, and the desired statement follows. \end{proof} Our next goal is to show that almost surely the limit relation in \eqref{1corrsmall} holds. By Lemma \ref{LemmaC} the constants $\varepsilon c^\varepsilon$ with $c^\varepsilon$ defined in \eqref{hi} are a.s. bounded uniformly in $\varepsilon$, that is \begin{equation}\label{co_bou} |\varepsilon c^\varepsilon|\leq K(\omega) \end{equation} for all sufficiently small $\varepsilon>0$.\\ Consider a convergent subsequence $\{\varphi_{\varepsilon_n}\}_{n=1}^\infty$. By Lemma \ref{Prop_constfun} the limit function is a constant; denote this constant by $\varphi_0$. Assume that $\varphi_0\not=0$. Then $$ \varphi_{\varepsilon_n}(z)=\varphi_0+\rho_{\varepsilon_n}(z), $$ where $\|\rho_{\varepsilon_n}\|_{L^2({Q})}\to0$ as $\varepsilon_n\to0$. Clearly, we have $$ \varphi_{2\varepsilon_n}(z)=2\varepsilon_n\theta\Big(\frac z{2\varepsilon_n}\Big)=2\varepsilon_n\theta\Big(\frac{z/2}{\varepsilon_n}\Big) =2\varphi_0+2\rho_{\varepsilon_n}\Big(\frac{z}{2}\Big)\to 2\varphi_0, $$ because $\|\rho_{\varepsilon_n}(\cdot/2)\|_{L^2({Q})}\to 0$ as $\varepsilon_n\to0$. Similarly, for any $M\in \mathbb Z^+$ we have $$ \varphi_{M\varepsilon_n}(z)\,\to\, M\varphi_0 \qquad \hbox{in }L^2({Q}). $$ Choosing $M$ in such a way that $M|\varphi_0|> K(\omega)$ we arrive at a contradiction with \eqref{co_bou}. Therefore, $\varphi_0=0$ for any convergent subsequence. This yields the desired convergence in \eqref{1corrsmall} and completes the proof of Proposition \ref{1corrector}. \end{proof} \noindent {\sl Step 5}. Uniqueness of $\theta$. 
\begin{proposition} [Uniqueness]\label{uniqueness} Problem \eqref{korrkappa1} has a unique up to an additive constant solution $\theta(z,\omega)$, $\theta \in L^2_M$, with statistically homogeneous increments such that \eqref{1corrsmall} holds true. \end{proposition} \begin{proof} Consider two arbitrary solutions $\theta_1(z,\omega)$ and $\theta_2(z,\omega)$ of problem \eqref{korrkappa1}. Then the difference $\Delta (z,\omega)=\theta_1(z,\omega)-\theta_2(z,\omega)$ satisfies the equation \begin{equation}\label{1A} \int\limits_{\mathbb R^d} a (z) \mu ( \xi+ z, \omega ) \big(\Delta (\xi+z,\omega ) - \Delta(\xi, \omega) \big) \ dz =0 \end{equation} for a.e. $\omega$ and for all $\xi \in \mathbb R^d$. Let us remark that the function $\Delta (z,\omega)$ inherits properties {\bf A)} and {\bf B)} of $\theta_1(z,\omega)$ and $\theta_2(z,\omega)$. Consider a cut-off function $ \varphi (\frac{|\xi|}{R})$ parameterized by $R>0$, where $\varphi(r)$, $r\in\mathbb R$, is a function defined by $$ \varphi(r) = \left\{ \begin{array}{c} 1, \quad r \le 1, \\ 2 - r, \quad 1<r<2, \\ 0, \quad r \ge 2. \end{array} \right. $$ For any $R>0$, multiplying equation \eqref{1A} by $\mu(\xi, \omega) \Delta (\xi, \omega ) \varphi (\frac{|\xi|}{R})$ and integrating the resulting relation in $\xi$ over $ \mathbb R^d$, we obtain the following equality \begin{equation}\label{1B} \int\limits_{\mathbb R^d} \int\limits_{\mathbb R^d} a (z) \mu ( \xi+ z, \omega ) \mu (\xi, \omega ) \big(\Delta (\xi+z,\omega ) - \Delta(\xi, \omega) \big) \Delta(\xi, \omega) \varphi (\frac{|\xi|}{R}) \, dz \, d \xi =0. \end{equation} Using the relation $a(-z)=a(z)$, after change of variables $z \to -z, \ \xi - z = \xi'$, we get \begin{equation}\label{2B} \int\limits_{\mathbb R^d} \int\limits_{\mathbb R^d} a (z) \mu ( \xi'+ z, \omega ) \mu (\xi', \omega ) \big(\Delta (\xi',\omega ) - \Delta(\xi'+z, \omega) \big) \Delta(\xi'+z, \omega) \varphi (\frac{|\xi'+z|}{R}) \, dz \, d \xi' =0. 
\end{equation} Renaming $\xi'$ back to $\xi$ in the last equation and taking the sum of \eqref{1B} and \eqref{2B} we obtain $$ \int\limits_{\mathbb R^d} \int\limits_{\mathbb R^d} a (z) \mu ( \xi+ z, \omega ) \mu (\xi, \omega ) \big(\Delta (\xi+z,\omega ) - \Delta(\xi, \omega) \big) \Big( \Delta(\xi+z, \omega) \varphi (\frac{|\xi+z|}{R}) - \Delta(\xi, \omega) \varphi (\frac{|\xi|}{R}) \Big) dz \, d \xi $$ $$ = \int\limits_{\mathbb R^d} \int\limits_{\mathbb R^d} a (z) \mu ( \xi+ z, \omega ) \mu (\xi, \omega ) \Big(\Delta (\xi+z,\omega ) - \Delta(\xi, \omega) \Big)^2 \varphi (\frac{|\xi|}{R}) \, dz \, d \xi $$ $$ + \int\limits_{\mathbb R^d} \int\limits_{\mathbb R^d} a (z) \mu ( \xi+ z, \omega ) \mu (\xi, \omega ) \big(\Delta (\xi+z,\omega ) - \Delta(\xi, \omega) \big) \Delta(\xi+z, \omega) \big( \varphi (\frac{|\xi+z|}{R}) - \varphi (\frac{|\xi|}{R}) \big) dz \, d \xi $$ \begin{equation}\label{2C} = J_1^R \ + \ J_2^R = 0. \end{equation} Letting $R=\varepsilon^{-1}$, we first estimate the contribution of $J_2^R $. \begin{lemma}\label{J2} The following limit relation holds a.s.: \begin{equation}\label{3A} \frac{1}{R^d} |J_2^R| \ \to \ 0 \quad \mbox{ as } \; R \to \infty. \end{equation} \end{lemma} \begin{proof} Denote $\Delta_z (T_\xi \omega ) = \Delta (\xi+z,\omega ) - \Delta(\xi, \omega)$, then $\Delta_z (T_\xi \omega ) $ is stationary in $\xi$ for any given $z$. We consider separately the integration over $|\xi| > 3R$ and $|\xi| \le 3R$ in the integral $J_2^R$: $$ J_2^R = \int\limits_{\mathbb R^d} \int\limits_{|\xi|>3R} a (z) \mu ( \xi+ z, \omega ) \mu (\xi, \omega ) \Delta_z(T_\xi \omega) \Delta(\xi+z, \omega) \big( \varphi (\frac{|\xi+z|}{R}) - \varphi (\frac{|\xi|}{R}) \big) dz \, d \xi $$ $$ + \int\limits_{\mathbb R^d} \int\limits_{|\xi|\le 3R} a (z) \mu ( \xi+ z, \omega ) \mu (\xi, \omega ) \Delta_z(T_\xi \omega) \Delta(\xi+z, \omega) \big( \varphi (\frac{|\xi+z|}{R}) - \varphi (\frac{|\xi|}{R}) \big) dz \, d \xi. 
$$ If $|\xi| > 3R$, then $\varphi (\frac{|\xi|}{R}) = 0$. Also, $\varphi (\frac{|\xi+z|}{R})=0$ if $|\xi| > 3R$ and $|z|\le R$. Then we obtain the following upper bound $$ \frac{1}{R^d} \int\limits_{\mathbb R^d} \int\limits_{|\xi|> 3R} a (z) \mu ( \xi+ z, \omega ) \mu (\xi, \omega ) |\Delta_z (T_\xi \omega) | |\Delta(\xi+z, \omega)| \varphi (\frac{|\xi+z|}{R}) d\xi \, dz $$ \begin{equation}\label{estimm} \le \frac{\alpha_2^2 }{R^d} \int\limits_{|\eta|\le 2R} \Big( \int\limits_{|z|>R} |z| a (z) |\Delta_z (T_{\eta-z} \omega) |\, dz \Big) \frac1R |\Delta(\eta, \omega)| \varphi (\frac{|\eta|}{R}) d\eta \end{equation} $$ \le \frac{\alpha_2^2 }{R^d} \int\limits_{|\eta|\le 2R} \phi (T_\eta \omega) \frac1R |\Delta(\eta, \omega)| \varphi (\frac{|\eta|}{R}) \, d\eta, $$ where $\eta=\xi+z$, $$ \phi (T_\eta \omega) = \int\limits_{\mathbb R^d} |z| a (z) |\Delta_z (T_{\eta -z} \omega)| \, dz, $$ and in the first inequality we have used the fact that $1< \frac{|z|}{R}$ if $|z|>R$. Since $\Delta_z(\omega) \in L^2_M$, we have $\phi(\omega) \in L^2(\Omega)$. Applying the Cauchy-Schwarz inequality to the last integral in \eqref{estimm} and recalling the relation $R=\varepsilon^{-1}$ we have \begin{equation}\label{5B} \frac{\alpha_2^2 }{R^d} \int\limits_{|\eta|\le 2R} \phi (T_\eta \omega) \frac{|\Delta(\eta, \omega)|}{R} \varphi (\frac{|\eta|}{R}) \, d \eta \le \alpha_2^2 \Big( \frac{1}{R^d} \int\limits_{|\eta|\le 2R} \phi^2 (T_\eta \omega) d\eta \Big)^{\frac12} \Big( \frac{1}{R^d} \int\limits_{|\eta|\le 2R} \big(\frac{|\Delta(\eta, \omega)|}{R} \big)^2 d \eta\Big)^{\frac12} \to 0, \end{equation} as $R \to \infty$, because the first integral on the right hand side is bounded due to the stationarity of $\phi (T_\eta \omega)$, and the second integral tends to 0 due to the sublinear growth of $\Delta(\eta, \omega)$, see \eqref{1corrsmall}. 
If $|\xi| \le 3R$, then the corresponding part of $R^{-d} J_2^R$ can be rewritten as a sum of two terms $$ \frac{1}{R^d} \int\limits_{\mathbb R^d} \int\limits_{|\xi| \le 3R } a (z) \mu ( \xi+ z, \omega ) \mu (\xi, \omega ) \Delta_z(T_\xi \omega) (\Delta(\xi+z, \omega) - \Delta(\xi, \omega)) \big( \varphi (\frac{|\xi+z|}{R}) - \varphi (\frac{|\xi|}{R}) \big) d\xi \, dz $$ $$ + \frac{1}{R^d}\int\limits_{\mathbb R^d} \int\limits_{| \xi | \le 3R} a (z) \mu ( \xi+ z, \omega ) \mu (\xi, \omega ) \Delta_z(T_\xi \omega) \Delta(\xi, \omega) \big( \varphi (\frac{|\xi+z|}{R}) - \varphi (\frac{|\xi|}{R}) \big) d\xi \, dz = I_1 + I_2. $$ We estimate $|I_1|$ and $|I_2|$ separately. Using the inequality $|\varphi( \frac{|x|}{R}) - \varphi (\frac{|y|}{R}) | \le \frac{|x-y|}{R}$ by the same arguments as above we get $$ |I_2| \le \frac{\alpha_2^2}{R^d} \int\limits_{\mathbb R^d} \int\limits_{|\xi| \le 3R} a (z) |\Delta_z(T_\xi \omega)| |\Delta(\xi, \omega)| \frac{|z|}{R} d\xi \, d z $$ $$ \le \alpha_2^2 \Big( \frac{1}{R^d} \int\limits_{|\xi|\le 3R} \phi^2 (T_\xi \omega) d\xi \Big)^{\frac12} \Big( \frac{1}{R^d} \int\limits_{|\xi|\le 3R} \big(\frac{|\Delta(\xi, \omega)|}{R} \big)^2 d \xi\Big)^{\frac12} \to 0. $$ To estimate $I_1$ we divide the area of integration in $z$ into two parts: $|z|< \sqrt{R}$ and $|z| \ge \sqrt{R}$, and first consider the integral $$ I_1^{(<)} = \frac{1}{R^d} \int\limits_{|z| < \sqrt{R}} \int\limits_{|\xi| \le 3R } a (z) \mu ( \xi+ z, \omega ) \mu (\xi, \omega ) \Delta_z^2(T_\xi \omega) \big( \varphi (\frac{|\xi+z|}{R}) - \varphi (\frac{|\xi|}{R}) \big) d\xi \, dz $$ Since $|z|\leq\sqrt{R}$, we have $|\varphi( \frac{|\xi +z|}{R}) - \varphi (\frac{|\xi|}{R}) | \le \frac{1}{\sqrt{R}}$. 
Therefore, $$ |I_1^{(<)}| \le \alpha_2^2 \frac{1}{\sqrt{R}} \ \frac{1}{R^d} \int\limits_{|\xi| \le 3R } \int\limits_{\mathbb{R}^d} a (z) \Delta_z^2(T_\xi \omega) dz \, d \xi \to 0, $$ as $R \to \infty$; here we have used the fact that $$ \frac{1}{R^d} \int\limits_{|\xi| \le 3R } \int\limits_{\mathbb{R}^d} a (z) \Delta_z^2(T_\xi \omega) dz \, d \xi \to c_0 \mathbb{E} \Big( \int\limits_{\mathbb{R}^d} a (z) \Delta_z^2(\omega) dz \Big) $$ with a constant $c_0$ equal to the volume of a ball of radius $3$ in $\mathbb R^d$. We turn to the second integral $$ I_1^{(>)} = \frac{1}{R^d} \int\limits_{|z| \ge \sqrt{R}} \int\limits_{|\xi| \le 3R } a (z) \mu ( \xi+ z, \omega ) \mu (\xi, \omega ) \Delta_z^2(T_\xi \omega) \big( \varphi (\frac{|\xi+z|}{R}) - \varphi (\frac{|\xi|}{R}) \big) d\xi \, dz. $$ Considering the inequality $|\varphi( \frac{|\xi +z|}{R}) - \varphi (\frac{|\xi|}{R}) | \le 1$ we obtain \begin{equation}\label{7A} |I_1^{(>)}| \le \alpha_2^2 \frac{1}{R^d} \int\limits_{|\xi| \le 3R } \int\limits_{|z| \ge \sqrt{R}} a (z) \Delta_z^2(T_\xi \omega) \, dz \, d \xi. \end{equation} Denote by $\psi_{R}(\omega)$ the stationary function defined by $$ \psi_{R}(\omega) = \int\limits_{|z| \ge \sqrt{R}} a (z) \Delta_z^2( \omega) \, dz. $$ Since $ \Delta_z( \omega) \in L^2_M$, then \begin{equation}\label{5A} \mathbb{E} \psi_{R}(\omega) \to 0 \quad \mbox{ as } \; R \to \infty. \end{equation} Moreover, function $\psi_{R}(\omega)$ is a.s. decreasing in $R$. Using the ergodic theorem, \eqref{7A} and \eqref{5A}, we conclude that $ |I_1^{(>)}| $ tends to zero as $R \to \infty$. Thus we have proved that $|I_1| +|I_2| \to 0 $ as $R \to \infty$ a.s. Together with \eqref{5B} this implies \eqref{3A}. \end{proof} We proceed with the term $J_1^R$ in \eqref{2C}: $$ J_1^R = \int\limits_{\mathbb R^d} \int\limits_{\mathbb R^d} a (z) \mu ( \xi+ z, \omega ) \mu (\xi, \omega ) \Delta_z^2 (\xi,\omega ) \varphi (\frac{|\xi|}{R}) \, dz \, d \xi. 
$$ Using the ergodic theorem we get as $R \to \infty$ \begin{equation}\label{6A} \frac{1}{R^d} J_1^R = \frac{1}{R^d} \int\limits_{\mathbb R^d} \int\limits_{\mathbb R^d} a (z) \mu ( \xi+ z, \omega ) \mu (\xi, \omega ) \Delta_z^2 (\xi,\omega ) \varphi (\frac{|\xi|}{R}) \, dz \, d \xi \to c_1 \mathbb{E} \int\limits_{\mathbb R^d} a (z) {\bm\mu} ( T_z \omega ) {\bm\mu} (\omega ) \Delta_z^2 (\omega )dz, \end{equation} where $c_1=\int_{\mathbb R^d}\varphi(|\xi|)d\xi>0$. Consequently from \eqref{2C} - \eqref{3A} it follows that \begin{equation}\label{6B} \frac{1}{R^d} |J_1^R| \ \to \ 0 \quad \mbox{ as } \; R \to \infty, \end{equation} and together with \eqref{6A} this implies that \begin{equation}\label{6C} \mathbb{E} \int\limits_{\mathbb R^d} a (z) {\bm\mu}( T_z \omega ) {\bm\mu} (\omega ) \Delta_z^2 (\omega )dz = 0. \end{equation} Using condition \eqref{add} we conclude from \eqref{6C} that $\Delta_z (\omega) \equiv 0$ for a.e. $z$ and a.e. $\omega$, and hence $\theta_1(z,\omega)=\theta_2(z,\omega)$. Proposition is proved. \end{proof} ${ }$\\[-0.8cm] This completes the proof of Theorem \ref{t_corrector}.\end{proof} \section{Additional terms of the asymptotic expansion}\label{s_addterms} Recall that $I_0^\varepsilon$ stands for the sum of all terms of order $\varepsilon^{0}$ in (\ref{K2_1}) and that $u_0\in C_0^\infty(\mathbb R^d)$. Our first goal is to determine the coefficients of the effective elliptic operator $\hat L$. To this end we consider the following scalar product of $I_0^\varepsilon$ with a function $\varphi \in L^2(\mathbb R^d)$: \begin{equation}\label{hatK2_1} (I^\varepsilon_0, \varphi) = \int\limits_{\mathbb R^d} \int\limits_{\mathbb R^d} \Big( \frac12 z\otimes z - z \otimes \theta \big(\frac{x}{\varepsilon}-z, \omega \big) \Big) \ a (z) \mu \big( \frac{x}{\varepsilon}, \omega \big) \mu \big( \frac{x}{\varepsilon} -z, \omega \big) \ dz \ \nabla \nabla u_0 (x) \varphi(x) dx. 
\end{equation} After change of variables $x = \varepsilon \eta$ we have \begin{equation}\label{hatK2_2} \begin{array}{l} \displaystyle (I^\varepsilon_0, \varphi) = \varepsilon^d \int\limits_{\mathbb R^d} \int\limits_{\mathbb R^d} \frac12 a (z) \,z\otimes z \, \mu ( \eta, \omega ) \mu ( \eta -z, \omega ) \, dz \, \nabla \nabla u_0 (\varepsilon\eta) \, \varphi (\varepsilon \eta) \, d\eta \\ \displaystyle - \varepsilon^d \int\limits_{\mathbb R^d} \int\limits_{\mathbb R^d} a (z) \,z \otimes \theta (\eta-z, \omega ) \mu ( \eta, \omega ) \mu ( \eta -z, \omega ) \, dz \, \nabla \nabla u_0 (\varepsilon\eta) \, \varphi (\varepsilon \eta) \, d\eta = I^\varepsilon_1(\varphi) - I^\varepsilon_2(\varphi). \end{array} \end{equation} We consider the integrals $I^\varepsilon_1(\varphi)$ and $I^\varepsilon_2(\varphi)$ separately. Since $\int_{\mathbb R^d}|z|^2a(z)\,dz<\infty$, we have $$ \int\limits_{\mathbb R^d} z\otimes z \,a(z) \mu (0,\omega)\mu(-z,\omega)\,dz \in (L^\infty(\Omega))^{d^2}. $$ Therefore, by the Birkhoff ergodic theorem a.s. $$ \int\limits_{\mathbb R^d} z\otimes z\,a(z) \mu (\frac{x}{\varepsilon},\omega)\mu(\frac{x}{\varepsilon}-z,\omega)\,dz \rightharpoonup D_1\quad\hbox{weakly in } \ (L^2_{\rm loc}(\mathbb R^d))^{d^2} $$ with \begin{equation}\label{J_1} D_1 = \int\limits_{\mathbb R^d} \frac12 \, z\otimes z \, a (z) \, E\{ \mu ( 0, \omega ) \mu ( -z, \omega )\} \, dz. \end{equation} Recalling that $u_0\in C_0^\infty(\mathbb R^d)$, we obtain \begin{equation}\label{I_1} I^\varepsilon_1(\varphi)\to \int\limits_{\mathbb R^d}D_1\nabla\nabla u_0(x)\varphi(x)\,dx. \end{equation} The second integral in \eqref{hatK2_2} contains the non-stationary random field $ \theta (z,\omega)$, and we rewrite $I^\varepsilon_2(\varphi)$ as a sum of two terms, such that the first term contains the stationary field $\zeta_z (\eta, \omega)$ and the contribution of the second one is asymptotically negligible. 
In order to estimate the contribution of the second term we construct an additional corrector $u_2^\varepsilon$, see formula \eqref{corr-u2} below.\\ We have \begin{equation}\label{I_2appr} \begin{array}{l} \displaystyle I^\varepsilon_2 (\varphi) = \int\limits_{\mathbb R^d}\! \int\limits_{\mathbb R^d} a (z) z \, \mu (\frac{x}{\varepsilon}, \omega ) \mu (\frac{x}{\varepsilon} -z, \omega ) \theta (\frac{x}{\varepsilon} - z, \omega ) \nabla \nabla u_0(x) \varphi(x) \, d x \, dz \\ \displaystyle = \frac12 \int\limits_{\mathbb R^d}\! \int\limits_{\mathbb R^d} a (z) z \, \mu (\frac{x}{\varepsilon}, \omega ) \mu (\frac{x}{\varepsilon} -z, \omega ) \theta (\frac{x}{\varepsilon} - z, \omega ) \nabla \nabla u_0(x) \varphi(x) \, d x \, dz \\ \displaystyle - \, \frac12 \int\limits_{\mathbb R^d}\! \int\limits_{\mathbb R^d} a (z) z \, \mu (\frac{y}{\varepsilon}, \omega ) \mu (\frac{y}{\varepsilon} -z, \omega ) \theta (\frac{y}{\varepsilon}, \omega ) \nabla \nabla u_0(y - \varepsilon z) \varphi(y-\varepsilon z) \, d y \, dz \\ \displaystyle = \frac12 \int\limits_{\mathbb R^d}\! \int\limits_{\mathbb R^d} \! a (z) z \, \mu (\frac{x}{\varepsilon}, \omega ) \mu (\frac{x}{\varepsilon} -z, \omega ) \Big( \theta (\frac{x}{\varepsilon} - z, \omega ) \nabla \nabla u_0(x) \varphi(x) - \theta (\frac{x}{\varepsilon}, \omega ) \nabla \nabla u_0(x - \varepsilon z) \varphi (x-\varepsilon z)\! \Big) d x dz \\ \displaystyle = \frac12 \int\limits_{\mathbb R^d}\! \int\limits_{\mathbb R^d} a (z) z \, \mu (\frac{x}{\varepsilon}, \omega ) \mu (\frac{x}{\varepsilon} -z, \omega ) \big( \theta (\frac{x}{\varepsilon} - z, \omega ) - \theta (\frac{x}{\varepsilon}, \omega ) \big) \nabla \nabla u_0(x) \varphi(x) d x \, dz \\ \displaystyle + \frac12 \int\limits_{\mathbb R^d}\! 
\int\limits_{\mathbb R^d} a (z) z \, \mu (\frac{x}{\varepsilon}, \omega ) \mu (\frac{x}{\varepsilon} -z, \omega ) \, \theta (\frac{x}{\varepsilon}, \omega ) \big( \nabla \nabla u_0(x) \varphi(x) - \nabla \nabla u_0(x - \varepsilon z) \varphi (x-\varepsilon z) \big) d x \, dz, \end{array} \end{equation} here and in what follows $z\theta(z)\nabla\nabla u_0(x)$ stands for $z^i\theta^j(z)\partial_{x_i}\partial_{x_j}u_0(x)$. The field $\zeta_{-z} (\eta, \omega)= \theta(\eta -z,\omega) - \theta (\eta,\omega)$ is stationary for any given $z$, and \begin{equation}\label{PL1} \int\limits_{\mathbb R^d} a (z) z \otimes \zeta_{-z} (0, \omega) \mu ( 0, \omega ) \mu ( -z, \omega ) \, dz \in (L^2(\Omega))^{d^2}. \end{equation} Indeed, in view of \eqref{thetaLM} and \eqref{zetatheta} by the Cauchy-Schwarz inequality we have $$ \int\limits_{\Omega}\bigg(\int\limits_{\mathbb R^d} |a (z) z \otimes \zeta_{-z} (0, \omega) \mu ( 0, \omega ) \mu ( -z, \omega )| \, dz\bigg)^2 d P(\omega) \le $$ $$ \alpha_2^2 \Big(\int\limits_{\mathbb R^d} a (z) |z|^2 dz \Big) \Big( \int\limits_{\mathbb R^d} \int\limits_{\Omega} a (z) \, |\theta(-z, \omega)|^2 dz d P(\omega) \Big) < \infty. 
$$ Consequently applying the ergodic theorem to the stationary field \eqref{PL1} we obtain for the first integral in \eqref{I_2appr} as $\varepsilon \to 0$ \begin{equation}\label{I2-stationary} \begin{array}{l} \displaystyle \frac12 \int\limits_{\mathbb R^d} \int\limits_{\mathbb R^d} \, a (z) z \zeta_{-z} (\frac{x}{\varepsilon}, \omega ) \mu (\frac{x}{\varepsilon}, \omega ) \mu (\frac{x}{\varepsilon} -z, \omega ) \nabla \nabla u_0(x) \varphi(x) d x \, dz \ \to \\ \displaystyle \frac12 \int\limits_{\mathbb R^d} \int\limits_{\mathbb R^d} a (z) z E\{ \zeta_{-z} (0, \omega) \mu ( 0, \omega ) \mu ( -z, \omega ) \} \nabla \nabla u_0(x) \varphi(x) d x \, dz = \int\limits_{\mathbb R^d} D_2 \, \nabla \nabla u_0 (x) \varphi(x) \, dx, \end{array} \end{equation} where we have used the notation \begin{equation}\label{D_2} D_2 = \frac12 \, \int\limits_{\mathbb R^d} a (z) z \otimes E\{ \zeta_{-z} (0, \omega) \mu ( 0, \omega ) \mu ( -z, \omega )\} \, dz. \end{equation} Denote the last integral on the right-hand side in \eqref{I_2appr} by $J_2^\varepsilon (\varphi)$: \begin{equation}\label{J2eps} J_2^\varepsilon (\varphi) = \frac12 \int\limits_{\mathbb R^d} \int\limits_{\mathbb R^d} \, a (z) z \, \mu (\frac{x}{\varepsilon}, \omega ) \mu (\frac{x}{\varepsilon} -z, \omega ) \, \theta (\frac{x}{\varepsilon}, \omega ) \big( \nabla \nabla u_0(x) \varphi(x) - \nabla \nabla u_0(x - \varepsilon z) \varphi (x-\varepsilon z) \big) d x \, dz \end{equation} and consider this expression as a functional on $L^2(\mathbb R^d)$ acting on function $\varphi$. In order to show that for each $\varepsilon>0$ the functional $J_2^\varepsilon$ is a bounded linear functional on $L^2(\mathbb R^d)$ we represent $J_2^\varepsilon$ as a sum $J_2^\varepsilon=J_2^{1,\varepsilon} +J_2^{2,\varepsilon}+J_2^{3,\varepsilon}$ with $J_2^{1,\varepsilon}$, $J_2^{2,\varepsilon}$ and $J_2^{3,\varepsilon}$ introduced below and estimate each of these functionals separately. By Proposition \ref{1corrector} a.s. 
$ \theta (\frac{x}{\varepsilon},\omega)\in L^2_{\rm loc}(\mathbb R^d)$ for all $\varepsilon>0$. Therefore, $$ J_2^{1,\varepsilon} (\varphi) = \frac12 \int\limits_{\mathbb R^d} \int\limits_{\mathbb R^d} \, a (z) z \, \mu (\frac{x}{\varepsilon}, \omega ) \mu (\frac{x}{\varepsilon} -z, \omega ) \, \theta (\frac{x}{\varepsilon}, \omega ) \nabla \nabla u_0(x) \varphi(x) d x \, dz $$ is a.s. a bounded linear functional on $L^2(\mathbb R^d)$. Similarly, $$ J_2^{2,\varepsilon} (\varphi) = \frac12 \int\limits_{\mathbb R^d} \int\limits_{\mathbb R^d} \, a (z) z \, \mu (\frac{x}{\varepsilon}, \omega ) \mu (\frac{x}{\varepsilon} -z, \omega ) \, \theta (\frac{x}{\varepsilon}-z, \omega ) \nabla \nabla u_0(x-\varepsilon z) \varphi(x-\varepsilon z) d x \, dz $$ is a.s. a bounded linear functional on $L^2(\mathbb R^d)$. Due to \eqref{thetaLM} and by the Birkhoff ergodic theorem the linear functional $$ J_2^{3,\varepsilon} (\varphi) = \frac12 \int\limits_{\mathbb R^d} \int\limits_{\mathbb R^d} \, a (z) z \, \mu (\frac{x}{\varepsilon}, \omega ) \mu (\frac{x}{\varepsilon} -z, \omega ) \, \Big( \theta (\frac{x}{\varepsilon}, \omega )- \theta (\frac{x}{\varepsilon}-z, \omega )\Big) \nabla \nabla u_0(x-\varepsilon z) \varphi(x-\varepsilon z) d x \, dz $$ $$ = \frac12 \int\limits_{\mathbb R^d} \int\limits_{\mathbb R^d} \, a (z) z \, \mu (\frac{x}{\varepsilon}+z, \omega ) \mu (\frac{x}{\varepsilon} , \omega ) \, \Big( \theta (\frac{x}{\varepsilon}+z, \omega )- \theta (\frac{x}{\varepsilon}, \omega )\Big) \nabla \nabla u_0(x) \varphi(x) d x \, dz $$ $$ = \frac12 \int\limits_{\mathbb R^d} \int\limits_{\mathbb R^d} \, a (z) z \, \mu (\frac{x}{\varepsilon}+z, \omega ) \mu (\frac{x}{\varepsilon} , \omega ) \, \theta (z,T_{\frac{x}{\varepsilon}} \omega ) \nabla \nabla u_0(x) \varphi(x) d x \, dz $$ is a.s. bounded in $L^2(\mathbb R^d)$. 
Since $J_2^{\varepsilon} (\varphi) =J_2^{1,\varepsilon} (\varphi)+ J_2^{2,\varepsilon} (\varphi)+ J_2^{3,\varepsilon} (\varphi)$, the desired boundedness of $J_2^{\varepsilon}$ follows. Then by the Riesz theorem for a.e. $\omega$ there exists a function $f_2^\varepsilon = f_2^\varepsilon(u_0) \in L^2(\mathbb R^d)$ such that $J_2^\varepsilon(\varphi) = (f_2^\varepsilon,\varphi)$. We emphasize that here we do not claim that the norm of $J_2^\varepsilon$ admits a uniform in $\varepsilon$ estimate. Next we show that the contribution of $f_2^\varepsilon$ to $w^\varepsilon$ is vanishing. To this end consider the function (additional corrector) \begin{equation}\label{corr-u2} u_2^\varepsilon (x,\omega) = (-L^\varepsilon +m)^{-1} f_2^\varepsilon (x, \omega). \end{equation} \begin{lemma}\label{l_u2small} $\| u_2^\varepsilon\|_{L^2(\mathbb R^d)} \to 0$ as $\varepsilon \to 0$ for a.e. $\omega$. \end{lemma} \begin{proof} Taking $\varphi = u_2^\varepsilon$ we get \begin{equation}\label{L1} ((-L^\varepsilon +m) u_2^\varepsilon, u_2^\varepsilon) = (f_2^\varepsilon, u_2^\varepsilon). \end{equation} Considering \eqref{L_eps} the left-hand side of \eqref{L1} can be rearranged as follows: \begin{equation}\label{L1-LHS} \begin{array}{l} \displaystyle - \frac{1}{\varepsilon^2} \int\limits_{\mathbb R^d} \int\limits_{\mathbb R^d} \, a (z) \, \mu (\frac{x}{\varepsilon}, \omega ) \mu (\frac{x}{\varepsilon} -z, \omega ) ( u_2^\varepsilon (x-\varepsilon z) - u_2^\varepsilon(x)) dz \, u_2^\varepsilon (x) dx + m \int\limits_{\mathbb R^d} (u_2^\varepsilon)^2 (x) dx \\ \displaystyle = \, \frac12 \frac{1}{\varepsilon^2} \int\limits_{\mathbb R^d} \int\limits_{\mathbb R^d} \, a (z) \, \mu (\frac{x}{\varepsilon}, \omega ) \mu (\frac{x}{\varepsilon} -z, \omega ) ( u_2^\varepsilon (x-\varepsilon z) - u_2^\varepsilon(x))^2 dz dx + m \int\limits_{\mathbb R^d} (u_2^\varepsilon)^2 (x) dx. 
\end{array} \end{equation} We denote $$ G_1^2 = \frac{1}{2 \varepsilon^2} \int\limits_{\mathbb R^d} \int\limits_{\mathbb R^d} \, a (z) \, \mu (\frac{x}{\varepsilon}, \omega ) \mu (\frac{x}{\varepsilon} -z, \omega ) ( u_2^\varepsilon (x-\varepsilon z) - u_2^\varepsilon(x))^2 dz dx, \quad G_2^2= m \int\limits_{\mathbb R^d} (u_2^\varepsilon)^2 (x) dx. $$ It follows from \eqref{J2eps} that the right-hand side of \eqref{L1} takes the form \begin{equation}\label{L1-RHS} \begin{array}{l} \displaystyle J_2^\varepsilon (u_2^\varepsilon) = \frac12 \int\limits_{\mathbb R^d} \int\limits_{\mathbb R^d} a (z) z \, \mu (\frac{x}{\varepsilon}, \omega ) \mu (\frac{x}{\varepsilon} -z, \omega ) \, \theta (\frac{x}{\varepsilon}, \omega ) \big( \nabla \nabla u_0(x) u_2^\varepsilon(x) - \nabla \nabla u_0(x - \varepsilon z) u_2^\varepsilon (x-\varepsilon z) \big) d x \, dz \\ \displaystyle = \frac12 \, \int\limits_{\mathbb R^d} \int\limits_{\mathbb R^d} \, a (z) z \, \mu (\frac{x}{\varepsilon}, \omega ) \mu (\frac{x}{\varepsilon} -z, \omega ) \, \theta (\frac{x}{\varepsilon}, \omega ) \nabla \nabla u_0(x) \big( u_2^\varepsilon(x) - u_2^\varepsilon (x-\varepsilon z) \big) d x \, dz \\[6mm] \displaystyle + \frac12 \int\limits_{\mathbb R^d} \int\limits_{\mathbb R^d} a (z) z \, \mu (\frac{x}{\varepsilon}, \omega ) \mu (\frac{x}{\varepsilon} -z, \omega ) \, \theta (\frac{x}{\varepsilon}, \omega ) \big( \nabla \nabla u_0(x) - \nabla \nabla u_0(x - \varepsilon z) \big) u_2^\varepsilon (x-\varepsilon z) d x \, dz =\! \frac12 (I_1 + I_2). \end{array} \end{equation} It is proved in Proposition \ref{1corrector} that a.s. $\|\varepsilon \theta (\frac x\varepsilon,\omega)\|_{L^2(B)}\to 0$ as $\varepsilon\to0$ for any ball $B\subset\mathbb R^d$. 
By the Cauchy-Schwarz inequality we obtain the following upper bound for $I_1$: \begin{equation}\label{L1-RHS-I1} \begin{array}{l} \displaystyle I_1 \le \left( \int\limits_{\mathbb R^d} \int\limits_{\mathbb R^d} \, a (z) \mu (\frac{x}{\varepsilon}, \omega ) \mu (\frac{x}{\varepsilon} -z, \omega ) \big( u_2^\varepsilon(x) - u_2^\varepsilon (x-\varepsilon z) \big)^2 d x \, dz \right)^{1/2} \\ \displaystyle \left( \frac{1}{\varepsilon^2 } \, \int\limits_{\mathbb R^d} \int\limits_{\mathbb R^d} \, a (z)|z|^2 \, \mu (\frac{x}{\varepsilon}, \omega ) \mu (\frac{x}{\varepsilon} -z, \omega ) \, \varepsilon^2 \big|\theta (\frac{x}{\varepsilon}, \omega )\big|^2 (\nabla \nabla u_0(x))^2 d x \, dz \right)^{1/2} \\ \displaystyle \le \frac{1}{\varepsilon} \, o(1) \ \left(\frac12 \int\limits_{\mathbb R^d} \int\limits_{\mathbb R^d} \, a (z) \mu (\frac{x}{\varepsilon}, \omega ) \mu (\frac{x}{\varepsilon} -z, \omega ) \big( u_2^\varepsilon(x) - u_2^\varepsilon (x-\varepsilon z) \big)^2 d x \, dz \right)^{1/2} = G_1 \cdot o(1), \end{array} \end{equation} where $o(1)\to0$ as $\varepsilon\to0$. We turn to the second integral $I_2$. Let $B$ be a ball centered at the origin and such that $\mathrm{supp}(u_0)\subset B$, $\mathrm{dist}(\mathrm{supp}(u_0),\partial B)>1$. Then $$ \Big|\int\limits_{\mathbb R^d} \int\limits_{B} \, a (z) z \, \mu (\frac{x}{\varepsilon}, \omega ) \mu (\frac{x}{\varepsilon} -z, \omega ) \, \theta (\frac{x}{\varepsilon}, \omega ) \big( \nabla \nabla u_0(x) - \nabla \nabla u_0(x - \varepsilon z) \big) u_2^\varepsilon (x-\varepsilon z) d x \, dz\Big| $$ \begin{equation}\label{aaa1} \leq C\int\limits_{\mathbb R^d} \int\limits_{B} \, a (z) |z|^2 \, \big|\varepsilon \theta (\frac{x}{\varepsilon}, \omega )\big|\, | u_2^\varepsilon (x-\varepsilon z)| d x \, dz\le \| u_2^\varepsilon \|_{L^2(\mathbb R^d)} \cdot o(1) = G_2 \cdot o(1). 
\end{equation} The integral over $B^c=\mathbb R^d\setminus B$ can be estimated in the following way: since $\nabla \nabla u_0(x)=0$ for $x\in B^c$, $$ \Big|\int\limits_{\mathbb R^d} \int\limits_{B^c} \, a (z) z \, \mu (\frac{x}{\varepsilon}, \omega ) \mu (\frac{x}{\varepsilon} -z, \omega ) \, \theta (\frac{x}{\varepsilon}, \omega ) \big( \nabla \nabla u_0(x) - \nabla \nabla u_0(x - \varepsilon z) \big) u_2^\varepsilon (x-\varepsilon z) d x \, dz\Big| $$ $$ = \Big|\int\limits_{\mathbb R^d} \int\limits_{B^c} \, a (z) z \, \mu (\frac{x}{\varepsilon}, \omega ) \mu (\frac{x}{\varepsilon} -z, \omega ) \, \theta (\frac{x}{\varepsilon}, \omega ) \nabla \nabla u_0(x - \varepsilon z) u_2^\varepsilon (x-\varepsilon z) d x \, dz\Big| $$ \begin{equation}\label{aaa2} \leq C\int\limits_{|z|\geq \frac1\varepsilon} \int\limits_{B^c} \, a (z) |z| \, \big| \theta (\frac{x}{\varepsilon}, \omega )\big|\, |\nabla \nabla u_0(x - \varepsilon z)|\, |u_2^\varepsilon (x-\varepsilon z)|\, d x \, dz \end{equation} $$ \leq C\int\limits_{|z|\geq \frac1\varepsilon} \int\limits_{\mathbb R^d} \, a (z) |z| \, \big| \theta (\frac{x}{\varepsilon}+z, \omega )\big|\, |\nabla \nabla u_0(x)|\, |u_2^\varepsilon (x)|\, d x \, dz $$ $$ \leq C\int\limits_{|z|\geq \frac1\varepsilon} \int\limits_{\mathbb R^d} \, a (z) |z| \,\Big[ \big| \theta (\frac{x}{\varepsilon}+z, \omega ) - \theta (\frac{x}{\varepsilon}, \omega )\big|+\big| \theta (\frac{x}{\varepsilon}, \omega )\big|\Big]\, |\nabla \nabla u_0(x)|\, |u_2^\varepsilon (x)|\, d x \, dz. 
$$ We have $$ \int\limits_{|z|\geq \frac1\varepsilon} \int\limits_{\mathbb R^d} \, a (z) |z| \,\big| \theta (\frac{x}{\varepsilon}, \omega )\big|\, |\nabla \nabla u_0(x)|\, |u_2^\varepsilon (x)|\, d x \, dz $$ $$ \leq \int\limits_{\mathbb R^d} \int\limits_{\mathbb R^d} \, a (z) |z|^2 \,\big|\varepsilon \theta (\frac{x}{\varepsilon}, \omega )\big|\, |\nabla \nabla u_0(x)|\, |u_2^\varepsilon (x)|\, d x \, dz \leq G_2\cdot o(1) $$ and $$ \int\limits_{|z|\geq \frac1\varepsilon} \int\limits_{\mathbb R^d} \, a (z) |z| \,\Big[ \big| \theta (\frac{x}{\varepsilon}+z, \omega ) - \theta (\frac{x}{\varepsilon}, \omega )\big|\Big]\, |\nabla \nabla u_0(x)|\, |u_2^\varepsilon (x)|\, d x \, dz $$ $$ \leq \int\limits_{|z|\geq \frac1\varepsilon} \int\limits_{\mathbb R^d} \, a (z) |z| \, \big| \zeta_z (T_{\frac{x}{\varepsilon}}\omega ) \big|\, |\nabla \nabla u_0(x)|\, |u_2^\varepsilon (x)|\, d x \, dz $$ $$ \leq \left( \int\limits_{|z|\geq \frac1\varepsilon} \, a (z) z^2 \, dz \right)^{\frac12} \int\limits_{\mathbb R^d} \left( \int\limits_{\mathbb R^d} a(z) \big| \zeta_z (T_{\frac{x}{\varepsilon}}\omega ) \big|^2 \, dz \right)^{\frac12} |\nabla \nabla u_0(x)|\, |u_2^\varepsilon (x)|\, d x $$ $$ \leq o(1) \, \left( \int\limits_{\mathbb R^d} \, |u_2^\varepsilon (x)|^2 \, dx \right)^{\frac12} \left( \int\limits_{\mathbb R^d} \left( \int\limits_{\mathbb R^d} a(z) \big| \zeta_z (T_{\frac{x}{\varepsilon}}\omega ) \big|^2 \, dz \right) |\nabla \nabla u_0(x)|^2\, d x \right)^{\frac12} = G_2\cdot o(1). $$ Since $\zeta_z (\omega) \in L^2_M$, the second integral on the right-hand side here converges to a constant by the ergodic theorem. Combining the last two estimates we conclude that the term on the right-hand side in \eqref{aaa2} does not exceed $G_2\cdot o(1)$. Therefore, considering \eqref{aaa1}, we obtain $I_2\leq G_2\cdot o(1)$. This estimate and \eqref{L1-RHS-I1} imply that $$ G_1^2 + G_2^2 = I_1 + I_2 \le (G_1 + G_2) \cdot o(1).
$$ Consequently, $G_1 \to 0$ and $G_2 = m^{1/2} \| u_2^\varepsilon \|_{L^2(\mathbb R^d)} \to 0$ as $\varepsilon \to 0$. The lemma is proved. \end{proof} Thus we can rewrite $I^\varepsilon_0$ (all the terms of the order $\varepsilon^{0}$) as follows \begin{equation}\label{VV} I^\varepsilon_0 = (D_1 - D_2) \cdot \nabla\nabla u_0 + f_2^\varepsilon + S(\frac{x}{\varepsilon}, \omega) \cdot \nabla\nabla u_0, \qquad S(\frac{x}{\varepsilon}, \omega) = \Psi_1(\frac{x}{\varepsilon}, \omega) - \Psi_2(\frac{x}{\varepsilon}, \omega), \end{equation} where the matrices $D_1$ and $D_2$ are defined in \eqref{J_1} and \eqref{D_2} respectively, and $ S(\frac{x}{\varepsilon}, \omega), \Psi_1(\frac{x}{\varepsilon}, \omega), \Psi_2(\frac{x}{\varepsilon}, \omega)$ are stationary fields with zero mean which are given by \begin{equation}\label{Psi-1} \Psi_1(\frac{x}{\varepsilon}, \omega) = \frac12 \int\limits_{\mathbb R^d} \, a (z) z^2 \Big[ \mu (\frac{x}{\varepsilon}, \omega ) \mu (\frac{x}{\varepsilon} -z, \omega ) - E\{ \mu ( 0, \omega ) \mu ( -z, \omega ) \} \Big] dz, \end{equation} \begin{equation}\label{Psi-2} \Psi_2(\frac{x}{\varepsilon}, \omega) = \frac12 \int\limits_{\mathbb R^d} \, a (z) z \Big[ \zeta_{-z} (\frac{x}{\varepsilon}, \omega ) \mu (\frac{x}{\varepsilon}, \omega ) \mu (\frac{x}{\varepsilon} -z, \omega ) - E\{ \zeta_{-z} (0, \omega) \mu ( 0, \omega ) \mu ( -z, \omega )\} \Big] dz. \end{equation} Denote \begin{equation}\label{u3} u_3^\varepsilon(x,\omega) = (-L^\varepsilon+m)^{-1} F^\varepsilon(x,\omega), \quad \mbox{where } \; F^\varepsilon(x, \omega) = S(\frac{x}{\varepsilon}, \omega) \cdot \nabla\nabla u_0(x). \end{equation} Since $ {\rm supp} \, u_0 \subset B$ is a bounded subset of $\mathbb R^d$ and $$ \int\limits_{\mathbb R^d} \, a (z) |z|\, \big|\zeta_{-z} ( \omega )\big| \,dz \in L^2(\Omega), $$ then by the Birkhoff theorem $u_3^\varepsilon \in L^2(\mathbb R^d)$. Our goal is to prove that $\|u_3^\varepsilon \|_{L^2(\mathbb R^d)} \to 0$ as $\varepsilon \to 0$.
We first show that the family $\{u_3^\varepsilon\}$ is bounded in $L^2(\mathbb R^d)$. \begin{lemma}\label{Bound} The family of functions $u_3^\varepsilon$ defined by \eqref{u3} is uniformly bounded in $L^2(\mathbb R^d)$ for a.e. $\omega$: $\|u_3^\varepsilon \|_{L^2(\mathbb R^d)} \le C$ for any $0<\varepsilon<1$. \end{lemma} \begin{proof} Since the operator $(-L^\varepsilon+m)^{-1} $ is bounded ($\| (-L^\varepsilon+m)^{-1} \| \le \frac{1}{m}$), then it is sufficient to prove that $\| F^\varepsilon(x,\omega) \|_{L^2(\mathbb R^d)} \le C$ uniformly in $\varepsilon$. By the Birkhoff ergodic theorem the functions $ \Psi_1(\frac{x}{\varepsilon}, \omega)$ and $\Psi_2(\frac{x}{\varepsilon}, \omega)$ a.s. converge to zero weakly in $L^2(B)$, and so does $S(\frac{x}{\varepsilon}, \omega)$. Then $S(\frac{x}{\varepsilon}, \omega)\cdot \nabla\nabla u_0$ a.s. converges to zero weakly in $L^2(\mathbb R^d)$. This implies the desired boundedness. \end{proof} \begin{lemma}\label{Convergence} For any cube $B$ centered at the origin $\|u_3^\varepsilon \|_{L^2(B)} \ \to \ 0$ as $\varepsilon \to 0$ for a.e. $\omega$. \end{lemma} \begin{proof} The first step of the proof is to show that any sequence $\{u_3^{\varepsilon_j} \}$, $\varepsilon_j \to 0$, is compact in $L^2(B)$. Using definition \eqref{u3} we have $$ ( (-L^\varepsilon+m) u_3^\varepsilon, u_3^\varepsilon) \ = \ ( F^\varepsilon, u_3^\varepsilon).
$$ The left-hand side of this relation can be rewritten as \begin{equation}\label{L2-rhs} \begin{array}{l} \displaystyle \int\limits_{\mathbb R^d} (-L^\varepsilon+m) u_3^\varepsilon(x) u_3^\varepsilon(x) dx \\ \displaystyle = \, m \int\limits_{\mathbb R^d} (u_3^\varepsilon(x))^2 dx - \frac{1}{\varepsilon^2} \int\limits_{\mathbb R^d} \int\limits_{\mathbb R^d} \, a (z) \, \mu (\frac{x}{\varepsilon}, \omega ) \mu (\frac{x}{\varepsilon} -z, \omega) ( u_3^\varepsilon (x-\varepsilon z) - u_3^\varepsilon(x)) u_3^\varepsilon (x) dz dx \\ \displaystyle = \, m \int\limits_{\mathbb R^d} (u_3^\varepsilon(x))^2 dx + \frac{1}{2 \varepsilon^2} \int\limits_{\mathbb R^d} \int\limits_{\mathbb R^d} \, a (z) \, \mu (\frac{x}{\varepsilon}, \omega ) \mu (\frac{x}{\varepsilon} -z, \omega) ( u_3^\varepsilon (x-\varepsilon z) - u_3^\varepsilon(x))^2 dz dx. \end{array} \end{equation} Consequently we obtain the following equality \begin{equation}\label{u3-main} m \int\limits_{\mathbb R^d} (u_3^\varepsilon(x))^2 dx + \frac{1}{2 \varepsilon^2} \int\limits_{\mathbb R^d} \int\limits_{\mathbb R^d} \, a (z) \, \mu (\frac{x}{\varepsilon}, \omega ) \mu (\frac{x}{\varepsilon} -z, \omega) ( u_3^\varepsilon (x-\varepsilon z) - u_3^\varepsilon(x))^2 dz dx = ( F^\varepsilon, u_3^\varepsilon). \end{equation} Considering the uniform boundedness of $F^\varepsilon$ and $ u_3^\varepsilon$, see Lemma \ref{Bound}, we immediately conclude that \begin{equation}\label{C-main} \frac{1}{\varepsilon^2} \int\limits_{\mathbb R^d} \int\limits_{\mathbb R^d} \, a (z) \, \mu (\frac{x}{\varepsilon}, \omega ) \mu (\frac{x}{\varepsilon} -z, \omega) ( u_3^\varepsilon (x-\varepsilon z) - u_3^\varepsilon(x))^2 dz dx < K \end{equation} uniformly in $\varepsilon$ and for a.e. $\omega$. 
Therefore, \begin{equation}\label{C-main_pure} m \int\limits_{\mathbb R^d} (u_3^\varepsilon(x))^2 dx+\frac{1}{\varepsilon^2} \int\limits_{\mathbb R^d} \int\limits_{\mathbb R^d} \, a (z) ( u_3^\varepsilon (x-\varepsilon z) - u_3^\varepsilon(x))^2 dz dx < K \end{equation} For the sake of definiteness assume that $B=[-1,1]^d$. The cubes of other size can be considered in exactly the same way. Let $\phi(s)$ be an even $C_0^\infty(\mathbb R)$ function such that $0\leq \phi\leq 1$, $\phi(s)=1$ for $|s|\leq 1$, $\phi(s)=0$ for $|s|\geq 2$, and $|\phi'(s)|\leq 2$. Denote $\tilde u_3^\varepsilon(x)= \phi(|x|)u_3^\varepsilon(x)$. It is straightforward to check that \begin{equation}\label{C-main_modi1} m \int\limits_{\mathbb R^d} (\tilde u_3^\varepsilon(x))^2 dx+\frac{1}{\varepsilon^2} \int\limits_{\mathbb R^d} \int\limits_{\mathbb R^d} \, a (z) (\tilde u_3^\varepsilon (x-\varepsilon z) - \tilde u_3^\varepsilon(x))^2 dz dx < K \end{equation} We also choose $\mathcal{R}$ in such a way that $\int_{|z|\leq \mathcal{R}}a(z)dz\geq \frac12$ and introduce $$ \tilde a(z) ={\bf 1}_{\{|z|\leq \mathcal{R}\}}\,a(z)\,\Big(\int_{|z|\leq \mathcal{R}}a(z)dz\Big)^{-1}. $$ Then \begin{equation}\label{C-main_cut} m \int\limits_{\mathbb R^d} (\tilde u_3^\varepsilon(x))^2 dx+\frac{1}{\varepsilon^2} \int\limits_{\mathbb R^d} \int\limits_{\mathbb R^d} \, \tilde a (z) (\tilde u_3^\varepsilon (x-\varepsilon z) - \tilde u_3^\varepsilon(x))^2 dz dx < K. \end{equation} Letting $\tilde B = [-\pi, \pi]^d$, we denote by $\hat u_3^\varepsilon(x)$ the $\tilde B$ periodic extension of $\tilde u_3^\varepsilon(x)$. For the extended function we have \begin{equation}\label{C-main_per} m \int\limits_{\tilde B} (\hat u_3^\varepsilon(x))^2 dx+\frac{1}{\varepsilon^2} \int\limits_{\tilde B} \int\limits_{\mathbb R^d} \, \tilde a (z) (\hat u_3^\varepsilon (x-\varepsilon z) - \hat u_3^\varepsilon(x))^2 dz dx < K. 
\end{equation} The functions $e_k(x) = \frac{1}{(2 \pi)^{d/2}} e^{ikx}, \; k \in \mathbb Z^d$, form an orthonormal basis in $L^2(\tilde B)$, and $$ \hat u_3^\varepsilon(x) = \sum_k \alpha_k^\varepsilon e_k(x), \quad \hat u_3^\varepsilon (x-\varepsilon z) = \sum_k \alpha_k^\varepsilon e^{-i\varepsilon kz} e_k(x); $$ $$ \| \hat u_3^\varepsilon(x)\|^2 = \sum_k (\alpha_k^\varepsilon)^2, \quad \|\hat u_3^\varepsilon (x-\varepsilon z) - \hat u_3^\varepsilon(x) \|^2 =\sum_k (\alpha_k^\varepsilon)^2 |e^{-i\varepsilon k z} - 1|^2. $$ Then inequality \eqref{C-main_per} is equivalent to the following bound \begin{equation}\label{AAA1} \frac{1}{\varepsilon^2} \sum_k (\alpha_k^\varepsilon)^2 \, \int\limits_{\mathbb R^d} \, \tilde a (z) |e^{-i\varepsilon k z} - 1|^2 dz < C. \end{equation} \begin{lemma}\label{Propc1c2} For any $k \in \mathbb Z^d$ and any $0<\varepsilon<1$ there exist constants $C_1, \ C_2$ (depending on $d$) such that \begin{equation}\label{A2} \int\limits_{\mathbb R^d} \, \tilde a (z) |e^{-i\varepsilon k z} - 1|^2 dz \ge \min \{ C_1 k^2 \varepsilon^2, \ C_2 \}. \end{equation} \end{lemma} \begin{proof} For small $\varepsilon$, the lower bound by $C_1 k^2 \varepsilon^2$ follows from the expansion of $e^{-i \varepsilon k z}$ in the neighborhood of 0. For large enough $\varepsilon |k|\ge \varkappa_0>1$ we use the following inequality $$ \int\limits_{\mathbb R^d} \, \tilde a (z) |e^{-i\varepsilon k z} - 1|^2 dz \ge c_0 \int\limits_{[0,1]^d} |e^{-i\varepsilon k z} - 1|^2 dz \ge c_0 \big(2-\frac{2}{\varkappa_0}\big)^d. $$ \end{proof} Let us consider a sequence $\varepsilon_j \to 0$. Using inequalities \eqref{AAA1}-\eqref{A2} we will construct now for any $\delta>0$ a finite $2 \delta$-net covering all elements of the sequence $u_3^{\varepsilon_j}$. For any $\delta>0$ we take $|k_0|$ and $j_0$ such that \begin{equation}\label{A3} \frac{C}{\delta} < C_1 |k_0|^2 < \frac{C_2}{\varepsilon_{j_0}^2}, \end{equation} where $C,\, C_1, \, C_2$ are the same constants as in \eqref{AAA1}-\eqref{A2}.
Then it follows from \eqref{AAA1}-\eqref{A3} that $$ \sum_{k:|k| \ge |k_0|} C_1 |k_0|^2 (\alpha_k^{\varepsilon_j})^2 < \sum_{k: |k| \ge |k_0|} \min \Big\{ C_1 |k|^2, \, \frac{C_2}{\varepsilon_j^2} \Big\} \, (\alpha_k^{\varepsilon_j})^2 < C \quad \mbox{ for any } \; j>j_0. $$ Consequently we obtain the uniform bound on the tails of $\hat u_3^{\varepsilon_j}$ for all $j>j_0$: \begin{equation}\label{A4} \sum_{k:|k| \ge |k_0|} (\alpha_k^{\varepsilon_j})^2 < \frac{C}{C_1 |k_0|^2} < \delta. \end{equation} Denote by ${\cal H}_{k_0} \subset L^2(\tilde B)$ a linear span of basis vectors $\{ e_k, \ |k|<|k_0| \}$. Evidently, it is a finite-dimensional subspace. Then we have $$ \hat u_3^\varepsilon = w_{k_0}^\varepsilon + \sum_{k:|k| \ge |k_0|} \alpha_k^{\varepsilon} e_k, \quad \mbox{ where } \; w_{k_0}^\varepsilon = P_{{\cal H}_{k_0}} u_3^\varepsilon. $$ Since we already know from Lemma \ref{Bound} that the functions $\hat u_3^{\varepsilon_j}$ are uniformly bounded in $L^2(\tilde B)$, then the functions $w_{k_0}^{\varepsilon_j}$ are also uniformly bounded. Therefore there exists in ${\cal H}_{k_0}$ a finite $\delta$-net covering the functions $\{ w_{k_0}^{\varepsilon_j}, \, j>j_0 \}$. Estimate \eqref{A4} implies that the same net will be the $2 \delta$-net for the functions $\{\hat u_3^{\varepsilon_j}, \, j>j_0 \}$. We need to add to this net $j_0$ elements to cover first $j_0$ functions $\hat u_3^{\varepsilon_j}, \, j=1, \ldots, j_0$. Thus we constructed the finite $2 \delta$-net for any $\delta>0$ which proves the compactness of $\{\hat u_3^{\varepsilon} \}$ as $\varepsilon \to 0 $ in $L^2(\tilde B)$. Since $u_3^{\varepsilon}(x)=\hat u_3^{\varepsilon}(x)$ for $x\in B$, we conclude that the family $\{u_3^{\varepsilon}\}$ is compact in $L^2(B)$. In the same way one can show that this family is compact on any cube $B=[-L,L]^d$. This completes the proof of Lemma. 
\end{proof} \begin{lemma}\label{l_u3small} The following limit relation holds: $\|u_3^\varepsilon\|_{L^2(\mathbb R^d)}\to 0$, as $\varepsilon\to0$. \end{lemma} \begin{proof} We go back to formula \eqref{u3-main}. On the right-hand side of this equality we have the inner product of two sequences $F^\varepsilon$ and $u_3^\varepsilon$. Since the sequence $F^\varepsilon \rightharpoonup 0$ weakly in $L^2(B)$, and the sequence $u_3^\varepsilon$ is compact in $L^2(B)$, the product $(F^\varepsilon, u_3^\varepsilon) \to 0$ as $\varepsilon \to 0$. Therefore, both integrals on the left-hand side of \eqref{u3-main} also tend to zero as $\varepsilon \to 0$, and we obtain that $\| u_3^\varepsilon \|_{L^2(\mathbb R^d)} \to 0, \ \varepsilon \to 0$. \end{proof} Denote by $\Theta$ the matrix $\Theta = D_1 - D_2$, where $D_1, \, D_2$ are defined by \eqref{J_1}, \eqref{D_2}. Our next goal is to show that $D_1 - D_2$ is a positive definite matrix. \begin{proposition} The matrix $\Theta = D_1 - D_2$ is positive definite: \begin{equation}\label{Positive} \Theta \ = \ \frac12 \, \int\limits_{\mathbb R^d} \int\limits_{\Omega} \big(z\otimes z - z \otimes \zeta_{-z} (0, \omega ) \big) \, a (z) \, \mu (0, \omega ) \mu ( -z, \omega) \, dz \, d P(\omega) > 0. \end{equation} \end{proposition} \begin{proof} We recall that $\varkappa^\delta(\omega)$ stands for the unique solution of equation \eqref{A-delta}.
Letting $\varkappa_\eta^\delta(\omega)=\eta\cdot\varkappa^\delta(\omega)$, $\eta\in\mathbb R^d\setminus \{0\}$, one can easily obtain \begin{equation}\label{Prop2_eta} \begin{array}{c} \displaystyle \delta \int\limits_\Omega \big(\varkappa_\eta^\delta(\omega)\big)^2\mu(\omega)\, dP(\omega) - \int\limits_{\mathbb R^d} \int\limits_\Omega a (z) \mu ( T_z \omega ) \big( \varkappa_\eta^\delta (T_z \omega ) - \varkappa_\eta^\delta ( \omega) \big) \varkappa_\eta^\delta ( \omega)\mu(\omega) \, dz \, dP(\omega) \\ \displaystyle = \int\limits_{\mathbb R^d} \int\limits_\Omega (\eta\cdot z) a(z) \varkappa_\eta^\delta(\omega) \mu(T_z \omega) \mu(\omega) \, dz \, dP(\omega). \end{array} \end{equation} In the same way as in the proof of Proposition \ref{spectrA}, we derive the following relation: \begin{equation}\label{Prop2_etabis} \begin{array}{c} \displaystyle \delta \int\limits_\Omega \big(\varkappa_\eta^\delta(\omega)\big)^2\mu(\omega)\, dP(\omega) +\frac12\int\limits_{\mathbb R^d} \int\limits_\Omega a (z) \mu ( T_z \omega ) \big( \varkappa_\eta^\delta (T_z \omega ) - \varkappa_\eta^\delta ( \omega) \big)^2\mu(\omega) \, dz \, dP(\omega) \\ \displaystyle = - \frac12 \int\limits_{\mathbb R^d} \int\limits_\Omega (\eta\cdot z) a(z)\big( \varkappa_\eta^\delta (T_z \omega ) - \varkappa_\eta^\delta ( \omega) \big) \mu(T_z \omega) \mu(\omega) \, dz \, dP(\omega). \end{array} \end{equation} According to \eqref{theta} the sequence $\eta\cdot(\varkappa_\eta^{\delta_j} (T_z \omega ) - \varkappa_\eta^{\delta_j} ( \omega))$ converges weakly in $L^2_M $ as $\delta_j\to 0$ to $\eta\cdot\theta(z,\omega)$. 
Passing to the limit $\delta_j\to0$ in relation \eqref{Prop2_etabis} and considering the lower semicontinuity of the $L^2_M$ norm with respect to the weak topology, we arrive at the following inequality \begin{equation}\label{est_ineq} \frac12\int\limits_{\mathbb R^d} \int\limits_\Omega a (z) \mu ( T_z \omega ) \big(\eta\cdot\theta(z,\omega) \big)^2\mu(\omega) \, dz \, dP(\omega) \leq - \frac12 \int\limits_{\mathbb R^d} \int\limits_\Omega (\eta\cdot z) a(z)\big( \eta\cdot\theta(z,\omega) \big) \mu(T_z \omega) \mu(\omega) \, dz \, dP(\omega). \end{equation} Therefore, $$ \Theta \eta\cdot\eta= \frac12 \, \eta_i\eta_j\int\limits_{\mathbb R^d} \int\limits_{\Omega} \big(z^i z^j - z^i \zeta^j_{-z} (0, \omega ) \big) \, a (z) \, \mu (0, \omega ) \mu ( -z, \omega) \, dz \, d P(\omega) $$ $$ =\frac12 \int\limits_{\mathbb R^d} \int\limits_\Omega \big((\eta\cdot z)^2+(\eta\cdot z) (\eta\cdot \theta(z,\omega))\big) \, a (z) \, \mu (0, \omega ) \mu ( z, \omega) \, dz \, d P(\omega). $$ Combining the latter relation with \eqref{est_ineq} we obtain $$ \Theta \eta\cdot\eta\geq \frac12 \int\limits_{\mathbb R^d} \int\limits_\Omega \big((\eta\cdot z)+(\eta\cdot \theta(z,\omega))\big)^2 \, a (z) \, \mu (0, \omega ) \mu ( z, \omega) \, dz \, d P(\omega). $$ Since $\theta(z, \omega)$ is a.s. a function of sublinear growth in $z$, we conclude that $ \eta\cdot\theta(z, \omega) \not \equiv -\eta\cdot z$, consequently the integral on the right-hand side here is strictly positive. This yields the desired positive definiteness. \end{proof} \section{Estimation of the remainder $ \phi_\varepsilon $}\label{s_estrem} In this section we consider the remainder $ \phi_\varepsilon (x, \omega)$ given by (\ref{14}) and prove that $\|\phi_\varepsilon\|_{L^2(\mathbb R^d)}$ vanishes a.s. as $\varepsilon \to 0$. \begin{lemma}\label{reminder} Let $u_0 \in {\cal{S}}(\mathbb R^d)$. Then a.s.
\begin{equation}\label{fi} \| \phi_\varepsilon (\cdot, \omega) \|_{L^2(\mathbb R^d)} \ \to \ 0 \quad \mbox{ as } \; \varepsilon \to 0. \end{equation} \end{lemma} \begin{proof} The first term in (\ref{14}) can be written as $$ \phi_\varepsilon^{(1)} (x, \omega) = \int\limits_{\mathbb R^d} dz \ a (z) \mu \Big( \frac{x}{\varepsilon}, \omega \Big) \mu \Big( \frac{x}{\varepsilon} -z, \omega \Big) \int_0^{1} \ \Big( \nabla \nabla u_0(x - \varepsilon z t) - \nabla \nabla u_0(x) \Big) z \otimes z (1-t) \ dt. $$ It doesn't depend on the random corrector $ \theta$ and can be considered exactly in the same way as in \cite[Proposition 5 ]{PiZhi17}. Thus we have \begin{equation}\label{phi_1bis} \| \phi_\varepsilon^{(1)} \|_{L^2(\mathbb R^d)} \to 0 \quad \mbox{ as } \; \varepsilon \to 0. \end{equation} Let us denote by $\phi_\varepsilon^{(2)}$ the sum of the second and the third terms in (\ref{14}): \begin{equation}\label{reminder-2} \begin{array}{rl} \displaystyle \!\!\!\!&\hbox{ }\!\!\!\!\!\!\!\!\!\!\!\!\phi_\varepsilon^{(2)} (x, \omega) =\\[3mm] &\!\!\!\!\!\!\!\!\! \displaystyle \mu \big( \frac{x}{\varepsilon},\omega \big) \int\limits_{\mathbb R^d} \ a (z) \mu \big( \frac{x}{\varepsilon} -z, \omega \big) \theta \big(\frac{x}{\varepsilon}\!-\!z,\omega \big) \Big( \frac{1}{\varepsilon} \big(\nabla u_0(x- \varepsilon z) - \nabla u_0(x)\big) + z \, \nabla \nabla u_0(x) \Big)\, dz. \end{array} \end{equation} We take sufficiently large $L>0$ such that supp $\, u_0 \subset \{|x|<\frac12 L \}$ and estimate $\phi_\varepsilon^{(2)} (x, \omega)$ separately in the sets $\{|x|<L\}$ and $\{|x|>L\}$. If $|x|>L$, then $u_0(x) = 0$. 
Since $a(z)$ has a finite second moment in $\mathbb R^d$, for any $c>0$ we have \begin{equation}\label{ineqz2} \frac{1}{\varepsilon^2} \int\limits_{|z|> \frac{c}{\varepsilon}} a (z) \, dz = \frac{1}{\varepsilon^2} \int\limits_{|z|> \frac{c}{\varepsilon}} a (z) \frac{z^2}{z^2} \, dz \le \frac{1}{c^2} \int\limits_{|z|> \frac{c}{\varepsilon}} a (z) z^2 \, dz \to 0 \quad \mbox{as } \; \varepsilon \to 0. \end{equation} Therefore, \begin{equation}\label{r-2out} \begin{array}{l} \displaystyle \| \phi_\varepsilon^{(2)} \, \chi_{|x|>L} \|^2_{L^2(\mathbb R^d)} =\!\! \int\limits_{|x|>L} \Big(\!\! \int\limits_{|x - \varepsilon z|< \frac12 L}\!\! \frac{1}{\varepsilon} \mu \big( \frac{x}{\varepsilon},\omega \big) a (z) \mu \big( \frac{x}{\varepsilon} -z, \omega \big) \theta \big(\frac{x}{\varepsilon}\!-\!z,\omega \big) \nabla u_0(x- \varepsilon z) \, dz \Big)^2 dx \\[3mm] \displaystyle < \alpha_2^4 \, \Big( \frac{1}{\varepsilon^2} \int\limits_{|z|> \frac{L}{2\varepsilon}} \ a (z) \, dz \, \Big)^2 \|\varepsilon \theta \big(\frac{y}{\varepsilon},\omega \big) \nabla u_0(y)\|_{L^2(\mathbb R^d)}^2 \to 0; \end{array} \end{equation} Here we have also used the limit relation $\| \varepsilon \theta \big(\frac{y}{\varepsilon},\omega) \nabla u_0(y) \|_{L^2(\mathbb R^d)} \to 0$ that is ensured by Proposition \ref{1corrector}. 
Denote $\chi_{<L}(x) = \chi_{\{|x|<L\}}(x)$ and represent the function $\phi_\varepsilon^{(2)} (x,\omega) \, \chi_{<L}(x)$ as follows: \begin{equation}\label{r-2in-bis} \phi_\varepsilon^{(2)} (x, \omega) \, \chi_{<L} (x) = \gamma_\varepsilon^{<} (x, \omega) + \gamma_\varepsilon^{>} (x, \omega), \end{equation} where \begin{equation}\label{r-2in} \begin{array}{l} \displaystyle \gamma_\varepsilon^{<} (x, \omega) =\mu \big( \frac{x}{\varepsilon},\omega \big) \chi_{<L}(x)\\[3mm] \displaystyle \times\int\limits_{|\varepsilon z|< 2L } \ a (z) \mu \big( \frac{x}{\varepsilon} -z, \omega \big) \theta \big(\frac{x}{\varepsilon}\!-\!z,\omega \big) \Big( \frac{1}{\varepsilon} \big(\nabla u_0(x- \varepsilon z) - \nabla u_0(x)\big) + z \, \nabla \nabla u_0(x) \Big)\, dz; \\[9mm] \displaystyle \gamma_\varepsilon^{>} (x, \omega) = \mu \big( \frac{x}{\varepsilon},\omega \big) \chi_{<L}(x) \\[3mm] \displaystyle \times\int\limits_{|\varepsilon z|> 2L } \ a (z) \mu \big( \frac{x}{\varepsilon} -z, \omega \big) \theta \big(\frac{x}{\varepsilon}\!-\!z,\omega \big) \Big( \frac{1}{\varepsilon} \big(\nabla u_0(x- \varepsilon z) - \nabla u_0(x)\big) + z \, \nabla \nabla u_0(x) \Big)\, dz. \end{array} \end{equation} Since $u_0\in C_0^\infty(\mathbb R^d)$, the Taylor expansion applies to $\nabla u_0 (x- \varepsilon z)$, and we get $$ \frac{1}{\varepsilon} \big(\nabla u_0 (x- \varepsilon z) - \nabla u_0(x)\big) + z \, \nabla \nabla u_0(x) = \frac{\varepsilon}{2} \nabla\nabla\nabla u_0 (\xi)\, z \otimes z $$ with some $\xi \in \mbox{supp} \, u_0$; here the notation $\nabla\nabla\nabla u_0 (\xi)\, z \otimes z$ is used for the vector function $(\nabla\nabla\nabla u_0 (\xi)\, z \otimes z)^i=\partial_{x^j}\partial_{x^k}\partial_{x^i}u_0(\xi)z^jz^k$.
Then the right-hand side of the first formula in \eqref{r-2in} admits the estimate \begin{equation}\label{r-2in1} \begin{array}{l} \displaystyle \mu \big( \frac{x}{\varepsilon},\omega \big) \chi_{<L}(x) \Big|\!\!\!\int\limits_{|\varepsilon z|< 2L } \!\!\!\!\!\! a (z) \mu \big( \frac{x}{\varepsilon} -z, \omega \big) \theta \big(\frac{x}{\varepsilon}\!-\!z,\omega \big) \Big( \frac{1}{\varepsilon} \big(\nabla u_0(x- \varepsilon z) - \nabla u_0(x)\big) + z \nabla \nabla u_0(x)\! \Big) dz \Big| \\[3mm] \displaystyle \le \frac{\alpha_2^2}{2} \max | \nabla\nabla\nabla u_0 | \int\limits_{\mathbb R^d } \, \varepsilon | \theta \big(\frac{x}{\varepsilon}\!-\!z,\omega \big)| \, \chi_{<3L}(x-\varepsilon z) \, a (z) z^2 \, dz. \end{array} \end{equation} Taking into account the relation \begin{equation}\label{r-2in1add} \begin{array}{l} \displaystyle \int\limits_{\mathbb R^d } \Big( \int\limits_{\mathbb R^d } \, \varepsilon | \theta \big(\frac{x}{\varepsilon}\!-\!z,\omega \big)| \, \chi_{<3L}(x-\varepsilon z) \, a (z) z^2 \, dz \Big)^2 dx \\[3mm] \displaystyle = \int\limits_{\mathbb R^d } a (z_1) z_1^2 dz_1 \int\limits_{\mathbb R^d } a (z_2) z_2^2 dz_2 \int\limits_{\mathbb R^d } \varepsilon^2 | \theta \big(\frac{x}{\varepsilon}\!-\!z_1,\omega \big)| | \theta \big(\frac{x}{\varepsilon}\!-\!z_2,\omega \big)| \chi_{<3L}(x-\varepsilon z_1) \chi_{<3L}(x-\varepsilon z_2) dx \end{array} \end{equation} and applying the Cauchy-Schwartz inequality to the last integral on its right hand side we conclude with the help of Proposition \ref{1corrector} that $\|\gamma_\varepsilon^{<} (x, \omega) \|_{L^2(\mathbb R^d) } \to 0$ as $\varepsilon \to 0$. If $|x|<L$ and $|\varepsilon z|>2L$, then $|x-\varepsilon z|>L$, and $u_0 (x-\varepsilon z)=0$. 
The right-hand side of the second formula in \eqref{r-2in} can be rearranged as follows: \begin{equation}\label{r-2in2} \begin{array}{l} \displaystyle \gamma_\varepsilon^{>} (x, \omega) = \mu \big( \frac{x}{\varepsilon},\omega \big) \chi_{<L}(x) \int\limits_{|z|> \frac{2L}{\varepsilon} }\!\!\!\! a (z) \mu \big( \frac{x}{\varepsilon} -z, \omega \big) \theta \big(\frac{x}{\varepsilon}\!-\!z,\omega \big) \Big( - \frac{1}{\varepsilon} \nabla u_0(x) + z \, \nabla \nabla u_0(x) \Big)\, dz \\[3mm] \displaystyle =\mu \big( \frac{x}{\varepsilon},\omega \big) \chi_{<L}(x) \!\! \int\limits_{|z|> \frac{2L}{\varepsilon} } \!\!\!\! a (z) \mu \big( \frac{x}{\varepsilon} -z, \omega \big) \big( \theta \big(\frac{x}{\varepsilon}\!-\!z,\omega \big) - \theta \big(\frac{x}{\varepsilon},\omega \big) \big) \Big(\!\! - \frac{1}{\varepsilon} \nabla u_0(x) + z \nabla \nabla u_0(x)\!\Big) dz \\[3mm] \displaystyle +\mu \big( \frac{x}{\varepsilon},\omega \big) \chi_{<L}(x) \!\! \int\limits_{|z|> \frac{2L}{\varepsilon} } \!\!\!\! a (z) \mu \big( \frac{x}{\varepsilon} -z, \omega \big) \theta \big(\frac{x}{\varepsilon},\omega \big) \Big( - \frac{1}{\varepsilon} \nabla u_0(x) + z \, \nabla \nabla u_0(x) \Big)\, dz \end{array} \end{equation} The second term on the right-hand side in \eqref{r-2in2} is estimated in the same way as the function $\phi_\varepsilon^{(2)} \, \chi_{|x|>L}$ in \eqref{r-2out}. Thus the $L^2(\mathbb R^d)$ norm of this term tends to 0 as $\varepsilon \to 0$. 
The first term on the right-hand side of \eqref{r-2in2} admits the following upper bound: \begin{equation}\label{r-2in2bis} \begin{array}{l} \displaystyle \Big| \mu \big( \frac{x}{\varepsilon},\omega \big) \chi_{<L}(x) \int\limits_{|z|> \frac{2L}{\varepsilon} } \ a (z) \mu \big( \frac{x}{\varepsilon} -z, \omega \big) \zeta_{-z} \big(T_{\frac{x}{\varepsilon}}\omega \big) \Big( - \frac{1}{\varepsilon} \nabla u_0(x) + z \, \nabla \nabla u_0(x) \Big)\, dz \Big| \\[3mm] \displaystyle \leq \alpha_2^2 \int\limits_{|z|> \frac{2L}{\varepsilon} } \ a (z) \Big| \zeta_{-z} \big(T_{\frac{x}{\varepsilon}}\omega \big)\Big|\ \Big| - \frac{1}{\varepsilon} \nabla u_0(x) + z \, \nabla \nabla u_0(x)\Big|\, dz \\[3mm] \displaystyle \leq \alpha_2^2 C(L) \int\limits_{|z|> \frac{2L}{\varepsilon} } |z| a (z) \Big| \zeta_{-z} \big(T_{\frac{x}{\varepsilon}}\omega \big)\Big| \, dz\ \big(\big| \nabla u_0(x)\big| + \big| \nabla \nabla u_0(x)\big|\big). \\[3mm] \displaystyle \leq \alpha_2^2 C(L) \Big(\int\limits_{|z|> \frac{2L}{\varepsilon} } |z|^2 a (z)dz\Big)^\frac12 \Big(\int\limits_{\mathbb R^d} a (z) \big|\zeta_{-z} \big(T_{\frac{x}{\varepsilon}}\omega \big)\big|^2 \, dz\Big)^\frac12\ \big(\big| \nabla u_0(x)\big| + \big| \nabla \nabla u_0(x)\big|\big). \end{array} \end{equation} Since $\zeta_{-z} (\omega)\in L^2_M$, we have $$ \mathbb E\int\limits_{\mathbb R^d} a (z) | \zeta_{-z} (\omega )|^2 \, dz<\infty. $$ Taking into account the convergence $$ \int\limits_{|z|> \frac{2L}{\varepsilon} } |z|^2 a (z)dz\to 0,\quad \hbox{as }\varepsilon\to0, $$ by the Birkhoff ergodic theorem we obtain that the $L^2(\mathbb R^d)$ norm of the first term on the right-hand side of \eqref{r-2in2} tends to zero a.s., as $\varepsilon\to0$. Therefore, $\|\gamma_\varepsilon^{>} (x, \omega) \|_{L^2(\mathbb R^d) } \to 0$ as $\varepsilon \to 0$. 
From \eqref{r-2in-bis} it follows that $\| \phi_\varepsilon^{(2)}(x,\omega) \chi_{<L} (x) \|_{L^2(\mathbb R^d)} \to 0$ as $ \varepsilon \to 0$, and together with \eqref{r-2out} this implies that \begin{equation}\label{rr} \| \phi_\varepsilon^{(2)}(x,\omega) \|_{L^2(\mathbb R^d)} \to 0 \quad \mbox{as } \; \varepsilon \to 0. \end{equation} Finally, \eqref{fi} follows from \eqref{phi_1bis} and \eqref{rr}. Lemma is proved. \end{proof} \section{Proof of the main results}\label{s_proofmain} We begin this section by proving relation \eqref{convergence1} for $f\in \mathcal{S}_0(\mathbb R^d)$. For such $f$ we have $u_0\in C_0^\infty(\mathbb R^d)$. It follows from \eqref{v_eps}, Proposition \ref{1corrector} and Lemmas \ref{l_u2small}, \ref{l_u3small} that \begin{equation}\label{frstconv} \|w^\varepsilon-u_0\|_{L^2(\mathbb R^d)}\to 0,\quad\hbox{as }\varepsilon\to 0. \end{equation} By the definition of $v^\varepsilon$, $u_2^\varepsilon$ and $u_3^\varepsilon$, $$ (L^\varepsilon-m)w^\varepsilon=(\hat L-m)u_0-m\varepsilon \theta \Big(\frac x\varepsilon\Big)\cdot\nabla u_0+\phi_\varepsilon =f-m\varepsilon \theta \Big(\frac x\varepsilon\Big)\cdot\nabla u_0+\phi_\varepsilon $$ $$ =(L^\varepsilon-m)u^\varepsilon-m\varepsilon \theta \Big(\frac x\varepsilon\Big)\cdot\nabla u_0+\phi_\varepsilon. $$ Therefore, $$ (L^\varepsilon-m)(w^\varepsilon-u^\varepsilon)=-m\varepsilon \theta \Big(\frac x\varepsilon\Big)\cdot\nabla u_0+\phi_\varepsilon. $$ According to Proposition \ref{1corrector} and Lemma \ref{reminder} the $L^2$ norm of the functions on the right-hand side of the last formula tends to zero as $\varepsilon\to0$. Consequently, $$ \|w^\varepsilon-u^\varepsilon\|_{L^2({\mathbb R^d})}\to 0,\quad\hbox{as }\varepsilon\to 0. $$ Combining this relation with \eqref{frstconv} yields the desired relation \eqref{convergence1} for $f\in\mathcal{S}_0(\mathbb R^d)$. To complete the proof of Theorem \ref{T1} we should show that the last convergence holds for any $f\in L^2(\mathbb R^d)$. 
For any $f \in L^2(\mathbb R^d)$ there exists $f_\delta \in \mathcal{S}_0$ such that $\| f - f_\delta\|_{L^2(\mathbb R^d)} <\delta$. Since the operator $(L^\varepsilon - m)^{-1}$ is bounded uniformly in $\varepsilon$, then \begin{equation}\label{delta_1} \| u^{\varepsilon}_\delta - u^\varepsilon \|_{L^2(\mathbb R^d)} \le C_1 \delta, \qquad \| u_{0,\delta} - u_0 \|_{L^2(\mathbb R^d)} \le C_1 \delta, \end{equation} where $$ u^{\varepsilon} \ = \ (L^{\varepsilon} - m)^{-1} f, \; \; u_{0} \ = \ (\hat L - m)^{-1} f, \; \; u^{\varepsilon}_\delta \ = \ (L^{\varepsilon} - m)^{-1} f_\delta, \; \; u_{0,\delta} \ = \ (\hat L - m)^{-1} f_\delta. $$ Recalling that $f_\delta\in\mathcal{S}_0$, we obtain $\| u^{\varepsilon}_\delta - u_{0, \delta} \|_{L^2(\mathbb R^d)} \to 0 $. Therefore, by (\ref{delta_1}) $$ \mathop{ \overline{\rm lim}}\limits_{\varepsilon \to 0} \| u^{\varepsilon} - u_0 \|_{L^2(\mathbb R^d)} \le 2 C_1 \delta $$ with an arbitrary $\delta>0$. This implies the desired convergence in \eqref{t1} for an arbitrary $f\in L^2(\mathbb R^d)$ and completes the proof of the main theorem. \subsection{Proof of Corollary \ref{cor_main}} Here we assume that the operator $L^{\varepsilon,{\rm ns}}$ is defined by \eqref{L_eps_ns}. Multiplying equation \eqref{u_eps_nssss} by $\rho^\varepsilon(x,\omega)=\rho\big(\frac{x}{\varepsilon},\omega\big)= \mu\big(\frac{x}{\varepsilon},\omega\big)\big(\lambda\big(\frac{x}{\varepsilon},\omega\big)\big)^{-1}$ we obtain \begin{equation}\label{eq_modfd} L^{\varepsilon}u_\varepsilon -m\rho^\varepsilon u_\varepsilon=\rho_\varepsilon f, \end{equation} where the symmetrized operator $L^{\varepsilon}$ is given by \eqref{L_eps}. Letting $\langle\rho\rangle=\mathbb E\bm{\rho} =\mathbb E\big(\frac{\bm{\mu}}{\bm{\lambda}}\big)$ we consider an auxiliary equation \begin{equation}\label{eq_ns_aux} L^{\varepsilon}g_\varepsilon -m\langle\rho\rangle g_\varepsilon=\langle\rho\rangle f. \end{equation} By Theorem \ref{T1} the functions $g_\varepsilon$ converge a.s. 
in $L^2(\mathbb R^d)$, as $\varepsilon\to0$, to a solution of the equation $\hat Lg -m\langle\rho\rangle g=\langle\rho\rangle f$. Our goal is to show that $\|g_\varepsilon-u_\varepsilon\|_{L^2(\mathbb R^d)}\to0$ as $\varepsilon\to0$. To this end we subtract equation \eqref{eq_modfd} from \eqref{eq_ns_aux}. After simple rearrangements this yields \begin{equation}\label{eq_ns_alpha} L^{\varepsilon}\alpha_\varepsilon -m\rho_\varepsilon \alpha_\varepsilon=\big(\langle\rho\rangle-\rho_\varepsilon\big)g_\varepsilon +\big(\langle\rho\rangle-\rho_\varepsilon\big) f. \end{equation} with $\alpha_\varepsilon(x)=g_\varepsilon(x)-u_\varepsilon(x)$. In a standard way one can derive the following estimate \begin{equation}\label{C_ns_pure} m \int\limits_{\mathbb R^d} (\alpha_\varepsilon(x))^2 dx+\frac{1}{\varepsilon^2} \int\limits_{\mathbb R^d} \int\limits_{\mathbb R^d} \, a (z) ( \alpha_\varepsilon (x-\varepsilon z) - \alpha_\varepsilon(x))^2 dz dx < C. \end{equation} As was shown in the proof of Lemma \ref{Convergence}, this estimate implies compactness of the family $\{\alpha_\varepsilon\}$ in $L^2(B)$ for any cube $B$. Multiplying \eqref{eq_ns_alpha} by $\alpha_\varepsilon$ and integrating the resulting relation over $\mathbb R^d$ we obtain \begin{equation}\label{al_al} \|\alpha_\varepsilon\|^2_{L^2(\mathbb R^d)}\leq C_1 \big|\big((\langle\rho\rangle-\rho_\varepsilon)g_\varepsilon, \alpha_\varepsilon\big)_{L^2(\mathbb R^d)}\big| +\big|\big((\langle\rho\rangle-\rho_\varepsilon) f,\alpha_\varepsilon\big)_{L^2(\mathbb R^d)}\big| \end{equation} By the Birkhoff ergodic theorem $(\langle\rho\rangle-\rho_\varepsilon)$ converges to zero weakly in $L^2_{\rm loc}(\mathbb R^d)$. Considering the boundedness of $(\langle\rho\rangle-\rho_\varepsilon)$ and the properties of $\alpha_\varepsilon$ and $g_\varepsilon$, we conclude that the both terms on the right-hand side in \eqref{al_al} tend to zero, as $\varepsilon\to0$. So does $\|\alpha_\varepsilon\|^2_{L^2(\mathbb R^d)}$. 
Therefore, $u_\varepsilon$ converges to the solution of the equation $\hat Lu -m\langle\rho\rangle u=\langle\rho\rangle f$. Dividing this equation by $\langle\rho\rangle$, we rewrite the limit equation as follows: $$ \Big(\mathbb E\big\{\frac{\bm\mu}{\bm\lambda}\big\}\Big)^{-1}Q_{ij}\frac{\partial^2 u}{\partial x_i\partial x_j}-mu=f $$ with the matrix $Q=\{Q_{ij}\}$ defined in \eqref{Positive}. This completes the proof of Corollary \ref{cor_main}. \noindent {\large \bf Acknowledgements}\\[2mm] The work on this project was completed during the visit of Elena Zhizhina at the Arctic University of Norway, campus Narvik. She expresses her gratitude to the colleagues at this university for their hospitality. \end{document}
\begin{document} \title{Existence of solution for Hilfer fractional differential equations with boundary value conditions \thanks{ Mathematics Subject Classifications: 34A08, 26A33, 34A12,34A40.}} \date{} \author{Mohammed S. Abdo\thanks{ Department of Mathematics, Dr.Babasaheb Ambedkar Marathwada University, Aurangabad, (M.S) \textrm{431001}, India}\ , Satish K. Panchal\thanks{ Department of Mathematics, Dr.Babasaheb Ambedkar Marathwada University, Aurangabad, (M.S) \textrm{431001}, India}\ , Sandeep P. Bhairat \thanks{ Faculty of Engineering Mathematics, Institute of Chemical Technology Mumbai, Marathwada Campus, Jalna (M.S), India. Corresponding author email: [email protected]}} \maketitle \begin{abstract} In this paper, we consider a class of nonlinear fractional differential equations involving the Hilfer derivative with boundary conditions. First, we obtain an equivalent integral equation for the given boundary value problem in the weighted space of continuous functions. Then we obtain the existence results for the given problem under a new approach and minimal assumptions on the nonlinear function $f$. The technique used in the analysis relies on a variety of tools including Schauder's, Schaefer's and Krasnosel'skii's fixed point theorems. We demonstrate our results through illustrative examples. \end{abstract} \section{Introduction} Fractional calculus (FC) is playing an ever more vital role in applied mathematics and engineering sciences, provoking a blurring of boundaries between scientific disciplines and the real world applications by a resurgence of interest in the modern as well as classical techniques of applied analysis, see \cite{rms,kd,rh,hi,kst}. The development of FC is a natural consequence of a high level of excitement on the research frontier in applied analysis. Fractional differential equations (FDEs) naturally occur in many situations and have been studied intensively with initial and boundary value conditions over the last three decades. 
The existence of a solution for such initial value problems (IVPs) and boundary value problems (BVPs) is crucial for further qualitative studies and applications. In recent years, an increasing interest in the analysis of Hilfer FDEs has been developed in the literature \cite{as,SP1,SPN,SP5,DB1,DBN,sp1,db,fk,hlt,rk,rc,sup,zrs,zt,dv,wz,zx}. We mention here some works on Hilfer fractional differential equations. One of the first works in this direction with an initial value condition was the paper by K. M. Furati et al. \cite{fk}. They studied the Hilfer FDE \begin{equation}\label{f1} D_{a^{+}}^{\alpha,\beta }y(x)=f \left(x,y(x)\right), \qquad x>a,\,0<\alpha<1,\,0\leq\beta\leq1, \end{equation} with the initial condition \begin{equation}\label{f2} I_{a^{+}}^{1-\gamma }y(a^+)=y_a, \quad y_a\in\mathbb{R}, \,\, \gamma=\alpha+\beta(1-\alpha), \end{equation} where $D_{a^{+}}^{\alpha,\beta}$ is the Hilfer fractional derivative of order $\alpha\in(0,1)$ and type $\beta\in[0,1],$ and $I_{a^{+}}^{1-\gamma}$ is the Riemann-Liouville fractional integral of order $1-\gamma.$ The existence and uniqueness of the solution to IVP \eqref{f1}-\eqref{f2} is proved in the weighted space of continuous functions by using the Banach fixed point theorem. For details, see \cite{fk,zt}. In the year 2015, J. Wang and Y. Zhang investigated the existence of a solution to the nonlocal IVP for Hilfer FDEs: \begin{align}\label{l1} D_{a^{+}}^{\alpha,\beta }u(t)&=f \left(t,u(t)\right), \qquad 0<\alpha<1, 0\leq\beta\leq1,\, t\in (a,b],\\ I_{a^{+}}^{1-\gamma }u(a^+)&=\sum_{i=1}^{m}\lambda_{i}u(\tau_{i}),\qquad \tau _{i}\in (a,b],\ \alpha\leq\gamma=\alpha+\beta-\alpha\beta.\label{l11} \end{align} For details, see \cite{wz}. Later, H. Gu and J. J. 
Trujillo \cite{ht} studied the existence of a mild solution of the Hilfer evolution equation: \begin{align}\label{l2} D_{0^{+}}^{\nu,\mu }x(t)&=Ax(t)+f\left(t,x(t)\right),\qquad 0<\nu<1, 0\leq\mu\leq1,\, t\in (0,b],\\ I_{0^{+}}^{1-\gamma}x(0)&=x_0,\qquad x_0\in X,\,\gamma=\nu+\mu-\nu\mu.\label{l22} \end{align} They utilized the method of noncompact measure and established sufficient conditions to ensure the existence of a mild solution to the Hilfer evolution IVP \eqref{l2}-\eqref{l22}. The state $x(t)$ takes values in a Banach space $X$ with the norm $|\cdot|$ and $A$ is the infinitesimal generator of a $C_0$ semigroup in $X.$ In 2016, in \cite{rc}, Rafa{\l} Kamocki et al. considered the fractional Cauchy problem involving the Hilfer derivative \begin{align}\label{l3} D_{a^{+}}^{\alpha,\beta }y(t)&=g \left(t,y(t)\right), \qquad 0<\alpha<1,0\leq\beta\leq1,t\in [a,b],b>a,\\ I_{a^{+}}^{1-\gamma }y(a)&=c,\qquad c\in {\mathbb{R}}^n, \gamma=\alpha+\beta-\alpha\beta,\label{l33} \end{align} and proved the existence and uniqueness of its solution in the space of continuous functions by using the Banach contraction theorem. They used the Bielecki norm without partitioning the interval and obtained solutions to both homogeneous and nonhomogeneous Cauchy problems. In the recent two years, a series of works on Hilfer FDEs has been published. S. Abbas et al. \cite{as} surveyed the existence and stability for Hilfer FDEs of the form: \begin{align}\label{l4} D_{0}^{\alpha,\beta}u(t)&=f\left(t,u(t),D_{0}^{\alpha,\beta}u(t)\right), \qquad 0<\alpha<1,0\leq\beta\leq1,t\in [0,\infty),\\ I_{0}^{1-\gamma}u(0)&=\phi,\qquad \phi\in {\mathbb{R}}, \gamma=\alpha+\beta-\alpha\beta,\label{l44} \end{align} with the uniform norm on the weighted space of bounded and continuous functions. They discussed existence, uniqueness and asymptotic stability of the solution to the IVP by using Schauder's fixed point theorem. 
Further, they obtained Ulam-type stabilities for Hilfer FDEs in Banach spaces using the measure of noncompactness and Monch's fixed point theorem. They also derived some results on the existence of weak solutions to \eqref{f1}-\eqref{f2}. Z. Gao and X. Yu \cite{zx} discussed the existence of a solution to Hilfer integral BVP for the relaxation FDEs: \begin{align}\label{l5} D_{0^{+}}^{\nu,\mu }x(t)&=cx(t)+f\left(t,x(t)\right),\qquad c<0,0<\nu<1, 0\leq\mu\leq1,t\in (0,b],\\ I_{0^{+}}^{1-\gamma }x(0^+)&=\sum_{i=1}^{m}\lambda_{i}x(\tau_{i}),\qquad \tau _{i}\in (0,b),\ 0\leq\gamma=\nu+\mu-\nu\mu, \label{l55} \end{align} By utilizing properties of Mittag-Leffler function and fixed point theory, they established three existence results for the solution of Hilfer integral BVP \eqref{l5}-\eqref{l55} similar to that of results in \cite{hlt}. Bhairat et al. in \cite{db} generalized IVP \eqref{f1}-\eqref{f2} for $\alpha\in(n-1,n).$ First, they derived equivalent integral representation in weighted space of continuous functions. Then by employing the method of successive approximations, the existence, uniqueness and continuous dependence of the solution are obtained. Further, in \cite{sp1}, Bhairat studied the singular IVP for Hilfer FDE: \begin{align}\label{l7} D_{a^+}^{\alpha,\beta}x(t)=f(t,x(t)),&\quad 0<\alpha<1,\, 0\leq\beta\leq1,\quad t>{a},\\ \displaystyle\lim_{t\to{a^{+}}}{(t-a)}^{1-\gamma}x(t)=x_0,&\qquad \gamma=\alpha+\beta(1-\alpha), \end{align} Using properties of Euler's beta, gamma functions and Picard's iterative technique, the existence and uniqueness of solution to the singular IVP were obtained. Some existence results for Hilfer-fractional implicit differential equation with nonlocal initial conditions can be found in \cite{as,dv}. 
Recently, Suphawat et al \cite{sup} studied the nonlocal BVP: \begin{equation}\label{l8} D^{\alpha,\beta}x(t)=f(t,x(t)),\quad 1<\alpha<2,\, 0\leq\beta\leq1,\quad t\in[a,b], \end{equation} with the integral boundary conditions \begin{equation}\label{l9} x(a)=0,\quad x(b)=\sum_{i=1}^{m}\delta_{i}I^{\phi_i}x(\xi_i),\qquad \phi_{i}>0,\,\delta_i\in\mathbb{R},\, \xi_i\in[a,b]. \end{equation} The Banach contraction mapping principle, Banach fixed point theorem with Holder inequality, nonlinear contractions, Krasnoselskii's fixed point theorem, nonlinear Leray-Schauder alternative are employed to prove the existence of the solution to integral BVP. Motivated by aforesaid works, in this paper, we consider the following BVP for a class of Hilfer FDEs: \begin{equation} D_{a^{+}}^{\alpha ,\beta }z(t)=f\big(t,z(t)\big),\text{ \ }0<\alpha <1,\,0\leq \beta \leq 1,t\in (a,b],\qquad \ \qquad \qquad \label{e8.1} \end{equation} \begin{equation} I_{a^{+}}^{1-\gamma }\big[cz(a^{+})+dz(b^{-})\big]=e_{k},\text{\ \ \ }\gamma =\alpha +\beta (1-\alpha ),\, e_k\in\mathbb{R}, \label{e8.2} \end{equation} where, $f:(a,b]\times \mathbb{R}\rightarrow \mathbb{R}$ be a function such that $ f(t,z)\in C_{1-\gamma }[a,b]$ for any $z\in C_{1-\gamma }[a,b]$ and $ c,d,e_{k}\in \mathbb{R}$. We obtain several existence results by Schauder's, Schaefer's and Krasnosel’ski's fixed point theorems. The paper is organized as follows: Some preliminary concepts related to our problem are listed in Section 2 which are useful in the sequel. In Section 3, we first establish an equivalent integral equation of BVP \eqref{e8.1}-\eqref{e8.2} and then study the existence results. Illustrative examples are provided in the last section. \section{Preliminaries} In this section, we list some definitions, lemmas and weighted spaces which are useful in the sequel. 
Let $-\infty <a<b<+\infty .$ Let $C[a,b]$, $AC[a,b]$ and $C^{n}[a,b]$ be the spaces of continuous, absolutely continuous, and $n$-times continuously differentiable functions on $[a,b],$ respectively. Here $ L^{p}(a,b),p\geq 1,$ is the space of Lebesgue integrable functions on $ (a,b). $ Furthermore, we recall the following weighted spaces \cite{fk}: \begin{gather*} C_{\gamma }[a,b]=\{g:(a,b]\rightarrow \mathbb{R}:(t-a)^{\gamma }g(t)\in C[a,b]\},\quad 0\leq \gamma <1, \\ C_{\gamma }^{n}[a,b]=\{g:(a,b]\rightarrow \mathbb{R},g\in C^{n-1}[a,b]:g^{(n)}(t)\in C_{\gamma }[a,b]\},\,n\in \mathbb{N}. \end{gather*} \begin{definition} (\cite{kst}) Let $g:[a,\infty )\rightarrow \mathbb{R}$ be a real-valued continuous function. The left sided Riemann-Liouville fractional integral of $g$ of order $\alpha >0$ is defined by \begin{equation} I_{a^{+}}^{\alpha }g(t)=\frac{1}{\Gamma (\alpha )}\int_{a}^{t}(t-s)^{\alpha -1}g(s)ds,\quad t>a, \label{d1} \end{equation} where $\Gamma (\cdot )$ is the Euler Gamma function and $a\in \mathbb{R}$, provided the right-hand side is pointwise defined on $(a,\infty ).$ \end{definition} \begin{definition} (\cite{kst}) Let $g:[a,\infty )\rightarrow \mathbb{R}$ be a real-valued continuous function. The left sided Riemann-Liouville fractional derivative of $g$ of order $\alpha >0$ is defined by \begin{equation} D_{a^{+}}^{\alpha }g(t)=\frac{1}{\Gamma (n-\alpha )}\frac{d^{n}}{dt^{n}} \int_{a}^{t}(t-s)^{n-\alpha -1}g(s)ds, \label{d2} \end{equation} where $n=[\alpha ]+1,$ and $[\alpha ]$ denotes the integer part of $\alpha .$ \end{definition} \begin{definition} \label{7} (\cite{rh}) The left sided Hilfer fractional derivative of a function $g\in L^{1}(a,b)$ of order $0<\alpha <1$ and type $0\leq \beta \leq 1$ is denoted as $D_{a^{+}}^{\alpha ,\beta }$ and defined by \begin{equation} D_{a^{+}}^{\alpha ,\beta }g(t)=I_{a^{+}}^{\beta (1-\alpha )}DI_{a^{+}}^{(1-\beta )(1-\alpha )}g(t),\text{ }D=\frac{d}{ dt}. 
\label{d3} \end{equation} where $I_{a^{+}}^{\alpha }$ and $D_{a^{+}}^{\alpha }$ are Riemann-Liouville fractional integral and derivative defined by \eqref{d1} and \eqref{d2}, respectively. \end{definition} \begin{remark} \label{rem8.a} From Definition \ref{7}, we observe that: \begin{itemize} \item[(i)] The operator $D_{a^{+}}^{\alpha ,\beta }$ can be written as \begin{equation*} D_{a^{+}}^{\alpha ,\beta }=I_{a^{+}}^{\beta (1-\alpha )}DI_{a^{+}}^{(1-\gamma )}=I_{a^{+}}^{\beta (1-\alpha )}D^{\gamma },~~~~~~~~\gamma =\alpha +\beta -\alpha \beta \text{.} \end{equation*} \item[(ii)] The Hilfer fractional derivative can be regarded as an interpolator between the Riemann-Liouville derivative ($\beta =0$) and Caputo derivative ($\beta =1$) as \begin{equation*} D_{a^{+}}^{\alpha ,\beta }= \begin{cases} DI_{a^{+}}^{(1-\alpha )}=~D_{a^{+}}^{\alpha },~~~~~~~~~~if~\beta =0; \\ I_{a^{+}}^{(1-\alpha )}D=~^{c}D_{a^{+}}^{\alpha },~~~~~~~~if~\beta =1. \end{cases} \end{equation*} \item[(iii)] In particular, if $0<\alpha <1,$ $0\leq \beta \leq 1$ and $ \gamma =\alpha +\beta -\alpha \beta ,$ then \begin{equation*} (D_{a^{+}}^{\alpha ,\beta }g)(t)=\Big(I_{a^{+}}^{\beta (1-\alpha )}\frac{d}{ dt}\Big(I_{a^{+}}^{(1-\beta )(1-\alpha )}g\Big)\Big)(t). \end{equation*} One has, \begin{equation*} (D_{a^{+}}^{\alpha ,\beta }g)(t)=\Big(I_{a^{+}}^{\beta (1-\alpha )}\Big( D_{a^{+}}^{\gamma }g\Big)\Big)(t), \end{equation*} where $\Big(D_{a^{+}}^{\gamma }g\Big)(t)=\frac{d}{dt}\Big( I_{a^{+}}^{(1-\beta )(1-\alpha )}g\Big)(t).$ \end{itemize} \end{remark} \begin{definition} (\cite{fk}) Let $0<\alpha <1,0\leq \beta \leq 1,$ the weighted space $ C_{1-\gamma }^{\alpha ,\beta }[a,b]$ is defined by \begin{equation} C_{1-\gamma }^{\alpha ,\beta }[a,b]=\big\{g\in {C_{1-\gamma }[a,b]} :D_{a^{+}}^{\alpha ,\beta }g\in {C_{1-\gamma }[a,b]}\big\},\quad \gamma =\alpha +\beta (1-\alpha ). 
\label{w1} \end{equation} Clearly, $D_{a^{+}}^{\alpha ,\beta }g=I_{a^{+}}^{\beta (1-\alpha )}D_{a^{+}}^{\gamma }g$ and $C_{1-\gamma }^{\gamma }[a,b]\subset C_{1-\gamma }^{\alpha ,\beta }[a,b],\,\gamma =\alpha +\beta -\alpha \beta $, $ 0<\alpha <1,0\leq \beta \leq 1.$ Consider the space $C_{\gamma }^{0}[a,b]$ with the norm \begin{equation} {\Vert g\Vert }_{C_{\gamma }^{n}}=\sum_{k=0}^{n-1}{\Vert g^{(k)}\Vert }_{C}+{ \Vert g^{(n)}\Vert }_{C_{\gamma }}. \label{n1} \end{equation} \end{definition} \begin{lemma} \label{def8.5} (\cite{kd}) If $\alpha >0$ and $\beta >0,$ and $g\in L^{1}(a,b)$ for $t\in \lbrack a,b]$, then \newline $\Big(I_{a^{+}}^{\alpha }I_{a^{+}}^{\beta }g\Big)(t)=\Big(I_{a^{+}}^{\alpha +\beta }g\Big)(t)$ and $\Big(D_{a^{+}}^{\alpha }I_{a^{+}}^{\beta }g\Big) (t)=g(t).$\newline In particular, if $f\in C_{\gamma }[a,b]$ or $f\in C[a,b]$, then the above properties hold for each $t\in (a,b]$ or $t\in \lbrack a,b]$ respectively. \end{lemma} \begin{lemma} \label{Le1}(\cite{fk}) For $t>a,$ we have \begin{description} \item[(i)] $I_{a^{+}}^{\alpha }(t-a)^{\beta -1}=\frac{\Gamma (\beta )}{ \Gamma (\beta +\alpha )}(t-a)^{\beta +\alpha -1},\quad \alpha \geq 0,\beta >0.$\newline \item[(ii)] $D_{a^{+}}^{\alpha }(t-a)^{\alpha -1}=0,\quad \alpha \in (0,1).$ \end{description} \end{lemma} \begin{lemma} \label{def8.8} (\cite{fk}) Let $\alpha >0$, $\beta >0$ and $\gamma =\alpha +\beta -\alpha \beta .$ If $g\in C_{1-\gamma }^{\gamma }[a,b]$, then \newline \begin{equation*} I_{a^{+}}^{\gamma }D_{a^{+}}^{\gamma }g=I_{a^{+}}^{\alpha }D_{a^{+}}^{\alpha ,\beta }g,~D_{a^{+}}^{\gamma }I_{a^{+}}^{\alpha }g=D_{a^{+}}^{\beta (1-\alpha )}g. 
\end{equation*} \end{lemma} \begin{lemma} \label{Le2} (\cite{fk}) Let $0<\alpha <1,$ $0\leq \beta \leq 1$ and $g\in C_{1-\gamma }[a,b].$ Then \begin{equation*} I_{a^{+}}^{\alpha }D_{a^{+}}^{\alpha ,\beta }g(t)=g(t)-\frac{ (t-a)^{\alpha +\beta (1-\alpha )-1}}{\Gamma (\alpha +\beta (1-\alpha ))} I_{a^{+}}^{(1-\beta )(1-\alpha )}g(a),\quad \text{for all}\quad t\in (a,b], \end{equation*} Moreover, if $\ \gamma =\alpha +\beta -\alpha \beta ,$ $g\in C_{1-\gamma }[a,b]$ and $I_{a^{+}}^{1-\gamma }g\in C_{1-\gamma }^{1}[a,b],$ then \begin{equation*} I_{a^{+}}^{\gamma }D_{a^{+}}^{\gamma }g(t)=g(t)-\frac{ (t-a)^{\gamma -1}}{\Gamma (\gamma)}I_{a^{+}}^{1-\gamma }g(a),\quad \text{for all}\quad t\in (a,b]. \end{equation*} \end{lemma} \begin{lemma} \label{def8.7} (\cite{fk}) If $0\leq \gamma <1$ and $g\in C_{\gamma }[a,b]$, then \begin{equation*} (I_{a^{+}}^{\alpha }g)(a)=\lim_{t\rightarrow a^{+}}I_{a^{+}}^{\alpha }g(t)=0,~0\leq \gamma <\alpha . \end{equation*} \end{lemma} \begin{lemma} \label{le}(\cite{fk}) Let $\gamma =\alpha +\beta -\alpha \beta $ where $ 0<\alpha <1$ and $0\leq \beta \leq 1.$ Let $f:(a,b]\times \mathbb{R}\rightarrow \mathbb{R}$ be a function such that $f(t,z)\in C_{1-\gamma }[a,b]$ for any $z\in C_{1-\gamma }[a,b].$ If $z\in C_{1-\gamma }^{\gamma }[a,b],$ then $z$ satisfies IVP \eqref{f1}-\eqref{f2} if and only if $z$ satisfies the Volterra integral equation \begin{equation} z(t)=\frac{y_a}{\Gamma (\gamma)}(t-a)^{\gamma -1}+\frac{ 1}{\Gamma (\alpha )}\int_{a}^{t}(t-s)^{\alpha -1}f(s,z(s))ds,\quad t>a. 
\label{s3} \end{equation} \end{lemma} \section{Existence of solution} In this section we prove the existence of solution to BVP \eqref{e8.1}-\eqref{e8.2} in $C_{1-\gamma }^{\alpha ,\beta }[a,b].$ \begin{lemma} \label{lee(1)} Let $0<\alpha <1$, $0\leq \beta \leq 1$ where $ \gamma =\alpha +\beta -\alpha \beta $, and $f:(a,b]\times \mathbb{R} \rightarrow \mathbb{R}$ be a function such that $f(x,z)\in C_{1-\gamma }[a,b] $ for any $z\in C_{1-\gamma }[a,b].$ If $z\in C_{1-\gamma }^{\gamma }[a,b],$ then $z$ satisfies BVP \eqref{e8.1}-\eqref{e8.2} if and only if $z$ satisfies the integral equation$-$ \begin{eqnarray} z(t) &=&\frac{(t-a)^{\gamma -1}}{\Gamma (\gamma)}\frac{ e_{k}}{d\left( 1+\frac{c}{d}\right) }-\frac{1}{\left( 1+\frac{c}{d}\right) } \frac{(t-a)^{\gamma -1}}{\Gamma (\gamma)} \notag \\ &&\times \frac{1}{\Gamma (1-\gamma +\alpha )}\int_{a}^{b}(b-s)^{\alpha-\gamma}f(s,z(s))ds \notag \\ &&+\frac{1}{\Gamma (\alpha )}\int_{a}^{t}(t-s)^{\alpha -1}f(s,z(s))ds. \label{ee3} \end{eqnarray} \end{lemma} Proof: \ In view of Lemma \ref{le}, the solution of \eqref{e8.1} can be written as \begin{equation} z(t)=\frac{I_{a^{+}}^{1-\gamma }z(a^{+})}{\Gamma (\gamma)} (t-a)^{\gamma -1}+\frac{1}{\Gamma (\alpha )}\int_{a}^{t}(t-s)^{\alpha -1}f(s,z(s))ds,\quad t>a. \label{e8.3} \end{equation} \ Applying $I_{a^{+}}^{1-\gamma }$ on both sides of \eqref{e8.3} and taking the limit $t\rightarrow b^{-}$, we obtain \begin{equation} I_{a^{+}}^{1-\gamma }z(b^{-})=I_{a^{+}}^{1-\gamma }z(a^{+})+ \frac{1}{\Gamma (1-\gamma +\alpha )}\int_{a}^{b}(b-s)^{\alpha-\gamma}f(s,z(s))ds. \label{e8.4} \end{equation} Also again by applying $I_{a^{+}}^{1-\gamma }$ on both sides of \eqref{e8.3} , we have \begin{eqnarray*} I_{a^{+}}^{1-\gamma }z(t) &=&I_{a^{+}}^{1-\gamma }z(a^{+})+ \frac{1}{\Gamma (1-\gamma +\alpha )}\int_{a}^{t}(t-s)^{\alpha-\gamma}f(s,z(s))ds \\ &=&I_{a^{+}}^{1-\gamma }z(a^{+})+I_{a^{+}}^{1-\beta (1-\alpha )}f(t,z(t)). 
\end{eqnarray*} Taking the limit $t\rightarrow a^{+}$\ and using Lemma \ref{def8.7} with $ 1-\gamma <1-\beta (1-\alpha ),$\ we obtain \begin{equation} I_{a^{+}}^{1-\gamma }z(a^{+})=I_{a^{+}}^{1-\gamma }z(a^{+}), \label{e8.4a} \end{equation} hence \begin{equation} I_{a^{+}}^{1-\gamma }z(b^{-})=I_{a^{+}}^{1-\gamma }z(a^{+})+\frac{1}{\Gamma (1-\gamma +\alpha )}\int_{a}^{b}(b-s)^{-\gamma +\alpha}f(s,z(s))ds. \label{e8.4b} \end{equation} From the boundary condition (\ref{e8.2}), we have \ \begin{equation} I_{a^{+}}^{1-\gamma }z(b^{-})=\frac{e_{k}}{d}-\frac{c}{d}I_{a^{+}}^{1-\gamma }z(a^{+}). \label{e3} \end{equation} Comparing the equations (\ref{e8.4b}) and (\ref{e3}), and using (\ref{e8.4a} ), we get \begin{equation} I_{a^{+}}^{1-\gamma }z(a^{+})=\frac{1}{\left( 1+\frac{c}{d}\right) }\left( \frac{e_{k}}{d}-\frac{1}{\Gamma (1-\gamma +\alpha )}\int_{a}^{b}(b-s)^{ \alpha-\gamma}f(s,z(s))ds\right) . \label{e4} \end{equation} Submitting \eqref{e8.3} into \eqref{e4}, we obtain \begin{eqnarray} z(t) &=&\frac{(t-a)^{\gamma -1}}{\Gamma (\gamma)}\frac{ e_{k}}{d\left( 1+\frac{c}{d}\right) }-\frac{1}{\left( 1+\frac{c}{d}\right) } \frac{(t-a)^{\gamma -1}}{\Gamma (\gamma)} \notag \\ &&\times \frac{1}{\Gamma (1-\gamma +\alpha )}\int_{a}^{b}(b-s)^{\alpha-\gamma}f(s,z(s))ds \notag \\ &&+\frac{1}{\Gamma (\alpha )}\int_{a}^{t}(t-s)^{\alpha -1}f(s,z(s))ds. 
\label{E5} \end{eqnarray} \ Conversely, applying $I_{a^{+}}^{1-\gamma }$ on both sides of \eqref{ee3}, using Lemmas \ref{Le1} and \ref{def8.5}, with some simple calculations, we get \begin{eqnarray*} &&I_{a^{+}}^{1-\gamma }cz(a^{+})+I_{a^{+}}^{1-\gamma }dz(b^{-}) \\ &=&\frac{c}{\left( 1+\frac{c}{d}\right) }\left( \frac{e_{k}}{d}-\frac{1}{ \Gamma (1-\gamma +\alpha )}\int_{a}^{b}(b-s)^{\alpha-\gamma}f(s,z(s))ds\right) \\ &&+\frac{d}{\left( 1+\frac{c}{d}\right) }\left( \frac{e_{k}}{d}-\frac{1}{ \Gamma (1-\gamma +\alpha )}\int_{a}^{b}(b-s)^{\alpha-\gamma}f(s,z(s))ds\right) \\ &&+\frac{d}{\Gamma (1-\gamma +\alpha )}\int_{a}^{b}(b-s)^{\alpha-\gamma}f(s,z(s))ds \\ &=&\frac{ce_{k}}{\left( 1+\frac{c}{d}\right) d}+\frac{de_{k}}{\left( 1+\frac{ c}{d}\right) d}-\left( \frac{c}{\left( 1+\frac{c}{d}\right) }+\frac{d}{ \left( 1+\frac{c}{d}\right) }-d\right) \\ &&\frac{1}{\Gamma (1-\gamma +\alpha )}\int_{a}^{b}(b-s)^{\alpha-\gamma}f(s,z(s))ds \\ &=&e_{k}. \end{eqnarray*} \ Which shows that the boundary condition (\ref{e8.2}) is satisfied. \newline Next, applying $D_{a^{+}}^{\gamma }$ on both sides of \eqref{ee3} and using Lemmas \ref{Le1} and \ref{def8.8}, we have \begin{equation} D_{a^{+}}^{\gamma }z(t)=D_{a^{+}}^{\beta (1-\alpha )}f\big(t,z(t)\big). \label{e8.9} \end{equation} Since $z\in C_{1-\gamma }^{\gamma }[a,b]$ and by definition of $C_{1-\gamma }^{\gamma }[a,b]$, we have $D_{a^{+}}^{\gamma }z\in C_{n-\gamma }[a,b]$, therefore, $D_{a^{+}}^{\beta (1-\alpha )}f=DI_{a^{+}}^{1-\beta (1-\alpha )}f\in C_{1-\gamma }[a,b].$ For $f\in C_{1-\gamma }[a,b]$, it is clear that $ I_{a^{+}}^{1-\beta (1-\alpha )}f\in C_{1-\gamma }[a,b]$. Hence $f$ and $ I_{a^{+}}^{1-\beta (1-\alpha )}f$ satisfy the hypothesis of Lemma \ref{Le2}. \newline Now, applying $I_{a^{+}}^{\beta (1-\alpha )}$ on both sides of \eqref{e8.9}, we have \begin{equation*} {\large I_{a^{+}}^{\beta (1-\alpha )}}D_{a^{+}}^{\gamma }z(t)={\large I_{a^{+}}^{\beta (1-\alpha )}}D_{a^{+}}^{\beta (1-\alpha )}f\big(t,z(t)\big). 
\end{equation*} Using Remark~\ref{rem8.a} (i), Eq.(\ref{e8.9}) and Lemma \ref{Le2}, we get \begin{equation*} I_{a^{+}}^{\gamma }D_{a^{+}}^{\gamma }z(t)=f\big(t,z(t)\big)- \frac{I_{a^{+}}^{1-\beta (1-\alpha )}f\big(a,z(a)\big)}{\Gamma (\beta (1-\alpha ))}(t-a)^{\beta (1-\alpha )-1},\quad \text{for all}\quad t\in (a,b]. \end{equation*} \ By Lemma \ref{def8.7}, we have $I_{a^{+}}^{1-\beta (1-\alpha )}f\big(a,z(a) \big)=0$. Therefore, we have $D_{a^{+}}^{\alpha ,\beta }z(t)=f\big(t,z(t) \big)$. This completes the proof. Let us introduce the hypotheses needed to prove the existence of solutions for the problem at hand. \begin{itemize} \item[ (H1)] $f:(a,b]\times \mathbb{R} \rightarrow \mathbb{R} $ is a function such that $f(\cdot ,z(\cdot ))\in C_{1-\gamma }^{\beta (1-\alpha )}[a,b]$ for any $z\in C_{1-\gamma }[a,b]$ and there exist two constants $N,\zeta >0$ such that \begin{equation*} \left\vert f\big(t,z\big)\right\vert \leq N\big(1+\zeta \left\Vert z\right\Vert _{C_{1-\gamma }}\big). \end{equation*} \item[ (H2)] The inequality \begin{equation} \mathcal{G}:=\frac{\Gamma (\gamma)}{\Gamma (\alpha +1)}\left[ (b-a)^{\alpha }+(b-a)^{\alpha +1-\gamma }\right] N\zeta <1 \label{rr1} \end{equation} holds. \end{itemize} Now, we are ready to present the existence result for the BVP \eqref{e8.1}-\eqref{e8.2}, which is based on Schauder's fixed point theorem (see \cite{gd}). \begin{theorem} \label{th8.1} Assume that the hypotheses (H1) and (H2) are satisfied. Then the Hilfer boundary value problem \eqref{e8.1}-\eqref{e8.2} has at least one solution in $C_{1-\gamma }^{\gamma }[a,b]\subset C_{1-\gamma }^{\alpha ,\beta }[a,b]$. 
\end{theorem} \begin{proof} Define the operator ${\large \mathcal{T}}:C_{1-\gamma }[a,b]\longrightarrow C_{1-\gamma }[a,b]$ by \begin{eqnarray} \left( {\large \mathcal{T}}z\right) (t) &=&\frac{(t-a)^{\gamma-1}}{\Gamma (\gamma)}\frac{e_{k}}{d\left( 1+\frac{c}{d}\right) }-\frac{1 }{\left( 1+\frac{c}{d}\right) }\frac{(t-a)^{\gamma -1}}{\Gamma (\gamma)} \notag \\ &&\times \frac{1}{\Gamma (1-\gamma +\alpha )}\int_{a}^{b}(b-s)^{-\gamma +\alpha}f(s,z(s))ds \notag \\ &&+\frac{1}{\Gamma (\alpha )}\int_{a}^{t}(t-s)^{\alpha -1}f(s,z(s))ds. \label{e8.10} \end{eqnarray} Let $\mathbb{B}_{r}=\left\{ z\in C_{1-\gamma }[a,b]:\left\Vert z\right\Vert _{C_{1-\gamma }}\leq r\right\} $ with ${\large r\geq }\frac{\Omega }{1- \mathcal{G}},$ for $\mathcal{G<}1,$ where \begin{eqnarray*} \Omega &:&=\bigg[\frac{1}{\Gamma (\gamma)} \frac{e_{k}}{d\left( 1+\frac{c}{d}\right) }+\left\vert \frac{1}{1+\frac{c}{d} }\right\vert \frac{1}{\Gamma (\gamma)} \\ &&\times \left[ \frac{(b-a)^{\alpha +1-\gamma }}{\Gamma (2-\gamma +\alpha) }+\frac{(b-a)^{2\alpha +1-\gamma }}{\Gamma (\alpha +1)}\right] N\bigg]. \end{eqnarray*} The proof will be given by the following three steps: Step1: We show that $\mathcal{T}(\mathbb{B}_{r})\subset \mathbb{B}_{r}$. 
By hypothesis (H1), we have \begin{align*} & \left\vert (\mathcal{T}z)(t)(t-a)^{1-\gamma }\right\vert \\ & \leq \left\vert \frac{1}{\Gamma (\gamma)} \frac{e_{k}}{d\left( 1+\frac{c}{d}\right) }\right\vert \\ & +\left\vert \frac{1}{1+\frac{c}{d}}\frac{1}{\Gamma (\gamma)}\right\vert \frac{1}{\Gamma (1-\gamma +\alpha )} \int_{a}^{b}(b-s)^{-\gamma +\alpha}N(1+\zeta \left\vert z\right\vert )ds \\ & +\frac{\left\vert (t-a)^{1-\gamma }\right\vert }{\Gamma (\alpha )} \int_{a}^{t}(t-s)^{\alpha -1}N(1+\zeta \left\vert z\right\vert )ds \\ & \leq \frac{1}{\Gamma (\gamma)}\frac{e_{k}}{ d\left( 1+\frac{c}{d}\right) }+\left\vert \frac{1}{1+\frac{c}{d}}\right\vert \frac{1}{\Gamma (\gamma)} \\ & \times \frac{1}{\Gamma (1-\gamma +\alpha )}\int_{a}^{b}(b-s)^{\alpha-\gamma}N(1+\zeta (s-a)^{\gamma -1}\Vert z\Vert _{C_{1-\gamma }})ds \\ & +\frac{\left\vert (t-a)^{1-\gamma }\right\vert }{\Gamma (\alpha )} \int_{a}^{t}(t-s)^{\alpha -1}N(1+\zeta (s-a)^{\gamma -1}\Vert z\Vert _{C_{1-\gamma }})ds. \end{align*} Note that, for any $z\in \mathbb{B}_{r}$, and for each $t\in (a,b]$, we get \begin{eqnarray*} &&\frac{1}{\Gamma (1-\gamma +\alpha )}\int_{a}^{b}(b-s)^{-\gamma +\alpha}N(1+\zeta (s-a)^{\gamma -1}{\large \Vert z\Vert _{C_{1-\gamma }}})ds \\ &\leq &\left[ \frac{(b-a)^{1-\gamma }}{\Gamma (2-\gamma +\alpha )}+\frac{ \zeta r\Gamma (\gamma)}{\Gamma (\alpha +1)}\right] N(b-a)^{\alpha }, \end{eqnarray*} and \begin{eqnarray*} &&\frac{\left\vert (t-a)^{1-\gamma }\right\vert }{\Gamma (\alpha )} \int_{a}^{t}(t-s)^{\alpha -1}N(1+\zeta (s-a)^{\gamma -1}{\large \Vert z\Vert _{C_{1-\gamma }}})ds \\ &\leq &\left[ \frac{(t-a)^{\alpha }}{\Gamma (\alpha +1)}+\frac{\zeta r\Gamma (\gamma)}{\Gamma (\alpha +1)}\right] N(t-a)^{\alpha +1-\gamma }. 
\end{eqnarray*} Hence \begin{eqnarray*} \left\vert (\mathcal{T}z)(t)(t-a)^{1-\gamma }\right\vert &\leq &\frac{1}{\Gamma (\gamma)}\frac{e_{k}}{d\left( 1+\frac{c}{d}\right) }+\left\vert \frac{1}{1+\frac{c}{d}}\right\vert \frac{1}{\Gamma (\gamma)} \\ &&\times \left[ \frac{(b-a)^{1-\gamma }}{\Gamma (2-\gamma +\alpha)}+\frac{ \zeta r\Gamma (\gamma)}{\Gamma (\alpha +1)}\right] N(b-a)^{\alpha } \\ &&+\left[ \frac{(t-a)^{\alpha }}{\Gamma (\alpha +1)}+\frac{\zeta r\Gamma (\gamma)}{\Gamma (\alpha +1)}\right] N(t-a)^{\alpha +1-\gamma }, \end{eqnarray*} which yields \begin{eqnarray*} {\large \Vert \mathcal{T}z\Vert _{C_{1-\gamma }}} &\leq &\frac{ 1}{\Gamma (\gamma)}\frac{e_{k}}{d\left( 1+\frac{c}{d}\right) } +\left\vert \frac{1}{1+\frac{c}{d}}\right\vert\frac{ 1}{\Gamma (\gamma)} \\ &&\times \bigg[\frac{(b-a)^{\alpha +1-\gamma }}{\Gamma (2-\gamma +\alpha)} +\frac{(b-a)^{2\alpha +1-\gamma }}{\Gamma (\alpha +1)}\bigg]N \\ &&+\frac{N\zeta r\Gamma (\gamma)}{\Gamma (\alpha +1)}\bigg[ (b-a)^{\alpha }+(b-a)^{\alpha +1-\gamma }\bigg] \end{eqnarray*} In the light of hypothesis (H2) and definition of $r$, we get $\Vert {\large \mathcal{T}}z\Vert _{C_{1-\gamma }}\leq \mathcal{G}r+(1-\mathcal{G})r=r,$ that is, ${\large \mathcal{T(}}\mathbb{B}_{r})\subset \mathbb{B}_{r}.$ We shall prove that ${\large \mathcal{T}}$ is completely continuous. Step 2. The operator ${\large \mathcal{T}}$ is continuous. Suppose that $ \{z_{n}\}$ is a sequence such that $z_{n}\rightarrow z$ in $\mathbb{B}_{r}$\ as $n\rightarrow \infty $. 
Then for each $t\in (a,b],$ we have \begin{eqnarray*} &&\left\vert \big((\mathcal{T}z_{n})(t)-(\mathcal{T}z)(t)\big) (t-a)^{1-\gamma }\right\vert \\ &=&\left\vert \frac{1}{1+\frac{c}{d}}\right\vert\frac{ 1}{\Gamma (\gamma)} \\ &&\times \frac{1}{\Gamma (1-\gamma +\alpha )}\int_{a}^{b}(b-s)^{-\gamma +\alpha}\left\vert f\big(s,z_{n}(s)\big)-f\big(s,z(s)\big)\right\vert ds \\ &&+\frac{(t-a)^{1-\gamma }}{\Gamma (\alpha )}\int_{a}^{t}(t-s)^{\alpha -1}\left\vert f\big(s,z_{n}(s)\big)-f\big(s,z(s)\big)\right\vert ds \\ &\leq &\left\vert \frac{1}{1+\frac{c}{d}}\right\vert\frac{ 1}{\Gamma (\gamma)} \\ &&\times \frac{\Gamma (\gamma)}{\Gamma (\alpha +1)}(b-a)^{\alpha }\left\Vert f\big(\cdot ,z_{n}(\cdot )\big)-f\big(\cdot ,z(\cdot )\big) \right\Vert _{C_{1-\gamma }} \\ &&+\frac{\Gamma (\gamma)(t-a)^{1-\gamma +\alpha }}{\Gamma (-\gamma+\alpha)}\left\Vert f\big(\cdot ,z_{n}(\cdot )\big)-f\big(\cdot ,z(\cdot )\big)\right\Vert _{C_{1-\gamma }}. \end{eqnarray*} \ Since $f$ is continuous on $(a,b]$, and $z_{n}\rightarrow z,$\ this implies \begin{equation*} \Vert (\mathcal{T}z_{n}-\mathcal{T}z)\Vert _{C_{1-\gamma }}\longrightarrow 0~~as~~n\longrightarrow \infty , \end{equation*} which means that operator $\mathcal{T}$ is continuous on $\mathbb{B}_{r}$. \newline Step 3. We prove that $\mathcal{T}(\mathbb{B}_{r})$ is relatively compact. From Step 1, we have ${\large \mathcal{T(}}\mathbb{B}_{r})\subset \mathbb{B} _{r}.$ It follows that ${\large \mathcal{T(}}\mathbb{B}_{r})$ is uniformly bounded. Moreover, we show that operator $\mathcal{T}$ is equicontinuous on $ \mathbb{B}_{r}$. 
Indeed,for any $a<t_{1}<t_{2}<b$ and $z\in \mathbb{B}_{r}$, we get \begin{eqnarray*} &&\left\vert (t_{2}-a)^{1-\gamma }\big({\large \mathcal{T}}z\big) (t_{2})-(t_{1}-a)^{1-\gamma }\big({\large \mathcal{T}}z\big) (t_{1})\right\vert \\ &\leq &\frac{\left\vert (t_{2}-a)^{^{n-k}}-(t_{1}-a)^{^{n-k}}\right\vert }{\Gamma (\gamma)} \frac{e_{k}}{d\left( 1+\frac{c}{d}\right) }+\left\vert \frac{1}{1+\frac{c}{d} }\right\vert \frac{\left\vert (t_{2}-a)^{^{n-k}}-(t_{1}-a)^{^{n-k}}\right\vert }{\Gamma (\gamma)} \\ &&\times \frac{1}{\Gamma (1-\gamma +\alpha )}\int_{a}^{b}(b-s)^{1-\gamma +\alpha -1}\left\vert f\big(s,z(s)\big)\right\vert ds \\ &&+\dfrac{1}{\Gamma (\alpha )}\left\vert (t_{2}-a)^{1-\gamma }\int_{a}^{t_{2}}(t_{2}-s)^{\alpha -1}f\big(s,z(s)\big)ds\right. \\ &&\left. -(t_{1}-a)^{1-\gamma }\int_{a}^{t_{1}}(t_{1}-s)^{\alpha -1}f\big( s,z(s)\big)ds\right\vert \\ &\leq &\frac{\left\vert (t_{2}-a)^{^{n-k}}-(t_{1}-a)^{^{n-k}}\right\vert }{\Gamma (\gamma)} \left[ \frac{e_{k}}{d\left( 1+\frac{c}{d}\right) }\right. \\ &&\left. +\left\vert \frac{1}{1+\frac{c}{d}}\right\vert \frac{\left\Vert f\right\Vert _{C_{1-\gamma }}}{\Gamma (1-\gamma +\alpha )} \int_{a}^{b}(b-s)^{-\gamma +\alpha}(s-a)^{\gamma -1}ds\right] \\ &&+\dfrac{\Vert f\Vert _{C_{1-\gamma }}}{\Gamma (\alpha )}\left\vert (t_{2}-a)^{1-\gamma }\int_{a}^{t_{2}}(t_{2}-s)^{\alpha -1}(s-a)^{\gamma -1}ds\right. \\ &&\left. -(t_{1}-a)^{1-\gamma }\int_{a}^{t_{1}}(t_{1}-s)^{\alpha -1}(s-a)^{\gamma -1}ds\right\vert \\ &\leq &\frac{\left\vert (t_{2}-a)^{^{n-k}}-(t_{1}-a)^{^{n-k}}\right\vert }{\Gamma (\gamma)} \left[ \frac{e_{k}}{d\left( 1+\frac{c}{d}\right) }\right. \\ &&+\left. 
\left\vert \frac{1}{1+\frac{c}{d}}\right\vert \frac{\Gamma (\gamma)}{\Gamma (\alpha +1)}(b-a)^{\alpha }\left\Vert f\right\Vert _{C_{1-\gamma }}\right] \\ &&+\dfrac{\Vert f\Vert _{C_{1-\gamma }}}{\Gamma (\alpha )}\mathcal{B}(\gamma -n+1,\alpha )\left\vert (t_{2}-a)^{\alpha }-(t_{1}-a)^{\alpha }\right\vert \end{eqnarray*} which tends to zero as $t_{2}\rightarrow t_{1},$ independent of $z\in \mathbb{B}_{r}$, where $\mathcal{B(\cdot },\mathcal{\cdot )}$ is a Beta function. Thus we conclude that $\mathcal{T}(\mathbb{B}_{r})$ is equicontinuous on $\mathbb{B}_{r}$ and therefore $\mathcal{T}(\mathbb{B}_{r}) $ is relatively compact. As a consequence of Steps 1 to 3 together with Arzela-Ascoli theorem, we can conclude that $\mathcal{T}:\mathbb{B} _{r}\rightarrow \mathbb{B}_{r}$ is completely continuous operator. An application of Schauder's fixed point theorem shows that there exists at least a fixed point $z$ of $\mathcal{T}$ in $C_{1-\gamma }[a,b]$. This fixed point $z$ is the solution to (\ref{e8.1})-(\ref{e8.2}) in $C_{1-\gamma }^{\gamma }[a,b],$ and the proof is completed. \end{proof} We will study the next existence result by using Schaefer fixed point theorem. To this end, we change hypothesis (H1) into the following one: \begin{itemize} \item[ (H3)] $f:(a,b]\times \mathbb{R} \rightarrow \mathbb{R} $ is a function such that $f(\cdot ,z(\cdot ))\in C_{1-\gamma }^{\beta (1-\alpha )}[a,b]$ for any $z\in C_{1-\gamma }[a,b]$ and there exist a function $\eta (t)\in C_{1-\gamma }[a,b]$ such that \begin{equation*} \left\vert f\big(t,z\big)\right\vert \leq \eta (t),\text{ for all }t\in (a,b],\text{ }z\in \mathbb{R} . \end{equation*} \end{itemize} \begin{theorem} \label{th3.3} Assume that $\ $(H3) holds. Then Hilfer boundary value problem \eqref{e8.1}-\eqref{e8.2} has at least one solution in $C_{1-\gamma }^{\gamma }[a,b]\subset C_{1-\gamma }^{\alpha ,\beta }[a,b]$. 
\end{theorem} \begin{proof} As in the proof of Theorem \ref{th8.1}, one can repeat Steps 1 to 3 and show that the operator ${\large \mathcal{T}}:C_{1-\gamma }[a,b]\longrightarrow C_{1-\gamma }[a,b]$ defined in (\ref{e8.10}) is completely continuous. It remains to prove that \begin{equation*} \Delta =\left\{ z\in {\large C_{n-\gamma }[a,b]}:z=\lambda \mathcal{T}z, \text{ for some }\lambda \in (0,1)\right\} \end{equation*} is a bounded set. Let $z\in \Delta $ and $\lambda \in (0,1)$ be such that $ z=\lambda \mathcal{T}z.$ By hypothesis (H3) and Eq.(\ref{e8.10}), for all $t\in \lbrack a,b],$ we have \begin{align*} & \left\vert {\large \mathcal{T}z(t)(t-a)}^{n-\gamma }\right\vert \\ & \leq \sum_{k=1}^{n}\frac{(t-a)^{n-k}}{\Gamma (\gamma -k+1)}\frac{e_{k}}{ d\left( 1+\frac{c}{d}\right) } \\ & +\left\vert \frac{1}{1+\frac{c}{d}}\right\vert \sum_{k=1}^{n}\frac{ (t-a)^{n-k}}{\Gamma (\gamma -k+1)}\frac{1}{\Gamma (n-\gamma +\alpha )} \int_{a}^{b}(b-s)^{n-\gamma +\alpha -1}\eta (s)ds \\ & +\frac{\left\vert (t-a)^{n-\gamma }\right\vert }{\Gamma (\alpha )} \int_{a}^{t}(t-s)^{\alpha -1}\eta (s)ds \\ & \leq \sum_{k=1}^{n}\frac{(t-a)^{n-k}}{\Gamma (\gamma -k+1)}\frac{e_{k}}{ d\left( 1+\frac{c}{d}\right) } \\ & +\left\vert \frac{1}{1+\frac{c}{d}}\right\vert \sum_{k=1}^{n}\frac{ (t-a)^{n-k}}{\Gamma (\gamma -k+1)}\frac{1}{\Gamma (n-\gamma +\alpha )} \int_{a}^{b}(b-s)^{n-\gamma +\alpha -1}(s-a)^{\gamma -n}\Vert \mathcal{\eta } \Vert _{C_{n-\gamma }}ds \\ & +\frac{\left\vert (t-a)^{n-\gamma }\right\vert }{\Gamma (\alpha )} \int_{a}^{t}(t-s)^{\alpha -1}(s-a)^{\gamma -n}\Vert \mathcal{\eta }\Vert _{C_{n-\gamma }}ds. 
\end{align*} That is, \begin{eqnarray} &&\Vert \mathcal{T}z\Vert _{C_{n-\gamma }} \notag \\ &\leq &\sum_{k=1}^{n}\frac{(b-a)^{n-k}}{\Gamma (\gamma -k+1)}\frac{e_{k}}{ d\left( 1+\frac{c}{d}\right) } \notag \\ &&+\left[ \left\vert \frac{1}{1+\frac{c}{d}}\right\vert \sum_{k=1}^{n}\frac{ (b-a)^{-k}}{\Gamma (\gamma -k+1)}\frac{\Gamma (\alpha )}{\mathcal{B}(\alpha ,1)}+\frac{\mathcal{B}(\gamma -n+1,1)}{\Gamma (\alpha )(b-a)^{\gamma }}\right] (b-a)^{n+\alpha }\Vert \mathcal{\eta }\Vert _{C_{n-\gamma }} \notag \\ &:&=\ell . \label{ss3} \end{eqnarray} Since $\lambda \in (0,1)$, we have $\Vert z\Vert _{C_{n-\gamma }}<\Vert \mathcal{T}z\Vert _{C_{n-\gamma }}.$ This inequality together with Eq.(\ref{ss3}) leads us to \begin{equation*} \Vert z\Vert _{C_{n-\gamma }}<\Vert \mathcal{T}z\Vert _{C_{n-\gamma }}\leq \ell , \end{equation*} which proves that $\Delta $ is bounded. By using the Schaefer fixed point theorem, the proof can be completed. \end{proof} Finally, we present the existence result for the problem \eqref{e8.1}- \eqref{e8.2}, which is based on the Krasnosel'skii fixed point theorem. To this end, we change hypothesis (H1) into the following one: \begin{itemize} \item[ (H4)] $f:(a,b]\times \mathbb{R} \rightarrow \mathbb{R} $ is a function such that $f(\cdot ,z(\cdot ))\in C_{n-\gamma }^{\beta (n-\alpha )}[a,b]$ for any $z\in C_{n-\gamma }[a,b]$ and there exists a constant $L>0$ such that \begin{equation*} \left\vert f\big(t,z\big)-f\big(t,w\big)\right\vert \leq L\left\vert z-w\right\vert ,\text{ }\forall t\in (a,b],\text{ }z,w\in \mathbb{R} . \end{equation*} We also consider the following hypothesis: \item[ (H5)] The inequality \begin{eqnarray*} \mathcal{W} &:&=\bigg[\left\vert \frac{1}{1+\frac{c}{d}}\right\vert \sum_{k=1}^{n}\frac{(b-a)^{n-k}}{\Gamma (\gamma -k+1)}+\frac{\mathcal{B} (\gamma -n,\alpha +1)}{\Gamma (\gamma -n)}\bigg] \\ &&\times \frac{\Gamma (\gamma -n)(b-a)^{\alpha }}{\mathcal{B}(\gamma -n,1)\Gamma (\alpha +1)}L<1 \end{eqnarray*} holds. 
\end{itemize} \begin{theorem} \label{th3.4} Assume that hypotheses (H4) and (H5) are satisfied. If \begin{equation} \left\vert \frac{1}{1+\frac{c}{d}}\right\vert \sum_{k=1}^{n}\frac{ (b-a)^{n-k+\alpha }}{\Gamma (\gamma -k+1)}\frac{\Gamma (\gamma -n+1)}{\Gamma (\alpha +1)}L<1. \label{e2} \end{equation} Then Hilfer boundary value problem \eqref{e8.1}-\eqref{e8.2} has at least one solution in $C_{n-\gamma }^{\gamma }[a,b]\subset C_{n-\gamma }^{\alpha ,\beta }[a,b]$. \end{theorem} \begin{proof} Consider the operator ${\large \mathcal{T}}$ is defined as in Theorem \ref {th8.1}. Now, we need to analyze the operator ${\large \mathcal{T}}$ into sum two operators ${\large \mathcal{T}}_{1}+{\large \mathcal{T}}_{2}$ as follows \begin{equation*} {\large \mathcal{T}}_{1}z(t)=-\frac{1}{\left( 1+\frac{c}{d}\right) } \sum_{k=1}^{n}\frac{(t-a)^{\gamma -k}}{\Gamma (\gamma -k+1)}\frac{1}{\Gamma (n-\gamma +\alpha )}\int_{a}^{b}(b-s)^{n-\gamma +\alpha -1}f(s,z(s))ds \end{equation*} and \begin{equation*} {\large \mathcal{T}}_{2}z(t)=\sum_{k=1}^{n}\frac{(t-a)^{\gamma -k}}{\Gamma (\gamma -k+1)}\frac{e_{k}}{d\left( 1+\frac{c}{d}\right) }+\frac{1}{\Gamma (\alpha )}\int_{a}^{t}(t-s)^{\alpha -1}f(s,z(s))ds. \end{equation*} \ Set $\widetilde{f}=f(s,0)$ and consider the ball $\mathbb{B}_{\epsilon }=\{z\in C_{n-\gamma ;\psi }([a,b]:\left\Vert z\right\Vert _{C_{n-\gamma ;\psi }}\leq \epsilon \}\ $with $\epsilon \geq \frac{\Lambda }{1-\mathcal{W}} ,\mathcal{W<}1$ where \begin{eqnarray} \Lambda &=&\bigg[\left\vert \frac{1}{1+\frac{c}{d}}\right\vert \sum_{k=1}^{n}\frac{(b-a)^{n-k}}{\Gamma (\gamma -k+1)}+\frac{\mathcal{B} (\gamma -n,\alpha +1)}{\Gamma (\gamma -n)}\bigg] \notag \\ &&\times \frac{\Gamma (\gamma -n)(b-a)^{\alpha }}{\mathcal{B}(\gamma -n,1)\Gamma (\alpha +1)}\left\Vert \widetilde{f}\right\Vert _{C_{n-\gamma }}+\sum_{k=1}^{n}\frac{(b-a)^{n-k}}{\Gamma (\gamma -k+1)}\frac{e_{k}}{ d\left( 1+\frac{c}{d}\right) }. \label{E1} \end{eqnarray} The proof will be given in three stages. 
\textbf{Stage 1:} We prove that ${\large \mathcal{T}}_{1}z+{\large \mathcal{T }}_{2}w\in \mathbb{B}_{\epsilon }$ for every $z,w\in \mathbb{B}_{\epsilon }.$ By assumption (H4), for every $z\in \mathbb{B}_{\epsilon },$ and $t\in (a,b]$, we have \begin{eqnarray*} &&\left\vert (t-a)^{n-\gamma }{\large \mathcal{T}}_{1}z(t)\right\vert \\ &\leq &\left\vert \frac{1}{1+\frac{c}{d}}\right\vert \sum_{k=1}^{n}\frac{ (t-a)^{n-k}}{\Gamma (\gamma -k+1)}\frac{1}{\Gamma (n-\gamma +\alpha )} \\ &&\times \int_{a}^{b}(b-s)^{n-\gamma +\alpha -1}\bigg[\left\vert f(s,z(s))-f(s,0)\right\vert +\left\vert f(s,0)\right\vert \bigg]ds \\ &\leq &\left\vert \frac{1}{1+\frac{c}{d}}\right\vert \sum_{k=1}^{n}\frac{ (t-a)^{n-k}}{\Gamma (\gamma -k+1)}\frac{1}{\Gamma (n-\gamma +\alpha )} \\ &&\times \int_{a}^{b}(b-s)^{n-\gamma +\alpha -1}(s-a)^{\gamma -n}\bigg[ L\left\Vert z\right\Vert _{C_{n-\gamma }}+\left\Vert \widetilde{f} \right\Vert _{C_{n-\gamma }}\bigg]ds \\ &\leq &\left\vert \frac{1}{1+\frac{c}{d}}\right\vert \sum_{k=1}^{n}\frac{ (t-a)^{n-k}}{\Gamma (\gamma -k+1)}\frac{\Gamma (\gamma -n+1)}{\Gamma (\alpha +1)}(t-a)^{\alpha }\bigg[L\epsilon +\left\Vert \widetilde{f}\right\Vert _{C_{n-\gamma }}\bigg]. \end{eqnarray*} This gives \begin{eqnarray} &&\left\Vert {\large \mathcal{T}}_{1}z\right\Vert _{C_{n-\gamma }} \notag \\ &\leq &\left\vert \frac{1}{1+\frac{c}{d}}\right\vert \sum_{k=1}^{n}\frac{ (b-a)^{n-k+\alpha }}{\Gamma (\gamma -k+1)}\frac{\Gamma (\gamma -n+1)}{\Gamma (\alpha +1)}\bigg[L\epsilon +\left\Vert \widetilde{f}\right\Vert _{C_{n-\gamma }}\bigg]. 
\label{q1} \end{eqnarray} For operator ${\large \mathcal{T}}_{2}$, we have \begin{eqnarray*} &&\left\vert (t-a)^{n-\gamma }{\large \mathcal{T}}_{2}w(t)\right\vert \\ &\leq &\sum_{k=1}^{n}\frac{(t-a)^{n-k}}{\Gamma (\gamma -k+1)}\frac{e_{k}}{ d\left( 1+\frac{c}{d}\right) } \\ &&+\frac{(t-a)^{n-\gamma }}{\Gamma (\alpha )}\int_{a}^{t}(t-s)^{\alpha -1} \bigg[\left\vert f(s,w(s))-f(s,0)\right\vert +\left\vert f(s,0)\right\vert \bigg]ds \\ &\leq &\sum_{k=1}^{n}\frac{(t-a)^{n-k}}{\Gamma (\gamma -k+1)}\frac{e_{k}}{ d\left( 1+\frac{c}{d}\right) } \\ &&+\frac{(t-a)^{n-\gamma }}{\Gamma (\alpha )}\int_{a}^{t}(t-s)^{\alpha -1}(s-a)^{\gamma -n}\bigg[L\left\Vert w\right\Vert _{C_{n-\gamma }}+\left\Vert \widetilde{f}\right\Vert _{C_{n-\gamma }}\bigg]ds. \end{eqnarray*} For every $w\in \mathbb{B}_{\epsilon },$and $t\in (a,b]$, this gives \begin{eqnarray} \left\Vert {\large \mathcal{T}}_{2}w\right\Vert _{C_{n-\gamma }} &\leq &\sum_{k=1}^{n}\frac{(b-a)^{n-k}}{\Gamma (\gamma -k+1)}\frac{e_{k}}{d\left( 1+\frac{c}{d}\right) } \notag \\ &&+\frac{\Gamma (\gamma -n+1)(b-a)^{\alpha }}{\Gamma (\gamma -n+\alpha +1)} \bigg[L\epsilon +\left\Vert \widetilde{f}\right\Vert _{C_{n-\gamma }}\bigg]. 
\label{q2} \end{eqnarray} From Eqs.(\ref{q1}),(\ref{q2}), and using hypothesis (H5) with Eq.(\ref{E1} ), we get \begin{eqnarray*} &&\left\Vert {\large \mathcal{T}}_{1}z+{\large \mathcal{T}}_{2}w\right\Vert _{C_{n-\gamma }} \\ &\leq &\left\Vert {\large \mathcal{T}}_{1}z\right\Vert _{C_{n-\gamma }}+\left\Vert {\large \mathcal{T}}_{2}w\right\Vert _{C_{n-\gamma }} \\ &\leq &\bigg[\left\vert \frac{1}{1+\frac{c}{d}}\right\vert \sum_{k=1}^{n} \frac{(b-a)^{n-k}}{\Gamma (\gamma -k+1)}+\frac{\Gamma (\alpha +1)}{\Gamma (\gamma -n+\alpha +1)}\bigg] \\ &&\times \frac{\Gamma (\gamma -n+1)(b-a)^{\alpha }}{\Gamma (\alpha +1)}\bigg[ L\epsilon +\left\Vert \widetilde{f}\right\Vert _{C_{n-\gamma }}\bigg] \\ &&+\sum_{k=1}^{n}\frac{(b-a)^{n-k}}{\Gamma (\gamma -k+1)}\frac{e_{k}}{ d\left( 1+\frac{c}{d}\right) } \\ &\leq &\mathcal{W}\epsilon +(1-\mathcal{W})\epsilon =\epsilon . \end{eqnarray*} This proves that ${\large \mathcal{T}}_{1}z+{\large \mathcal{T}}_{2}w\in \mathbb{B}_{\epsilon }$ for every $z,w\in \mathbb{B}_{\epsilon }.$ \textbf{Stage 2:} We prove that the operator ${\large \mathcal{T}}_{1}$ is a contraction mapping on $\mathbb{B}_{\epsilon }.$ For any $z,w\in \mathbb{B}_{\epsilon },$ and for $t\in (a,b],$ by assumption (H4), we have \begin{eqnarray*} &&\left\vert (t-a)^{n-\gamma }{\large \mathcal{T}}_{1}z(t)-(t-a)^{n-\gamma } {\large \mathcal{T}}_{1}w(t)\right\vert \\ &\leq &\left\vert \frac{1}{1+\frac{c}{d}}\right\vert \sum_{k=1}^{n}\frac{ (t-a)^{n-k}}{\Gamma (\gamma -k+1)}\frac{1}{\Gamma (n-\gamma +\alpha )} \\ &&\times \int_{a}^{b}(b-s)^{n-\gamma +\alpha -1}\bigg[\left\vert f(s,z(s))-f(s,w(s))\right\vert \bigg]ds \\ &\leq &\left\vert \frac{1}{1+\frac{c}{d}}\right\vert \sum_{k=1}^{n}\frac{ (t-a)^{n-k}}{\Gamma (\gamma -k+1)} \\ &&\times \frac{1}{\Gamma (n-\gamma +\alpha )}\int_{a}^{b}(b-s)^{n-\gamma +\alpha -1}L\left\vert z(s)-w(s))\right\vert ds \\ &\leq &\left\vert \frac{1}{1+\frac{c}{d}}\right\vert \sum_{k=1}^{n}\frac{ (t-a)^{n-k}}{\Gamma (\gamma -k+1)}\frac{\Gamma (\gamma -n+1)}{\Gamma (\alpha 
+1)}(b-a)^{\alpha }L\left\Vert z-w\right\Vert _{C_{n-\gamma }}. \end{eqnarray*} This gives \begin{eqnarray*} &&\left\Vert {\large \mathcal{T}}_{1}z-{\large \mathcal{T}}_{1}w\right\Vert _{C_{n-\gamma }} \\ &\leq &\left\vert \frac{1}{1+\frac{c}{d}}\right\vert \sum_{k=1}^{n}\frac{ (b-a)^{n-k+\alpha }}{\Gamma (\gamma -k+1)}\frac{\Gamma (\gamma -n+1)}{\Gamma (\alpha +1)}L\left\Vert z-w\right\Vert _{C_{n-\gamma }}. \end{eqnarray*} The operator ${\large \mathcal{T}}_{1}$ is a contraction mapping due to assumption (\ref{e2}). \textbf{Stage 3:} We show that the operator ${\large \mathcal{T}}_{2}$ is completely continuous on $\mathbb{B}_{\epsilon }.$ Firstly, from the continuity of $f$, we conclude that the operator ${\large \mathcal{T}}_{2}:\mathbb{B}_{\epsilon }\rightarrow \mathbb{B}_{\epsilon }$ is well defined, i.e., ${\large \mathcal{T}}_{2}$ is continuous on $\mathbb{B}_{\epsilon }$. Next, we show that for all $\epsilon >0,$ there exists some $\epsilon ^{^{\prime }}>0$ such that $\left\Vert {\large \mathcal{T}}_{2}z\right\Vert _{C_{n-\gamma }}\leq \epsilon ^{^{\prime }}.$ According to Stage 1, for $ z\in \mathbb{B}_{\epsilon },$ we know that \begin{eqnarray*} \left\Vert {\large \mathcal{T}}_{2}z\right\Vert _{C_{n-\gamma }} &\leq &\sum_{k=1}^{n}\frac{(b-a)^{n-k}}{\Gamma (\gamma -k+1)}\frac{e_{k}}{d\left( 1+\frac{c}{d}\right) } \\ &&+\mathcal{B}(\gamma -n+1,\alpha )\frac{(b-a)^{\alpha }}{\Gamma (\alpha )} \bigg[L\epsilon +\left\Vert \widetilde{f}\right\Vert _{C_{n-\gamma }}\bigg]. 
\end{eqnarray*} which is independent of $t$ and $z$, hence there exists \begin{equation*} \epsilon ^{^{\prime }}=\sum_{k=1}^{n}\frac{(b-a)^{n-k}}{\Gamma (\gamma -k+1)} \frac{e_{k}}{d\left( 1+\frac{c}{d}\right) }+\mathcal{B}(\gamma -n+1,\alpha ) \frac{(b-a)^{\alpha }}{\Gamma (\alpha )}\bigg[L\epsilon +\left\Vert \widetilde{f}\right\Vert _{C_{n-\gamma }}\bigg] \end{equation*} such that $\left\Vert {\large \mathcal{T}}_{2}z\right\Vert _{C_{n-\gamma }}\leq \epsilon ^{^{\prime }}.$ So ${\large \mathcal{T}}_{2}$ is uniformly bounded on $\mathbb{B}_{\epsilon }.$ Finally, to prove that ${\large \mathcal{T}}_{2}$ is equicontinuous in $ \mathbb{B}_{\epsilon }$, for any $z\in \mathbb{B}_{\epsilon }$ and $ t_{1},t_{2}\in (a,b]$ with $t_{1}<t_{2},$ we have \begin{eqnarray*} &&\left\vert (t_{2}-a)^{n-\gamma }\mathcal{T}_{2}z(t_{2})-(t_{1}-a)^{n- \gamma }\mathcal{T}_{2}z(t_{1})\right\vert \\ &=&\left\vert \sum_{k=1}^{n}\frac{(t_{2}-a)^{n-k}-(t_{1}-a)^{n-k}}{\Gamma (\gamma -k+1)}\frac{e_{k}}{d\left( 1+\frac{c}{d}\right) }\right. \\ &&+\frac{(t_{2}-a)^{n-\gamma }}{\Gamma (\alpha )} \int_{a}^{t_{2}}(t_{2}-s)^{\alpha -1}f(s,z(s))ds \\ &&-\left. \frac{(t_{1}-a)^{n-\gamma }}{\Gamma (\alpha )} \int_{a}^{t_{1}}(t_{1}-s)^{\alpha -1}f(s,z(s))ds\right\vert \\ &\leq &\sum_{k=1}^{n}\frac{\left\vert (t_{2}-a)^{n-k}-(t_{1}-a)^{n-k}\right\vert }{\Gamma (\gamma -k+1)}\frac{e_{k} }{d\left( 1+\frac{c}{d}\right) } \\ &&+\left\vert \frac{(t_{2}-a)^{n-\gamma }}{\Gamma (\alpha )} \int_{a}^{t_{2}}(t_{2}-s)^{\alpha -1}(s-a)^{\gamma -n}\left\Vert f\right\Vert _{C_{n-\gamma ;\psi }[a,b]}ds\right. \\ &&\left. 
-\frac{(t_{1}-a)^{n-\gamma }}{\Gamma (\alpha )} \int_{a}^{t_{1}}(t_{1}-s)^{\alpha -1}(s-a)^{\gamma -n}\left\Vert f\right\Vert _{C_{n-\gamma ;\psi }[a,b]}ds\right\vert \\ &=&\sum_{k=1}^{n}\frac{\left\vert (t_{2}-a)^{n-k}-(t_{1}-a)^{n-k}\right\vert }{\Gamma (\gamma -k+1)}\frac{e_{k}}{d\left( 1+\frac{c}{d}\right) } \\ &&+\frac{\mathcal{B}(\gamma -n+1,\alpha )}{\Gamma (\alpha )}\left\Vert f\right\Vert _{C_{n-\gamma ;\psi }[a,b]}\left\vert (t_{2}-a)^{\alpha }-(t_{1}-a)^{\alpha }\right\vert . \end{eqnarray*} Observe that the right-hand side of the above inequality is independent of $ z.$ So \begin{equation*} \left\vert (t_{2}-a)^{n-\gamma }{\large \mathcal{T}} _{2}z(t_{2})-(t_{1}-a)^{n-\gamma }{\large \mathcal{T}}_{2}z(t_{1})\right\vert \rightarrow 0,\text{ as }|t_{2}-t_{1}|\rightarrow 0. \end{equation*} This proves that ${\large \mathcal{T}}_{2}$ is equicontinuous on $\mathbb{B} _{\epsilon }$. In view of the Arzel\`{a}--Ascoli theorem, it follows that $( {\large \mathcal{T}}_{2}\mathbb{B}_{\epsilon })$ is relatively compact. As a consequence of the Krasnosel'skii fixed point theorem, we conclude that the problem \eqref{e8.1}-\eqref{e8.2} has at least one solution. \end{proof} \begin{corollary} Assume that hypotheses (H4) and (H5) are satisfied. Then the Hilfer boundary value problem \eqref{e8.1}-\eqref{e8.2} has a unique solution in $ C_{n-\gamma }^{\gamma }[a,b]\subset C_{n-\gamma }^{\alpha ,\beta }[a,b]$. 
\end{corollary} \section{An example \label{Sec5}} Consider the Hilfer fractional differential equation with boundary condition \begin{equation} \begin{cases} D_{a^{+}}^{\alpha ,\beta }z(t)=f\big(t,z(t)\big),t\in (0,1],0<\alpha <1,0\leq \beta \leq 1, \\ I_{a^{+}}^{1-\gamma }\big[\frac{1}{4}z(0^{+})+\frac{3}{4}z(1^{-})\big]=\frac{ 2}{5},~~\alpha \leq \gamma =\alpha +\beta -\alpha \beta , \end{cases} \label{3} \end{equation} where, $\alpha =\frac{1}{2},\beta =\frac{1}{3}$, $\gamma =\frac{2}{3}$, $c= \frac{1}{4}$, $d=\frac{3}{4}$, $e_{1}=\frac{2}{5},$ and \begin{equation*} f\big(t,z(t)\big)=t^{\frac{-1}{6}}+\frac{1}{16}t^{\frac{5}{6}}\sin z(t), \end{equation*} Clearly, $t^{\frac{1}{3}}f\big(t,z(t)\big)=t^{\frac{1}{6}}+\frac{1}{16}t^{ \frac{7}{6}}\sin z(t)\in C[0,1],$ hence $f\big(t,z(t)\big)\in C_{\frac{1}{3} }[0,1].$ Observe that, for any $z\in \mathbb{R} ^{+}$ and $t\in (0,1],$ \begin{eqnarray*} \left\vert f\big(t,z(t)\big)\right\vert &\leq &t^{\frac{1}{6}}\left( 1+\frac{ 1}{16}t^{\frac{2}{3}}\left\vert t^{\frac{1}{3}}z(t)\right\vert \right) \\ &\leq &\left( 1+\frac{1}{16}\left\Vert z\right\Vert _{C_{\frac{1}{3} }}\right) . \end{eqnarray*} Therefore, the conditions (H1) is satisfied with $N=1,$ and $\zeta =\frac{1}{ 16}$. It is easy to check that the (H2) is satisfied too. Indeed,$\ $by some calculations, we get \begin{equation} \mathcal{G}=\frac{\Gamma (\gamma -n+1)}{\Gamma (\alpha +1)}\left[ (b-a)^{\alpha }+(b-a)^{\alpha +n-\gamma }\right] N\zeta \simeq 0.19<1. \notag \end{equation} An application of Theorem \ref{th8.1} implies that problem (\ref{3}) has a solution in $C_{\frac{1}{3}}^{\frac{2}{3}}([0,1])$. Moreover, consider $f\big(t,z(t)\big)=t^{\frac{-1}{6}}+\frac{1}{16}t^{\frac{5 }{6}}\sin (t),$ it follows $\left\vert f\big(t,z(t)\big)\right\vert \leq t^{ \frac{-1}{6}}+\frac{1}{16}t^{\frac{5}{6}}=\eta (t)\in C_{1-\gamma }[0,1].$ Therefore (H3) holds. 
An application of Theorem \ref{th3.3} implies that problem (\ref{3}) has a solution in $C_{\frac{1}{3}}^{\frac{2}{3}}([0,1])$. Finally, if $f\big(t,z(t)\big)=t^{\frac{-1}{6}}+\frac{1}{16}t^{\frac{5}{6} }\sin z(t),$ then for $z,w\in \mathbb{R} ^{+}$ and $t\in (0,1],$ we get \begin{equation*} \left\vert f\big(t,z(t)\big)-f\big(t,w(t)\big)\right\vert \leq \frac{1}{16} \left\vert z-w\right\vert . \end{equation*} Thus, the hypothesis $(H4)$ is satisfied with $L=\frac{1}{16}$. It is easy to check that hypothesis (H5) \ and inequality (\ref{e2}) are satisfied. Indeed,$\ $by some calculations, we get \begin{eqnarray*} \mathcal{W} &:&=\bigg[\left\vert \frac{1}{1+\frac{c}{d}}\right\vert \sum_{k=1}^{n}\frac{(b-a)^{n-k}}{\Gamma (\gamma -k+1)}+\frac{\mathcal{B} (\gamma -n,\alpha +1)}{\Gamma (\gamma -n)}\bigg] \\ &&\times \frac{\Gamma (\gamma -n)(b-a)^{\alpha }}{\mathcal{B}(\gamma -n,1)\Gamma (\alpha +1)}L\simeq 0.14<1 \end{eqnarray*} and \begin{equation} \left\vert \frac{1}{1+\frac{c}{d}}\right\vert \sum_{k=1}^{n}\frac{ (b-a)^{n-k+\alpha }}{\Gamma (\gamma -k+1)}\frac{\Gamma (\gamma -n+1)}{\Gamma (\alpha +1)}L\simeq 0.05<1. \end{equation} An application of Theorem \ref{th3.4} implies that problem (\ref{3}) has a solution in $C_{\frac{1}{3}}^{\frac{2}{3}}[0,1]$. \section{Conclusion} We have obtained some existence results for the solution of boundary value problem for Hilfer fractional differential equations based on the reduction of fractional differential equations to integral equations. The employed techniques, the fixed point theorems, are quite general and effective. We trust the reported results here will have a positive impact on the development of further applications in engineering and applied sciences. \end{document}
\begin{document} \title[Generalized Spikes]{Generalized spikes with circuits and cocircuits of different cardinalities} \author[N.~Brettell]{Nick Brettell} \address{School of Mathematics and Statistics\\ Victoria University of Wellington\\ New Zealand} \email{[email protected]} \author[K.~Grace]{Kevin Grace} \address{Department of Mathematics\\ Vanderbilt University\\ Nashville, Tennessee} \email{[email protected]} \subjclass{05B35} \date{\today} \begin{abstract} We consider matroids with the property that every subset of the ground set of size $s$ is contained in a $2s$-element circuit and every subset of size $t$ is contained in a $2t$-element cocircuit. We say that such a matroid has the \emph{$(s,2s,t,2t)$-property}. A matroid is an \emph{$(s,t)$-spike} if there is a partition of the ground set into pairs such that the union of any $s$ pairs is a circuit and the union of any $t$ pairs is a cocircuit. Our main result is that all sufficiently large matroids with the $(s,2s,t,2t)$-property are $(s,t)$-spikes, generalizing a 2019 result that proved the case where $s=t$. We also present some properties of $(s,t)$-spikes. \end{abstract} \maketitle \section{Introduction} For integers $s$, $u$, $t$, and $v$, with $u \ge s \ge 1$ and $v \ge t \ge 1$, a matroid~$M$ has the \emph{$(s,u,t,v)$-property} if every $s$-element subset of $E(M)$ is contained in a circuit of size~$u$, and every $t$-element subset of $E(M)$ is contained in a cocircuit of size~$v$. Matroids with this property appear regularly in the matroid theory literature: for example, wheels and whirls have the $(1,3,1,3)$-property, and (tipless) spikes have the $(2,4,2,4)$-property. Note that $M$ has the $(s,u,t,v)$-property if and only if $M^*$ has the $(t,v,s,u)$-property. Brettell, Campbell, Chun, Grace, and Whittle~\cite{bccgw2019} studied such matroids, and showed that if $u<2s$ or $v<2t$, then there are only finitely many matroids with the $(s,u,t,v)$-property~\cite[Theorem 3.3]{bccgw2019}. 
On the other hand, in the case that $s=t$ and $u=v=2t$, any sufficiently large matroid with the $(s,u,t,v)$-property is a member of a class of structured matroids referred to as \emph{$t$-spikes}. In particular, when $t=2$, this is the class typically known simply as \emph{(tipless) spikes}. Our focus in this paper is also on the case where $u=2s$ and $v=2t$, but we drop the requirement that $s=t$. For positive integers $s$ and $t$, an \emph{$(s,t)$-spike} is a matroid on at least $2\max\{s,t\}$ elements whose ground set has a partition $(S_1,S_2,\ldots,S_n)$ into pairs such that the union of every set of $s$ pairs is a circuit and the union of every set of $t$ pairs is a cocircuit. The following is our main result: \begin{theorem} \label{mainthm} There exists a function $f : \mathbb{N}^2 \rightarrow \mathbb{N}$ such that, if $M$ is a matroid with the $(s,2s,t,2t)$-property and $|E(M)| \ge f(s,t)$, then $M$ is an $(s,t)$-spike. \end{theorem} \noindent This proves the conjecture of Brettell et al.~\cite[Conjecture~1.2]{bccgw2019}. Our approach is essentially the same as in \cite{bccgw2019}, but some care is required to generalize the argument. We note also that \cref{modcut} corrects an erroneous lemma \cite[Lemma 6.6]{bccgw2019}. This paper is one in a developing series on matroids with the $(s,u,t,v)$-property. First, Miller~\cite{miller2014} studied matroids with the $(2,4,2,4)$-property, proving the specialization of \cref{mainthm} to the case where $s=t=2$. As previously mentioned, Brettell et al.~\cite{bccgw2019} considered the more general case where $s=t$ and $u=v=2t$, for any $t \ge 1$. Oxley, Pfeil, Semple, and Whittle considered the case where $s=2$, $u=4$, $t=1$, and $v \in \{3,4\}$, showing that a sufficiently large $v$-connected matroid with the $(2,4,1,v)$-property is isomorphic to $M(K_{v,n})$ for some $n$~\cite{pfeil}. 
A ``cyclic'' analogue of the $(s,u,t,v)$-property has also been considered, where a cyclic ordering $\sigma$ is imposed on $E(M)$, and only sets that appear consecutively with respect to $\sigma$ and have size~$s$ (or size~$t$) need appear in a circuit of size $u$ (or a cocircuit of size $v$, respectively). The case where $s = u-1$ and $t = v-1$ and $s=t$ was considered by Brettell, Chun, Fife, and Semple~\cite{bcfs2019}; whereas Brettell, Semple, and Toft dropped the requirement that $s=t$~\cite{bst2022}. This series of papers has been motivated by problems involving matroid connectivity. The well-known Wheels-and-Whirls Theorem of Tutte~\cite{tutte1966} states that wheels and whirls (which have the $(1,3,1,3)$-property) are the only $3$-connected matroids with no elements that can be either deleted or contracted to retain a $3$-connected matroid. Similarly, spikes (which have the $(2,4,2,4)$-property) are the only $3$-connected matroids on at least $13$ elements that have no triangles, no triads, and no pairs of elements that can be either deleted or contracted to preserve $3$-connectivity~\cite{williams2015}. The following conjecture was stated as \cite[Conjecture 1.3]{bccgw2019}. The case where $t=2$ was proved by Williams~\cite{williams2015}. \begin{conjecture} \label{conj:old} There exists a function $f : \mathbb{N} \rightarrow \mathbb{N}$ such that if $M$ is a $(2t-1)$-connected matroid with no circuits or cocircuits of size $2t-1$, and $|E(M)| \ge f(t)$, then either \begin{enumerate} \item there exists a $t$-element set $X \subseteq E(M)$ such that either $M/X$ or $M \backslash X$ is $(t+1)$-connected, or \item $M$ is a $(t,t)$-spike. \end{enumerate} \end{conjecture} Indeed, sufficiently large $(t,t)$-spikes are $(2t-1)$-connected matroids~\cite[Lemma~6.5]{bccgw2019}, they have no circuits or cocircuits of size $(2t-1)$~\cite[Lemma~6.3]{bccgw2019}, and for every $t$-element subset $X \subseteq E(M)$, neither $M/X$ nor $M \backslash X$ is $(t+1)$-connected. 
Optimistically, we offer the following generalization of \cref{conj:old}. \begin{conjecture} \label{conj:new} There exists a function $f : \mathbb{N}^2 \rightarrow \mathbb{N}$ such that if $M$ is a matroid with no circuits of size at most $2s-1$, no cocircuits of size at most $2t-1$, the matroid $M$ is $(2\min\{s,t\}-1)$-connected, and $|E(M)| \ge f(s,t)$, then either \begin{enumerate} \item there exists an $s$-element set $X \subseteq E(M)$ such that $M/X$ is $(s+1)$-connected, \item there exists a $t$-element set $X \subseteq E(M)$ such that $M \backslash X$ is $(t+1)$-connected, or \item $M$ is an $(s,t)$-spike. \end{enumerate} \end{conjecture} \cref{sec:Preliminaries} recalls some terminology and a Ramsey-theoretic result used later in the paper. In \cref{sec:echidnas}, we recall the definition of echidnas from~\cite{bccgw2019} and show that every matroid with the $(s,2s,t,2t)$-property and having a sufficiently large $s$-echidna is an $(s,t)$-spike. In \cref{sec:t2t}, we prove \cref{mainthm}. Finally, \cref{sec:tspikeprops} describes some properties of $(s,t)$-spikes, as well as a construction that allows us to build an $(s,t+1)$-spike from an $(s,t)$-spike. \section{Preliminaries} \label{sec:Preliminaries} Our notation and terminology follows Oxley~\cite{oxbook}. We refer to the fact that a circuit and a cocircuit cannot intersect in exactly one element as ``orthogonality''. A set $S_1$ \emph{meets} a set $S_2$ if $S_1 \cap S_2 \neq \emptyset$. We denote $\{1,2,\dotsc,n\}$ by $\seq{n}$, and, for positive integers $i < j$, we denote $\{i,i+1,\dotsc,j\}$ by $[i,j]$. We denote the set of positive integers by $\mathbb{N}$. In order to prove \cref{mainthm}, we will use some hypergraph Ramsey Theory~\cite{ramsey1930}. Recall that a hypergraph is \emph{$k$-uniform} if every hyperedge has size~$k$. 
\begin{theorem}[Ramsey's Theorem for $k$-uniform hypergraphs] \label{hyperramsey} For positive integers $k$ and $n$, there exists an integer $r_k(n)$ such that if $H$ is a $k$-uniform hypergraph on $r_k(n)$ vertices, then $H$ has either a clique on $n$ vertices, or a stable set on $n$ vertices. \end{theorem} \section{Echidnas and \texorpdfstring{$(s,t)$}{(s,t)}-spikes} \label{sec:echidnas} Recall that $M$ is an $(s,t)$-spike if there is a partition of $E(M)$ into pairs such that the union of any $s$ pairs is a circuit and the union of any $t$ pairs is a cocircuit. In this section, we prove a sufficient condition for $M$ to be an $(s,t)$-spike. Namely, we prove as \cref{lem:swamping} that if $M$ has the $(s,2s,t,2t)$-property, and a subset of $E(M)$ can be partitioned into $u$ pairs such that the union of any $t$ pairs is a circuit, then, when $u$ is sufficiently large, $M$ is an $(s,t)$-spike. Conforming with \cite{bccgw2019}, we call such a partition a $t$-echidna, as defined below. Let $M$ be a matroid. A $t$-\emph{echidna} of order $n$ is a partition $(S_1,\ldots, S_n)$ of a subset of $E(M)$ such that \begin{enumerate} \item $|S_i|=2$ for all $i \in \seq{n}$, and \item $\bigcup_{i \in I}S_i$ is a circuit for all $I \subseteq \seq{n}$ with $|I|=t$. \end{enumerate} For $i \in \seq{n}$, we say $S_i$ is a \emph{spine}. We say $(S_1,\ldots,S_n)$ is a \emph{$t$-coechidna} of $M$ if $(S_1,\ldots,S_n)$ is a $t$-echidna of $M^*$. Let $(S_1,\dotsc,S_n)$ be a $t$-echidna of a matroid $M$. If $(S_1,\dotsc,S_m)$ is a $t$-echidna of $M$, for some $m \ge n$, we say that $(S_1,\dotsc,S_n)$ \emph{extends} to $(S_1,\dotsc,S_m)$. We say that $\pi=(S_1,\dotsc,S_n)$ is \emph{maximal} if $\pi$ extends only to $\pi$. Note that a matroid~$M$ is an $(s,t)$-spike if there exists a partition $\pi=(A_1,\ldots,A_m)$ of $E(M)$ such that $\pi$ is an $s$-echidna and a $t$-coechidna, for some $m\geq\max\{s,t\}$. 
In this case, we say that the $(s,t)$-spike~$M$ has \emph{order~$m$}, we call $\pi$ the \emph{associated partition} of the $(s,t)$-spike~$M$, and we say that $A_i$ is an \emph{arm} of the $(s,t)$-spike for each $i \in \seq{m}$. An $(s,t)$-spike with $s=t$ is also called a \emph{$t$-spike}. Note that if $M$ is an $(s,t)$-spike, then $M^*$ is a $(t,s)$-spike. Throughout this section, we assume that $s$ and $t$ are positive integers. \begin{lemma} \label{lem:coechidna} Let $M$ be a matroid with the $(s,2s,t,2t)$-property. If $M$ has an $s$-echidna $(S_1,\ldots, S_n)$, where $n\geq s+2t-1$, then $(S_1,\ldots, S_n)$ is also a $t$-coechidna of $M$. \end{lemma} \begin{proof} Suppose $M$ has an $s$-echidna $(S_1,\ldots, S_n)$ with $n \ge s+2t-1$, and let $S_i=\{x_i,y_i\}$ for each $i \in [n]$. We show, for every $t$-element subset $J$ of $[n]$, that $\bigcup_{j \in J} S_j$ is a cocircuit. Without loss of generality, let $J=[t]$. By the $(s,2s,t,2t)$-property, $\{x_1,\ldots,x_{t}\}$ is contained in a $2t$-element cocircuit~$C^*$. Suppose for a contradiction that $C^*\neq\bigcup_{j \in J} S_j$. Then there is some $i \in [t]$ such that $y_i\notin C^*$. Without loss of generality, say $y_1\notin C^*$. Let $I$ be an $(s-1)$-element subset of $[t+1,n]$. For any such $I$, the set $S_1 \cup \bigcup_{i \in I} S_i$ is a circuit that meets $C^*$. By orthogonality, $\bigcup_{i \in I} S_i$ meets $C^*$. Thus, $C^*$ avoids at most $s-2$ of the $S_i$'s for $i \in [t+1,n]$. In fact, as $C^*$ meets each $S_i$ with $i \in [t]$, the cocircuit~$C^*$ avoids at most $s-2$ of the $S_i$'s for $i \in [n]$. Thus $|C^*| \ge n-(s-2) \ge (s+2t-1) -(s-2) =2t+1 > 2t$, a contradiction. Therefore, we conclude that $C^*=\bigcup_{j \in J} S_j$, and the result follows. \end{proof} \sloppy \begin{lemma} \label{lem:rep-orthog} Let $M$ be a matroid with the $(s,2s,t,2t)$-property, and let $(S_1,\ldots, S_n)$ be an $s$-echidna of $M$ with $n\geq\max\{s+2t,2s+t\}-1$. 
\begin{itemize} \item[(i)] Let $I$ be an $(s-1)$-subset of $[n]$. For $z\in E(M)-\bigcup_{i \in I}S_i$, there is a $2s$-element circuit containing $\{z\} \cup \bigcup_{i \in I}S_i$. \item[(ii)] Let $I$ be a $(t-1)$-subset of $[n]$. For $z\in E(M)-\bigcup_{i \in I}S_i$, there is a $2t$-element cocircuit containing $\{z\} \cup \bigcup_{i \in I}S_i$. \end{itemize} \end{lemma} \fussy \begin{proof} First we prove (i). For $i \in [n]$, let $S_i=\{x_i,y_i\}$. By the $(s,2s,t,2t)$-property, there is a $2s$-element circuit~$C$ containing $\{z\} \cup \{x_i : i \in I\}$. Let $J$ be a $(t-1)$-element subset of $[n]$ such that $C$ and $\bigcup_{j \in J}S_j$ are disjoint (such a set exists since $|C|=2s$ and $n \ge 2s+t-1$). For $i \in I$, let $C^*_i=S_i \cup \bigcup_{j \in J} S_j$, and observe that $x_i \in C^*_i \cap C$, and $C^*_i \cap C \subseteq S_i$. By \cref{lem:coechidna}, $(S_1,\dotsc,S_n)$ is a $t$-coechidna as well as an $s$-echidna; therefore, $C^*_i$ is a cocircuit. Now, for each $i \in I$, orthogonality implies that $|C^*_i \cap C| \ge 2$, and hence $y_i \in C$. So $C$ contains $\{z\} \cup \bigcup_{i \in I}S_i$, as required. Now, to prove (ii), recall that $(S_1,\dotsc,S_n)$ is a $t$-coechidna by \cref{lem:coechidna}. Therefore, (ii) follows by (i) and duality. \end{proof} \begin{lemma} \label{lem:swamping} Let $M$ be a matroid with the $(s,2s,t,2t)$-property. If $M$ has an $s$-echidna $\pi=(S_1,\ldots, S_n)$, where $n\geq\max\{s+2t-1,2s+t-1,3s+t-3\}$, then $(S_1,\ldots, S_n)$ extends to a partition of $E(M)$ that is both an $s$-echidna and a $t$-coechidna. \end{lemma} \begin{proof} Let $\pi'=(S_1, \dotsc, S_m)$ be a maximal $s$-echidna with $X=\bigcup_{i = 1}^{m} S_i\subseteq E(M)$. Suppose for a contradiction that $X\neq E(M)$. Since $\pi'$ is maximal, $m\geq n\geq s+2t-1$. Therefore, by Lemma \ref{lem:coechidna}, $\pi'$ is a $t$-coechidna. Let $z\in E(M)-X$. 
By Lemma \ref{lem:rep-orthog}, there is a $2s$-element circuit $C = (\bigcup_{i \in [s-1]} S_i)\cup \{z,z'\}$ for some $z'\in E(M)$. We claim that $z'\notin X$. Towards a contradiction, suppose that $z'\in S_k$ for some $k\in [s,m]$. Let $J$ be a $t$-element subset of $[s,m]$ containing $k$. Then, since $(S_1,\dotsc,S_m)$ is a $t$-coechidna, $\bigcup_{j \in J}S_j$ is a cocircuit that contains $z'$. Now, this cocircuit intersects the circuit~$C$ in a single element $z'$, contradicting orthogonality. Thus, $z'\notin X$, as claimed. We next show that $(\{z,z'\}, S_{s}, S_{s+1}, \ldots, S_m)$ is a $t$-coechidna. Since $\pi'$ is a $t$-coechidna, it suffices to show that $\{z,z'\} \cup \bigcup_{i \in I}S_i$ is a cocircuit for each $(t-1)$-element subset~$I$ of $[s,m]$. Let $I$ be such a set. \Cref{lem:rep-orthog} implies that there is a $2t$-element cocircuit~$C^*$ of $M$ containing $\{z\} \cup \bigcup_{i\in I}S_i$. By orthogonality, $|C\cap C^*|>1$. Therefore, $z'\in C^*$. Thus, $(\{z,z'\}, S_{s}, S_{s+1}, \ldots, S_m)$ is a $t$-coechidna. Since this $t$-coechidna has order $1+m-(s-1)\geq n-s+2\geq2s+t-1$, the dual of \cref{lem:coechidna} implies that $(\{z,z'\}, S_{s}, S_{s+1}, \dotsc, S_m)$ is also an $s$-echidna. Next we show that $(\{z,z'\}, S_1, S_2, \dotsc, S_m)$ is a $t$-coechidna. Let $I$ be a $(t-1)$-element subset of $[m]$. We claim that $\{z,z'\} \cup \bigcup_{i \in I}S_i$ is a cocircuit. Let $J$ be an $(s-1)$-element subset of $[s,m]-I$. Then $C=\{z,z'\} \cup \bigcup_{j \in J}S_j$ is a circuit since $(\{z,z'\}, S_{s}, S_{s+1}, \dotsc, S_m)$ is an $s$-echidna. By \cref{lem:rep-orthog}, there is a $2t$-element cocircuit~$C^*$ containing $\{z\} \cup \bigcup_{i \in I}S_i$. By orthogonality between $C$ and $C^*$, we have $z'\in C^*$. Since $I$ was arbitrarily chosen, $(\{z,z'\}, S_1, S_2, \dotsc, S_m)$ is a $t$-coechidna. By the dual of \cref{lem:coechidna}, it is also an $s$-echidna, contradicting the maximality of $(S_1,\dotsc,S_m)$. 
\end{proof} \section{Matroids with the \texorpdfstring{$(s,2s,t,2t)$}{(s,2s,t,2t)}-property} \label{sec:t2t} In this section, we prove that every sufficiently large matroid with the $(s,2s,t,2t)$-property is an $(s,t)$-spike. We will show that a sufficiently large matroid with the $(s,2s,t,2t)$-property has a large $s$-echidna or $t$-coechidna; it then follows, by \cref{lem:swamping}, that the matroid is an $(s,t)$-spike. As in the previous section, we assume that $s$ and $t$ are positive integers. \begin{lemma} \label{lem:rank-t} Let $M$ be a matroid with the $(s,2s,t,2t)$-property, and let $X\subseteq E(M)$. \begin{enumerate} \item If $r(X)<s$, then $X$ is independent.\label{rt1} \item If $r(X)=s$, then $M|X\cong U_{s,|X|}$ and $|X|<s+2t$.\label{rt2} \end{enumerate} \end{lemma} \begin{proof} Every subset of $E(M)$ of size at most $s$ is independent since it is contained in a circuit of size $2s$. In particular, \ref{rt1} holds. Now let $r(X)=s$. Then every $(s+1)$-element subset of $X$ is a circuit, so $M|X\cong U_{s,|X|}$. Suppose for a contradiction that $|X|\geq s+2t$. Let $C^*$ be a $2t$-element cocircuit such that there is some $x\in X\cap C^*$. Then $X-C^*$ is contained in the hyperplane $E(M)-C^*$. Since $x\in X\cap C^*$, we have $r(X-C^*)<r(X)=s$. Therefore, $X-C^*$ is an independent set, so $|X-C^*|<s$. Since $|X|\geq s+2t$, we have $|C^*|>2t$, a contradiction. Thus, \ref{rt2} holds. \end{proof} \begin{lemma} \label{lemmaA} Let $M$ be a matroid with the $(s,2s,t,2t)$-property, and let $C_1^*,C_2^*,\dotsc,C_{s-1}^*$ be a collection of pairwise disjoint cocircuits of $M$. Let $Y = E(M)-\bigcup_{i \in [s-1]} C_i^*$. 
For all $y \in Y$, there is a $2s$-element circuit~$C_y$ containing $y$ such that either \begin{enumerate} \item $|C_y \cap C_i^*| = 2$ for all $i \in [s-1]$, or\label{A1} \item $|C_y \cap C_j^*| = 3$ for some $j \in [s-1]$, and $|C_y \cap C_i^*| = 2$ for all $i \in [s-1]-\{j\}$.\label{A2} \end{enumerate} Moreover, if $C_y$ satisfies \ref{A2}, then there are at most $s+2t-1$ elements $w \in Y$ such that $(C_y-y) \cup \{w\}$ is a circuit. \end{lemma} \begin{proof} Choose an element $c_i \in C_i^*$ for each $i \in [s-1]$. By the $(s,2s,t,2t)$-property, there is a $2s$-element circuit~$C_y$ containing $\{c_1,c_2,\dotsc,c_{s-1},y\}$, for each $y \in Y$. By orthogonality, $C_y$ satisfies \ref{A1} or \ref{A2}. Suppose $C_y$ satisfies \ref{A2}, and let $S =C_y-Y= C_y-\{y\}$. Let $W = \{w \in Y : S \cup \{w\} \textrm{ is a circuit}\}$. It remains to prove that $|W| < s+2t$. Observe that $W \subseteq \cl(S) \cap Y$, and, since $S$ contains $s-1$ elements in pairwise disjoint cocircuits that avoid $Y$, we have $r(\cl(S) \cup Y) \ge r(Y) + (s-1)$. Thus, \begin{align*} r(W) &\le r(\cl(S) \cap Y) \\ &\le r(\cl(S)) + r(Y) - r(\cl(S) \cup Y) \\ &\le (2s-1) + r(Y) - (r(Y)+ (s-1)) \\ &=s, \end{align*} using submodularity of the rank function at the second line. Now, by \cref{lem:rank-t}\ref{rt1}, if $r(W) < s$, then $W$ is independent, so $|W| = r(W) < s < s + 2t$. On the other hand, by \cref{lem:rank-t}\ref{rt2}, if $r(W)=s$, then $M|W \cong U_{t,|W|}$ and $|W|<s+2t$, as required. \end{proof} \begin{lemma} \label{lem:disjoint} There exists a function $h$ such that if $M$ is a matroid with at least $h(k,d,t)$ $k$-element circuits, and the property that every $t$-element set is contained in a $2t$-element cocircuit for some positive integer $t$, then $M$ has a collection of $d$ pairwise disjoint $2t$-element cocircuits. 
\end{lemma} \begin{proof} By \cite[Lemma 3.2]{bccgw2019}, there is a function $g$ such that if $M$ has at least $g(k,d)$ $k$-element circuits, then $M$ has a collection of $d$ pairwise disjoint circuits. We define $h(k,d,t) = g(k,dt)$, and claim that a matroid with at least $h(k,d,t)$ $k$-element circuits, and the property that every $t$-element set is contained in a $2t$-element cocircuit, has a collection of $d$ pairwise disjoint $2t$-element cocircuits. Let $M$ be such a matroid. Then $M$ has a collection of $dt$ pairwise disjoint circuits. We partition these into $d$ groups of size $t$: call this partition $(\mathcal{C}_1,\dotsc,\mathcal{C}_d)$. Since the $t$ circuits in any cell of this partition are pairwise disjoint, it now suffices to show that, for each $i \in [d]$, there is a $2t$-element cocircuit contained in the union of the members of $\mathcal{C}_i$. Let $\mathcal{C}_i = \{C_1,\dotsc,C_{t}\}$ for some $i \in [d]$. Pick some $c_j \in C_j$ for each $j \in [t]$. Then, since $\{c_1,c_2,\dotsc,c_{t}\}$ is a $t$-element set, it is contained in a $2t$-element cocircuit, which, by orthogonality, is contained in $\bigcup_{j \in [t]}C_j$. \end{proof} \begin{lemma} \label{setup} Let $M$ be a matroid with the $(s,2s,t,2t)$-property such that $r(M)\geq r^*(M)$. There exists a function $g$ such that, if $|E(M)| \ge g(s,t,q)$, then $M$ has $s-1$ pairwise disjoint $2t$-element cocircuits $C_1^*, C_2^*, \dotsc, C_{s-1}^*$, and there is some $Z \subseteq E(M)-\bigcup_{i \in [s-1]}C_i^*$ such that \begin{enumerate} \item $r_{M}(Z) \ge q$, and\label{ps1} \item for each $z \in Z$, there exists an element $z'\in Z-\{z\}$ such that $\{z,z'\}$ is contained in a $2s$-element circuit~$C$ with $|C \cap C_i^*|=2$ for each $i \in [s-1]$.\label{ps2} \end{enumerate} \end{lemma} \begin{proof} By \cref{lem:disjoint}, there is a function $h$ such that if $M$ has at least $h(k,d,t)$ $k$-element circuits, then $M$ has $d$ pairwise disjoint $2t$-element cocircuits. 
Suppose $|E(M)|\geq 2s\cdot h(2s,s-1,t)$. By the $(s,2s,t,2t)$-property, $M$ has at least $h(2s,s-1,t)$ distinct $2s$-element circuits. Therefore, by \cref{lem:disjoint}, $M$ has a collection of $s-1$ pairwise disjoint $2t$-element cocircuits $C_1^*,\dotsc, C_{s-1}^*$. Let $X = \bigcup_{i \in [s-1]}C_i^*$ and $Y=E(M)-X$. By \cref{lemmaA}, for each $y \in Y$ there is a $2s$-element circuit~$C_y$ containing $y$ such that $|C_y \cap C_j^*| = 3$ for at most one $j \in [s-1]$ and $|C_y \cap C_i^*| = 2$ otherwise. Let $W$ be the set of all $w \in Y$ such that $w$ is in a $2s$-element circuit~$C$ with $|C\cap C_j^*|=3$ for some $j \in [s-1]$, and $|C \cap C_i^*|=2$ for all $i \in [s-1]-\{j\}$. Now, letting $Z=Y-W$, we see that \ref{ps2} is satisfied. It remains to show that \ref{ps1} holds. Since each $C_i^*$ has size $2t$, there are $(s-1)\binom{2t}{3}\binom{2t}{2}^{s-2}$ sets $X'\subseteq X$ with $|X' \cap C_j^*|=3$ for some $j \in [s-1]$ and $|X' \cap C_i^*|=2$ for all $i \in [s-1]-\{j\}$. It follows, by \cref{lemmaA}, that $|W| \le f(s,t)$ where \[f(s,t) = (s+2t-1)\left[(s-1)\binom{2t}{3}\binom{2t}{2}^{s-2}\right].\] We define \[g(s,t,q) = \max\left\{2s\cdot h(2s,s-1,t), 2\big(2t(s-1)+f(s,t)+q\big)\right\}.\] Suppose that $|E(M)| \ge g(s,t,q)$. Since $r(M)\geq r^*(M)$ and $|E(M)|\geq2(2t(s-1)+f(s,t)+q)$, we have $r(M) \ge 2t(s-1)+f(s,t)+q$. Then, \begin{align*} r_{M}(Z) &\ge r_{M}(Y) - |W| \\ &\ge \big(r(M)-2t(s-1)\big) - f(s,t) \\ &\ge q, \end{align*} so \ref{ps1} holds as well. \end{proof} \sloppy \begin{lemma} \label{lem:payoff} Let $M$ be a matroid with the $(s,2s,t,2t)$-property. 
Suppose $M$ has $s-1$ pairwise disjoint $2t$-element cocircuits $C_1^*, C_2^*, \dotsc, C_{s-1}^*$ and, for some positive integer~$p$, there is a set $Z \subseteq E(M)-\bigcup_{i \in [s-1]}C_i^*$ such that \begin{enumerate}[label=\rm(\alph*)] \item $r(Z) \ge \binom{2t}{2}^{s-1}(p + 2(s-1))$, and \item for each $z \in Z$, there exists an element $z'\in Z-\{z\}$ such that $\{z,z'\}$ is contained in a $2s$-element circuit $C$ of $M$ with $|C\cap C_i^*|=2$ for each $i\in [s-1]$. \end{enumerate} There exists a subset $Z' \subseteq Z$ and a partition $\pi=( Z_1', \dotsc, Z_p' )$ of $Z'$ into pairs such that \begin{enumerate} \item each circuit of $M|Z'$ is a union of pairs in $\pi$, and \item the union of any $s$ pairs in $\pi$ contains a circuit. \end{enumerate} \end{lemma} \fussy \begin{proof} We first prove the following: \begin{sublemma} \label{prelem:payoff} There exists a $(2s-2)$-element set $X$ such that $|X\cap C_i^*|=2$ for every $i\in[s-1]$ and a set $Z'\subseteq Z$ with a partition $\pi=\{ Z_1', \dotsc, Z_p' \}$ of $Z'$ into pairs such that \begin{enumerate}[label=\rm(\Roman*)] \item $X \cup Z_i'$ is a circuit, for each $i \in [p]$ and \label{ppo1} \item $\pi$ partitions the ground set of $(M/X)|Z'$ into parallel classes such that $r_{M/X}\big(\bigcup_{i \in [p]}Z_i'\big)=p$. \label{ppo2} \end{enumerate} \end{sublemma} \begin{subproof} By (b), for each $z \in Z$, there exists an element $z'\in Z-\{z\}$ and a set $X'$ such that $\{z,z'\} \cup X'$ is a circuit of $M$ and $X'$ is the union of pairs $Y_i$ for $i\in[s-1]$, with $Y_i\subseteq C_i^*$. Since $|C_i^*|=2t$ for each $i\in[s-1]$, there are $\binom{2t}{2}^{s-1}$ choices for $(Y_1,Y_2,\ldots,Y_{s-1})$. 
Therefore, for some $m\leq\binom{2t}{2}^{s-1}$, there are $(2s-2)$-element sets $X_1,X_2,\ldots,X_m$, and sets $Z_1,Z_2,\ldots,Z_m$ whose union is $Z$, such that each of $X_1,X_2,\ldots,X_m$ intersects $C_i^*$ in two elements for each $i\in[s-1]$, and such that, for each $j\in[m]$ and each $z_j\in Z_j$, there is an element $z_j'$ such that $\{z_j,z_j'\}\cup X_j$ is a circuit. Since $Z=\bigcup_{i \in [m]}Z_i$, we have $\sum_{i\in[m]}r(Z_i)\geq r(Z)$. Thus, the pigeonhole principle implies that there is some $j\in[m]$ such that \[r(Z_j) \ge \frac{r(Z)}{\binom{2t}{2}^{s-1}} \ge p+2(s-1),\] by (a). We define $Z' = Z_j$ and $X = X_j$. Observe that $X \cup \{z,z'\}$ is a circuit, for some pair $\{z,z'\} \subseteq Z'$, if and only if $\{z,z'\}$ is a parallel pair in $M/X$. Therefore, there is a partition of the ground set of $(M/X)|Z'$ into parallel classes, where every parallel class has size at least two. Let $\{\{z_1,z_1'\}, \dotsc,\{z_n,z_n'\}\}$ be a collection of pairs from each parallel class such that $\{z_1,z_2,\dotsc,z_n\}$ is an independent set in $(M/X)|Z'$. Note that $n\geq r_{M/X}(Z') = r(Z' \cup X) -r(X) \ge r(Z') - 2(s-1) \ge p$. For $i\in[p]$, let $Z_i'=\{z_i,z_i'\}$. Then $\pi=\{ Z_1', \dotsc, Z_p' \}$ satisfies \ref{prelem:payoff}. \end{subproof} Let $X$, $\pi$, and $Z'$ be as described in \ref{prelem:payoff}, and let $\mathcal{X} = \{X_1,\dotsc,X_{s-1}\}$, where $X_i = \{x_i,x_i'\} = X \cap C_i^*$. \begin{sublemma} \label{metamatroid} Each circuit of $M|(X \cup Z')$ is a union of pairs in $\mathcal{X} \cup \pi$. \end{sublemma} \begin{subproof} Let $C$ be a circuit of $M|(X \cup Z')$. If $x_i \in C$, for some $\{x_i,x_i'\} \in \mathcal{X}$, then orthogonality with $C_i^*$ implies that $x_i' \in C$. Assume for a contradiction that $\{z,z'\} \in \pi$ and $C \cap \{z,z'\} = \{z\}$. Let $W$ be the union of the pairs in $\pi$ containing elements of $(C-\{z\}) \cap Z'$. Then $z \in \cl(X \cup W)$. 
Hence $z \in \cl_{M/X}(W)$, contradicting \cref{prelem:payoff}\ref{ppo2}. \end{subproof} \begin{sublemma} \label{induct} Every union of $s$ pairs in $\mathcal{X} \cup \pi$ contains a circuit. \end{sublemma} \begin{subproof} Let $\mathcal{W}$ be a subset of $\mathcal{X} \cup \pi$ of size $s$. We proceed by induction on the number of pairs in $\mathcal{W} \cap \pi$. If there is only one pair in $\mathcal{W} \cap \pi$, then the union of the pairs in $\mathcal{W}$ contains a circuit (indeed, is a circuit) by \cref{prelem:payoff}\ref{ppo1}. Suppose the result holds for any subset containing $k$ pairs in $\pi$, and let $\mathcal{W}$ be a subset containing $k+1$ pairs in $\pi$. Let $\{x,x'\}$ be a pair in $\mathcal{X}-\mathcal{W}$, and let $W = \bigcup_{W' \in \mathcal{W}}W'$. Then $W \cup \{x,x'\}$ is the union of $s+1$ pairs of $\mathcal{X} \cup \pi$, of which $k+1$ are in $\pi$, so, by the induction hypothesis, $W \cup \{x,x'\}$ properly contains a circuit~$C_1$. If $\{x,x'\} \subseteq E(M)-C_1$, then $C_1 \subseteq W$, in which case the union of the pairs in $\mathcal{W}$ contains a circuit, as desired. Therefore, we may assume, by \cref{metamatroid}, that $\{x,x'\} \subseteq C_1$. Since $X$ is independent, there is a pair $\{z,z'\} \subseteq Z' \cap C_1$. By the induction hypothesis, there is a circuit~$C_2$ contained in $(W-\{z,z'\}) \cup \{x,x'\}$. Observe that $C_1$ and $C_2$ are distinct, and $\{x,x'\} \subseteq C_1 \cap C_2$. Circuit elimination on $C_1$ and $C_2$, and \cref{metamatroid}, imply that there is a circuit $C_3 \subseteq (C_1 \cup C_2) - \{x,x'\} \subseteq W$, as desired. The claim now follows by induction. \end{subproof} Now, \cref{induct} implies that the union of any $s$ pairs in $\pi$ contains a circuit, and the result follows. \end{proof} \begin{lemma} \label{lem:tis1} If $M$ is a matroid with the $(1,2,t,2t)$-property and at least $t$ elements, then $M$ is a $(1,t)$-spike. 
Dually, if $M$ is a matroid with the $(s,2s,1,2)$-property and at least $s$ elements, then $M$ is an $(s,1)$-spike. \end{lemma} \begin{proof} By duality, it suffices to consider the case where $M$ has the $(1,2,t,2t)$-property and at least $t$ elements. Since every element of $M$ is contained in a $2$-element circuit, there is a partition of $E(M)$ into parallel classes $P_1,P_2,\ldots,P_n$, where $|P_i|\geq2$ for each $i$. For each $P_i$, let $x_i\in P_i$. First, we consider the case where $n\geq t$. Let $X$ be a $t$-element subset of $\{x_1,\ldots,x_{n}\}$; for ease of notation, we assume $X=\{x_1,\ldots,x_{t}\}$. By the $(1,2,t,2t)$-property, $X\subseteq C^*$ for some $2t$-element cocircuit $C^*$. Since $P_i$ is a parallel class, $\{x_i,y_i\}$ is a circuit for each $y_i\in P_i-\{x_i\}$. By orthogonality, $y_i\in C^*$ for each such $y_i$, so $P_i\subseteq C^*$. Since $|C^*|=2t$, and $X$ is an arbitrary $t$-element subset of $\{x_1,\ldots,x_{n}\}$, it follows that $|P_i|=2$ for each $i\in[n]$, and that the union of any $t$ of the $P_i$'s is a cocircuit. Thus $M$ is a $(1,t)$-spike. It remains to consider the case where $n<t$. Since $M$ has at least $t$ elements, let $X$ be any $t$-element set containing $\{x_1,\ldots,x_n\}$. By the $(1,2,t,2t)$-property, there is a $2t$-element cocircuit $C^*$ containing $X$. For $i\in[n]$ and each $y_i\in P_i-\{x_i\}$, orthogonality implies $y_i\in C^*$. Thus, $E(M)=C^*$. It follows that $M\cong U_{1,2t}$, which is a $(1,t)$-spike. \end{proof} We now prove \cref{mainthm}, restated below. \begin{theorem} \label{mainthmtake2} There exists a function $f : \mathbb{N}^2 \rightarrow \mathbb{N}$ such that, if $M$ is a matroid with the $(s,2s,t,2t)$-property and $|E(M)| \ge f(s,t)$, then $M$ is an $(s,t)$-spike. \end{theorem} \begin{proof} If $s=1$ or $t=1$, then, by \cref{lem:tis1}, the theorem holds with $f(s,t) = \max\{s,t\}$. So we may assume that $\min\{s,t\} \ge 2$. 
A matroid is an $(s,t)$-spike if and only if its dual is a $(t,s)$-spike; moreover, a matroid has the $(s,2s,t,2t)$-property if and only if its dual has the $(t,2t,s,2s)$-property. Therefore, by duality, we may also assume that $r(M)\geq r^*(M)$. Let $r_k(n)$ be the Ramsey number described in \cref{hyperramsey}. For $k \in [s]$, we define the function $h_k : \mathbb{N}^2 \rightarrow \mathbb{N}$ such that \[h_{s}(s,t)=\max\{s+2t-1,2s+t-1,3s+t-3,s+3t-3\}\] and such that $h_k(s,t)=r_k(h_{k+1}(s,t))$ for $k\in[s-1]$. Note that $h_{k}(s,t) \ge h_{k+1}(s,t) \ge h_{s}(s,t)$, for each $k \in [s-1]$. Let $p = h_1(s,t)$ and let $q(s,t)=\binom{2t}{2}^{s-1}(p + 2(s-1))$. By \cref{setup}, there exists a function $g$ such that if $|E(M)| \ge g(s,t,q(s,t))$, then $M$ has $s-1$ pairwise disjoint $2t$-element cocircuits $C_1^*, C_2^*, \dotsc, C_{s-1}^*$, and there is some $Z \subseteq E(M)-\bigcup_{i \in [s-1]}C_i^*$ such that $r_M(Z) \ge q(s,t)$, and, for each $z \in Z$, there exists an element $z'\in Z'-\{z\}$ such that $\{z,z'\}$ is contained in a $2s$-element circuit~$C$ with $|C \cap C_i^*|=2$ for each $i \in [s-1]$. Let $f(s,t) = g(s,t,q(s,t))$, and suppose that $|E(M)| \ge f(s,t)$. Then, by \cref{lem:payoff}, there exists a subset $Z \subseteq Z'$ such that $Z$ has a partition into pairs $\pi = ( Z_1, \dotsc, Z_{p})$ such that \begin{enumerate}[label=\rm(\Roman*)] \item each circuit of $M|Z$ is a union of pairs in $\pi$, and \item the union of any $s$ pairs in $\pi$ contains a circuit.\label{rc2} \end{enumerate} Let $m=h_{s}(s,t)$. By \cref{lem:swamping} and its dual, it suffices to show that $M$ has either an $s$-echidna or a $t$-coechidna of order $m$. If the smallest circuit in $M|Z$ has size $2s$, then, by \ref{rc2}, $\pi$ is an $s$-echidna of order $p \ge m$. So we may assume that the smallest circuit in $M|Z$ has size $2j$ for some $j \in [s-1]$. 
\begin{sublemma} \label{iterramsey} If the smallest circuit in $M|Z$ has size $2j$, for $j \in [s-1]$, and $|\pi| \ge h_j(s,t)$, then either \begin{enumerate} \item $M$ has a $t$-coechidna of order $m$, or\label{ir1} \item there exists some $Z' \subseteq Z$ that is the union of $h_{j+1}(s,t)$ pairs in $\pi$ for which the smallest circuit in $M|Z'$ has size at least $2(j+1)$.\label{ir2} \end{enumerate} \end{sublemma} \begin{subproof} We define $H$ to be the $j$-uniform hypergraph with vertex set $\pi$ whose hyperedges are the $j$-subsets of $\pi$ that are partitions of circuits in $M|Z$. By \cref{hyperramsey}, and the definition of $h_k$, as $H$ has at least $h_j(s,t)$ vertices, it has either a clique or a stable set, on $h_{j+1}(s,t)$ vertices. If $H$ has a stable set~$\pi'$ on $h_{j+1}(s,t)$ vertices, then clearly \ref{ir2} holds, with $Z' = \bigcup_{P \in \pi'} P$. Therefore, we may assume that there are $h_{j+1}(s,t)$ pairs in $\pi$ such that the union of any $j$ of these pairs is a circuit. Let $Z''$ be the union of these $h_{j+1}(s,t)$ pairs. We claim that the union of any set of $t$ pairs contained in $Z''$ is a cocircuit. Let $T$ be a transversal of $t$ pairs in $\pi$ contained in $Z''$, and let $C^*$ be the $2t$-element cocircuit containing $T$. Suppose, for a contradiction, that there exists some pair $P \in \pi$ with $P \subseteq Z''$ such that $|C^* \cap P| = 1$. Select $j-1$ pairs $Z_1'',\dotsc,Z_{j-1}''$ in $\pi$ that are each contained in $Z''-C^*$ (these exist since $h_{j+1}(s,t) \ge s+2t-1 \ge 2t + j - 1$). Then $P \cup (\bigcup_{i \in [j-1]}Z_i'')$ is a circuit intersecting $C^*$ in a single element, contradicting orthogonality. We deduce that the union of any $t$ pairs in $\pi$ that are contained in $Z''$ is a cocircuit. Thus, $M$ has a $t$-coechidna of order $h_{j+1}(t) \ge m$, satisfying \ref{ir1}. \end{subproof} We now apply \cref{iterramsey} iteratively, for a maximum of $s-j$ iterations. 
If \ref{ir1} holds, at any iteration, then $M$ has a $t$-coechidna of order $m$, as required. Otherwise, we let $\pi'$ be the partition of $Z'$ induced by $\pi$; then, at the next iteration, we relabel $Z=Z'$ and $\pi=\pi'$. If \ref{ir2} holds for each of $s-j$ iterations, then we obtain a subset $Z'$ of $Z$ such that the smallest circuit in $M|Z'$ has size $2s$. Then, by \ref{rc2}, $M$ has an $s$-echidna of order $h_{s}(s,t)=m$, completing the proof. \end{proof} \section{Properties of \texorpdfstring{$(s,t)$}{(s,t)}-spikes} \label{sec:tspikeprops} In this section, we prove some properties of $(s,t)$-spikes. In particular, we show that an $(s,t)$-spike has order at least $s+t-1$; an $(s,t)$-spike of order~$m$ has $2m$ elements and rank~$m+s-t$; and the circuits of an $(s,t)$-spike that are not a union of $s$ arms meet all but at most $t-2$ of the arms. We also give some results about the connectivity of $(s,t)$-spikes of sufficiently large order. We also show that an appropriate concatenation of the associated partition of a $t$-spike is a $(2t-1)$-anemone, following the terminology of~\cite{ao2008}. Finally, we describe a construction that can be used to obtain an $(s,t+1)$-spike from an $(s,t)$-spike of sufficiently large order, and we show that every $(s,t+1)$-spike can be constructed from some $(s,t)$-spike in this way. We again assume that $s$ and $t$ are positive integers. \subsection*{Basic properties} \begin{lemma} \label{tspikeorder} Let $M$ be an $(s,t)$-spike with associated partition $(A_1,\ldots,A_m)$. Then $m \ge s+t-1$. \end{lemma} \begin{proof} By the definition of an $(s,t)$-spike, we have $m\geq\max\{s,t\}$. Let $Y = \bigcup_{j \in [t]}A_j$, and let $y\in Y$. Since $Y$ is a cocircuit, $Z=(E(M)-Y) \cup \{y\}$ spans $M$. Therefore, $r(M)\leq|Z|=2m-2t+1$. Similarly, by duality, $r^*(M)\leq2m-2s+1$. Therefore, \[2m = |E(M)| = r(M) + r^*(M) \le (2m-2t+1)+(2m-2s+1).\] The result follows. 
\end{proof} \begin{lemma} \label{lem:rank-matroid} Let $M$ be an $(s,t)$-spike of order~$m$. Then $r(M)=m+s-t$ and $r^*(M)=m-s+t$. \end{lemma} \begin{proof} Let $(A_1,\ldots,A_m)$ be the associated partition of $M$, and let $A_i = \{x_i,y_i\}$ for each $i \in [m]$. Choose $I \subseteq J \subseteq [m]$ such that $|I|=s-1$ and $|J| = m-t$. (This is possible by \cref{tspikeorder}.) Let $X = \{y_j : i \in I\} \cup \{x_j : j \in J\}$. Note that $\bigcup_{i \in I\cup J}A_i\subseteq\cl(X)$. Since $E(M)-\bigcup_{i \in I\cup J}A_i$ is a cocircuit, $\bigcup_{i \in I\cup J}A_i$ is a hyperplane. Therefore, $\bigcup_{i \in I\cup J}A_i=\cl(X)$, and we have $r(M)-1=r(X)\leq|X|=|I|+|J|=m+s-t-1$. Thus, $r(M)\leq m+s-t$. Similarly, by duality, $r^*(M)\leq m-s+t$. Therefore, we have \[2m=|E(M)|=r(M)+r^*(M)\leq(m+s-t)+(m-s+t)=2m.\] Thus, we must have equality, and the result holds. \end{proof} \sloppy \begin{lemma} \label{l:circuits} Let $M$ be an $(s,t)$-spike of order~$m$ with associated partition $(A_1,\ldots,A_m)$, and let $C$ be a circuit of $M$. \begin{enumerate} \item $C = \bigcup_{j \in J}A_j$ for some $s$-element set $J \subseteq [m]$, or\label{c1} \item $\left|\{i \in [m] : A_i \cap C \neq \emptyset\}\right| \ge m-(t-2)$ and $\left|\{i \in [m] : A_i \subseteq C\}\right| < s$.\label{c2} \end{enumerate} \end{lemma} \fussy \begin{proof} Let $S = \{i \in [m] : A_i \cap C \neq \emptyset\}$. Thus, $S$ is the minimal subset of $[m]$ such that $C \subseteq \bigcup_{i \in S}A_i$. We have $|S| \ge s$ since $C$ is independent otherwise. If $|S|=s$, then $C$ satisfies \ref{c1}. Therefore, we may assume $|S| > s$. We must have $\left|\{i \in [m] : A_i \subseteq C\}\right| < s$; otherwise $C$ properly contains a circuit. Thus, there is some $j \in S$ such that $A_j - C \neq \emptyset$. If $|S| \ge m-(t-2)$, then $C$ satisfies \ref{c2}. Therefore, we may assume $|S| \le m-(t-1)$. Let $T = ([m]-S) \cup \{j\}$. 
Then $|T|\ge t$, implying that $\bigcup_{i \in T}A_i$ contains a cocircuit intersecting $C$ in one element. This contradicts orthogonality. \end{proof} In the remainder of the paper, if $(A_1,\ldots,A_m)$ is the associated partition of an $(s,t)$-spike and $J\subseteq[m]$, then we define \[A_J=\bigcup_{j \in J} A_j.\] \begin{proposition} \label{pro:rank-func} Let $\pi=(A_1,\ldots,A_m)$ be the associated partition of an $(s,t)$-spike. If $J\subseteq[m]$, then \[r(A_J) = \begin{cases} 2|J| & \textrm{if $|J| < s$,}\\ s+|J|-1 & \textrm{if $s\leq|J| \leq m-t+1$,}\\ m+s-t & \textrm{if $|J| \ge m-t+1$.} \end{cases}\] \end{proposition} \begin{proof} If $|J|<s$, then $A_J$ is properly contained in a circuit and is therefore independent. Thus, $r(A_J)=|A_J|=2|J|$. We now prove that $r(A_J)=s+|J|-1$ if $s\leq|J| \leq m-t+1$. We proceed by induction on $|J|$. As a base case, if $|J|=s$, then $A_J$ is a circuit. Therefore, $r(A_J)=|A_J|-1=s+|J|-1$. Now, for the inductive step, let $s<|J|\leq m-t+1$, and let $J'\subseteq J$ with $|J'|=|J|-1$. By induction, $r(A_{J'})=s+|J|-2$. Let $\{x_i,y_i\}=A_J-A_{J'}$. By \cref{l:circuits}, since $|J|<m-t+2$, there is no circuit $C$ such that $x_i\in C\subseteq A_{J'}\cup\{x_i\}$. Therefore, $x_i\notin\cl(A_{J'})$, and $r(A_{J'}\cup\{x_i\})=r(A_{J'})+1$. On the other hand, since $|J|>s$, there is a circuit $C$ such that $y_i\in C\subseteq A_{J}$. Therefore, $y_i\in\cl(A_{J'}\cup\{x_i\})$, and $r(A_J)=r(A_{J'})+1=s+|J|-1$. Note that the preceding argument, along with \cref{lem:rank-matroid} implies that, if $|J|=m-t+1$, then $A_J$ is spanning. Thus, if $|J|\geq m-t+1$, then $r(A_J)=r(M)=m+s-t$. \end{proof} \subsection*{Connectivity} Let $M$ be a matroid with ground set $E$. Recall that the \emph{connectivity function} of $M$, denoted by $\lambda$, is defined as \begin{align*} \lambda(X) = r(X) + r(E - X) - r(M), \end{align*} for all subsets $X$ of $E$. 
In the case where $M$ is an $(s,t)$-spike of order $m$ and $X=A_J$ for some set $J\subseteq[m]$, this implies \begin{align*} \lambda(A_J) = r(A_J) + r(A_{[m]-J}) - r(M). \end{align*} Therefore, \cref{pro:rank-func} allows us to easily compute $\lambda(A_J)$. \begin{lemma} \label{lem:conn} Let $\pi=(A_1,\ldots,A_m)$ be the associated partition of an $(s,t)$-spike, and let $(J,K)$ be a partition of $[m]$ with $|J| \le |K|$. \begin{enumerate} \item If $|J|\leq t-1$, then $\lambda(A_J)=r(A_J)$. \item If $t-1\leq|J|\leq m-s$, then \[\lambda(A_J)= \begin{cases} t+|J|-1 & \textrm{if $|J| < s$,}\\ s+t-2 & \textrm{if $s\leq|J|\leq m-t+1$.} \end{cases}\] \item If $|J|> m-s$, then $\lambda(A_J)=m-s+t$. \end{enumerate} \end{lemma} \begin{proof} If $|J|\leq t-1$, then $|K|\geq m-t+1$. Therefore, $A_K$ is spanning, and $\lambda(A_J)=r(A_J)+r(A_K)-r(M)=r(A_J)$. Statement (i) follows. If $t-1\leq|J|\leq m-s$, then $s\leq|K|\leq m-t+1$. Therefore, $\lambda(A_J)=r(A_J)+r(A_K)-r(M)=r(A_J)+s+m-|J|-1-(m+s-t)$. Statement (ii) follows. (Note that we cannot have $|J|>m-t+1$ because otherwise $|K|<t-1\leq|J|$.) If $|J|> m-s$, then $s>|K|\geq|J|$. Therefore, $\lambda(A_J)=r(A_J)+r(A_K)-r(M)=2|J|+2(m-|J|)-(m+s-t)=m-s+t$. Statement (iii) follows. \end{proof} Using the terminology of~\cite{ao2008}, \cref{lem:conn} implies the following. \begin{proposition} \label{pro:anemone} Let $(A_1,\dotsc,A_m)$ be the associated partition of an $(s,t)$-spike~$M$, and suppose that $(P_1,\dotsc,P_k)$ is a partition of $E(M)$ such that, for each $i \in [k]$, $P_i = \bigcup_{i \in I}A_i$ for some subset $I$ of $[m]$, with $|I| \ge \max\{s-1,t-1\}$. Then $(P_1,\dotsc,P_k)$ is an $(s+t-1)$-anemone. \end{proposition} We now continue our study of the connectivity of $(s,t)$-spikes. \begin{lemma} \label{ind-and-coind} Let $M$ be an $(s,t)$-spike of order $m\geq3\max\{s,t\}-2$, and let $X\subseteq E(M)$ such that $|X|\leq2\min\{s,t\}-1$. Then $\lambda(X)=|X|$. 
\end{lemma} \begin{proof} By Lemma \ref{l:circuits}, if $X$ is dependent, then either $|X|=2s$ or $|X|\geq m-t+2\geq 3\max\{s,t\}-2-t+2=3\max\{s,t\}-t\geq2\max\{s,t\}\geq2s$. However, $|X|\leq2\min\{s,t\}-1<2s$. Therefore, $X$ is independent, which implies that $r(X)=|X|$. By a similar argument, using the dual of \cref{l:circuits}, $X$ is coindependent, implying that $r(E(M)-X)=r(M)$. Therefore, \begin{align*} \lambda(X)&=r(X)+r(E(M)-X)-r(M)\\ &=|X|+r(M)-r(M)\\ &=|X|, \end{align*} proving the lemma. \end{proof} \begin{theorem} Let $M$ be an $(s,t)$-spike of order \[m\geq\max\{3s+t,s+3t\}-4,\] where $\min\{s,t\}\geq2$. Then $M$ is $(2\min\{s,t\}-1)$-connected. \end{theorem} \begin{proof} Because $M^*$ is a $(t,s)$-spike and because $\lambda_{M^*}=\lambda_M$, we may assume without loss of generality that $t\leq s$. Note that $\max\{3s+t,s+3t\}=3\max\{s,t\}+\min\{s,t\}$. Therefore, $m\geq3s+t-4$, and we must show that $M$ is $(2t-1)$-connected. Now, suppose for a contradiction that $M$ is not $(2t-1)$-connected. Then there is a $k$-separation $(P,Q)$ of $M$, with $|P|\geq|Q|$, for some $k<2t-1$. Therefore, $\lambda(P)=\lambda(Q)<k\leq2t-2$. First, we consider the case where $A_I \subseteq P$, for some $(t-1)$-element set $I \subseteq [m]$. Let $U = \{u \in [m] : |P \cap A_u|= 1\}$. Then $A_j \subseteq \cl_{M^*}(P)$ for each $j \in U$. For such a $j$, it follows, by the definition of $\lambda_{M^*}$ (which is equal to $\lambda_M=\lambda$), that $\lambda(P \cup A_j) \le \lambda(P)$. We use this repeatedly below; in particular, we see that $\lambda(P\cup A_U)\leq\lambda(P)$. Let $P' = P\cup A_U$, and let $Q' = E(M)-P'$. Then there is a partition $(J,K)$ of $[m]$, with $|J|\leq|K|$, such that $Q'=A_J$ and $P'=A_K$. Moreover, $\lambda(Q')=\lambda(P')\leq\lambda(P)$. Suppose $|J|\geq t-1$. Note that $m\geq3s+t-4\geq2s$ since $\min\{s,t\}\geq2$. Therefore, $|J|\leq\frac{1}{2}m=m-\frac{1}{2}m\leq m-\frac{1}{2}(2s)=m-s$. 
Thus, to determine $\lambda(Q')$, we need only consider Lemma \ref{lem:conn}(ii). If $|J|\geq s$, then by Lemma \ref{lem:conn}(ii), \[\lambda(P)\geq\lambda(P')=\lambda(Q')=s+t-2\geq2t-2,\] a contradiction. Otherwise, $|J|<s$, implying by Lemma \ref{lem:conn}(ii) that \[\lambda(P)\geq\lambda(P')=\lambda(Q')=t+|J|-1\geq t+t-1-1=2t-2,\] another contradiction. Therefore, $|J|<t-1$. Let $U'\subseteq U$ such that $|U'|=|Q|-(2t-2)$. Then $\lambda(P) \ge \lambda\left(P \cup A_{U'}\right) = \lambda\left(Q- A_{U'}\right)$. Since $\left|Q- A_{U'}\right| = 2t-2$ and $m\geq3s+t-4\geq3s-2$, \cref{ind-and-coind} implies that $\lambda\left(Q-A_{U'}\right)=2t-2$, so $\lambda(P) \ge 2t-2$, a contradiction. Now we consider the case that $|\{i \in [m] : A_i \subseteq P\}| < t-1$. Since $|Q| \le |P|$, it follows that $|\{i \in [m] : A_i \subseteq Q\}| \le |\{i \in [m] : A_i \subseteq P\}| < t-1<s$. Now, since $|\{i \in [m] : A_i \subseteq P\}| < t-1$, we have $|\{i \in [m] : A_i \cap Q \neq \emptyset\}| > m-(t-1)$. Therefore, $r(Q) \ge m-(t-1)$ by \cref{l:circuits}. Similarly, $r(P) \ge m-(t-1)$. Thus, \begin{align*} \lambda(P) &= r(P) + r(Q) - r(M) \\ &\ge (m-(t-1)) + (m-(t-1)) - (m+s-t) \\ &=m-s-t+2 \\ &\ge 3s+t-4-s-t+2 \\ &= 2s-2\\ &\ge 2t-2, \end{align*} a contradiction. This completes the proof. \end{proof} \subsection*{Constructions} In \cite{bccgw2019}, a construction is described that, starting from a $(t,t)$-spike $M_0$, obtains a $(t+1,t+1)$-spike $M_1$. This construction consists of a certain elementary quotient $M_0'$ of $M_0$, followed by a certain elementary lift $M_1$ of $M_0'$. It is shown in \cite{bccgw2019} that $M_1$ is a $(t+1,t+1)$-spike as long as the order of $M_0$ is sufficiently large. In the process of constructing $M_1$ in this way, the intermediary matroid $M_0'$ is a $(t,t+1)$-spike. For the sake of completeness, we will review this construction in the more general case where $M_0$ is an $(s,t)$-spike, in which case $M_0'$ is an $(s,t+1)$-spike. 
To construct an $(s+1,t)$-spike, we perform the construction on $M^*$ and dualize. Since $(2,2)$-spikes (and indeed, $(1,1)$-spikes) are well known to exist, this means that $(s,t)$-spikes exist for all positive integers $s$ and $t$. It is also shown in \cite{bccgw2019} that all $(t,t)$-spikes can be constructed in this manner. We also extend this to the general case of $(s,t)$-spikes below. Recall that $M_1$ is an \emph{elementary quotient} of $M_0$ if there is a single-element extension $M^+_0$ of $M_0$ by an element~$e$ such that $M_1 = M^+_0 / e$. If $M_1$ is an elementary quotient of $M_0$, then $M_0$ is an \emph{elementary lift} of $M_1$. Also, note that if $M_1$ is an elementary lift of $M_0$, then $M_1^*$ is an elementary quotient of $M_0^*$. \begin{construction} \label{cons:quotient} Let $M$ be an $(s,t)$-spike of order~$m \ge s+t$, with associated partition $\pi$. Let $M+e$ be a single-element extension of $M$ by an element $e$ such that $e$ blocks each $2t$-element cocircuit that is a union of $t$ arms of $M$. Then let $M'=(M+e)/e$. \end{construction} In other words, $M+e$ has the property that $e\notin \cl_{M+e}(E(M)-C^*)$ for every $2t$-element cocircuit $C^*$ that is the union of $t$ arms. Note that one possibility is that $M+e$ is the free extension of $M$ by an element $e$. Since $m-t\geq s$, we have $e\notin\cl_{M+e}(C)$ for each $2s$-element circuit $C$. Thus, in $M'$, the union of any $s$ arms of the $(s,t)$-spike $M$ is still a circuit of $M'$. However, since $r(M') = r(M) - 1$, the union of any $t+1$ arms is a $2(t+1)$-element cocircuit. Therefore, $M'$ is an $(s,t+1)$-spike. Note that $M'$ is not unique; more than one $(s,t+1)$-spike can be constructed from a given $(s,t)$-spike $M$ using \cref{cons:quotient}. Given an $(s+1,t)$-spike~$M'$, we will describe how to obtain an $(s,t)$-spike~$M$ from $M'$ by a specific elementary quotient. This process reverses the dual of \cref{cons:quotient}. 
This will then imply that every $(s,t)$-spike can be constructed from a $(1,1)$-spike by repeated use of \cref{cons:quotient} and its dual. \cref{modcut} describes the single-element extension that gives rise to the elementary quotient we desire. Intuitively, the extension adds a ``tip'' to the $(s,t)$-spike. In the proof of this lemma, we assume knowledge of the theory of modular cuts (see \cite[Section~7.2]{oxbook}). The proof of \cref{modcut} will be very similar to the proof of \cite[Lemma 6.6]{bccgw2019}. However, we note that \cite[Lemma 6.6]{bccgw2019} is falsely stated; what is proven in \cite{bccgw2019} is essentially the specialisation of \cref{modcut}, below, in the case that $s=t$. The statement of \cite[Lemma 6.6]{bccgw2019} replaces the condition that $M$ is a $(t,t)$-spike with the weaker condition that $M$ has a $t$-echidna. To demonstrate that this is overly general, consider the rank-$3$ matroid consisting of two disjoint lines with four points. Let these lines be $\{a,b,c,d\}$ and $\{w,x,y,z\}$. Then $(\{a,b\},\{w,x\})$ is a $2$-echidna of order $2$. For \cite[Lemma 6.6]{bccgw2019} to be true, we would need a single-element extension $M^+$ by an element $e$ such that $e\in\cl_{M^+}(\{a,b\})$ but $e\notin\cl_{M^+}(\{c,d\})$. This is impossible since $\cl_M(\{a,b\})=\cl_M(\{c,d\})$. \begin{lemma} \label{modcut} Let $M$ be an $(s,t)$-spike. There is a single-element extension $M^+$ of $M$ by an element $e$ having the property that, for every $X \subseteq E(M)$, $e \in \cl_{M^+}(X)$ if and only if $X$ contains at least $s-1$ arms of $M$. \end{lemma} \begin{proof} Since $M$ is an $(s,t)$-spike, there is a partition $\pi=(S_1,\dotsc,S_m)$ of $E(M)$ that is both an $s$-echidna and a $t$-coechidna. Let $$\mathcal{F} = \left\{\bigcup_{i\in I}S_i : I \subseteq [m] \textrm{ and } |I|=s-1\right\}.$$ By the definition of an $s$-echidna, $\mathcal{F}$ is a collection of flats of $M$. 
Let $\mathcal{M}$ be the set of all flats of $M$ containing some flat $F \in \mathcal{F}$. We claim that $\mathcal{M}$ is a modular cut. Recall that, for distinct $F_1,F_2 \in \mathcal{M}$, the pair $(F_1,F_2)$ is \emph{modular} if $r(F_1) + r(F_2) = r(F_1 \cup F_2) + r(F_1 \cap F_2)$. To show that $\mathcal{M}$ is a modular cut, it suffices to prove that, for any $F_1,F_2 \in \mathcal{M}$ such that $(F_1,F_2)$ is a modular pair, $F_1 \cap F_2 \in \mathcal{M}$. For any $F \in \mathcal{M}$, since $F$ contains at least $s-1$ arms of $M$, and the union of any $s$ arms is a circuit, it follows that $F$ is a union of arms of $M$. Thus, let $F_1,F_2 \in \mathcal{M}$ be such that $F_1=\bigcup_{i\in I_1}S_i$ and $F_2=\bigcup_{i\in I_2}S_i$, where $I_1$ and $I_2$ are distinct subsets of $[m]$ with $u_1=|I_1| \ge s-1$ and $u_2=|I_2|\ge s-1$. Let $q=|I_1 \cap I_2|$. Then $F_1 \cup F_2$ is the union of $u_1 + u_2 - q \ge s-1$ arms, and $F_1\cap F_2$ is the union of $q$ arms. We show that if $q<s-1$, then $(F_1,F_2)$ is not a modular pair. We consider several cases. First, suppose $u_1,u_2\leq m-t+1$. By \cref{pro:rank-func}, \begin{align*} r(F_1) + r(F_2) &= (s + u_1 - 1) + (s + u_2 - 1) \\ &> (s-1 + u_1 + u_2 - q) +2q \\ &= s+|I_1\cup I_2|-1+2|I_1\cap I_2| \\ &\geq r(F_1 \cup F_2) + r(F_1 \cap F_2). \end{align*} Next, consider the case where $u_2\leq m-t+1<u_1$. (By symmetry, the argument is the same if $u_1$ and $u_2$ are swapped.) One can check that $u_1+u_2-q>m-t+1$. By \cref{pro:rank-func}, \begin{align*} r(F_1) + r(F_2) &= (m+s-t) + (s + u_2 - 1) \\ &> (m + s-t)+2q\\ &= r(F_1 \cup F_2) + r(F_1 \cap F_2). \end{align*} Finally, consider the case where $u_1,u_2>m-t+1$. We have \[r(F_1) + r(F_2) = 2m+2s -2t,\] which by \cref{tspikeorder}, is at least \begin{align*} m+3s-t-1 &> m+s-t+2q\\ &= r(F_1 \cup F_2) + r(F_1 \cap F_2). \end{align*} Thus, in all cases, $(F_1,F_2)$ is not a modular pair. Therefore, we have shown that $\mathcal{M}$ is a modular cut. 
Now, there is a single-element extension corresponding to the modular cut~$\mathcal{M}$, and this extension satisfies the requirements of the lemma (see, for example, \cite[Theorem~7.2.3]{oxbook}). \end{proof} \begin{theorem} Let $M$ be an $(s,t)$-spike of order $m\geq s+t$. Then $M$ can be constructed from a $(1,1)$-spike of order $m$ by applying \cref{cons:quotient} $t-1$ times, followed by the dual of \cref{cons:quotient} $s-1$ times. \end{theorem} \begin{proof} For $s=t=1$, the result is clear. Otherwise, by duality, we may assume without loss of generality that $s>1$. By induction and duality, it suffices to show that $M$ can be constructed from an $(s-1,t)$-spike of order $m$ by applying the dual of \cref{cons:quotient} once. Let $\pi=(A_1,\dotsc,A_m)$ be the associated partition of $M$. Let $M^+$ be the single-element extension of $M$ by an element~$e$ described in \cref{modcut}. Let $M'=M^+/e$. We claim that $\pi$ is an $(s-1)$-echidna and a $t$-coechidna that partitions the ground set of $M'$. Let $X$ be the union of any $s-1$ spines of $\pi$. Then $X$ is independent in $M$, and $X \cup \{e\}$ is a circuit in $M^+$, so $X$ is a circuit in $M'$. Thus, $\pi$ is an $(s-1)$-echidna of $M'$. Now let $C^*$ be the union of any $t$ spines of $\pi$, and let $H=E(M)-C^*$. Then $H$ is the union of at least $s-1$ spines, so $e \in \cl_{M^+}(H)$. Now $H \cup \{e\}$ is a hyperplane in $M^+$, so $C^*$ is a cocircuit in $M^+$ and therefore in $M'$. Hence $\pi$ is a $t$-coechidna of $M'$. Note that $M'$ is an elementary quotient of $M$, so $M$ is an elementary lift of $M'$ where none of the $2(s-1)$-element circuits of $M'$ are preserved in $M$. So the $(s,t)$-spike $M$ can be obtained from the $(s-1,t)$-spike $M'$ using the dual of \cref{cons:quotient}. \end{proof} \end{document}
\begin{document} \title{Generalized Sweeping Line Spanners} \begin{abstract} We present \emph{sweeping line graphs}, a generalization of $\Theta$-graphs. We show that these graphs are spanners of the complete graph, as well as of the visibility graph when line segment constraints or polygonal obstacles are considered. Our proofs use general inductive arguments to make the step to the constrained setting that could apply to other spanner constructions in the unconstrained setting, removing the need to find separate proofs that they are spanning in the constrained and polygonal obstacle settings. \end{abstract} \section{Introduction} A \emph{geometric graph} $G$ is a graph whose vertices are points in the plane and whose edges are line segments between pairs of points. Every edge in the graph has weight equal to the Euclidean distance between its two endpoints. The distance between two vertices $u$ and $v$ in $G$, denoted by $\delta_{G}(u,v)$, or simply $\delta(u,v)$ when $G$ is clear from the context, is defined as the sum of the weights of the edges along the shortest path between $u$ and $v$ in $G$. A subgraph $H$ of $G$ is a $t$-spanner of $G$ (for $t \ge 1$) if for each pair of vertices $u$ and $v$, $\delta_{H}(u,v) \le t \cdot \delta_{G}(u,v)$. The smallest $t$ for which $H$ is a $t$-spanner is the \emph{spanning ratio} or \emph{stretch factor} of $H$. The graph $G$ is referred to as the \emph{underlying graph} of $H$. The spanning properties of various geometric graphs have been studied extensively in the literature (see~\cite{BS11, NS-GSN-06} for an overview). The spanning ratio of a class of graphs is the supremum over the spanning ratios of all members of that graph class. Since spanners preserve the lengths of all paths up to a factor of $t$, these graphs have applications in the context of geometric problems, including motion planning and optimizing network costs and delays. 
We introduce a generalization of an existing geometric spanner ($\Theta$-graphs) which we call \emph{sweeping line spanners}. We show that these graphs are spanners, also when considering line segment obstacles or polygonal obstacles during its construction. We show this using a very general method that we conjecture applies to other geometric spanners as well, meaning that such proofs do not have to be reinvented for different spanner constructions. Clarkson~\cite{C87} and Keil~\cite{K88} independently introduced $\Theta$-graphs as follows: for each vertex $u$, we partition the plane into $k$ disjoint cones with apex $u$. Each cone has aperture equal to $\theta = \frac{2\pi}{k}$ (see Figure~\ref{fig:cones}) and the orientation of these cones is identical for every vertex. The $\Theta$-graph is constructed by, for each cone with apex $u$, connecting $u$ to the vertex $v$ whose projection onto the bisector of the cone is closest to $u$ (see Figure~\ref{fig:closest}). When $k$ cones are used, we denote the resulting $\Theta$-graph by the $\Theta_{k}$-graph. We note that while $\Theta$-graphs can be defined using an arbitrary line in the cone instead of the bisector (see Chapter 4 of \cite{NS-GSN-06}), the bisector is by far the most commonly used construction and the spanning ratios mentioned in the following paragraph only apply when the bisector is used in the construction process. \begin{figure} \caption{The plane around $u$ is split into 10 cones.} \label{fig:cones} \caption{Vertex $v$ is the vertex with the projection closest to $u$.} \label{fig:closest} \end{figure} Ruppert and Seidel~\cite{RS91} upperbounded the spanning ratio of these graphs (when there are at least 7 cones) by $1/(1 - 2 \sin (\theta/2))$, when $\theta < \pi/3$. Bonichon~\emph{et~al.}\xspace~\cite{BGHI10} showed that the $\Theta_6$-graph has a tight spanning ratio of 2, i.e., it has a matching upper and lower bound of 2. 
Other recent results include a tight spanning ratio of $1 + 2 \sin(\theta/2)$ for $\Theta$-graphs with $4m + 2$ cones, where $m \geq 1$ is an integer, and improved upper bounds for the other families of $\Theta$-graphs~\cite{BCMRV16}. When there are fewer than 6 cones, most inductive arguments break down. Hence, it was only recently that upper bounds on the spanning ratio of the $\Theta_5$-graph and the $\Theta_4$-graph were determined: $\sqrt{50 + 22 \sqrt{5}} \approx 9.960$ for the $\Theta_5$-graph~\cite{BMRV2015} and $(1 + \sqrt{2}) \cdot (\sqrt{2} + 36) \cdot \sqrt{4 + 2 \sqrt{2}} \approx 237$ for the $\Theta_4$-graph~\cite{BBCRV2013}. These bounds were recently improved to $5.70$ for the $\Theta_5$-graph~\cite{BHO21} and $17$ for the $\Theta_4$-graph~\cite{BCDS19}. Constructions similar to those demonstrated by El Molla~\cite{E09} for Yao-graphs show that $\Theta$-graphs with fewer than 4 cones are not spanners. In fact, until recently it was not known that the $\Theta_3$-graph is connected~\cite{ABBBKRTV2013}. An alternative way of describing the $\Theta$-graph construction is that for each cone with apex $u$, we visualize a line perpendicular to the bisector of the cone that sweeps outwards from the apex $u$. The $\Theta$-graph is then constructed by connecting $u$ to the first vertex $v$ in the cone encountered by the sweeping line as it moves outwards from $u$ (see Figure~\ref{fig:sweepingTheta}). \begin{figure} \caption{The sweeping line of a cone in a $\Theta$-graph. The sweeping line is a thick black segment inside the cone and grey dotted outside, as vertices outside the cone are ignored.} \label{fig:sweepingTheta} \caption{The sweeping line of a cone in the sweeping line graph. 
For comparison to $\Theta$-graphs, the line through $u$ perpendicular to the sweeping line is shown in red.} \label{fig:sweepingLine} \end{figure} The \emph{sweeping line graph} generalizes this construction by allowing the sweeping line to be at an angle $\gamma$ to the line perpendicular to the bisector of the cone (see Figure~\ref{fig:sweepingLine}, a more precise definition follows in Section 2). When $\gamma \in [0, \frac{\pi - 3\theta}{2})$ we show that the resulting graph is a spanner whose spanning ratio depends only on $\theta$ and $\gamma$. We note that this angle $\gamma$ implies that the line perpendicular to the sweeping line can be \emph{outside} the cone associated with that sweeping line, which is not supported in the common $\Theta$-graph (using bisectors) or the more general ones described in~\cite{NS-GSN-06}. For example, when 10 cones are used (i.e., $\theta = \pi/5$) the construction in~\cite{NS-GSN-06} allows for an angle up to $\theta/2 = \pi/10$, while our construction allows angles approaching the far larger value of $(\pi - 3\theta)/2 = \pi/5$, and this difference increases as the number of cones increases (i.e., $\theta$ decreases). Pushing the generalization of these graphs even further, we consider spanners in two more general settings by introducing line segment constraints and polygonal obstacles. Specifically, given a set $P$ of points in the plane, let $S$ be a set of line segments connecting pairs of vertices in $P$ (not every vertex in $P$ needs to be an endpoint of a constraint in $S$). We refer to $S$ as \emph{line segment constraints}, or simply \emph{constraints}. The set of line segment constraints is planar, i.e. no two constraints intersect properly. Two vertices $u$ and $v$ can see each other if and only if either the line segment $uv$ does not properly intersect any constraint or $uv$ is itself a constraint. 
If two vertices $u$ and $v$ can see each other, the line segment $uv$ is a \emph{visibility edge} (this notion can be generalized to apply to arbitrary points that can see each other). The \emph{visibility graph} of $P$ with respect to a set of constraints $S$, denoted by $Vis(P,S)$, has $P$ as vertex set and all visibility edges defined by vertices in $P$ as edge set. In other words, it is the complete graph on $P$ minus all edges that properly intersect one or more constraints in $S$. The aim of this generalization is to construct a spanner such that no edge of the spanner properly intersects any constraint; in other words, to construct a spanner of $Vis(P,S)$. Polygonal obstacles generalize the notion of line segment constraints by allowing the constraints to be simple polygons instead of line segments. In this situation, $S$ is a finite set of simple polygonal obstacles where each corner of each obstacle is a vertex in $P$, such that no two obstacles intersect. We assume that each vertex is part of at most one polygonal obstacle and occurs at most once along its boundary, i.e., the obstacles are vertex-disjoint simple polygons. Note that $P$ can also contain vertices that do not lie on the corners of the obstacles. The definitions of visibility edge and visibility graph are analogous to the ones for line segment constraints. In the context of motion planning amid obstacles, Clarkson~\cite{C87} showed how to construct a linear-sized $(1+\epsilon)$-spanner of $Vis(P,S)$. Subsequently, Das~\cite{D97} showed how to construct a spanner of $Vis(P,S)$ with constant spanning ratio and constant degree. More recently, the constrained $\Theta_6$-graph was shown to be a 2-spanner of $Vis(P,S)$~\cite{BFRV12} when considering line segment constraints. This result was recently generalized to polygonal obstacles~\cite{polygonallemma}. 
Most related to this paper is the result by Bose and van Renssen~\cite{BR2019}, who generalized the results from~\cite{BCMRV16} to the setting with line segment constraints, without increasing the spanning ratios of the graphs. In this paper, we examine the sweeping line graph in the setting without constraints (or the unconstrained setting), with line segment constraints (or the constrained setting), and with polygonal obstacles. First, we generalize the spanning proof of the $\Theta$-graph given in the book by Narasimhan and Smid~\cite{NS-GSN-06} to the sweeping line graph in the unconstrained setting. Next, we extend the proof to the constrained setting and finally apply it to the case of polygonal obstacles. In all three cases, we prove that the spanning ratio is upperbounded by $\frac{1}{\cos(\frac{\theta}{2} + \gamma) - \sin\theta}$, where $\theta = \frac{2\pi}{k}$ ($k \geq 7$) and $\gamma \in [0, \frac{\pi - 3\theta}{2})$. The most interesting aspect of our approach is that the step from the unconstrained to the constrained and polygonal obstacle settings is very general and could apply to other spanner constructions in the unconstrained setting as well, making it a step towards a general condition of which spanners in the unconstrained setting can be proven to be spanners in the presence of obstacles. \section{Preliminaries} Throughout this paper, the notation $|pq|$ refers to the Euclidean distance between $p$ and $q$. We also emphasize that a point can be any point in the plane, while a vertex is restricted to being one of the points in the point set $P$. Before we formally define the \emph{sweeping line graph}, we first need a few other notions. A cone is the region in the plane between two rays that emanate from the same point, called the apex of the cone. Let $k \ge 7$ and define $\theta = \frac{2\pi}{k}$. If we rotate the positive $x$-axis by angles $i \cdot \theta$, $0 \le i < k$, then we get $k$ rays. 
Each pair of consecutive rays defines a cone whose apex is at the origin. We denote the collection of these $k$ cones by $\zeta_{k}$. Let $C$ be a cone of $\zeta_{k}$. For any point $p$ in the plane, we define $C_{p}$ to be the cone obtained by translating $C$ such that its apex is at $p$. Next, given an angle $\theta$ for a particular cone and an angle $\gamma \in [0, \frac{\pi - 3\theta}{2})$, we give the definition of the sweeping line: For any vertex $x$ in a cone, let the \emph{sweeping line} be the line through the vertex $x$ that is at an angle of $\gamma$ to the line perpendicular to the bisector of the cone. We then define $x_{\gamma}$ to be the intersection of the left-side of the cone and this sweeping line (see Figure~\ref{fig:definition}). \begin{figure} \caption{Defining the point $x_{\gamma}$.} \label{fig:definition} \end{figure} Finally, we define the sweeping line graph: \begin{definition}[Sweeping line graph] Given a set of points $P$ in the plane, an integer $k \ge 7$, $\theta = \frac{2\pi}{k}$, and $\gamma \in [0, \frac{\pi - 3\theta}{2})$. The sweeping line graph is defined as follows: \begin{enumerate} \item The vertices of the graph are the vertices of $P$. \item For each vertex $u$ of $P$ and for each cone $C$ of $\zeta_{k}$, such that the translated cone $C_{u}$ contains one or more vertices of $P \setminus \{u\}$, the spanner contains the undirected edge $(u, r)$, where $r$ is the vertex in $C_{u} \cap P \setminus \{u\}$, which minimizes $|ur_{\gamma}|$. This vertex $r$ is referred to as the \emph{closest} vertex in this cone of $u$. In the remainder of the paper, we use $|ur_{\gamma}|$ when measuring the closeness between a vertex $r$ and the apex $u$ of a cone that contains it. 
\end{enumerate} \end{definition} For ease of exposition, we only consider point sets in general position: no two vertices lie on a line parallel to one of the rays that define the cones, no two vertices lie on a line parallel to the sweeping line of any cone, and no three points are collinear. Using the structure of the sweeping line graph, we define a simple algorithm called \emph{sweeping-routing} to construct a path between any two vertices $s$ and $t$. The name comes from the fact that this is also a 1-local routing algorithm on the sweeping line graph. Let $t$ be the destination of the routing algorithm and let $u$ be the current vertex (initially $u = s$). If there exists a direct edge to $t$, follow this edge. Otherwise, follow the edge to the closest vertex in the cone of $u$ that contains $t$. \subsection{Auxiliary Lemmas} In order to prove that the sweeping line graph is indeed a spanner, we start with a number of auxiliary lemmas needed to prove the main geometric lemma used throughout our proofs. \begin{lemma} \label{lem:lemma1} Let $\theta \in (0, \frac{2\pi}{7}]$ and $\gamma \in [0, \frac{\pi - 3\theta}{2})$. Then $\cos(\frac{\theta}{2} + \gamma) - \sin \theta > 0$. \end{lemma} \begin{proof} Since $\cos(\frac{\theta}{2} + \gamma) - \sin\theta$ is decreasing with respect to $\gamma$ in our domain, it is minimized when $\gamma$ is maximized. It follows that: \begin{align*} \cos\left(\frac{\theta}{2} + \gamma\right) - \sin \theta &> \cos\left(\frac{\theta}{2} + \frac{\pi}{2} - \frac{3\theta}{2}\right) - \sin \theta \\ &= \cos\left(\frac{\pi}{2} - \theta\right) - \sin\theta\\ &= \sin\theta - \sin\theta\\ &= 0 \end{align*} Therefore, within our domain, $\cos(\frac{\theta}{2} + \gamma) - \sin \theta > 0$. \end{proof} \begin{lemma} \label{lem:lemma2} Let $\theta \in (0, \frac{2\pi}{7}]$, $\gamma \in [0,\frac{\pi - 3\theta}{2})$, and $\kappa \in [0,\theta]$. 
Then $\cos(\frac{\theta}{2} - \gamma - \kappa) > 0$ and $\cos(\frac{\theta}{2} + \gamma - \kappa) > 0$. \end{lemma} \begin{proof} To prove this, it suffices to show that $-\frac{\pi}{2} < \frac{\theta}{2} - \gamma - \kappa, \frac{\theta}{2} + \gamma - \kappa < \frac{\pi}{2}$ as within this domain, $\cos(\frac{\theta}{2} - \gamma - \kappa)$ and $\cos(\frac{\theta}{2} + \gamma - \kappa)$ are greater than 0. \emph{Proof of $-\frac{\pi}{2} < \frac{\theta}{2} - \gamma - \kappa < \frac{\pi}{2}$:} First, we show that $\frac{\theta}{2} - \gamma - \kappa$ is upperbounded by $\frac{\pi}{2}$: \begin{align*} \frac{\theta}{2} - \gamma - \kappa \le \frac{\theta}{2} &\le \frac{\pi}{7} \text{ (using the domain of $\theta$)}\\ &< \frac{\pi}{2} \end{align*} Next, we show that $\frac{\theta}{2} - \gamma - \kappa$ is lowerbounded by $-\frac{\pi}{2}$: \begin{align*} \frac{\theta}{2} - \gamma - \kappa > -\gamma - \kappa &\ge -\frac{\pi - 3\theta}{2}- \theta \text{ (using the domain of $\gamma$ and $\kappa$)}\\ &> -\frac{\pi}{2} \end{align*} \emph{Proof of $-\frac{\pi}{2} < \frac{\theta}{2} + \gamma - \kappa < \frac{\pi}{2}$:} First, we show that $\frac{\theta}{2} + \gamma - \kappa$ is upperbounded by $\frac{\pi}{2}$: \begin{align*} \frac{\theta}{2} + \gamma - \kappa \le \frac{\theta}{2} + \gamma &< \frac{\theta}{2} + \frac{\pi - 3\theta}{2} \text{ (using the bounds on $\gamma$)}\\ &= \frac{\pi -2\theta}{2}\\ &< \frac{\pi}{2} \end{align*} Next, we show that $\frac{\theta}{2} + \gamma - \kappa$ is lowerbounded by $-\frac{\pi}{2}$: \begin{align*} \frac{\theta}{2} + \gamma - \kappa > -\kappa &\ge -\frac{2\pi}{7} \text{ (using the bounds on $\kappa$)}\\ &> -\frac{\pi}{2} \end{align*} \end{proof} \begin{lemma} \label{lem:lemma3} Let $a$ and $b$ be positive reals and $\theta \in (0, \frac{2\pi}{7}]$, $\gamma \in [0,\frac{\pi - 3\theta}{2})$, and $\kappa \in [0,\theta]$. 
Then $a-\frac{b(\cos(\frac{\theta}{2} + \gamma) - \sin \theta)}{\cos (\frac{\theta}{2} - \gamma - \kappa)} \le a - b(\cos(\frac{\theta}{2} + \gamma) - \sin \theta)$ and $a-\frac{b(\cos(\frac{\theta}{2} + \gamma) - \sin \theta)}{\cos (\frac{\theta}{2} + \gamma - \kappa)} \le a - b(\cos(\frac{\theta}{2} + \gamma) - \sin \theta)$. \end{lemma} \begin{proof} By Lemma~\ref{lem:lemma2}, we know that $0 < \cos (\frac{\theta}{2} - \gamma - \kappa) \le 1$. This implies that $1 \le \frac{1}{\cos (\frac{\theta}{2} - \gamma - \kappa)}$ and thus $-\frac{1}{\cos \left(\frac{\theta}{2} - \gamma - \kappa\right)} \le -1$. Using that $(\cos(\frac{\theta}{2} + \gamma) - \sin \theta) > 0$ from Lemma~\ref{lem:lemma1}, we obtain: \begin{align*} -\frac{b(\cos(\frac{\theta}{2} + \gamma) - \sin \theta)}{\cos (\frac{\theta}{2} - \gamma - \kappa)} &\le -b\left(\cos\left(\frac{\theta}{2} + \gamma\right) - \sin \theta\right) \end{align*} which implies that \begin{align*} a - \frac{b(\cos(\frac{\theta}{2} + \gamma) - \sin \theta)}{\cos (\frac{\theta}{2} - \gamma - \kappa)} &\le a - b\left(\cos\left(\frac{\theta}{2} + \gamma\right) - \sin \theta\right) \end{align*} An analogous argument shows that $a-\frac{b(\cos(\frac{\theta}{2} + \gamma) - \sin \theta)}{\cos (\frac{\theta}{2} + \gamma - \kappa)} \le a - b(\cos(\frac{\theta}{2} + \gamma) - \sin \theta)$. \end{proof} \begin{lemma} \label{lem:lemma4} Let $\theta \in (0, \frac{2\pi}{7}]$ and $\gamma \in [0, \frac{\pi - 3\theta}{2})$. Then $\cos(\frac{\theta}{2} - \gamma) \ge \cos(\frac{\theta}{2} + \gamma)$. \end{lemma} \begin{proof} To prove this, we show that $\cos\left(\frac{\theta}{2} - \gamma\right) -\cos\left(\frac{\theta}{2} + \gamma\right)$ is at least 0. 
\begin{align*} \cos\left(\frac{\theta}{2} - \gamma\right) -\cos\left(\frac{\theta}{2} + \gamma\right) &= \cos\frac{\theta}{2}\cos \gamma + \sin \frac{\theta}{2} \sin \gamma - \cos \frac{\theta}{2} \cos \gamma + \sin \frac{\theta}{2} \sin \gamma\\ &= 2\sin \frac{\theta}{2} \sin \gamma\\ &\ge 0 \text{ (due to the domain of $\theta$ and $\gamma$)} \end{align*} \end{proof} \begin{lemma} \label{lem:lemma5} Let $\theta \in (0, \frac{2\pi}{7}]$, $\gamma \in [0, \frac{\pi-3\theta}{2})$, and $\kappa \in [0, \theta]$. Then $\cos(\frac{\theta}{2} - \gamma - \kappa) \ge \cos(\frac{\theta}{2} + \gamma)$. \end{lemma} \begin{proof} We observe that $\cos(\frac{\theta}{2} - \kappa - \gamma) = \cos(\kappa - (\frac{\theta}{2} - \gamma))$. Note that $ -\frac{\pi}{2} < \frac{\theta}{2} - \gamma < \frac{\pi}{2}$ and that $\cos(\kappa - (\frac{\theta}{2} - \gamma))$ corresponds to the shifted $\cos \kappa$ function. To prove the lemma, we distinguish between two cases: \emph{Case 1:} If $\frac{\theta}{2} - \gamma \le 0$, $\cos(\kappa - (\frac{\theta}{2}-\gamma))$ corresponds to translating $\cos \kappa$ to the left by $\frac{\theta}{2} - \gamma$. Therefore, $\cos(\kappa - (\frac{\theta}{2} - \gamma))$ is decreasing over the domain of $\kappa$, which implies that $\cos(\frac{\theta}{2} - \gamma - \kappa) \ge \cos(\frac{\theta}{2} - \gamma - \theta) = \cos(\frac{\theta}{2} + \gamma)$. \emph{Case 2:} If $\frac{\theta}{2} - \gamma > 0$, $\cos(\kappa - (\frac{\theta}{2}-\gamma))$ corresponds to translating $\cos \kappa$ to the right by $\frac{\theta}{2}-\gamma$. Therefore, for $\kappa \in (\frac{\theta}{2}-\gamma, \theta]$ the function is decreasing, so we can apply an argument analogous to that in Case 1 to prove the result in this domain. It remains to prove the result for $\kappa \in [0, \frac{\theta}{2}-\gamma]$. In this domain, $\cos(\kappa - (\frac{\theta}{2}-\gamma))$ is increasing. 
Therefore to prove the result, we need to show that at $\kappa = 0$ (where $\cos(\kappa - (\frac{\theta}{2}-\gamma))$ is minimized in this domain), $\cos(\kappa - (\frac{\theta}{2}-\gamma)) > \cos(\frac{\theta}{2} + \gamma)$. After substituting $\kappa = 0$, we see that this corresponds to showing that $\cos(\frac{\theta}{2} - \gamma) \ge \cos(\frac{\theta}{2} + \gamma)$ which follows from Lemma~\ref{lem:lemma4}. \end{proof} \begin{lemma} \label{lem:lemma6} Let $\theta \in (0, \frac{2\pi}{7}]$, $\gamma \in [0, \frac{\pi-3\theta}{2})$, and $\kappa \in [0, \theta]$. Then $\cos(\frac{\theta}{2} + \gamma - \kappa) \ge \cos(\frac{\theta}{2} + \gamma)$. \end{lemma} \begin{proof} We observe that $\cos(\frac{\theta}{2} - \kappa + \gamma) = \cos(\kappa - (\frac{\theta}{2} + \gamma))$. Note that $ 0 < \frac{\theta}{2} + \gamma < \frac{\pi}{2}$ and that $\cos(\kappa - (\frac{\theta}{2} + \gamma))$ corresponds to translating $\cos \kappa$ to the right by $\frac{\theta}{2}+\gamma$, since $\frac{\theta}{2}+\gamma$ is positive. Therefore, for $\kappa \in [0, \frac{\theta}{2}+\gamma],\text{ } \cos(\kappa - (\frac{\theta}{2} + \gamma))$ is increasing and so $\cos(\kappa - (\frac{\theta}{2} + \gamma)) \ge \cos(-(\frac{\theta}{2} + \gamma)) = \cos(\frac{\theta}{2} + \gamma).$ For $\kappa \in (\frac{\theta}{2}+\gamma, \theta], \text{ } \cos(\kappa - (\frac{\theta}{2} + \gamma))$ is decreasing. Therefore to prove the result, we need to show that at $\kappa = \theta$ (where $\cos(\kappa - (\frac{\theta}{2}+\gamma))$ is minimized in this domain), $\cos(\kappa - (\frac{\theta}{2}+\gamma)) \ge \cos(\frac{\theta}{2} + \gamma)$. After substituting $\kappa = \theta$, we see that this corresponds to showing that $\cos(\frac{\theta}{2} - \gamma) \ge \cos(\frac{\theta}{2} + \gamma)$ which we proved in Lemma~\ref{lem:lemma4}. \end{proof} \subsection{Main Geometric Lemma} Now that we have our auxiliary lemmas, we are ready to prove the main geometric lemma that we use throughout our later proofs. 
\begin{lemma} \label{lem:lemma7} Let $k \geq 7$ be an integer, $\theta = \frac{2\pi}{k}$, and $\gamma \in [0, \frac{\pi - 3\theta}{2})$. Let $p$ and $q$ be two distinct points in the plane and let $C$ be the cone of $\zeta_{k}$ such that $q \in C_{p}$. Let $r$ be a point in $C_{p}$ such that it is at least as close to $p$ as $q$ is to $p$. Then $| rq | \le | pq | - (\cos (\frac{\theta}{2} + \gamma) - \sin \theta) | pr |$. \end{lemma} \begin{proof} If $r = q$ then the claim holds. We assume in the rest of the proof that $r \neq q$. Let $\ell$ be the line through $p$ and $q$. Let $s$ be the intersection of $\ell$ and the sweeping line through $r$. Let $a$ and $b$ be the intersection of the sweeping line through $r$ with the left and right side of the cone respectively. Let $x$ be the intersection of the right side of the cone and the line through $a$ perpendicular to the bisector of the cone. Finally, let $\alpha$ be the angle between the segments $pq$ and $pr$ and let $\beta$ be the angle between the segment $pr$ and either the left or right side of the cone such that $\alpha$ and $\beta$ do not overlap. We have $0 \leq \alpha, \beta \leq \theta$ and $0 \leq \alpha + \beta \leq \theta$. We distinguish two cases depending on whether $r$ lies to the left or right of $\ell$. \emph{Case 1:} If $r$ lies to the left of $\ell$ (see Figure~\ref{fig:mainLemmaCase1}), we have that since $\triangle pax$ is isosceles, $\angle pax = \frac{\pi - \theta}{2}$. By considering $\triangle pas$, we can then deduce that $\angle asp = \frac{\pi}{2} + \frac{\theta}{2} - \gamma - (\alpha + \beta)$. Finally, by considering $\triangle prs$, we can deduce that $\angle prs = \frac{\pi}{2} - \frac{\theta}{2} + \gamma + \beta$. 
\begin{figure} \caption{The points and angles defined for Case 1.} \label{fig:mainLemmaCase1} \end{figure} Applying the sine rule and trigonometric rewrite rules, we have: \begin{align*} | rs | &= | pr | \frac{\sin \alpha}{\cos (\frac{\theta}{2} - (\alpha + \beta) - \gamma)} \\ &\le | pr | \frac{\sin \theta}{\cos (\frac{\theta}{2} - (\alpha + \beta) - \gamma)} \end{align*} and \begin{align*} | ps | &= | pr | \frac{\cos (\frac{\theta}{2} - \beta - \gamma)}{\cos (\frac{\theta}{2} - (\alpha + \beta) - \gamma)} \\ &\ge | pr | \frac{\cos (\frac{\theta}{2} + \gamma)}{\cos (\frac{\theta}{2} - (\alpha + \beta) - \gamma)} \text{(using Lemma~\ref{lem:lemma5}).} \end{align*} Applying the triangle inequality, we get: \begin{align*} | rq | &\le | rs | + | sq | \\ &= | rs | + | pq | - | ps | \\ &\le | pq | + | pr | \frac{\sin \theta}{\cos (\frac{\theta}{2} - (\alpha + \beta) - \gamma)} - | pr | \frac{\cos (\frac{\theta}{2} + \gamma)}{\cos (\frac{\theta}{2} - (\alpha + \beta) - \gamma)} \\ &= | pq | - | pr | \frac{1}{\cos \left(\frac{\theta}{2} - (\alpha + \beta) - \gamma\right)}\left(\cos \left(\frac{\theta}{2} + \gamma\right) - \sin{\theta}\right) \\ &\le | pq | - | pr | \left(\cos \left(\frac{\theta}{2} + \gamma\right) - \sin{\theta}\right) \text{ (using Lemma~\ref{lem:lemma3}).} \end{align*} \emph{Case 2:} If $r$ lies to the right of $\ell$ (see Figure~\ref{fig:mainLemmaCase2}), we have that since $\triangle pax$ is an isosceles triangle, $\angle pxa = \frac{\pi - \theta}{2}$. This implies that $\angle axb = \frac{\pi + \theta}{2}$. By considering $\triangle abx$, we can then deduce that $\angle abx = \frac{\pi}{2} - \frac{\theta}{2} - \gamma$. We can then deduce that $\angle psb = \frac{\pi}{2} + \frac{\theta}{2} + \gamma - (\alpha + \beta)$ by considering $\triangle psb$. Finally, by considering $\triangle psr$, we can deduce that $\angle srp = \frac{\pi}{2} - \frac{\theta}{2} - \gamma + \beta$. 
\begin{figure} \caption{The points and angles defined for Case 2.} \label{fig:mainLemmaCase2} \end{figure} By applying the sine rule and using trigonometric rewrite rules we have: \begin{align*} | rs | &= | pr | \frac{\sin \alpha}{\cos (\frac{\theta}{2} - (\alpha + \beta) + \gamma)} \\ &\le | pr | \frac{\sin \theta}{\cos (\frac{\theta}{2} - (\alpha + \beta) + \gamma)} \end{align*} and \begin{align*} | ps | &= | pr | \frac{\cos (\frac{\theta}{2} - \beta + \gamma)}{\cos (\frac{\theta}{2} - (\alpha + \beta) + \gamma)} \\ &\ge | pr | \frac{\cos (\frac{\theta}{2} + \gamma)}{\cos (\frac{\theta}{2} - (\alpha + \beta) + \gamma)} \text{(using Lemma~\ref{lem:lemma6}).} \end{align*} An argument identical to that of Case 1 completes the proof of this case. \end{proof} \section{The Unconstrained Setting} Now that we have our tools ready, it is time to prove that the sweeping line graph is a spanner in the unconstrained setting. \begin{theorem} Let k $\ge$ 7 be an integer, let $\theta = \frac{2\pi}{k}$, and let $\gamma \in [0, \frac{\pi - 3\theta}{2})$. Then the sweeping line construction produces a $t$-spanner, where t = $\frac{1}{\cos(\frac{\theta}{2} + \gamma) - \sin\theta}$. \end{theorem} \begin{proof} Let $u$ and $w$ be two distinct vertices of $P$. We consider the path $u = v_{0}, v_{1}, v_{2}, ...$ that is constructed by the \emph{sweeping-routing} algorithm. We start by showing that this path terminates at $w$. Let $i \ge 0$ and assume that $v_{i} \neq w$. Hence, vertex $v_{i+1}$ exists. Consider the three vertices $v_{i}$, $v_{i+1}$, and $w$. Let $C$ be the cone such that $w \in C_{u}$. By the construction of the sweeping line graph, $v_{i+1}$ is at least as close to $u$ as $w$ is to $u$. 
Hence, by applying Lemma~\ref{lem:lemma7} followed by Lemma~\ref{lem:lemma1} we obtain: \[| v_{i+1}w | \le | v_{i}w | - \left(\cos\left(\frac{\theta}{2} + \gamma\right) - \sin\theta\right)| v_{i}v_{i + 1} | < | v_{i}w |.\] Hence, the vertices $v_{0}, v_{1}, v_{2}, ...$ on the path starting at $u$ are pairwise distinct, as each vertex on this path lies strictly closer to $w$ than any of its predecessors. Since the set $P$ is finite, this implies that the algorithm terminates. Therefore, the algorithm constructs a path between $u$ and $w$. We now prove an upper bound on the length of this path. Let $m$ be the index such that $v_{m} = w$. Rearranging $| v_{i+1}w | \le | v_{i}w | - (\cos(\frac{\theta}{2} + \gamma) - \sin\theta)| v_{i}v_{i + 1} |$, yields \[| v_{i}v_{i + 1} | \le \frac{1}{\cos\left(\frac{\theta}{2} + \gamma\right) - \sin\theta}(| v_{i}w | - | v_{i+1}w |),\] for each $i$ such that $0 \le i < m$. Therefore, the path between $u$ and $w$ has length \begin{align*} \sum_{i=0}^{m-1} | v_{i}v_{i+1} | &\leq \frac{1}{\cos(\frac{\theta}{2} + \gamma) - \sin\theta} \sum_{i=0}^{m-1} (| v_{i}w | - | v_{i+1}w |)\\ &= \frac{1}{\cos(\frac{\theta}{2} + \gamma) - \sin\theta}(| v_{0}w | - | v_{m}w |)\\ &= \frac{1}{\cos(\frac{\theta}{2} + \gamma) - \sin\theta} |uw |, \end{align*} completing the proof. \end{proof} In addition to showing that the graph is a spanner, the above proof shows that the sweeping-routing algorithm constructs a bounded length path, thus we obtain a local competitive routing algorithm. \begin{corollary} Let $k \ge 7$ be an integer, let $\theta = \frac{2\pi}{k}$, and let $\gamma \in [0, \frac{\pi - 3\theta}{2})$. Then for any pair of vertices $u$ and $w$ the sweeping-routing algorithm produces a path from $u$ to $w$ of length at most $\frac{1}{\cos(\frac{\theta}{2} + \gamma) - \sin\theta} \cdot |uw|$. 
\end{corollary} \section{The Constrained Setting} Next, we generalize the sweeping line graph to a more general setting with the introduction of \emph{line segment constraints}. Recall that $P$ is a set of points in the plane and that $S$ is a set of line segments connecting two vertices in $P$ (not every vertex in $P$ needs to be an endpoint of a constraint in $S$). The set of constraints is planar, i.e. no two constraints intersect properly. Let vertex $u$ be an endpoint of a constraint $c$ and let the other endpoint be $v$. Let $C$ be the cone of $\zeta_{k}$ such that $v \in C_{u}$. The line through $c$ splits $C_u$ into two \emph{subcones} and for simplicity, we say that $v$ is contained in both of these. In general, a vertex $u$ can be an endpoint of several constraints and thus a cone can be split into several subcones. For ease of exposition, when a cone $C_u$ is not split, we consider $C_u$ itself to be a single subcone. We use $C^{j}_u$ to denote the $j$-th subcone of $C_u$. Recall that for any vertex $x$ in a cone, we defined $x_{\gamma}$ to be the intersection of the left-side of the cone and the sweeping line through $x$. We define the constrained sweeping line graph (see Figure~\ref{fig:defnConstrained}): \begin{definition}[Constrained sweeping line graph] Given a set of points $P$ in the plane, a plane set $S$ of line segment constraints connecting pairs of vertices in $P$, an integer $k \ge 7$, $\theta = \frac{2\pi}{k}$, and $\gamma \in [0, \frac{\pi - 3\theta}{2})$. The constrained sweeping line graph is defined as follows: \begin{enumerate} \item The vertices of the graph are the vertices of $P$. \item For each vertex $u$ of $P$ and for each \emph{subcone} $C^j_u$ that contains one or more vertices of $P \setminus \{u\}$ visible to $u$, the spanner contains the undirected edge $(u, r)$, where $r$ is the vertex in $C^j_u \cap P \setminus \{u\}$, which is visible to $u$ and minimizes $|ur_{\gamma}|$. 
This vertex $r$ is referred to as the \emph{closest} visible vertex in this subcone of $u$. \end{enumerate} \end{definition} \begin{figure} \caption{The edges in a cone of the constrained sweeping line graph. The thick red segment represents a constraint. The sweeping line of a subcone is a thick black segment inside the subcone and grey dotted outside, as vertices outside the subcone are ignored.} \label{fig:defnConstrained} \end{figure} To prove that the above graph is a spanner, we need three lemmas. A proof of Lemma~\ref{lem:convexChainConstrained} can be found in~\cite{constrained}. \begin{lemma} \label{lem:convexChainConstrained} Let $u$, $v$, and $w$ be three arbitrary points in the plane such that $uw$ and $vw$ are visibility edges and $w$ is not the endpoint of a constraint intersecting the interior of triangle $uvw$. Then there exists a convex chain of visibility edges (different from the chain consisting of $uw$ and $wv$) from $u$ to $v$ in triangle $uvw$, such that the polygon defined by $uw$, $wv$ and the convex chain is empty and does not contain any constraints. \end{lemma} \begin{lemma} \label{lem:visibilityEdgeConstrained} Let $u$ and $w$ be two distinct vertices in the constrained sweeping line graph such that $uw$ is a visibility edge and let $C$ be the cone of $\zeta_{k}$ such that $w \in C_{u}$. Let $v_{0}$ be the closest visible vertex in the subcone of $C_{u}$ that contains $w$. Let $\ell$ be the line through $u$ and $w$. Let $s$ be the intersection of $\ell$ and the sweeping line through $v_{0}$. Then $v_{0}s$ is a visibility edge. \end{lemma} \begin{proof} We use a proof by contradiction (see Figure~\ref{fig:constrained} for an example layout). Assume $v_{0}s$ is not a visibility edge. Then there must be a line segment constraint intersecting $v_{0}s$. This implies that one of its endpoints lies in $\triangle uv_{0}s$, as $uw$ and $uv_{0}$ are visibility edges and thus the constraint cannot pass through them. 
However, such an endpoint would imply that there exists a vertex that is visible to $u$ and closer to $u$ than $v_{0}$ (in particular the first vertex hit by the sweeping line starting from $u$), contradicting that $v_0$ is the closest visible vertex. Therefore, no constraint intersects $v_{0}s$. \end{proof} \begin{figure} \caption{An example layout of $p$, $q$, $v_0$, and $s$.} \label{fig:constrained} \end{figure} The following lemma ensures that we can apply induction later. \begin{lemma} \label{lem:segmentLengths} Let $k \ge 7$ be an integer, let $\theta = \frac{2\pi}{k}$, and let $\gamma \in [0, \frac{\pi - 3\theta}{2})$. Let $u$ and $w$ be two distinct vertices in the constrained sweeping line graph and let $C$ be the cone of $\zeta_{k}$ such that $w \in C_{u}$. Let $v_{0}$ be a vertex in $C_{u}$ such that it is the closest visible vertex to $u$. Let $\ell$ be the line through $uw$ and let $s$ be the intersection of $\ell$ and the sweeping line through $v_{0}$. Then $| v_{0}s | < | uw |$, $| sw | < | uw |$, and $| v_{0}w | < | uw |$. \end{lemma} \begin{proof} Refer to Figure~\ref{fig:constrained} for an example layout. We first show that $| v_{0}s | < | uw |$: By applying Lemma~\ref{lem:lemma7} to $u$, $s$, and $v_0$, we obtain that $| v_{0}s | \le | us | - (\cos(\frac{\theta}{2} + \gamma) - \sin\theta) \cdot| uv_{0} |$. Using Lemma~\ref{lem:lemma1}, this implies that $| v_{0}s | < | us |$. Finally, using the fact that $us$ is contained in $uw$, we conclude that $| v_{0}s | < | uw |$. Next, since $sw$ is contained in $uw$, it follows that $| sw | < | uw |$. Finally, we argue that $| v_{0}w | < | uw |$: By applying Lemma~\ref{lem:lemma7} to $u$, $w$, and $v_0$, we obtain that $| v_{0}w | \le | uw | - (\cos(\frac{\theta}{2} + \gamma) - \sin\theta) \cdot | uv_{0} | $. We can then apply Lemma~\ref{lem:lemma1} to obtain that $| v_{0}w | < | uw |$. \end{proof} We are now ready to prove that the constrained sweeping line graph is a spanner of the visibility graph. 
\begin{theorem} \label{theo:constrained} Let $k \geq 7$ be an integer, let $\theta = \frac{2\pi}{k}$, and let $\gamma \in [0, \frac{\pi - 3\theta}{2})$. Let $u$ and $w$ be two distinct vertices in the plane that can see each other. There exists a path connecting $u$ and $w$ in the constrained sweeping line graph of length at most $\frac{1}{\cos(\frac{\theta}{2} + \gamma) - \sin(\theta)} \cdot | uw |$. \end{theorem} \begin{proof} Let $C$ be the cone of $\zeta_{k}$ such that $w \in C_{u}$. We prove the theorem by induction on the rank of the pairs of vertices that can see each other, based on the Euclidean distance between them. Our inductive hypothesis is the following: $\delta(u,w) \le \frac{1}{\cos(\frac{\theta}{2} + \gamma) - \sin\theta} | uw |$, where $u$ and $w$ are two distinct vertices that can see each other and $\delta(u,w)$ is the length of the shortest path between them in the constrained sweeping line graph. \emph{Base case:} In this case, $u$ and $w$ are the Euclidean closest visible pair. If there exists an edge between $u$ and $w$, then $\delta(u, w) = | uw | \le \frac{1}{\cos(\frac{\theta}{2} + \gamma) - \sin\theta} | uw |$, so the induction hypothesis holds. It remains to show that, indeed, there exists an edge between the Euclidean closest visible pair. We prove this using contradiction. Assume that there is no edge between $u$ and $w$. Then there must exist a vertex $v_{0}$ in the subcone $C^j_u$ that contains $w$ and that has an edge to $u$ in the constrained sweeping line graph. Let $\ell$ be the line through $uw$. Let $s$ be the intersection of $\ell$ and the sweeping line through $v_{0}$. Note that $sw$ is a visibility edge, due to $uw$ being a visibility edge, and $v_{0}s$ is a visibility edge, by Lemma~\ref{lem:visibilityEdgeConstrained}. Therefore, by applying Lemma~\ref{lem:convexChainConstrained}, there exists a convex chain $v_{0}, v_{1}, \dots, v_{m-1}, v_{m} = w$ of visibility edges from $v_{0}$ to $w$ inside $\triangle v_0 s w$. 
By applying Lemma~\ref{lem:segmentLengths} using $u$, $w$, and $v_{0}$, we infer that all sides of $\triangle v_0 s w$ have length less than the Euclidean distance between $u$ and $w$. Since the convex chain is contained in this triangle, it follows that any pair of consecutive vertices along it has a smaller Euclidean distance than the Euclidean distance between $u$ and $w$. This contradicts that $uw$ is the closest Euclidean pair of visible vertices. \emph{Induction step:} We assume that the induction hypothesis holds for all pairs of vertices that can see each other and whose Euclidean distance is smaller than the Euclidean distance between $u$ and $w$. If $uw$ is an edge in the constrained sweeping line graph, the induction hypothesis follows by the same argument as in the base case. If there is no edge between $u$ and $w$, let $v_{0}$ be the closest visible vertex to $u$ (using the sweeping line) in the subcone $C^j_u$ that contains $w$. By construction, $(u,v_0)$ is an edge of the graph. Let $\ell$ be the line passing through $u$ and $w$. Let $s$ be the intersection of $\ell$ and the sweeping line through $v_{0}$ (see Figure~\ref{fig:constrained}). By definition, $\delta(u,w) \le | uv_{0} | + \delta(v_{0}, w)$. We know that $sw$ is a visibility edge, since $uw$ is a visibility edge, and we know $v_{0}s$ is a visibility edge by Lemma~\ref{lem:visibilityEdgeConstrained}. Therefore, by Lemma~\ref{lem:convexChainConstrained} there exists a convex chain $v_{0},...,v_{m} = w$ of visibility edges inside $\triangle v_0 s w$ connecting $v_{0}$ and $w$. Applying Lemma~\ref{lem:segmentLengths} to the points $u$, $v_{0}$, and $w$, we infer that each side of $\triangle v_{0}sw$ has length smaller than $|uw|$. Therefore, we can apply induction to every visibility edge along the convex chain from $v_0$ to $w$, as each has length smaller than $|uw|$. 
Therefore, \begin{align*} \delta(u,w) &\le | uv_{0} | + \sum_{i = 0}^{m-1} \delta(v_{i},v_{i + 1}) \\ &\le | uv_{0} | + \frac{1}{\cos(\frac{\theta}{2} + \gamma) - \sin\theta}\sum_{i = 0}^{m-1} | v_{i}v_{i + 1} | \text{ (using the induction hypothesis)}\\ &\le | uv_{0} | + \frac{1}{\cos(\frac{\theta}{2} + \gamma) - \sin\theta} (| v_{0}s | + | sw |) \text{ (using the fact that the chain is convex)} \end{align*} Finally, we apply Lemma~\ref{lem:lemma7}, using $r = v_{0}$, $q = s$, and $p = u$. This gives us that $| v_{0}s | \le | us | - | uv_{0} | \left(\cos\left(\frac{\theta}{2} + \gamma\right) - \sin \theta\right)$, which can be rewritten to $| uv_{0} | + | v_{0}s |/(\cos(\frac{\theta}{2} + \gamma) - \sin \theta) \le | us |/(\cos(\frac{\theta}{2} + \gamma) - \sin \theta)$. By adding $| sw |/(\cos(\frac{\theta}{2} + \gamma) - \sin\theta)$ to both sides and the fact that $|us| + |sw| = |uw|$, we obtain: \begin{align*} | uv_{0} | + \frac{1}{\cos(\frac{\theta}{2} + \gamma) - \sin \theta}(| v_{0}s | + | sw |) &\le \frac{1}{\cos(\frac{\theta}{2} + \gamma) - \sin \theta} (| us | + | sw |)\\ &= \frac{1}{\cos(\frac{\theta}{2} + \gamma) - \sin \theta} |uw|. \end{align*} Hence, we conclude that $\delta(u,w) \le \frac{1}{\cos(\frac{\theta}{2} + \gamma) - \sin \theta} | uw |$. \end{proof} \section{Polygonal Obstacles} Finally, we generalize the result from the previous section to more complex obstacles. Recall that in this setting $S$ is a finite set of polygonal obstacles where each corner of each obstacle is a vertex in $P$, such that no two obstacles intersect, and that each vertex is part of at most one polygonal obstacle and occurs at most once along its boundary. As in the constrained setting, the line segment between two visible vertices is called a \emph{visibility edge} and the \emph{visibility graph} of a point set $P$ and a set of polygonal obstacles $S$ is the complete graph on $P$ excluding all the edges that properly intersect some obstacle. 
Cones that are split are considered to be subcones of the original cone. Note that since $S$ consists of vertex-disjoint simple polygons, a cone can be split into at most two subcones. By focusing on the subcones, the polygonal-constrained sweeping line graph is defined analogously to the constrained sweeping line graph: for each subcone $C^{j}_u$ of each vertex $u$, we add an undirected edge between $u$ and the closest vertex in that subcone that can see $u$, where the distance is measured along the left side of the original cone of $u$ (not the left side of the subcone). We now introduce modifications to Lemmas~\ref{lem:convexChainConstrained} and ~\ref{lem:visibilityEdgeConstrained} to make them suited for polygonal obstacles. A proof of Lemma~\ref{lem:convexChainPolygonal} can be found in \cite{polygonallemma}. \begin{lemma} \label{lem:convexChainPolygonal} Let $u$, $v$, and $w$ be three points where $(w,u)$ and $(u,v)$ are both visibility edges and $u$ is not a vertex of any polygonal obstacle $P$ where the open polygon $P'$ of $P$ intersects $\triangle wuv$. The area $A$, bounded by $(w,u)$, $(u,v)$, and a convex chain formed by visibility edges between $w$ and $v$ inside $\triangle wuv$, does not contain any vertices and is not intersected by any obstacles. \end{lemma} \begin{lemma} \label{lem:visibilityEdgePolygonal} Let $u$ and $w$ be two distinct vertices in the polygonal-constrained sweeping line graph such that $uw$ is a visibility edge and let $C$ be the cone of $\zeta_{k}$ such that $w \in C_{u}$. Let $v_{0}$ be the closest visible vertex in the subcone of $C_{u}$ that contains $w$. Let $\ell$ be the line through $u$ and $w$. Let $s$ be the intersection of $\ell$ and the sweeping line through $v_{0}$. Then $v_{0}s$ is a visibility edge. \end{lemma} \begin{proof} This proof is analogous to the proof of Lemma~\ref{lem:visibilityEdgeConstrained}. 
\end{proof} Using these two modified lemmas, we can prove that the polygonal-constrained sweeping line graph is a spanner of the visibility graph. \begin{theorem} Let $k \geq 7$ be an integer, let $\theta = \frac{2\pi}{k}$, and let $\gamma \in [0, \frac{\pi - 3\theta}{2})$. Let $u$ and $w$ be two distinct vertices in the plane that can see each other. There exists a path connecting $u$ and $w$ in the polygonal-constrained sweeping line graph of length at most $\frac{1}{\cos(\frac{\theta}{2} + \gamma) - \sin(\theta)} \cdot | uw |$. \end{theorem} \begin{proof} The proof is analogous to the proof of Theorem~\ref{theo:constrained}. The only changes required are that the uses of Lemma~\ref{lem:convexChainConstrained} and Lemma~\ref{lem:visibilityEdgeConstrained} are replaced with Lemma~\ref{lem:convexChainPolygonal} and Lemma~\ref{lem:visibilityEdgePolygonal} respectively. Note that all other arguments still hold, as they are arguments based on Euclidean distance, rather than the specific shape of the (straight-line) obstacles. \end{proof} \section{Conclusion} We showed that the sweeping line construction produces a spanner in the unconstrained, constrained, and polygonal constrained settings. These graphs are a generalization of $\Theta$-graphs and thus we also showed that every $\Theta$-graph with at least 7 cones is a spanner in the presence of polygonal obstacles. We also note that the proof in the unconstrained case immediately implied a local routing algorithm with competitive ratio equal to (the current upper bound of) the spanning ratio. Our proofs rely on Lemma~\ref{lem:lemma7}, which bounds the length of the inductive part of our path. We conjecture that any proof strategy that uses induction must satisfy a condition similar to Lemma~\ref{lem:lemma7} in order to upper bound the spanning ratio. 
An analogous argument could then be applied to prove the construction to be a spanner in all three settings using the methods from this paper (i.e., finding a vertex $v_0$ satisfying the conditions of Lemmas~\ref{lem:visibilityEdgeConstrained} and~\ref{lem:segmentLengths}). This would greatly simplify spanner construction for the constrained and polygonal obstacle settings, by putting the focus on the simpler unconstrained setting. In particular, we conjecture that the strategy described in this paper can be applied to generalize the known results for Yao-graphs. \end{document}
\begin{document} \title{Dispersion Analysis of Finite Difference and Discontinuous Galerkin Schemes for Maxwell's Equations in Linear Lorentz Media} \begin{abstract} In this paper, we consider Maxwell's equations in linear dispersive media described by a single-pole Lorentz model for electronic polarization. We study two classes of commonly used spatial discretizations: finite difference methods (FD) with arbitrary even order accuracy in space and high spatial order discontinuous Galerkin (DG) finite element methods. Both types of spatial discretizations are coupled with second order semi-implicit leap-frog and implicit trapezoidal temporal schemes. By performing detailed dispersion analysis for the semi-discrete and fully discrete schemes, we obtain rigorous quantification of the dispersion error for Lorentz dispersive dielectrics. In particular, comparisons of dispersion error can be made taking into account the model parameters, and mesh sizes in the design of the two types of schemes. This work is a continuation of our previous research on energy-stable numerical schemes for nonlinear dispersive optical media \cite{bokil2017energy,bokil2018high}. The results for the numerical dispersion analysis of the reduced linear model, considered in the present paper, can guide us in the optimal choice of discretization parameters for the more complicated and nonlinear models. The numerical dispersion analysis of the fully discrete FD and DG schemes, for the dispersive Maxwell model considered in this paper, clearly indicate the dependence of the numerical dispersion errors on spatial and temporal discretizations, their order of accuracy, mesh discretization parameters and model parameters. The results obtained here cannot be arrived at by considering discretizations of Maxwell's equations in free space. 
In particular, our results contrast the advantages and disadvantages of using high order FD or DG schemes and leap-frog or trapezoidal time integrators over different frequency ranges using a variety of measures of numerical dispersion errors. Finally, we highlight the limitations of the second order accurate temporal discretizations considered. \end{abstract} \begin{keywords} Maxwell's equations, Lorentz model, numerical dispersion, finite differences, discontinuous Galerkin finite elements. \end{keywords} \section{Introduction} The electromagnetic (EM) field inside a material is governed by the macroscopic Maxwell's equations along with constitutive laws that account for the response of the material to the electromagnetic field. In this work, we consider a linear dispersive material in which the delayed response to the EM field is modeled as a damped vibrating system for the polarization accounting for the average dipole moment per unit volume over the atomic structure of the material. The corresponding mathematical equations are called the Lorentz model for electronic polarization. Such dielectric materials have actual physical dispersion. The complex-valued electric permittivity of such a dispersive material is frequency dependent and includes physical dissipation, or attenuation. It is well known that numerical discretizations of (systems of) partial differential equations (PDEs) will have numerical errors. These errors include dissipation, the dampening of some frequency modes, and dispersion, the frequency dependence of the phase velocity of numerical wave modes \cite{trefethen1982group}. To preserve the correct physics, it is important that the dispersion and dissipation effects are accurately captured by numerical schemes, particularly for long time simulations. 
Thus, an understanding of how numerical discretizations affect the dispersion relations of PDEs is important in constructing good numerical schemes that correctly predict wave propagation over long distances. When the PDEs have physical dispersion modeling a retarded response of the material to the imposed electromagnetic field, the corresponding numerical discretizations will support numerical dispersion errors that have a complicated dependence on mesh step sizes, spatial and temporal accuracy and model parameters. In this paper, we perform dispersion analysis of high spatial order discontinuous Galerkin (DG) and a class of high order finite difference (FD) schemes, both coupled with second order implicit trapezoidal or semi-implicit leap-frog temporal discretizations for Maxwell's equations in linear Lorentz media. The fully discrete time domain (TD) methods are the leap-frog DGTD or FDTD methods and the trapezoidal DGTD or FDTD methods. This paper is a continuation of our recent efforts on energy stable numerical schemes for nonlinear dispersive optical media. In \cite{bokil2017energy,bokil2018high}, we developed fully discrete energy stable DGTD and FDTD methods, respectively, for Maxwell's equations with linear Lorentz and nonlinear Kerr and Raman responses via the \emph{auxiliary differential equation} (ADE) approach. These schemes include second order modified leap-frog or trapezoidal temporal schemes combined with high order DG or FD methods for the spatial discretization. In the ADE approach, ordinary differential equations (ODEs) for the evolution of the electric polarization are appended to Maxwell's equations. The two spatial discretizations that were used, the DG method and the FD method, are very popular methods for electromagnetic simulations in the literature. The DG methods, which are a class of finite element methods using discontinuous polynomial spaces, have grown to be broadly adopted for EM simulations in the past two decades. 
They have been developed and analyzed for time dependent linear models, including Maxwell's equations in free space (e.g., \cite{chung2013convergence, cockburn2004locally, hesthaven2002nodal}), and in dispersive media (e.g., \cite{gedney2012discontinuous, huang2011interior, lanteri2013convergence,lu2004discontinuous}). The Yee scheme \cite{yee1966numerical} is a leap-frog FDTD method that was initially developed for Maxwell's equations in linear dielectrics, and is one of the gold standards for numerical simulation of EM wave propagation in the time domain. The Yee scheme has been extended to linear dispersive media \cite{joseph1991direct, KF_Deb, KF_Lor} (see the books \cite{taflove2005computational, taflove2013advances} and references therein), and then to nonlinear dispersive media \cite{ziolkowski1994nonlinear, goorjian1992computational, joseph1991direct, hile1996numerical, sorensen2005kink}. Additional references for Yee and other FDTD methods for EM wave propagation in linear and nonlinear Lorentz dispersion can be found in \cite{joseph1997fdtd, joseph1994spatial, bourgeade2005numerical, greene2006general, ramadan2015systematic} for the 1D case, and in \cite{fujii2004high, joseph1993direct, ziolkowski1994nonlinear} for 2D and 3D cases. In our recent work \cite{bokil2017energy,bokil2018high}, we proved energy stability of fully discrete new FDTD and DGTD schemes for Maxwell's equations with Lorentz, Kerr and Raman effects. Both types of schemes employ second order time integrators, while utilizing high order discretizations in space. The schemes are benchmarked on several one-dimensional test examples and their performance in stability and accuracy are validated. The objective of the present work, is to conduct numerical dispersion analysis of the aforementioned DGTD and FDTD schemes for Maxwell's equations in linear Lorentz media, which can guide us in the optimal choice of numerical discretization parameters for more general dispersive and nonlinear models. 
There has been abundant study on the dispersion analysis of DG methods. Most work was carried out for semi-discrete schemes, e.g., for scalar linear conservation laws \cite{ainsworth2004dispersive, hu1999analysis, hu2002eigensolution, sherwin2000dispersion}, and for the second-order wave equation \cite{ainsworth2006dispersive}. Dispersive behavior of fully discrete DGTD schemes is studied for the one-way wave equation \cite{yang2013dispersion, ainsworth2014dispersive} and two-way wave equations \cite{cheng2017L2}. Particularly, in \cite{sarmany2007dispersion} the accuracy order of the dispersion and dissipation errors of nodal DG methods with Runge-Kutta time discretization for Maxwell's equations in free space are analyzed numerically. The stability and dispersion properties of a variety of FDTD schemes applied to Maxwell's equations in free space are also well known (see \cite{taflove2005computational}). Additionally, various time domain finite element methods have been devised for the numerical approximation of Maxwell's equations in free space (see \cite{monk1992comparison, lee1997time} and the references therein). There has been relatively less work on phase error analysis for dispersive dielectrics; see \cite{taflove2005computational, petropoulos1994stability, prokopidis2004fdtd, bokil2012} for finite difference methods and \cite{banks2009analysis} for finite element methods. To the best of our knowledge, the present work is the first in the literature to conduct dispersion analysis of fully discrete DGTD methods for Maxwell's equations in Lorentz dispersive media and providing comparisons of the numerical dispersion errors with those of fully discrete FDTD methods. By rigorous quantification of the numerical dispersion error for such dispersive Maxwell systems, we make comparisons of the DGTD and FDTD methods taking into account the model parameters, spatial and temporal accuracy and mesh sizes in the design of the schemes. 
Given the popularity of both DGTD and FDTD methods in science and engineering, such a comparison of errors between the two schemes will provide practitioners of these methods with guidelines on their proper implementation. Our dispersion analysis indicates that there is a complicated dependence of dispersion errors on the model parameters, orders of spatial and temporal discretizations, CFL conditions as well as mesh discretization parameters. We compute and plot a variety of different measures of numerical dispersion errors as functions of the quantity $\frac{\omega}{\omega_1}$, where $\omega_1$ is the resonance frequency of the Lorentz material, and $\omega$ is an angular frequency. These measures include normalized phase and group velocities, attenuation constants and an energy velocity \cite{gilles2000comparison}. The parameter range of the quantity $\frac{\omega}{\omega_1}$ separates the response of the material into distinct bands. We find that some counterintuitive results can occur for high-loss materials where a low order scheme can have smaller numerical dispersion error than a higher order scheme. Since this situation does not occur in non-dispersive dielectrics, our results demonstrate the need to analyze and study the numerical dispersion relation for the Lorentz media beyond those for the case of free space that commonly appear in the literature. We have made quantitative comparisons of the high order FD and DG schemes based on the metrics discussed above. We also identify the differences in numerical dispersion due to the temporal integrator used. In particular, our results clearly identify the limitation of the second order temporal accuracy of our time discretizations, by identifying distinct bands in the frequency parameter ranges where the high order spatial accuracy of either the DG or FD schemes is unable to alleviate the error in numerical dispersion due to time discretization. The rest of the paper is organized as follows. 
In Section \ref{model} we introduce Maxwell's equations in a one spatial dimensional Lorentz dispersive material. In Sections \ref{dispersion} and \ref{time}, we present and analyze the dispersion relations and the relative phase errors for the PDE model, and two semi-discrete in time finite difference numerical schemes, respectively. In Sections \ref{semifdtd} and \ref{fdtd} numerical dispersion errors in semi-discrete in space staggered FD methods, and fully space-time discrete FDTD methods, respectively, are considered, while numerical dispersion errors in semi-discrete in space DG methods and fully discrete DGTD methods are studied in Sections \ref{semidg} and \ref{dgtd}, respectively. In Section \ref{sec:numerical}, we define four quantities that provide different measures of numerical dispersion error and compare these for the FDTD and DGTD methods. Interpretations and conclusions of our results are made in Section \ref{conclude}. \section{Maxwell's Equations in a Linear Lorentz Dielectric} \label{model} We begin by introducing Maxwell's equations in a non-magnetic, non-conductive medium $\Omega \subset \mathbb{R}^d$, $d=1,2,3$, from time 0 to $T$, containing no free charges, that govern the dynamic evolution of the electric field $\mathbf{E}$ and the magnetic field $\mathbf{H}$ in the form \begin{subequations}\label{eq:max} \begin{align}\label{eq:max1} &\displaystyle\dd{t}{\mathbf{B}}+{\bf{\nabla}}\times \mathbf{E} = 0, \ \text{in} \ (0,T]\times \Omega, \\[1.5ex] \label{eq:max2} &\displaystyle\dd{t}{\mathbf{D}} -{\bf{\nabla}}\times \mathbf{H} = 0, \ \text{in} \ (0,T]\times \Omega, \\[1.5ex] \label{eq:max3} & {\bf{\nabla}}\cdot \mathbf{B} = 0, \ {\bf{\nabla}}\cdot \mathbf{D} = 0, \ \text{in} \ (0,T]\times \Omega, \end{align} \end{subequations} along with initial data that satisfies the Gauss laws \eqref{eq:max3}, and appropriate boundary data. 
System \eqref{eq:max} has to be completed by constitutive laws on $[0,T] \times \Omega$. The electric flux density $\mathbf{D}$, and the magnetic induction $\mathbf{B}$, are related to the electric field and magnetic field, respectively, via the constitutive laws \begin{equation} \label{eq:constD} \mathbf{D} = \epsilon_0(\epsilon_\infty\mathbf{E}+\mathbf{P}), \ \ \mathbf{B} = \mu_0\mathbf{H}. \end{equation} The parameter $\epsilon_{0}$ is the electric permittivity of free space, while $\mu_0$ is the magnetic permeability of free space. The term $\epsilon_0 \epsilon_\infty \mb{E}$ captures the linear instantaneous response of the medium to the EM fields, with $\epsilon_{\infty}$ defined as the relative electric permittivity in the limit of infinite frequencies. The macroscopic \emph{(electric) retarded polarization} $\mathbf{P}$ is modeled as a single pole resonance Lorentz dispersion mechanism, in which the time dependent evolution of the polarization follows the second order ODE \cite{gilles2000comparison, taflove2005computational} \begin{equation} \label{eq:polar:P} \frac{\partial^2 \mathbf{P}}{\partial t^2} + 2\gamma\frac{\partial\mathbf{P}}{\partial t} +\omega_1^2\mathbf{P} =\omega_p^2\mb{E}. \end{equation} In the ODE \eqref{eq:polar:P}, $\omega_1$ and $\omega_p$ are the resonance and plasma frequencies of the medium, respectively, and $\gamma$ is a damping constant. The plasma frequency is related to the resonance frequency via the relation $\omega_p^2 = (\epsilon_s-\epsilon_\infty)\omega_1^2 := \epsilon_d\omega_1^2$. Here $\epsilon_s$ is defined as the relative permittivity at zero frequency, and $\epsilon_d$ measures the strength of the electric field coupling to the linear Lorentz dispersion model. 
We note that the limit $\epsilon_d\rightarrow 0$, or $\epsilon_s\rightarrow \epsilon_\infty$ corresponds to a linear dispersionless dielectric. In this paper, we focus on a one dimensional Maxwell model on $\Omega=\mathbb{R}$ that is obtained from \eqref{eq:max}, \eqref{eq:constD} and \eqref{eq:polar:P} by assuming an isotropic and homogeneous material in which electromagnetic plane waves are linearly polarized and propagate in the $x$ direction. Thus, the electric field is represented by one scalar component $E := E_z$, while the magnetic field is represented by the one component $H := H_y$. All the other variables are similarly represented by single scalar components. We convert the second order ODE \eqref{eq:polar:P} for the linear retarded polarization $P$ to first order form by introducing the linear polarization current density $J$, \begin{align} \label{eq:P_ODE} \frac{\partial P}{\partial t} = J, \quad \frac{\partial J}{\partial t} = -2\gamma J -\omega_1^2P+\omega_p^2E. \end{align} We consider a rescaled formulation of the resulting one spatial dimensional Maxwell-Lorentz system with the following scaling: let the reference time scale be $t_0$, and reference space scale be $x_0$ with $x_{0}=ct_{0}$ and $c=1/\sqrt{\mu_0\epsilon_0}$. Henceforth, the rescaled fields and constants are defined based on a reference electric field $E_0$ as follows, \begin{align*} & (H/E_{0})\sqrt{\mu_{0}/\epsilon_{0}}\rightarrow H, \ \ \ D/(\epsilon_{0}E_{0}) \rightarrow D, \ \ \ P/E_{0}\rightarrow P, \ \ \ (J/E_{0})t_{0}\rightarrow J,\ \ \ E/E_{0}\rightarrow E, \\ & \omega_{1}t_{0}\rightarrow\omega_{1}, \ \ \ \omega_{p}t_{0}\rightarrow\omega_{p}, \ \ \ \gamma t_{0}\rightarrow \gamma, \end{align*} where for simplicity, we have used the same notation to denote the scaled and original variables. 
In summary, we arrive at the following dimensionless Maxwell's equations with linear Lorentz dispersion in one dimension: \begin{subequations} \label{eq:sys} \begin{align} \frac{\partial H}{\partial t} &= \frac{\partial E}{\partial x}, \label{eq:sys1}\\ \frac{\partial D}{\partial t} &= \frac{\partial H}{\partial x}, \label{eq:sys2}\\ \frac{\partial P}{\partial t} &= J, \label{eq:sys3}\\ \frac{\partial J}{\partial t} &= -2\gamma J -\omega_1^2P+\omega_p^2E, \label{eq:sys4} \\ D &= \epsilon_\infty E +P. \label{eq:sys7} \end{align} \end{subequations} \section{Dispersion Relations} \label{dispersion} The Maxwell-Lorentz system \eqref{eq:sys} is a linear dispersive system, i.e. it admits plane wave solutions of the form $e^{i\left( kx - \omega t \right)}$ for all its unknown field variables, with the property that the speed of propagation of these waves is not independent of the wave number $k$ or the angular frequency $\omega$ \cite{trefethen1982group}. In this section, we derive the dispersion relation of \eqref{eq:sys} and highlight its main properties. We assume the space-time harmonic variation \begin{align} \displaystyle \label{ExDis} X(x,t) \equiv X_0 e^{i\left( kx - \omega t \right)}, \end{align} \noindent of all field components $X \in \{H, E, P, J\}$. Substituting \eqref{ExDis} in \eqref{eq:sys} yields the system \begin{subequations} \label{DisEx} \begin{align} \omega H_0 + k E_0 &= 0, \\ kH_0 + \epsilon_\infty \omega E_0 + \omega P_0 &= 0, \\ i \omega P_0 + J_0 &= 0, \\ \omega_p^2 E_0 - \omega_1^2 P_0 + \left(i \omega - 2\gamma\right)J_0 &= 0. 
\end{align} \end{subequations} Define the vector $\displaystyle \textbf{U}= [H_0, E_0, P_0, J_0]^T$ containing all amplitudes of the field solution, then \eqref{DisEx} can be rewritten as a linear system, given by \begin{align} \label{DisEx2} \mathcal{A}\textbf{U} = \textbf{0}, \quad \text{with} \quad \mathcal{A} =\begin{pmatrix} \omega & k & 0 & 0 \\ k & \epsilon_\infty \omega & \omega & 0 \\ 0 & 0 & i \omega & 1 \\ 0 & \omega_p^2 & - \omega_1^2 & i \omega - 2\gamma \\ \end{pmatrix}. \end{align} By solving $\det(\mathcal{A})=0$, we obtain the exact dispersion relation for \eqref{eq:sys} as \begin{align} \displaystyle \label{DisEx4} k = \pm k^{\text{ex}}, \qquad \textrm{with} \quad k^{\text{ex}} = \omega \sqrt{\epsilon(\WH{\omega}; \mathbf{p})}, \ \ \textrm{and} \ \ \epsilon(\WH{\omega}; \mathbf{p}) = \epsilon_\infty \left(1- \frac{\epsilon_{d}/\epsilon_\infty }{\displaystyle \WH{\omega}^2 + 2 i \WH{\gamma} \WH{\omega} - 1} \right). \end{align} Here, $\epsilon(\WH{\omega}; \mathbf{p}) $ is the permittivity of the medium dependent on the ``relative'' frequency $\WH{\omega}=\omega/\omega_{1}$ and the parameter set $\mathbf{p} = [\epsilon_s, \epsilon_\infty, \WH{\gamma}]$, with $\WH{\gamma}=\gamma/\omega_1$. The permittivity is clearly frequency dependent and displays the dispersive nature of the system. A major goal in the design and construction of numerical methods for linear dispersive PDEs is to devise methods that accurately capture the medium's complex permittivity \cite{taflove2005computational}. We will assume that $\epsilon_s >0, \epsilon_\infty >0$ and $\epsilon_d=\epsilon_s-\epsilon_\infty>0$. These assumptions are based on physical considerations \cite{taflove2005computational}. 
In the dispersion analysis, we assume $\omega$ is a real number, and restrict $\omega\geq0$ in this work. Note that $\epsilon(\WH{\omega}; \mathbf{p}) $ and $k$ can be complex, depending on the values that certain parameters assume. For lossless materials (i.e. $\WH{\gamma}=0$), the \emph{medium absorption band} is defined by $\WH{\omega}\in [1, \sqrt{\epsilon_{s}/\epsilon_{\infty}}]$, in which $\epsilon(\WH{\omega}; \mathbf{p})\le0$ and $k^{\text{ex}}$ is an imaginary number or zero. Outside the medium absorption band, i.e. for other $\WH{\omega}$ values, we have $\epsilon(\WH{\omega}; \mathbf{p})>0$ and $k^{\text{ex}}$ is a real number. Moreover, it is easy to check $|k^{\text{ex}}|\rightarrow\infty$ as $\WH{\omega}$ approaches $1$ (the \emph{resonance frequency}, which is also the lower bound of the medium absorption band) and $k^{\text{ex}}=0$ at the upper bound $\WH{\omega}=\sqrt{\epsilon_{s}/\epsilon_{\infty}}.$ In this paper, we are mainly interested in low-loss materials, i.e. $\WH{\gamma}>0$ with $\WH{\gamma}\ll1$. In this case, the dispersion relation retains similar properties, which means $|k^{\text{ex}}|$ is a large number around $\WH{\omega}=1$ and a small number near $\WH{\omega}=\sqrt{\epsilon_{s}/\epsilon_{\infty}}$. This behavior of the exact dispersion relation has implications for the numerical dispersion errors, as illustrated in later sections. 
\begin{Remark} \label{rk1} In the literature, dispersion relations can be presented in two ways: 1) representing the continuous or discrete angular frequency $\omega\in\mathbb{C}$ as a function of the exact and continuous wave number $k\in\mathbb{R}$ (and also of the model parameters and possible mesh parameters); 2) representing the continuous or discrete wave number $k\in \mathbb{C}$ as a function of the exact and continuous angular frequency $\omega\in \mathbb{R}$ \cite{taflove2005computational}. In the first approach, we will obtain a fourth order polynomial for $\omega$ as a function of $k$ and other parameters. We provide some insight into approach 1 in Appendix A, for the semi-discrete in space FDTD discretizations in which the effect of high order FDTD spatial approximations on the dispersion relation is clearly evident in terms of the \emph{symbol} of the spatial discretization operators. In this paper, we mainly use the second approach since in this approach we are able to explicitly identify the effects of discretization on the permittivity of the Maxwell-Lorentz model \eqref{eq:sys}. \end{Remark} Before we proceed, for convenience of the readers, we gather some notations frequently used in the paper, together with the place of their first appearances in a table listed below. \begin{table}[H] \label{tab:notation} \centering \caption{Notations used and the place of their first appearance. 
The symbol $*$ in the superscript can be LF (for the leap-frog temporal scheme) or TP (for the trapezoidal temporal scheme).} \renewcommand{\arraystretch}{1.5} \begin{tabular}{c c c c c c c c c} \hline & $\WH{\omega}$ & $\WH{\gamma}$ & $W$ & $W_{1}$ & $K$ & $\epsilon(\WH{\omega}; \mathbf{p})$ & $ \delta(\WH{\omega};\mathbf{p})$ & $\Psi^*(\WH{\omega})$ \\\hline \textbf{Def} & $\omega / \omega_{1}$ & $\gamma/ \omega_{1}$ & $\omega\Delta t$ & $\omega_{1}\Delta t$ & $ k^{\text{ex}} h$ & $\displaystyle \epsilon_\infty - \frac{\epsilon_{d}}{\displaystyle \WH{\omega}^2 + 2 i \WH{\gamma}\, \WH{\omega} - 1}$ & $\displaystyle\frac{\epsilon_{d} \, \WH{\omega} \left( \WH{\omega}+i \WH{\gamma} \right)} { ( \WH{\omega}^2+2i \WH{\gamma}\,\WH{\omega} -1)^2}$ & $\displaystyle \left| \frac{k^{\text{ex}}(\WH{\omega}) - k^*(\WH{\omega})}{k^{\text{ex}}(\WH{\omega})} \right|$\\ \textbf{Eqn} & \eqref{DisEx4} & \eqref{DisEx4} & \eqref{eq:matrixA_LF} & \eqref{eq:matrixA_LF} & \eqref{Dissemi4} & \eqref{DisEx4} & \eqref{delta} & \eqref{eq:error}, \eqref{ERRTP} \\ \hline \end{tabular} \renewcommand{\arraystretch}{1} \end{table} \begin{table}[H] \centering \renewcommand{\arraystretch}{1.5} \begin{tabular}{c c c c c } \hline & $\Psi_{\text{FD},2M}(\WH{\omega})$ & $\Psi^*_{\text{FD},2M}(\WH{\omega})$ & $\Psi_{\text{DG},p}(\WH{\omega})$ & $\Psi^*_{\text{DG},p}(\WH{\omega})$ \\\hline \textbf{Def} & $\displaystyle \left| \frac{k^{\text{ex}}(\WH{\omega}) - k_{\text{FD},2M}(\WH{\omega})}{k^{\text{ex}}(\WH{\omega})} \right|$ & $\displaystyle \left| \frac{k^{\text{ex}}(\WH{\omega}) - k^*_{\text{FD},2M}(\WH{\omega})}{k^{\text{ex}}(\WH{\omega})} \right|$ & $\displaystyle \left| \frac{k^{\text{ex}}(\WH{\omega}) - k_{\text{DG},p}(\WH{\omega})}{k^{\text{ex}}(\WH{\omega})} \right|$ & $\displaystyle \left| \frac{k^{\text{ex}}(\WH{\omega}) - k^*_{\text{DG},p}(\WH{\omega})}{k^{\text{ex}}(\WH{\omega})} \right|$ \\ 
\textbf{Eqn} &\eqref{ERRTP2} & Figure \ref{Fig:Phase_Error_FD_fully1} & Figure \ref{Fig:semi_DG} & Figure \ref{Fig:Phase_Error_DG_fully1} \\ \hline \end{tabular} \renewcommand{\arraystretch}{1} \end{table} \section{Second Order Accurate Temporal Discretizations} \label{time} This section concerns the dispersion analysis of the semi-discrete in time schemes. Continuing from our previous work \cite{bokil2017energy,bokil2018high}, we consider two types of commonly used second-order time schemes for the linear system \eqref{eq:sys}, both implicit in the ODE parts. Let $\Delta t > 0$ be a temporal mesh step. Suppose $u^{n}(x)$ is the solution at time $t^{n}= n\Delta t, n \in \mathbb{N}$, with $u=H,\, E,\, D,\, P,\, J$. Then, we compute $u^{n+1}(x)$ at time $t^{n+1}=t^{n}+\Delta t$ by the following methods. The first scheme uses a staggered leap-frog discretization in time for the PDE part, with the magnetic field $H$ staggered in time from the rest of the field components. The scheme is given by: \begin{subequations} \label{eq:LF} \begin{align} \frac{H^{n+1/2}-H^{n}}{\Delta t/2} &= \frac{\partial E^n}{\partial x}, \label{eq:LF1}\\ \frac{D^{n+1}-D^{n}}{\Delta t} &= \frac{\partial H^{n+1/2}}{\partial x}, \label{eq:LF2}\\ \frac{P^{n+1}-P^{n}}{\Delta t} &= \frac{1}{2} \left(J^{n}+J^{n+1}\right), \label{eq:LF3}\\ \frac{J^{n+1}-J^{n}}{\Delta t} &= -\gamma \left(J^{n}+J^{n+1}\right) -\frac{\omega_{1}^{2}}{2} \left(P^{n}+P^{n+1}\right) + \frac{\omega_{p}^2}{2} \left(E^{n}+E^{n+1}\right), \label{eq:LF4}\\ D^{n+1} &= \epsilon_{\infty} E^{n+1} + P^{n+1}, \label{eq:LF5}\\ \frac{H^{n+1}-H^{n+1/2}}{\Delta t/2} &= \frac{\partial E^{n+1}}{\partial x}. 
\label{eq:LF6} \end{align} \end{subequations} The second scheme, which is a fully implicit scheme based on the trapezoidal rule, is given as follows: \begin{subequations} \label{eq:TP} \begin{align} \frac{H^{n+1}-H^{n}}{\Delta t} =& \frac{1}{2} \left( \frac{\partial E^{n+1}}{\partial x} + \frac{\partial E^{n}}{\partial x}\right), \label{eq:TR1} \\ \frac{D^{n+1}-D^{n}}{\Delta t} =& \frac{1}{2} \left( \frac{\partial H^{n+1}}{\partial x} + \frac{\partial H^{n}}{\partial x}\right), \label{eq:TR2} \\ \frac{P^{n+1}-P^{n}}{\Delta t} =& \frac{1}{2} \left(J^{n}+J^{n+1}\right), \label{eq:TR3}\\ \frac{J^{n+1}-J^{n}}{\Delta t} =& -\gamma \left(J^{n}+J^{n+1}\right) -\frac{\omega_{1}^{2}}{2} \left(P^{n}+P^{n+1}\right) + \frac{\omega_{p}^2}{2} \left(E^{n}+E^{n+1}\right), \label{eq:TR4}\\ D^{n+1} =& \epsilon_{\infty} E^{n+1} + P^{n+1}. \label{eq:TR5} \end{align} \end{subequations} Similar to the continuous case, we can perform dispersion analysis on the semi-discrete schemes \eqref{eq:LF} and \eqref{eq:TP} by assuming the time discrete plane wave solution as \begin{align} \label{eq:disper_semi} X^{n}(x)\equiv X_{0} e^{i(k^{\text{*}}x-\omega t_{n})}, \end{align} where $*$ can be LF (with respect to the leap-frog scheme \eqref{eq:LF}) or TP (with respect to the trapezoidal scheme \eqref{eq:TP}). Define $\displaystyle \textbf{U}= [H_0, E_0, P_0, J_0]^T$ as the vector containing all amplitudes of the field solutions. Substituting \eqref{eq:disper_semi} in the schemes \eqref{eq:LF} or \eqref{eq:TP}, we obtain linear systems for each case in the form \begin{align} \mathcal{A}^{*}\textbf{U} = \textbf{0}. 
\end{align} The semi-discrete numerical dispersion relation can be then obtained from $\det(\mathcal{A}^{*})=0.$ For the leap-frog scheme \eqref{eq:LF}, we have \renewcommand{\arraystretch}{1.5} \begin{align} \label{eq:matrixA_LF} \mathcal{A}^{\text{LF}} =\begin{pmatrix} \sin\left( \frac{W}{2} \right) & \frac{\Delta t}{2}k^{\text{LF}} & 0 & 0 \\ \frac{\Delta t}{2}k^{\text{LF}} & \epsilon_\infty \sin\left( \frac{W}{2} \right) & \sin\left( \frac{W}{2} \right) & 0 \\ 0 & 0 & i \sin\left( \frac{W}{2} \right) & \frac{\Delta t}{2} \cos\left( \frac{W}{2} \right) \\ 0 & \frac{\Delta t}{2} \omega_p^2 \cos\left( \frac{W}{2} \right) & - \frac{\Delta t}{2} \omega_1^2 \cos\left( \frac{W}{2} \right) & i \sin\left( \frac{W}{2} \right) - \gamma \Delta t \cos\left( \frac{W}{2} \right) \\ \end{pmatrix}, \end{align} \renewcommand{\arraystretch}{1} where $W:=\omega \Delta t = \WH{\omega} W_1,$ with $W_1:=\omega_1 \Delta t.$ This yields the dispersion relation \begin{align} \label{DissemiLF3-2} k^{\text{LF}} = \pm \omega \sqrt{\epsilon(\WH{\omega}^{\mathrm{LF}}; \mathbf{p}^{\mathrm{LF}})}, \ \ \ \textrm{with} \quad \epsilon(\WH{\omega}^{\mathrm{LF}};\mathbf{p}^{\mathrm{LF}}) = \epsilon_\infty^{\mathrm{LF}}\left( 1 - \frac{ \epsilon_{d}^{\mathrm{LF}}/\epsilon_\infty^{\mathrm{LF}} }{ \left(\WH{\omega}^{\mathrm{LF}}\right)^2 + 2i \WH{\gamma}^{\mathrm{LF}}\, \WH{\omega}^{\mathrm{LF}} - 1 }\right), \end{align} where $s_\omega := \displaystyle \frac{\sin(\frac{W}{2})}{\frac{W}{2}} $ and $r_\omega := \displaystyle \frac{\tan(\frac{W}{2})}{\frac{W}{2}} $ as in \cite{petropoulos1994stability}, and $\WH{\omega}^{\mathrm{LF}} = \WH{\omega} r_\omega, \mathbf{p}^{\mathrm{LF}} = [\epsilon_s^{\mathrm{LF}}, \epsilon_\infty^{\mathrm{LF}}, \WH{\gamma}^{\mathrm{LF}}]$, with components given by the identities \begin{equation} \label{notation1.1} \displaystyle \epsilon_s^{\mathrm{LF}} = 
\epsilon_s s_\omega^2, \ \ \epsilon_\infty^{\mathrm{LF}} = \epsilon_\infty s_\omega^2, \ \ \WH{\gamma}^{\mathrm{LF}} = \WH{\gamma}. \end{equation} In this form, we can clearly identify how the leap-frog time discretization misrepresents the permittivity by misrepresenting the parameters of the model. These misrepresentations are solely due to the discretizations of the ODEs by the leap-frog time integrator. The misrepresentations depend on the value of the (exact) angular frequency that is chosen, and in particular as $\frac{W}{2}$ approaches zero, the discrete parameters approach the continuous ones. Thus, a guideline for practitioners using this time integrator to control these misrepresentations, is to choose $\Delta t$ so that $\cos \left( \frac{W}{2} \right) \approx 1$ across the range of frequencies present in the short pulse that propagates in the medium \cite{petropoulos1994stability}. To further analyze the dispersion error, we consider the regime when $W \ll 1$, and obtain the Taylor expansion of \eqref{DissemiLF3-2} with respect to $W$ as \begin{align} \label{eq:dis_LF} k^{\text{LF}} = \pm k^{\text{ex}}\left( 1 + \frac{1}{12}\left( \frac{\delta(\WH{\omega};\mathbf{p})} {\epsilon(\WH{\omega};\mathbf{p})} -\frac{1}{2} \right) W^2 + \mathcal{O}(W^4) \right), \end{align} where \begin{equation} \label{delta} \displaystyle \delta(\WH{\omega};\mathbf{p}) = \frac{\epsilon_{d} \, \WH{\omega} \left( \WH{\omega}+i \, \WH{\gamma} \right)} { \left( \WH{\omega}^2 +2i \, \WH{\gamma}\,\WH{\omega} -1 \right)^2}. 
\end{equation} We define the \emph{relative phase error} for the LF scheme to be the ratio \begin{equation} \label{eq:error} \Psi^{\text{LF}}(\WH{\omega}) := \left|\frac{k^{\text{LF}}(\WH{\omega})-k^{\text{ex}}(\WH{\omega})}{k^{\text{ex}}(\WH{\omega})} \right| = \left|\frac{\sqrt{\epsilon(\WH{\omega}^{\text{LF}};\mathbf{p}^{\text{LF}})} -\sqrt{\epsilon(\WH{\omega};\mathbf{p})}}{\sqrt{\epsilon(\WH{\omega};\mathbf{p})}}\right |. \end{equation} Here, we consider $k^{{\text{LF}}}$ in \eqref{DissemiLF3-2} with plus sign in front. A similar definition will be used for all semi-discrete and fully discrete schemes that appear in this paper, and provides a quantitative measurement of the numerical dispersion error. Equation \eqref{eq:dis_LF} verifies a second order dispersion error in time of the leap-frog scheme in the small time step limit. Similarly, for the trapezoidal method \eqref{eq:TP}, we can obtain \renewcommand{\arraystretch}{1.5} \begin{align} \mathcal{A}^{\text{TP}} =\begin{pmatrix} \sin\left( \frac{W}{2} \right) & \frac{\Delta t}{2}k^{\text{TP}}\cos\left( \frac{W}{2} \right) & 0 & 0 \\ \frac{\Delta t}{2}k^{\text{TP}} \cos\left( \frac{W}{2} \right) & \epsilon_\infty \sin\left( \frac{W}{2} \right) & \sin\left( \frac{W}{2} \right) & 0 \\ 0 & 0 & i \sin\left( \frac{W}{2} \right) & \frac{\Delta t}{2} \cos\left( \frac{W}{2} \right) \\ 0 & \frac{\Delta t}{2} \omega_p^2 \cos\left( \frac{W}{2} \right) & - \frac{\Delta t}{2} \omega_1^2 \cos\left( \frac{W}{2} \right) & i \sin\left( \frac{W}{2} \right) - \gamma \Delta t \cos\left( \frac{W}{2} \right) \\ \end{pmatrix}. 
\end{align} \renewcommand{\arraystretch}{1} This leads to the dispersion relation \begin{align} \label{DissemiLF3-3} \displaystyle k^{\text{TP}} &= \pm \omega \sqrt{\epsilon(\WH{\omega}^{\mathrm{TP}}; \mathbf{p}^{\mathrm{TP}})} = \frac{s_\omega}{r_\omega}k^{\text{LF}} , \ \ \ \textrm{with} \quad \epsilon(\WH{\omega}^{\mathrm{TP}}; \mathbf{p}^{\mathrm{TP}}) = \epsilon_\infty^{\mathrm{TP}}\left( 1 - \frac{\displaystyle \epsilon_{d}^{\mathrm{TP}}/\epsilon_\infty^{\mathrm{TP}} }{ \displaystyle \left(\WH{\omega}^{\mathrm{TP}}\right)^2 + 2i \, \WH{\gamma}^{\mathrm{TP}} \, \WH{\omega}^{\mathrm{TP}} - 1 }\right), \end{align} where $\WH{\omega}^{\mathrm{TP}} = \WH{\omega} r_\omega, \mathbf{p}^{\mathrm{TP}} = [\epsilon_s^{\mathrm{TP}}, \epsilon_\infty^{\mathrm{TP}}, \WH{\gamma}^{\mathrm{TP}}]$, with components given as \begin{equation} \label{notation1.2} \displaystyle \epsilon_s^{\mathrm{TP}} = \epsilon_s r_\omega^2, \ \ \epsilon_\infty^{\mathrm{TP}}= \epsilon_\infty r_\omega^2, \ \ \WH{\gamma}^{\mathrm{TP}}= \WH{\gamma}. \end{equation} Again, we can clearly identify how the trapezoidal time discretization misrepresents the permittivity. In particular, this method misrepresents the dissipation and medium resonance in the same manner as the leap-frog method. However, the relative permittivities $\epsilon_\infty$ and $\epsilon_s$ are misrepresented in a different manner. Thus, the speeds of propagation of discrete plane waves are different in these two discretizations. In particular, the slow and fast speeds in the medium, corresponding to relative permittivities $\epsilon_s$ and $\epsilon_\infty$, respectively, are different. 
In the small time step limit, for $W\ll 1$, we have \begin{align} \label{eq:dis_TP} k^{\text{TP}} = \pm k^{\text{ex}}\left( 1 + \frac{1}{12}\left( \frac{\delta(\WH{\omega};\mathbf{p})}{\epsilon(\WH{\omega};\mathbf{p})} +1 \right) W^2 + \mathcal{O}(W^4) \right), \end{align} which indicates second order accuracy in time for the \emph{relative phase error} for the trapezoidal scheme defined, in a similar manner to the leap-frog scheme, as \begin{equation} \label{ERRTP} \displaystyle \Psi^{\text{TP}}(\WH{\omega}) :=\left| \frac{k^{\text{TP}}(\WH{\omega})-k^{\text{ex}}(\WH{\omega})}{k^{\text{ex}}(\WH{\omega})}\right| = \left|\frac{\sqrt{\epsilon(\WH{\omega}^{\text{TP}};\mathbf{p}^{\text{TP}})} -\sqrt{\epsilon(\WH{\omega};\mathbf{p})}}{\sqrt{\epsilon(\WH{\omega};\mathbf{p})}}\right |. \end{equation} Finally, we make qualitative comparisons of the leap-frog and trapezoidal temporal discretizations. For low-loss materials, the conclusions can be inferred from considering the case of $\WH{\gamma}=0$. For this case, for a given set of parameters $\mathbf{p}$, $\epsilon(\WH{\omega}; \mathbf{p})$ and $\delta(\WH{\omega}; \mathbf{p})\geq0$ are real numbers. When $\WH{\omega}\rightarrow\sqrt{\epsilon_{s}/\epsilon_{\infty}}$, we have $\epsilon(\WH{\omega}; \mathbf{p})\rightarrow0$. This means $\displaystyle\frac{\delta(\WH{\omega}; \mathbf{p})}{\epsilon(\WH{\omega}; \mathbf{p})}\rightarrow \infty$, and thus the leading error term of both temporal schemes would be approaching $\infty$. On the other hand, when $\WH{\omega} \rightarrow 1$, it is easy to check that $\displaystyle\frac{\delta(\WH{\omega};\mathbf{p})}{\epsilon(\WH{\omega}; \mathbf{p})}\rightarrow \infty$ as well. 
Hence, both time schemes will give large dispersion error at $\WH{\omega}=1$ and $\WH{\omega}=\sqrt{\epsilon_{s}/\epsilon_{\infty}}$, which are the two endpoints of the medium absorption band. In addition, when $W\ll 1$, if $\WH{\omega} \in (1, \sqrt{\epsilon_{s}/\epsilon_{\infty}})$, i.e. for values in the interior of the medium absorption band, we can prove that $\displaystyle\frac{\delta(\WH{\omega}; \mathbf{p})} {\epsilon(\WH{\omega}; \mathbf{p})}<-1$, which leads to the relation $\displaystyle \left | \frac{\delta(\WH{\omega}; \mathbf{p})} {\epsilon(\WH{\omega}; \mathbf{p})}-\frac{1}{2} \right | \geq \left | \frac{\delta(\WH{\omega}; \mathbf{p})} {\epsilon(\WH{\omega}; \mathbf{p})}+1 \right |$. This means the leap-frog scheme has a larger relative phase error than the trapezoidal scheme in the interior of the medium absorption band. For other values of $\WH{\omega}$ outside the medium absorption band we obtain $\epsilon(\WH{\omega}; \mathbf{p})>0$ and $\displaystyle \left | \frac{\delta(\WH{\omega}; \mathbf{p})} {\epsilon(\WH{\omega}; \mathbf{p})}-\frac{1}{2}\right | \leq \left | \frac{\delta(\WH{\omega};\mathbf{p})} {\epsilon(\WH{\omega};\mathbf{p})}+1\right |$. Hence, the leap-frog scheme would give a smaller relative phase error outside the absorption band. For a low-loss Lorentz medium, i.e., when $\WH{\gamma}\ll1$, we believe that these conclusions are still valid with a slight change in the two peak positions (see Figure \ref{Fig:semi_time0}). Now we choose the following set of parameters, which are the same as in \cite{gilles2000comparison}, representing a low-loss Lorentz medium: \begin{align} \label{eq:parameter} \displaystyle \epsilon_s = 5.25, \quad \epsilon_\infty = 2.25, \quad \WH{\gamma}= 0.01. 
\end{align} Taking $W_1$ as $\{\pi/15$, $\pi/30$, $\pi/60\}$, the relative phase errors are plotted against $\WH{\omega}\in[0,3]$ in Figure \ref{Fig:semi_time0}. We can observe that the phase errors always have two peaks around $\WH{\omega} = 1$ and $\WH{\omega} = \sqrt{\epsilon_{s}/\epsilon_{\infty}} \approx 1.527$, with no appreciable distinction between the two temporal discretizations at these peaks. The relative phase errors in the two temporal schemes are basically the same for small $\WH{\omega}$. As $\WH{\omega}$ is increased towards 1, we see that the error in the leap-frog scheme is slightly smaller than that in the trapezoidal scheme. When $\WH{\omega}$ is between 1 and 1.527, the leap-frog scheme presents a larger error than the trapezoidal method. Beyond 1.527, the trapezoidal scheme generates a larger error than the leap-frog scheme. There is no obvious difference between the dispersion errors of the two time discretizations at the peaks. Therefore, in the last graph of Figure \ref{Fig:semi_time0}, we only plot the errors of the leap-frog time scheme at the peaks. We verify the second order accuracy of the method when the mesh size is varied. These observations are consistent with our analysis. \begin{figure}[h] \centering \includegraphics[scale= 0.28] {pics/Phase_Error_semi_time4-eps-converted-to.pdf} \includegraphics[scale= 0.28] {pics/Phase_Error_semi_time1-eps-converted-to.pdf} \includegraphics[scale= 0.28] {pics/Phase_Error_semi_time2-eps-converted-to.pdf} \includegraphics[scale= 0.28] {pics/Phase_Error_semi_time_order-eps-converted-to.pdf} \caption{The relative phase error of leap-frog (LF) and trapezoidal (TP) time discretizations. In the first three plots we fix $W_1 \in \{\pi/15, \pi/30, \pi/60\}$, respectively, while we vary $\WH{\omega}\in[0,3]$. 
In the fourth plot, we fix $\WH{\omega} = 1$ or $\WH{\omega} = \sqrt{\epsilon_{s}/\epsilon_{\infty}}$ and consider three different values of $W$ corresponding to $W_1 \in \{\pi/15, \pi/30, \pi/60\}$, with leap-frog time discretization.} \label{Fig:semi_time0} \end{figure} \section{Spatial Discretization: High Order Staggered Finite Difference Methods} \label{semifdtd} In this section, we consider semi-discrete in space staggered finite difference schemes for \eqref{eq:sys}. The spatial discretizations that we consider here for system \eqref{eq:sys} combined with a nonlinear instantaneous Kerr response and a Raman retarded nonlinear response have been recently developed in \cite{bokil2018high}. The electric and magnetic fields are staggered in space and the discrete spatial operators have arbitrary even order, $2M, M \in \mathbb{N}$, accuracy in space. Below, we describe the semi-discrete spatial schemes, denoted as the FD2M scheme, and then we obtain and discuss dispersion relations of these schemes. As in \cite{bokil2018high}, we define two staggered grids on $\mathbb{R}$ with spatial step size $h$, the primal grid $G_{p}$, and the dual grid $G_{d}$, defined respectively, as \begin{align} \label{eq:GP} G_{p} = \{ j h \ | \ j\in\mathbb{Z} \}, \quad\text{and}\quad G_{d} = \{ (j+\frac{1}{2}) h \ | \ j\in\mathbb{Z} \}. \end{align} The discrete magnetic field will be approximated at spatial nodes on the dual grid. These approximations are denoted by $H_{j+1/2}$, termed as \emph{degrees of freedom} (DoF) of $H$. All the other discrete fields will have their DoF at spatial nodes on the primal grid. For a continuous field variable $V$, $V_h$ denotes its corresponding \emph{grid function}, defined as the set of all DoF on its respective grid. 
The semi-discrete scheme is given as follows: \begin{subequations} \label{eq:semi} \begin{align} \frac{\partial H_{j+1/2}}{\partial t} &= \left(\mathcal{D}^{(2M)}_{h} E_h\right)_{j +\frac{1}{2}}, \label{eq:semi1}\\ \frac{\partial D_{j}}{\partial t} &= \left(\widetilde{\mathcal{D}}^{(2M)}_{h} H_h\right)_j, \label{eq:semi2}\\ \frac{\partial P_{j}}{\partial t} &= J_{j}, \label{eq:semi3}\\ \frac{\partial J_{j}}{\partial t} &= -2\gamma J_{j} -\omega_1^2 P_{j}+\omega_p^2 E_{j}, \label{eq:semi4} \\ D_{j}& = \epsilon_\infty E_{j} +P_{j}, \label{eq:semi5} \end{align} \end{subequations} where $\displaystyle \mathcal{D}^{(2M)}_{h}$ and $\displaystyle \widetilde{\mathcal{D}}^{(2M)}_{h}$ are the $2M$-th order finite difference approximations (with $M\in \mathbb{N}$) of the spatial differential operator $\partial_{x}$, on the primal and dual grids, respectively. These approximations are defined as \begin{subequations} \label{eq:D2M} \begin{align} & \left(\mathcal{D}^{(2M)}_{h} E_h \right)_{j+1/2} = \frac{1}{h} \sum_{p=1}^{M} \frac{\lambda_{2p-1}^{2M}}{(2p-1)} \left(E_{j+p} - E_{j-p+1}\right),\\ & \left(\widetilde{\mathcal{D}}^{(2M)}_{h} H_h\right) _{j} = \frac{1}{h} \sum_{p=1}^{M} \frac{\lambda_{2p-1}^{2M}}{(2p-1)} \left(H_{j+p-\frac{1}{2}} - H_{j-p+\frac{1}{2}} \right), \end{align} \end{subequations} \noindent and $\lambda_{2p-1}^{2M}$ is given as \cite{bokil2018high} \begin{equation} \label{eq:lambda} \lambda^{2M}_{2p-1} = \displaystyle\frac{2(-1)^{p-1}[(2M-1)!!]^2}{(2M+2p-2)!!(2M-2p)!!(2p-1)}, \end{equation} \noindent with the double factorial $n!!$ defined as \begin{equation} n!! = \begin{cases} n\cdot (n-2)\cdot (n-4) \ldots 5\cdot 3\cdot 1 & n>0, \ \text{odd}\\ n\cdot (n-2)\cdot (n-4) \ldots 6\cdot 4\cdot 2 & n>0, \ \text{even}\\ 1, & n = -1,0. 
\end{cases} \end{equation} \subsection{Semi-discrete in space dispersion analysis} \label{semi} In this section we analyze the spatial semi-discrete system \eqref{eq:semi}, i.e., the FD2M scheme. We assume that the semi-discrete system \eqref{eq:semi} has plane wave solutions of the form \begin{align} \label{SemiDis} \displaystyle X_j(t) \equiv X_0 e^{i\left( k_{\text{FD},2M}j h - \omega t \right)}, \end{align} where $\displaystyle k_{\text{FD},2M}$ represents the numerical wave number of the semi-discrete FD2M scheme. By substituting \eqref{SemiDis} in \eqref{eq:semi} we obtain the linear system \begin{align} \label{Dissemi2} \mathcal{A}_{\text{FD},2M}\textbf{U}_{\text{FD}} = \textbf{0}, \end{align} where the vector $\displaystyle \textbf{U}_{\text{FD}} = [H_0, E_0, P_0, J_0]^T,$ and the matrix $\mathcal{A}_{\text{FD},2M}$ is given by \begin{align} \mathcal{A}_{\text{FD},2M}=\begin{pmatrix} \omega & \Lambda_{2M} & 0 & 0 \\ \Lambda_{2M} & \epsilon_\infty \omega & \omega & 0 \\ 0 & 0 & i \omega & 1 \\ 0 & \omega_p^2 & - \omega_1^2& i \omega - 2\gamma \\ \end{pmatrix} \quad \text{with} \quad \Lambda_{2M}=\frac{2}{h}\sum_{p=1}^{M} \frac{\lambda_{2p-1}^{2M}}{(2p-1)} \sin \left[ \left( p-\frac{1}{2} \right) k_{\text{FD},2M} h \right]. \end{align} The numerical dispersion relation of the FD2M method is obtained by solving the characteristic equation of matrix $\mathcal{A}_{\text{FD},2M}$ and is given as \begin{align} \displaystyle \label{Dissemi3} \Lambda_{2M} = \pm \omega \sqrt{\epsilon(\WH{\omega};\mathbf{p})} = \pm \omega \sqrt{\epsilon_\infty\left(1- \frac{\epsilon_{d}/ \epsilon_\infty}{\displaystyle \WH{\omega}^2 + 2i \, \WH{\gamma} \, \WH{\omega} - 1}\right) } = \pm k^{\text{ex}}. 
\end{align} Using results from \cite{bokil2018high}, we can rewrite this numerical dispersion relation as \begin{align} \displaystyle \label{Dissemi4} \frac{1}{2}\,h\, \Lambda_{2M} = \sum_{p=1}^{M} \frac{[(2p-3)!!]^2}{(2p-1)!} \sin^{2p-1} \left( \frac{k_{\text{FD},2M} h}{2} \right) =\pm \frac{1}{2}K, \end{align} with $K:=k^{\text{ex}} h$. In general, for any $M\geq1$, we will have $(4M-2)$ discrete wave numbers $k_{\text{FD},2M}$ that satisfy \eqref{Dissemi4}. In particular, when $M=1$, i.e. for the FD2 scheme, the numerical dispersion relation \eqref{Dissemi4} is \begin{align} \label{tay1} \displaystyle \sin \left( \frac{k_{\text{FD},2} h}{2} \right) = \pm \frac{1}{2} K. \end{align} Thus, considering $K \ll 1$, and performing a Taylor expansion of \eqref{tay1} we obtain \begin{align} \label{eq:dis_FD_semi1} k_{\text{FD},2} =\pm k^\text{ex} \left( 1+ \frac{1}{24} K^2 + \frac{3}{640} K^4 + \mathcal{O}( K^6) \right), \end{align} which indicates that the numerical dispersion error of the FD2 scheme is second order accurate in space. \noindent For the case $M=2$, i.e. for the FD4 scheme, the numerical dispersion relation \eqref{Dissemi4} becomes \begin{align} \displaystyle \label{DisExact2} \frac{1}{6}\sin^3 \left( \frac{k_{\text{FD},4} h}{2} \right) + \sin\left( \frac{k_{\text{FD},4} h}{2} \right) = \pm \frac{1}{2} K. 
\end{align} \noindent The Taylor expansions of all roots in equation \eqref{DisExact2} are given by \begin{subequations} \begin{align} \label{eq:dis_FD_semi2} k_{\text{FD}^{\text{ phys}},4} &= \pm k^\text{ex} \left( 1 + \frac{3}{640}K^4 - \frac{1}{3584} K^6 + \mathcal{O}(K^8) \right), \\ \label{eq:dis_FD_semi2s1} k_{\text{FD}^{\text{ spur1}},4} &= \pm k^\text{ex} \left( i \frac{\arcsinh(2\sqrt{42})}{K} - \frac{1}{2\sqrt{7}} + i\frac{9}{1568}\sqrt{42}K + \mathcal{O}(K^2) \right),\\ \label{eq:dis_FD_semi2s2} k_{\text{FD}^{\text{ spur2}},4} &= \pm k^\text{ex} \left(- i \frac{\arcsinh(2\sqrt{42})}{K} - \frac{1}{2\sqrt{7}} - i\frac{9}{1568}\sqrt{42}K + \mathcal{O}(K^2) \right), \end{align} \end{subequations} \noindent where $k_{\text{FD}^\text{ phys},4}$ and $k_{\text{FD}^{\text{ spur1}},4}, k_{\text{FD}^{\text{ spur2}},4}$ are wave numbers corresponding to the physical modes and spurious modes, respectively, of the FD4 scheme. The physical modes indicate a fourth order accurate numerical dispersion error, while the leading terms in the spurious modes of $k$ are proportional to $\mathcal{O}(1/h)$, indicating an exponential increase or damping corresponding to the opposite sign in front. The existence of spurious, or non-physical modes for a variety of discretizations has been discussed in the literature, e.g., \cite{ainsworth2006dispersive, cohen1, cohen2}. The presence of spurious modes is not ideal; however, in practice these have not been shown to be serious issues for numerical methods. We would like to note that, to the best of the authors' knowledge, the existence of spurious modes for high order FD discretizations has not been analytically identified in the literature. Equations \eqref{eq:dis_FD_semi2s1} and \eqref{eq:dis_FD_semi2s2} provide explicit formulas for the spurious modes that we have not found in the literature. 
Below, we focus on the physical modes, and prove that for the FD scheme of order $2M$ (FD2M), the dispersion error is of $2M$-th order. Thus, the dispersion error is of the same order as the local truncation error for the finite difference schemes. \begin{theorem}\label{thm5} The physical modes of the dispersion relation \mathrm{e}qref{Dissemi4}, for the spatial semi-discrete finite difference method FD2M, results in the dispersion error identity \begin{align} \label{q1} \displaystyle k_{\text{FD}^{\text{ phys}},2M} = \pm k^{\text{ex}} \left( 1 + \varsigma_{_{2M}} \right), \mathrm{e}nd{align} \noindent for any $M\geq1$, where \begin{align} \label{q0} \displaystyle \varsigma_{_{2M}} = \frac{[(2M-1)!!]^2}{2^{2M}(2M+1)!} K^{2M} + \mathcal{O}(K^{2M+2}). \mathrm{e}nd{align} \noindent In other words, the dispersion error of the FD2M scheme \mathrm{e}qref{eq:semi} is of order $2M$. \\ \mathrm{e}nd{theorem} \begin{proof} Here, we only consider $k_{\text{FD}^{\text{ phys}},2M}$ with plus sign in front. Define $\varsigma_{_{2M}} := \displaystyle \frac{k_{\text{FD}^{\text{ phys}},2M}-k^{\text{ex}}}{k^{\text{ex}}}$. Then, substituting from \mathrm{e}qref{Dissemi3} for $k^{\text{ex}}$, rearranging, and (using results from \cite{bokil2018high}) we obtain the identity \begin{align*} \displaystyle k^{\text{ex}} \varsigma_{_{2M}} &= k_{\text{FD}^{\text{ phys}},2M} - \frac{2}{h}\sum_{p=1}^{M} \frac{\lambda_{2p-1}^{2M}}{(2p-1)} \sin\left[\left(p - \frac{1}{2}\right) k_{\text{FD}^{\text{ phys}},2M} h\right], \mathrm{e}nd{align*} i.e, $k_{\text{FD}^{\text{ phys}},2M}$ satisfies \mathrm{e}qref{q1} for $\varsigma_{_{2M}}$ as defined in \mathrm{e}qref{q0}. Next, we prove the identity \mathrm{e}qref{q0}. Because we only consider the physical modes here, it is reasonable to assume that $k_{\text{FD}^{\text{ phys}},2M} = k^{\text{ex}}\left(1 + O(K^{\tau}) \right)$ for some $\tau>0$. 
Hence, when $k_{\text{FD}^{\text{ phys}},2M}h= K \left(1 + \mathcal{O}(K^{\tau})\right)$ is small enough, performing a Taylor expansion with respect to $k_{\text{FD}^{\text{ phys}},2M} h$ we get \begin{align*} \displaystyle k^{\text{ex}}\varsigma_{_{2M}} &= k_{\text{FD}^{\text{ phys}},2M} - \frac{2}{h}\sum_{p=1}^{M} \frac{\lambda_{2p-1}^{2M}}{(2p-1)} \sum_{\mathrm{e}ll = 0}^{\mathrm{i}nfty} \frac{(-1)^{\mathrm{e}ll}}{(2\mathrm{e}ll + 1)!} \left[\frac{1}{2} \left(2p - 1\right) k_{\text{FD}^{\text{ phys}},2M} h\right]^{2\mathrm{e}ll + 1} \notag \\ &= k_{\text{FD}^{\text{ phys}},2M} - \frac{2}{h}\sum_{\mathrm{e}ll = 0}^{\mathrm{i}nfty}\sum_{p=1}^{M} \frac{\lambda_{2p-1}^{2M}}{(2p-1)} \frac{(-1)^{\mathrm{e}ll}}{(2\mathrm{e}ll + 1)!} \left[\frac{1}{2} \left(2p -1 \right) k_{\text{FD}^{\text{ phys}},2M} h\right]^{2\mathrm{e}ll + 1} \notag \\ &= k_{\text{FD}^{\text{ phys}},2M} - k_{\text{FD}^{\text{ phys}},2M} \sum_{\mathrm{e}ll = 0}^{\mathrm{i}nfty} \left[\sum_{p=1}^{M} \lambda_{2p-1}^{2M}(2p-1)^{2\mathrm{e}ll}\right] \frac{(-1)^{\mathrm{e}ll}}{2^{2\mathrm{e}ll}(2\mathrm{e}ll + 1)!} \left( k_{\text{FD}^{\text{ phys}},2M} h \right)^{2\mathrm{e}ll}. \mathrm{e}nd{align*} \noindent Based on the derivation of $\displaystyle \lambda_{2p-1}^{2M}$ as discussed in \cite{bokil2018high}, we have the following identities \begin{align*} \displaystyle & \sum_{p=1}^M \lambda_{2p-1}^{2M} = 1, \\ & \sum_{p=1}^M \lambda_{2p-1}^{2M}(2p-1)^{2\mathrm{e}ll} = 0, \quad \text{ for } \mathrm{e}ll = 1,2,..,M-1, \\%\label{lam2}\\ & \sum_{p=1}^M \lambda_{2p-1}^{2M}(2p-1)^{2M} = (-1)^{M+1} \left[(2M-1)!!\right]^2. 
\mathrm{e}nd{align*} \noindent Therefore, \begin{align} \displaystyle \label{eq:qq4} k^{\text{ex}}\varsigma_{_{2M}} &= k_{\text{FD}^{\text{ phys}},2M}-k_{\text{FD}^{\text{ phys}},2M} \left[ 1 -\frac{1}{2^{2M}} \frac{[(2M-1)!!]^2}{(2M+1)!}\left( k_{\text{FD}^{\text{ phys}},2M} h \right)^{2M} + \mathcal{O}\left(\left( k_{\text{FD}^{\text{ phys}},2M} h \right)^{2M+2}\right) \right] \notag \\ &= -k_{\text{FD}^{\text{ phys}},2M} \left[ -\frac{1}{2^{2M}} \frac{[(2M-1)!!]^2}{(2M+1)!}\left( K \right)^{2M} + \mathcal{O}\left(K^{2M+\tau}+K^{2M+2}\right) \right] \notag \\ &= -k^{\text{ex}} \left[ -\frac{1}{2^{2M}} \frac{[(2M-1)!!]^2}{(2M+1)!}\left( K \right)^{2M} + \mathcal{O}\left(K^{2M+\tau}+K^{2M+2}\right) \right], \mathrm{e}nd{align} \noindent which proves \mathrm{e}qref{q0}. Hence, with the assumption $k_{\text{FD}^{\text{ phys}},2M} = k^{\text{ex}}\left(1 + \mathcal{O}(K^{\tau})\right)$ and $\tau>0$, we can deduce that $k_{\text{FD}^{\text{ phys}},2M}= k^{\text{ex}}\left(1 + \mathcal{O}(K^{2M}) \right)$. \mathrm{e}nd{proof} Next, we illustrate the relative phase errors of the FD2M scheme for \mathrm{e}qref{eq:semi}, $M=1,\ldots,5$, with the parameter set $\mathbf{p}$ fixed at values given in \mathrm{e}qref{eq:parameter}. The numerical wave number $k_{\text{FD},2M}$ is obtained by solving \mathrm{e}qref{Dissemi4} exactly or with the help of a Newton solver (we set the tolerance at $10^{-18}$). Since $$k^{\text{ex}}h = \WH{\omega}\, \sqrt{\mathrm{e}psilon(\WH{\omega};\mathbf{p})}\, \omega_{1}h,$$ then $k_{\text{FD},2M}h$ depends on $\WH{\omega}$, $\mathbf{p}$, $\omega_{1}h$, and $M$, and so does the relative phase error \begin{align} \label{ERRTP2} \displaystyle \Psi_{\text{FD},2M}(\WH{\omega}) := \left| \frac{k_{\text{FD},2M}(\WH{\omega})-k^{\text{ex}}(\WH{\omega})}{k^{\text{ex}}(\WH{\omega})} \right| = \left| \frac{k_{\text{FD},2M}(\WH{\omega})h -k^{\text{ex}}(\WH{\omega})h}{k^{\text{ex}}(\WH{\omega})h}\right|. 
\mathrm{e}nd{align} First, we fix $\omega_{1} h=\pi/30$, and present the relative phase errors as functions of $\WH{\omega}\mathrm{i}n[0,3]$ in Figure \ref{Fig:FD}. Because the leading error term in the numerical wave number for the FD2M scheme is proportional to $K^{2M}$, we expect $2M$ order accuracy of the relative phase error with respect to $K$ at a fixed angular frequency. We observe that all schemes have significantly larger error around $\WH{\omega}=1$, while the error fades out near $\WH{\omega}=\sqrt{\mathrm{e}psilon_{s}/\mathrm{e}psilon_{\mathrm{i}nfty}}$, where $K$ is close to zero. As expected from analysis, higher order spatial accuracy does result in reduced relative phase errors. We present the relative phase errors at $\WH{\omega}=1$ with $\omega_1 h =\pi/30$ in the left plot in Figure \ref{Fig:FD}, while in the right plot we depict the $2M$ order convergence of relative phase errors with respect to $K$ for fixed $\WH{\omega}=1$. The slopes of phase errors in this plot are shown to be the same as those of reference lines with slope $2M$, indicating the $2M$th order of accuracy for each FD2M scheme, which agrees with the results in Theorem \ref{thm5}. We note the presence of just one peak in these plots as compared to the presence of two peaks in analogous plots of phase errors for temporal discretizations presented in Section \ref{time}. \begin{figure}[htb] \centering \mathrm{i}ncludegraphics[scale= 0.3] {pics/Phase_Semi_Error} \mathrm{i}ncludegraphics[scale= 0.3] {pics/Semi_RelErrorOrder} \caption{The relative phase error of physical modes for the spatial discretization FD2M. 
Left: fix $\omega_{1}h=\pi/30$ with $\WH{\omega}\in[0,3]$; right: fix $\WH{\omega}=1$ with different $\omega_{1}h \in\left\{ \pi/30,\pi/60,\pi/120,\pi/240\right\} $.} \label{Fig:FD} \end{figure} \section{Fully discrete FDTD Methods} $\,$ \label{fdtd} In this section, we consider the high order staggered spatial discretizations \eqref{eq:semi} combined with either the leap-frog scheme in time \eqref{eq:LF} or the trapezoidal scheme in time \eqref{eq:TP} presented in Section \ref{time}. These fully discrete methods are second order accurate in time and $2M$-th order accurate in space, thus we denote them as $(2,2M)$ leap-frog FDTD schemes or $(2,2M)$ trapezoidal FDTD methods. In particular, the $(2,2)$ leap-frog method is the extension of the standard Yee FDTD method to Lorentz dispersive media. Finally, comparisons will be made among all finite difference schemes under consideration. We first compute the dispersion relation for the fully discrete ($2,2M$) schemes. To do so, we assume the plane wave solutions \begin{align} \label{disp} \displaystyle X_j^n \equiv X_0 e^{i\left( k_{\text{FD},2M}^{*} j h - \omega n \Delta t \right)}, \end{align} where $*$ is either LF or TP. Substituting \eqref{disp} into the appropriate $(2,2M)$ FDTD method (which we have not explicitly written out here for brevity), we obtain the linear system \begin{align} \label{DisLF2} \mathcal{A}^{\text{*}}_{\text{FD},2M}\textbf{U}^{\text{*}}_{\text{FD}} = \textbf{0}, \end{align} where the coefficient matrix for the two schemes will be discussed in the next two sections. \subsection{ Fully discrete dispersion analysis: $(2,2M)$ leap-frog-FDTD schemes} We first consider the $(2,2M)$ leap-frog FDTD scheme.
For the leap-frog temporal discretization the coefficient matrix in the linear system \mathrm{e}qref{DisLF2} is given as \renewcommand{1}{1.5} \begin{align} \mathcal{A}^{\text{LF}}_{\text{FD},2M} = \begin{pmatrix} \sin \left( \frac{W}{2} \right) & \Lambda^{\text{LF}}_{2M} & 0 & 0\\ \Lambda^{\text{LF}}_{2M} & \mathrm{e}psilon_\mathrm{i}nfty \sin \left( \frac{W}{2} \right) & \sin \left( \frac{W}{2} \right) & 0 \\ 0 & 0 & i\sin \left( \frac{W}{2} \right) & \frac{\Delta t}{2} \cos \left( \frac{W}{2} \right) \\ 0 & \frac{\Delta t}{2}\omega_p^2 \cos \left( \frac{W}{2} \right) & - \frac{\Delta t}{2}\omega_1^2 \cos \left( \frac{W}{2} \right) & i\sin \left( \frac{W}{2} \right) -\gamma\Delta t \cos \left( \frac{W}{2} \right) \\ \mathrm{e}nd{pmatrix}, \mathrm{e}nd{align} \renewcommand{1}{1} with $$\Lambda^{\text{LF}}_{2M} = \frac{\Delta t}{h} \sum_{p=1}^{M} \frac{\lambda_{2p-1}^{2M}}{(2p-1)} \sin \left[ \left( p-\frac{1}{2} \right) k_{\text{FD},2M}^{\text{LF}} h \right]. $$ Based on previous discussions, we can derive the identity \begin{align} \displaystyle \label{DisLF7} \sum_{p=1}^{M} \frac{[(2p-3)!!]^2}{(2p-1)!} \sin^{2p-1} \left( \frac{k_{\text{FD},2M}^{\text{LF}} h}{2} \right) = \frac{1}{2} k^{\text{LF}} h. \mathrm{e}nd{align} For both the fully discrete methods, we focus our discussions on the physical modes. For the case of $W\ll 1$ and $K\ll 1$, we analyze the Taylor expansion of the physical modes for the fully discrete leap-frog FDTD schemes and observe the following pattern: \begin{align} \displaystyle k^{\text{LF}}_{\text{FD}^{\text{ phys}},2M} &= \pm k^{\text{ex}}\left( 1+ \frac{1}{12}\left( \frac{\delta(\WH{\omega};\textbf{p})}{\mathrm{e}psilon(\WH{\omega};\textbf{p})} - \frac{1}{2} \right)W^2+ \frac{[(2M-1)!!]^2}{2^{2M}(2M+1)!}K^{2M} + \mathcal{O}(K^{2M+2} + K^{2M} W^2+W^4) \right), \quad M \geq 1. 
\label{eq:disp_FD_LF1} \mathrm{e}nd{align} Furthermore, with the relation $\displaystyle K=\frac{\sqrt{\mathrm{e}psilon(\WH{\omega};\mathbf{p})}}{\nu\sqrt{\mathrm{e}psilon_{\mathrm{i}nfty}}}W$, we can treat $k_{\text{FD}^{\text{ phys}},2M}^{\text{LF}}$ as a function of $W$ and $\displaystyle \nu=\frac{\Delta t}{h\sqrt{\mathrm{e}psilon_{\mathrm{i}nfty}}}$, the CFL (Courant-Friedrich-Lewy) number subject to the stability constraint for the $(2,2M)$ leap-frog FDTD scheme. Assuming $\nu=O(1),$ we have \begin{align} \displaystyle k^{\text{LF}}_{\text{FD}^{\text{ phys}},2M} = \begin{cases} \pm \displaystyle k^{\text{ex}} \left( 1 + \frac{1}{12}\left( \frac{\delta(\WH{\omega};\textbf{p})}{\mathrm{e}psilon(\WH{\omega};\textbf{p})} - \frac{1}{2} + \frac{\mathrm{e}psilon(\WH{\omega};\textbf{p})}{2\mathrm{e}psilon_\mathrm{i}nfty\nu^2}\right)W^2 + \mathcal{O}(W^4) \right), & M = 1, \\ \\ \displaystyle \pm k^{\text{ex}} \Big( 1 + \frac{1}{12}\left( \frac{\delta(\WH{\omega};\textbf{p})}{\mathrm{e}psilon(\WH{\omega};\textbf{p})} - \frac{1}{2} \right)W^2 + \mathcal{O}(W^{4}) \Big), & M \geq 2. \mathrm{e}nd{cases} \label{eq:disp_FD_LF2} \mathrm{e}nd{align} We can see that due to the second order time discretizations employed, the fully discrete scheme always results in a second order dispersion error. Particularly for all $M\ge 2,$ the leading term in the dispersion error is identical, and independent of $\nu$ which comes solely from the temporal discretization. To compare the performance of the scheme for $M=1$ and $M\geq2$, we will focus on comparison of the coefficients of leading error terms in \mathrm{e}qref{eq:disp_FD_LF2}. We first consider ${\WH{\gamma}}=0$. We can make the following conclusions. \begin{itemize} \mathrm{i}tem For ${\WH{\omega}}$ in the medium absorption band, i.e. 
${\WH{\omega}} \mathrm{i}n (1, \sqrt{\mathrm{e}psilon_{s}/\mathrm{e}psilon_{\mathrm{i}nfty}}),$ it is easy to check that $$\displaystyle \left| \frac{\delta({\WH{\omega}};\mathbf{p})}{\mathrm{e}psilon({\WH{\omega}};\mathbf{p})} -\frac{1}{2} + \frac{\mathrm{e}psilon({\WH{\omega}};\mathbf{p})}{2 \mathrm{e}psilon_{\mathrm{i}nfty}\nu^2} \right| \geq \left| \frac{\delta({\WH{\omega}};\mathbf{p})}{\mathrm{e}psilon({\WH{\omega}};\mathbf{p})} -\frac{1}{2} \right|$$ based on the inequalities $\displaystyle\frac{\delta({\WH{\omega}};\mathbf{p})}{\mathrm{e}psilon({\WH{\omega}};\mathbf{p})}\leq-1$ and $\displaystyle\frac{\mathrm{e}psilon({\WH{\omega}};\mathbf{p})}{2 \mathrm{e}psilon_{\mathrm{i}nfty}\nu^2}\leq0,$ which implies that the high order schemes reduce the dispersion error as one would expect. This is true independent of other parameter choices. \mathrm{i}tem For other ${\WH{\omega}}$ values, the outcome will depend on the parameters. We can show that the general condition for the higher order scheme ($M\ge2$) to be more accurate in its dispersion error is equivalent to the inequality \begin{align} \displaystyle\left(\frac{\mathrm{e}psilon_d}{\mathrm{e}psilon_\mathrm{i}nfty}\right)^2 + (1-2\nu^2)(\WH{\omega}^2 - 1)^2 - 2 \left(\frac{\mathrm{e}psilon_d}{\mathrm{e}psilon_\mathrm{i}nfty}\right)\left(\WH{\omega}^2-1 + \nu^2(1-3\WH{\omega}^2)\right) \geq 0. \label{Sp333} \mathrm{e}nd{align} This is a quadratic inequality in ${\WH{\omega}}^2.$ We can conclude that with the CFL condition $\nu\leq1,$ which is a necessary condition to ensure the fully discrete $(2,2M)$ leap-frog-FDTD scheme is stable for any $M\geq1$ \cite{bokil2012,bokil2018high}, we have \begin{itemize} \mathrm{i}tem if $0< \nu \leq \frac{1}{\sqrt{2}}$, the condition \mathrm{e}qref{Sp333} always holds for all ${\WH{\omega}} \geq 0$. 
\item if $\frac{1}{\sqrt{2}}< \nu < 1$ and \begin{itemize} \item if $0<\epsilon_{d}/\epsilon_{\infty}\leq 2\nu^2-1$, then the condition \eqref{Sp333} holds on \[\displaystyle {\WH{\omega}}_{L} \leq {\WH{\omega}} \leq {\WH{\omega}}_{R}, \] \item if $\epsilon_{d}/\epsilon_{\infty} \geq 2\nu^2-1$, then the condition \eqref{Sp333} holds on \[\displaystyle 0 \leq {\WH{\omega}} \leq {\WH{\omega}}_{R}, \] \end{itemize} where \end{itemize} \begin{align*} \WH{\omega}_{L} &= \sqrt{ \frac{-1 - \epsilon_{d}/\epsilon_{\infty} + 2 \nu^2 + 3 \epsilon_{d}/\epsilon_{\infty} \nu^2-\nu \sqrt{ -4 \epsilon_{d}/\epsilon_{\infty} - 4 (\epsilon_{d}/\epsilon_{\infty})^2 + 8 \epsilon_{d}/\epsilon_{\infty} \nu^2 + 9 (\epsilon_{d}\nu/\epsilon_{\infty})^2 }}{ 2 \nu^2 -1 } }, \\ \WH{\omega}_{R} &= \sqrt{ \frac{-1 - \epsilon_{d}/\epsilon_{\infty} + 2 \nu^2 + 3 \epsilon_{d}/\epsilon_{\infty} \nu^2+ \nu \sqrt{-4 \epsilon_{d}/\epsilon_{\infty} - 4 (\epsilon_{d}/\epsilon_{\infty})^2 + 8 \epsilon_{d}/\epsilon_{\infty} \nu^2 + 9 (\epsilon_{d}\nu/\epsilon_{\infty})^2 }}{2 \nu^2 -1} }. \end{align*} \end{itemize} The case of ${\WH{\gamma}}>0$ is even more complicated. For low loss materials, in general we expect similar conclusions as in the lossless case. We now perform a numerical study, and compare the leading error terms in \eqref{eq:disp_FD_LF2} with $\nu=0.6$, which is small enough to guarantee that the scheme is stable for arbitrary $M$ (see the next section and \cite{bokil2018high}).
The absolute values of coefficients of leading error terms are plotted in Figure \ref{Fig:coeff}, with $\epsilon_{\infty}=2.25$, $\epsilon_{s}=5.25$ and various ${\WH{\gamma}}$ values. It is observed that we cannot determine which method performs better for the general case. From Figure \ref{Fig:coeff}, it is clear that higher order schemes have smaller dispersion error for ${\WH{\gamma}}=0, 0.01$ in the range ${\WH{\omega}} \in[0,3].$ This is no longer true for ${\WH{\gamma}}=0.1, 1.$ The discussion here reveals an interesting fact. For some parameter values, we can have counterintuitive results that the lower order scheme performs better than the higher order scheme when numerical dispersion is present. \begin{figure}[h] \centering \includegraphics[scale= 0.28] {pics/coef_gam_0-eps-converted-to.pdf} \includegraphics[scale= 0.28] {pics/coef_gam_001-eps-converted-to.pdf} \includegraphics[scale= 0.28] {pics/coef_gam_01-eps-converted-to.pdf} \includegraphics[scale= 0.28] {pics/coef_gam_1-eps-converted-to.pdf} \caption{Absolute value of coefficients of leading error terms in \eqref{eq:disp_FD_LF2} (denoted by $C$) for the $(2,2M)$ leap-frog FDTD scheme. } \label{Fig:coeff} \end{figure} \subsection{ Fully discrete dispersion analysis: $(2,2M)$ trapezoidal-FDTD schemes} We repeat the analysis done in the previous section for the fully discrete $(2,2M)$ trapezoidal FDTD schemes.
We obtain the numerical dispersion relation for these schemes by setting the determinant of the matrix \begin{align} \renewcommand{1}{1.5} \mathcal{A}_{\text{FD},2M}^{\text{TP}}=\begin{pmatrix} \sin \left( \frac{W}{2} \right) & \Lambda_{2M}^{\text{TP}} & 0 & 0 \\ \Lambda_{2M}^{\text{TP}} & \mathrm{e}psilon_\mathrm{i}nfty \sin \left( \frac{W}{2} \right) & \sin \left( \frac{W}{2} \right) & 0 \\ 0 & 0 & i\sin \left( \frac{W}{2} \right) & \frac{\Delta t}{2} \cos \left( \frac{W}{2} \right) \\ 0 & \frac{\Delta t}{2}\omega_p^2 \cos \left( \frac{W}{2} \right) & - \frac{\Delta t}{2}\omega_1^2 \cos \left( \frac{W}{2} \right) & i\sin \left( \frac{W}{2} \right) -\gamma \Delta t \cos \left( \frac{W}{2} \right) \\ \mathrm{e}nd{pmatrix} \renewcommand{1}{1} \mathrm{e}nd{align} to zero. In the above, we have $$\displaystyle \Lambda_{2M}^{\text{TP}}=\frac{\Delta t}{h} \sum_{p=1}^{M} \frac{\lambda_{2p-1}^{2M}}{(2p-1)} \sin \left[ \left( p-\frac{1}{2} \right) k_{\text{FD},2M}^{\text{TP}} h \right] \cos \left( \frac{W}{2} \right).$$ The numerical dispersion is given by \begin{align} \displaystyle \label{DisTP7} \sum_{p=1}^{M} \frac{[(2p-3)!!]^2}{(2p-1)!} \sin^{2p-1} \left( \frac{k_{\text{FD},2M}^{\text{TP}} h}{2} \right) = \frac{1}{2} k^\text{TP} h. \mathrm{e}nd{align} \noindent By requiring $W\ll 1$ and $K\ll 1$, we can obtain the physical modes in the form \begin{align} \displaystyle k^{\text{TP}}_{\text{FD}^{\text{ phys}},2M} &= \pm k^{\text{ex}}\left( 1+ \frac{1}{12}\left( \frac{\delta(\WH{\omega};\textbf{p})}{\mathrm{e}psilon(\WH{\omega};\textbf{p})} + 1 \right)W^2 + \frac{[(2M-1)!!]^2}{2^{2M}(2M+1)!}K^{2M} + \mathcal{O}(K^{2M+2} + K^{2M} W^2+W^4) \right), \quad M \geq 1. 
\label{eq:disp_FD_TP1} \mathrm{e}nd{align} For $W\ll 1$ with $\nu =\mathcal{O}(1)$, Taylor expansion gives us \begin{align} \renewcommand{1}{2} \displaystyle k^{\text{TP}}_{\text{FD}^{\text{ phys}},2M} = \left\{ \begin{array}{ll} \displaystyle \pm k^{\text{ex}} \left( 1 + \frac{1}{12}\left( \frac{\delta(\WH{\omega};\textbf{p})}{\mathrm{e}psilon(\WH{\omega};\textbf{p})} +1 + \frac{\mathrm{e}psilon(\WH{\omega};\textbf{p})}{2\mathrm{e}psilon_\mathrm{i}nfty \nu^2}\right)W^2 + \mathcal{O}(W^4) \right), & M = 1, \\ \displaystyle \pm k^{\text{ex}} \left( 1 + \frac{1}{12}\left( \frac{\delta(\WH{\omega};\textbf{p})}{\mathrm{e}psilon(\WH{\omega};\textbf{p})} +1 \right)W^2 + \mathcal{O}(W^{4}) \right), & M \geq 2. \mathrm{e}nd{array} \right. \label{eq:disp_FD_TP2} \renewcommand{1}{1} \mathrm{e}nd{align} This shows second order dispersion error in all cases. When ${\WH{\gamma}}=0$, it is easy to check that $\displaystyle \left| \frac{\delta({\WH{\omega}};\mathbf{p})}{\mathrm{e}psilon({\WH{\omega}};\mathbf{p})} +1 + \frac{\mathrm{e}psilon({\WH{\omega}};\mathbf{p})}{2\mathrm{e}psilon_{\mathrm{i}nfty}\nu^2} \right| \geq \left| \frac{\delta({\WH{\omega}};\mathbf{p})}{\mathrm{e}psilon({\WH{\omega}};\mathbf{p})} +1 \right|$ for any ${\WH{\omega}}\geq0$ and $\nu>0$. Hence, the high order FDTD schemes with $M \geq 2$ always have smaller dispersion error than the $(2,2)$ FDTD scheme. On the other hand, numerical tests comparing the coefficients of leading order error terms in the trapezoidal FDTD schemes are provided in Figure \ref{Fig:coeff2}, with various ${\WH{\gamma}}$ values and the same parameters as used in Figure \ref{Fig:coeff}. The plots indicate that it is again difficult to determine which coefficient (for $M=1$ or $M\geq 2$) is larger when ${\WH{\gamma}}$ is large. 
\begin{figure}[h] \centering \mathrm{i}ncludegraphics[scale= 0.28] {pics/coef_gam2_0-eps-converted-to.pdf} \mathrm{i}ncludegraphics[scale= 0.28] {pics/coef_gam2_001-eps-converted-to.pdf} \mathrm{i}ncludegraphics[scale= 0.28] {pics/coef_gam2_01-eps-converted-to.pdf} \mathrm{i}ncludegraphics[scale= 0.28] {pics/coef_gam2_1-eps-converted-to.pdf} \caption{Absolute value of coefficients of leading error terms in \mathrm{e}qref{eq:disp_FD_TP2} (denoted by $C$) for the $(2,2M)$ trapezoidal FDTD scheme. } \label{Fig:coeff2} \mathrm{e}nd{figure} \subsection{Comparison among fully discrete FDTD schemes} Here, we will present comparisons of the relative phase error for both the leap-frog and trapezoidal FDTD schemes using the parameters values fixed as in \mathrm{e}qref{eq:parameter}. For the fully discrete schemes, $\omega_{1}\Delta t$ and $\omega_{1}h$ are needed to determine $k^{*}_{\text{FD},2M}$. As shown in \cite{bokil2017energy, bokil2018high}, the schemes based on the trapezoidal rule are unconditionally stable, while the leap-frog schemes are conditionally stable, with the stability condition as $\nu \leq \nu^{2M}_{max}$, with $\nu_{max}^{2M}$ defined as the largest CFL number of the $(2,2M)$ leap-frog-FD scheme, given by the formula \cite{bokil2018high, bokil2012} \begin{align} \label{eq:CFL_max} \displaystyle \nu_{max}^{2M} = \frac{1}{\displaystyle \sum_{p=1}^{M} \frac{[(2p-3)!!]^2}{(2p-1)!}}. \mathrm{e}nd{align} We note that as $M$ increases, $\nu_{max}^{2M}$ decreases but is bounded from below by $\nu_{max}^{\mathrm{i}nfty}=2/\pi$, i.e. in the limiting case ($M\rightarrow\mathrm{i}nfty$), $\nu_{max}^{2M}$ approaches $2/\pi$ \cite{bokil2012}. First, we will consider the schemes with a normalized CFL number $\displaystyle \nu/\nu^{2M}_{max} = 0.7$ for both types of temporal discretizations. Relative phase errors are plotted in the range ${\WH{\omega}}\mathrm{i}n[0,3]$. 
In Figure \ref{Fig:Phase_Error_FD_fully1}, we show errors of LF$(2,2M)$ and TP$(2,2M)$ with $W_1 = \pi/30$. The fully discrete schemes do give two peaks near ${\WH{\omega}}=1$ and ${\WH{\omega}}=\sqrt{\epsilon_{s}/\epsilon_{\infty}}$. As seen in Section \ref{time}, the phase errors for schemes based on semi-discretizations in time have two peaks in this range, while only one peak is observed for the semi-discrete spatial schemes as seen in Section \ref{semi}. Thus, it is reasonable to believe that the second peak results from the time discretization, while the first one is associated with both the space and time discretizations. Comparing FDTD schemes with the same time discretization, the phase error for the scheme with $M=2$ is smaller than that of the second order scheme for $M=1$. However, there is no significant difference among the phase errors with $M\geq2$, indicating that dispersion errors are dominated by time discretizations when $M\geq2$. These observations are consistent with our analysis. On the other hand, the difference in phase error plots between LF$(2,2M)$ and TP$(2,2M)$ is similar to the results obtained for the semi-discrete in time schemes as seen in the second plot of Figure \ref{Fig:semi_time0}. \begin{figure}[h] \centering \includegraphics[scale= 0.3] {pics/LF_PhErr_omg0dt_pi30} \includegraphics[scale= 0.3] {pics/TP_PhErr_omg0dt_pi30} \caption{The relative phase error of fully discrete FDTD schemes for the physical modes with $\nu/\nu_{max}^{2M}=0.7$ and $W_1=\pi/30$. Left: the leap-frog scheme; right: the trapezoidal scheme.} \label{Fig:Phase_Error_FD_fully1} \end{figure} In the second experiment, we will consider the fully discrete trapezoidal FDTD scheme with various CFL numbers. We give the contour plots of the dispersion error at ${\WH{\omega}}=1$ in Figure \ref{Fig:Phase_Error_FD_fully2}, with $W_{1}\in[0.05,0.3]$ and $\omega_{1} h \in[0.01,0.1]$.
Here, the vertical coordinate is $W={\WH{\omega}}\,W_{1}$ and the horizontal coordinate is $\displaystyle |K| =|{\WH{\omega}}\sqrt{\mathrm{e}psilon({\WH{\omega}};\mathbf{p})} \, \omega_{1} h|$. In this coordinate system, for the range of values considered, the dispersion error of TP(2,2) can be improved by both taking smaller time steps and/or refining the spatial grid. With fixed time step and spatial grid, we can also reduce the phase error by increasing the scheme to fourth order. The contour lines in Figure \ref{Fig:Phase_Error_FD_fully2} of higher order ($M=2, 3$) schemes are horizontal, and the contours for TP(2,4) and TP(2,6) have no visible difference. Neither decreasing space mesh size nor increasing spatial order can reduce the phase error, which also illustrates the dominant role of temporal errors. Both our analysis and figures demonstrate that FDTD schemes with $M\geq3$ do not improve the phase error of fully discrete schemes significantly beyond that achieved for $M=2$. Hence, LF$(2,4)$ and TP$(2,4)$ seem to be the ``best'' schemes to work with from this perspective for most parameter choices (except for materials with large loss, or low-loss materials with certain range of frequencies as shown in Figures \ref{Fig:coeff} and \ref{Fig:coeff2}). \begin{figure} \centering \mathrm{i}ncludegraphics[scale= 0.23] {pics/Contour_TP22} \mathrm{i}ncludegraphics[scale= 0.23] {pics/Contour_TP24} \mathrm{i}ncludegraphics[scale= 0.23] {pics/Contour_TP26} \caption{The contour plots of relative phase error of fully discrete FD schemes for the physical modes with trapezoidal scheme. $\WH{\omega}=1$. } \label{Fig:Phase_Error_FD_fully2} \mathrm{e}nd{figure} \section{Spatial Discretization: Discontinuous Galerkin Schemes} \label{semidg} In this section, similar to Section \ref{semifdtd}, we perform semi-discrete and fully discrete analysis when the spatial variable is discretized by DG schemes. 
Here, we define the grid as $x_{j+1/2}=(j+1/2)h, \, j\mathrm{i}n\mathbb{Z},$ with uniform mesh size $h$. Let $I_j=[x_{j-1/2},x_{j+1/2}]$ be a mesh element, with $x_j=\frac{1}{2}(x_{j-\frac{1}{2}}+x_{j+\frac{1}{2}})$ as its center. We now define a finite dimensional discrete space, \begin{equation}\label{ldg:vhk} V_h^p=\{v : v|_{I_j} \mathrm{i}n P^p(I_j), \, j\mathrm{i}n \mathbb{Z} \}, \mathrm{e}nd{equation} which consists of piecewise polynomials of degree up to $p$ with respect to the mesh. For any $v\mathrm{i}n V_h^p$, let $v^+_{j+\frac{1}{2}}$ (resp. $v^-_{j+\frac{1}{2}}$) denote the limit value of $v$ at $x_{j+ \frac{1}{2}}$ from the element $I_{j+1}$ (resp. $I_j$), $[v]_{j+\frac{1}{2}}=v^+_{j+\frac{1}{2}} - v^-_{j+\frac{1}{2}}$ denote its jump, and $\{v\}_{j+\frac{1}{2}}=\frac{1}{2}(v^+_{j+\frac{1}{2}}+v^-_{j+\frac{1}{2}})$ be its average, again at $x_{j+\frac{1}{2}}$. The semi-discrete DG method for the system \mathrm{e}qref{eq:sys} is formulated as follows: find $H_h(t,\cdot)$, $D_h(t,\cdot)$, $E_h(t,\cdot)$, $P_h(t,\cdot)$, $J_h(t,\cdot)\mathrm{i}n V_h^p$, such that $\forall j,$ \begin{subequations} \label{eq:1d:sch} \begin{align} &\mathrm{i}nt_{I_j}\dd{t}{H_h}\phi dx +\mathrm{i}nt_{I_j} E_h\dd{x}\phi dx- (\widehat{E_h}\phi^-)_{j+1/2} + (\widehat{E_h}\phi^+)_{j-1/2}=0,\quad \forall \phi\mathrm{i}n V_h^p, \label{eq:sch1}\\ &\mathrm{i}nt_{I_j}\dd{t}{D_h} \phi dx +\mathrm{i}nt_{I_j} H_h\dd{x}\phi dx- (\widetilde{H_h}\phi^-)_{j+1/2} + (\widetilde{H_h}\phi^+)_{j-1/2}=0,\quad \forall \phi\mathrm{i}n V_h^p, \label{eq:sch2}\\ & \dd{t}{P_h}=J_h, \label{eq:sch3}\\ & \dd{t}{J_h}= -2\gamma J_h -\omega_1^2P_h+\omega_p^2E_h, \label{eq:sch4}\\ & D_{h}=\mathrm{e}psilon_{\mathrm{i}nfty} E_{h} + P_{h}. \label{eq:sch5} \mathrm{e}nd{align} \mathrm{e}nd{subequations} Both the terms $\widehat{E_h}$ and $\widetilde{H_h}$ are numerical fluxes, and they are single-valued functions defined on the cell interfaces and should be designed to ensure numerical stability and accuracy. 
In the present work, we consider the following general form of numerical fluxes similar to the ones introduced in \cite{cheng2017L2}, \begin{subequations} \label{eq:flux} \begin{align} & \widehat{E_{h}} = \{E_{h}\} + \alpha[E_{h}] + \beta_1[H_{h}], \label{eq:flux_E} \\ & \widetilde{H_h} = \{H_{h}\} - \alpha[H_{h}] + \beta_2[E_{h}]. \label{eq:flux_H} \mathrm{e}nd{align} \mathrm{e}nd{subequations} Here, $\alpha$, $\beta_1$ and $\beta_2$ are constants that are taken to be $\mathcal{O}(1)$, with $\beta_1$ and $\beta_2$ being non-negative for stability. For example, if we take $\alpha=\beta_1=\beta_2=0$, we have the central flux \begin{align} \label{eq:flux:c} \widehat{E_{h}}=\{E_{h}\}, \quad \widetilde{H_h}=\{H_{h}\}; \mathrm{e}nd{align} if $\alpha=\pm1/2$ and $\beta_1=\beta_2=0$, we have the alternating flux \begin{align} \label{eq:flux:a} \widehat{E_{h}}=E_{h}^{-}, \quad\widetilde{H_h}=H_{h}^{+}; \quad \text{or} \quad \widehat{E_{h}}=E_{h}^{+}, \quad\widetilde{H_h}=H_{h}^{-}; \mathrm{e}nd{align} and if $\alpha=0$, $\beta_{1}=1/(2\sqrt{\mathrm{e}psilon_\mathrm{i}nfty})$, and $\beta_{2}=\sqrt{\mathrm{e}psilon_{\mathrm{i}nfty}}/2$, we have the ``upwind" flux for the Maxwell's equations neglecting Lorentz dispersion \begin{align} \label{eq:flux:u} \widehat{E_{h}}=\{E_{h}\}+\frac{1}{2\sqrt{\mathrm{e}psilon_{\mathrm{i}nfty}}}[H_{h}], \quad \widetilde{H_h}=\{H_{h}\}+\frac{\sqrt{\mathrm{e}psilon_{\mathrm{i}nfty}}}{2}[E_{h}]. \mathrm{e}nd{align} In particular, when using the alternating flux with $p=0$, it is easy to check that the DG scheme is equivalent to FD2 discretization. \subsection{Semi-discrete in space dispersion analysis} In order to carry out the dispersion analysis, for piecewise $P^p$ polynomials, we choose the basis functions on each element $I_{j}$ to be Lagrange polynomials $\phi_m^j(x)=\phi_{m}(\xi)$, $\xi=(x-x_{j})/h$: $$\phi_{m}(\xi_{n})=\delta_{m,n}= \left\{ \begin{array}{ll} 1, & n=m\\ 0, & n\neq m\\ \mathrm{e}nd{array} \right. 
,\quad m,n=0,\ldots,p, \quad \text{with} \quad \xi_{n}= \left\{ \begin{array}{ll} 0, & p=0,\\ \frac{n}{p}-\frac{1}{2}, & \text{otherwise}.\\ \mathrm{e}nd{array} \right. $$ Then, the numerical solution on $I_{j}$ can be written as \begin{align} X_{h}=\sum_{m=0}^{p}\mathcal{X}^{m}_{j}\phi_{m}^j. \mathrm{e}nd{align} Here, $X$ can be $H$, $D$, $E$, $P$ and $J$. In particular, $\mathcal{X}^{m}_{j}$ is the point value of $X_{h}$ at $x_{j}+\xi_{m}h$. We define the vector $\textbf{X}_{j}=[\mathcal{X}^{0}_{j}, \cdots, \mathcal{X}^{p}_{j} ]^{T}$. Then, the semi-discrete scheme \mathrm{e}qref{eq:1d:sch} can be transformed into \begin{subequations} \label{eq:DG_matrix} \begin{align} & \mc{M} (\textbf{H}_{j})_{t} +\mc{V}\textbf{E}_{j} +\mc{Q}_{-1}(\alpha)\textbf{E}_{j-1} +\mc{Q}_{0}(\alpha)\textbf{E}_{j} +\mc{Q}_{1}(\alpha)\textbf{E}_{j+1} +\mc{S}_{-1}(\beta_{1})\textbf{H}_{j-1} +\mc{S}_{0}(\beta_{1})\textbf{H}_{j} +\mc{S}_{1}(\beta_{1})\textbf{H}_{j+1} = 0\\ & \mc{M} (\textbf{D}_{j})_{t} + \mc{V}\textbf{H}_{j} +\mc{Q}_{-1}(-\alpha)\textbf{H}_{j-1} +\mc{Q}_{0}(-\alpha)\textbf{H}_{j} +\mc{Q}_{1}(-\alpha)\textbf{H}_{j+1} +\mc{S}_{-1}(\beta_{2})\textbf{E}_{j-1} +\mc{S}_{0}(\beta_{2})\textbf{E}_{j} +\mc{S}_{1}(\beta_{2})\textbf{E}_{j+1} = 0 \\ & (\textbf{P}_{j})_{t}=\textbf{J}_{j}\\ & (\textbf{J}_{j})_{t}=-2\gamma\textbf{J}_{j} -\omega_{1}^{2} \textbf{P}_{j} +\omega_{p}^{2}\textbf{E}_{j}\\ & \textbf{D}_{j}=\mathrm{e}psilon_{\mathrm{i}nfty}\textbf{E}_{j} +\textbf{P}_{j} \mathrm{e}nd{align} \mathrm{e}nd{subequations} where, $\mc{M}$, $\mc{V}$, $\mc{Q}_{*}$ and $\mc{S}_{*}$ are $(p+1)\times(p+1)$ matrices, with $*$ being $\pm1$ or 0. 
$\mc{M}$ is the element mass matrix, $$(\mathcal{M})_{m,n}=h \mathrm{i}nt_{-1/2}^{1/2} \phi_{m}(\xi) \phi_{n}(\xi) d\xi.$$ $\mathcal{V}$ is the element stiffness matrix, $$(\mathcal{V})_{m,n}=\mathrm{i}nt_{-1/2}^{1/2} \phi'_{m}(\xi) \phi_{n}(\xi) d\xi.$$ $\mathcal{Q}_{*}$ and $\mathcal{S}_{*}$ are related to the numerical flux, \begin{align*} (\mathcal{Q}_{-1}(z))_{m,n}=\left\{\begin{array}{ll} \frac{1}{2}-z, & m=0,n=p,\\ 0, & \text{otherwise},\\ \mathrm{e}nd{array} \right. && (\mathcal{S}_{-1}(z))_{m,n} =\left\{\begin{array}{ll} -z, & m=0,n=p,\\ 0, & \text{otherwise},\\ \mathrm{e}nd{array} \right. \mathrm{e}nd{align*} \begin{align*} (\mathcal{Q}_{1}(z))_{m,n} =\left\{\begin{array}{ll} -\frac{1}{2}-z, & m=p,n=0,\\ 0, & \text{otherwise},\\ \mathrm{e}nd{array} \right. && (\mathcal{S}_{1}(z))_{m,n} =\left\{\begin{array}{ll} -z, & m=p,n=0,\\ 0, & \text{otherwise},\\ \mathrm{e}nd{array} \right. \mathrm{e}nd{align*} \begin{align*} (\mathcal{Q}_{0}(z))_{m,n} =\left\{\begin{array}{ll} \frac{1}{2}+z, & m=n=0,\\ -\frac{1}{2}+z, & m=n=p,\\ 0, & \text{otherwise},\\ \mathrm{e}nd{array} \right. && (\mathcal{S}_{0}(z))_{m,n} =\left\{\begin{array}{ll} z, & m=n=0,\\ z, & m=n=p,\\ 0, & \text{otherwise}.\\ \mathrm{e}nd{array} \right. \mathrm{e}nd{align*} Following standard practice for dispersion analysis for DG schemes, we formulate the linear system resulting from \mathrm{e}qref{eq:DG_matrix}. 
With the assumption that $$\mathcal{X}^{m}_{j}(t)=X_{0}^{m}e^{i (k_{\text{DG},p} j h -\omega t)}, \quad m=0, \ldots, p,$$ we can obtain \begin{align} \mathcal{A}_{\text{DG},p}\textbf{U}_{\text{DG}} = \textbf{0}, \mathrm{e}nd{align} with $\textbf{U}_{\text{DG}}=[H^{0}_{0},\cdots,H^{p}_{0}, E^{0}_{0},\cdots,E^{p}_{0}, P^{0}_{0},\cdots, P^{p}_{0}, J^{0}_{0},\cdots,J^{p}_{0}]^T$, and \begin{align} \mathcal{A}_{\text{DG},p}=\begin{pmatrix} -i\omega \mc{M}+\mc{R} & \mc{P} & 0 & 0\\ \widetilde{\mc{P}} & -i\omega \mathrm{e}psilon_{\mathrm{i}nfty}\mc{M}+\widetilde{\mc{R}} & -i\omega\mc{M} & 0\\ 0 & 0 & -i\omega \mathcal{I} & -\mathcal{I}\\ 0 & -\omega_{p}^{2}\mathcal{I} & \omega_{1}^{2}\mathcal{I} & (-i\omega+2\gamma) \mathcal{I} \\ \mathrm{e}nd{pmatrix}. \mathrm{e}nd{align} Here, $\mc{I}$ is the $(p+1)\times(p+1)$ identity matrix, and \begin{align*} \displaystyle & \mc{P}= \mc{V} +\mc{Q}_{-1}(\alpha)e^{-i k_{\text{DG},p} h} +\mc{Q}_{0}(\alpha) +\mc{Q}_{1}(\alpha)e^{i k_{\text{DG},p} h} , && \mc{R}= \mc{S}_{-1}(\beta_1)e^{-i k_{\text{DG},p} h} +\mc{S}_{0}(\beta_1) +\mc{S}_{1}(\beta_1)e^{i k_{\text{DG},p} h} ,\\ & \widetilde{\mc{P}}= \mc{V} +\mc{Q}_{-1}(-\alpha)e^{-i k_{\text{DG},p} h} +\mc{Q}_{0}(-\alpha) +\mc{Q}_{1}(-\alpha)e^{i k_{\text{DG},p} h} , && \widetilde{\mc{R}}= \mc{S}_{-1}(\beta_2)e^{-i k_{\text{DG},p} h} +\mc{S}_{0}(\beta_2) +\mc{S}_{1}(\beta_2)e^{i k_{\text{DG},p} h} . \mathrm{e}nd{align*} Then, we can derive the corresponding numerical dispersion relation for the DG methods by solving $\det(\mc{A}_{\text{DG},p})=0$. Due to the dependence on the flux parameters $\alpha, \beta_1, \beta_2$ and the coupling of the local degrees of freedom, the dispersion relation is more complicated than that of the FD scheme. We have the following theorem, which characterizes the dispersion relation satisfied by $k_{\text{DG},p}$. 
\begin{theorem} \label{thm1} Consider the DG scheme \eqref{eq:1d:sch} with $V^{p}_{h}$ as the discrete space, then $k_{\text{DG},p}$ are the roots of a quartic polynomial equation in terms of $\xi=e^{ik_{\text{DG},p}h}$ if $\alpha^2+\beta_{1}\beta_{2}\ne1/4$, and $k_{\text{DG},p}$ are the roots of a quadratic polynomial equation in terms of $\xi=e^{ik_{\text{DG},p}h}$ when $\alpha^2+\beta_{1}\beta_{2}=1/4.$ \end{theorem} \begin{proof} For $p=0$, we can obtain \begin{align} \label{eq:p0} \det(\mc{A}_{\text{DG},0}) =& \left(e^{-2i k_{\text{DG},0} h}+e^{2i k_{\text{DG},0} h}\right) \left(-1+4(\alpha^2+\beta_{1}\beta_{2})\right) \nonumber\\ & + 4 \left(e^{-i k_{\text{DG},0} h}+e^{i k_{\text{DG},0} h}\right) \left(-4 (\alpha^{2} +\beta_{1}\beta_{2}) + i \left( \beta_{1}\, \epsilon(\WH{\omega}; \mathbf{p}) +\beta_{2} \right) \omega h \right) \nonumber\\ & + 2 \left(1 + 12 (\alpha^{2} +\beta_{1}\beta_{2}) - 4 i \left( \beta_{1}\, \epsilon(\WH{\omega}; \mathbf{p}) +\beta_{2} \right) \omega h - 2 (k^{\text{ex}} h)^2 \right). \end{align} Hence, the conclusion is straightforward. For $p\geq1$, note that the term $e^{ik_{\text{DG},p}h}$ only appears in $\mc{P}_{p0}$, $\widetilde{\mc{P}}_{p0}$, $\mc{R}_{p0}$ and $\widetilde{\mc{R}}_{p0}$, and $e^{-ik_{\text{DG},p}h}$ only appears in $\mc{P}_{0p}$, $\widetilde{\mc{P}}_{0p}$, $\mc{R}_{0p}$ and $\widetilde{\mc{R}}_{0p}$. Hence, by the properties of the determinant under row or column operations, $\det(\mc{A}_{\text{DG},p})$ is in the form of $C_{0} +C_{1}e^{ik_{\text{DG},p}h} +C_{2} e^{2ik_{\text{DG},p}h} + C_{-1} e^{-ik_{\text{DG},p}h} + C_{-2} e^{-2ik_{\text{DG},p}h}$, where $C_{i}$, $i=-2,-1,0,1,2$, do not depend on $k_{\text{DG},p}$. Hence, $k_{\text{DG},p}$ is the root of $C_{-2} + C_{-1}\xi+C_{0}\xi^2 +C_{1}\xi^3 +C_{2}\xi^4=0$ with $\xi=e^{ik_{\text{DG},p}h}$.
Furthermore, if $\alpha^2+\beta_{1}\beta_{2}=1/4$, we have the following two cases: \begin{itemize} \mathrm{i}tem Case 1: $\alpha=\pm1/2$ and at least one of $\beta_{1}$ and $\beta_{2}$ is zero. Without loss of generality, we assume $\alpha=1/2$ and $\beta_{1}=0$. It is easy to check that $e^{ik_{\text{DG},p}h}$ can only appear in $\mc{P}_{p0}$, and $\widetilde{\mc{R}}_{p0}$, and $e^{-ik_{\text{DG},p}h}$ can only appear in $\widetilde{\mc{P}}_{0p}$ and $\widetilde{\mc{R}}_{0p}$. Therefore, the determinant $\det(\mc{A}_{\text{DG},p})$ is in the form of $C_{0} +C_{1}e^{ik_{\text{DG},p}h}+ C_{-1} e^{-ik_{\text{DG},p}h}$. \mathrm{i}tem Case 2: $\alpha\neq\pm1/2$, $\beta_{1}\neq0$ and $\beta_{2}\neq0$. Then, all of $\mc{P}$, $\widetilde{\mc{P}}$, $\mc{R}$ and $\widetilde{\mc{R}}$ include $e^{ik_{\text{DG},p}h}$ and $e^{-ik_{\text{DG},p}h}$. However, with the help of the fact that $$\frac{1/2+\alpha}{\beta_{1}} = \frac{\beta_{2}}{1/2-\alpha},$$ we can check that the new matrix $\left(\mc{D}\,\mc{A}_{\text{DG},p}\right)$ only has $e^{-ik_{\text{DG},p}h}$ in its first row and $e^{ik_{\text{DG},p}h}$ in its $(p+1)$-th row, where the matrix $\mc{D}$ is defined as following: \begin{align} \mc{D}_{m,n}=\left\{ \begin{array}{ll} 1, & m=n ,\\ -(1/2+\alpha)/\beta_{1}, & m=p+2, n=1,\\ (1/2+\alpha)/\beta_{1}, & m=2p+2, n=p+1,\\ 0, & \text{otherwise},\\ \mathrm{e}nd{array} \right. \qquad m, n=1,\cdots 4(p+1). \mathrm{e}nd{align} Hence, we can obtain that $\det(\mc{D}\,\mc{A}_{\text{DG},p})$ should be in the form of $C_{0} +C_{1}e^{ik_{\text{DG},p}h}+ C_{-1} e^{-ik_{\text{DG},p}h}$. On the other hand, it is easy to check that $\det(\mc{D})=1$. Therefore, the determinant $\det(\mc{A}_{\text{DG},p})=\det(\mc{D}\,\mc{A}_{\text{DG},p})$ . \mathrm{e}nd{itemize} In both cases, we can reach the conclusion that $k_{\text{DG},p}$ is the root of $C_{0} +C_{1}\xi +C_{2}\xi^2=0 $ with $\xi=e^{ik_{\text{DG},p}h}$. 
\end{proof} By Theorem \ref{thm1}, we can see that for the DG scheme \eqref{eq:1d:sch} employing the central flux \eqref{eq:flux:c} ($\alpha=\beta_{1}=\beta_{2}=0$), there are four discrete wave numbers $k_{\text{DG},p},$ corresponding to two physical modes and two spurious modes. In contrast, for the alternating fluxes \eqref{eq:flux:a} and the upwind flux \eqref{eq:flux:u} ($\alpha^2+\beta_{1}\beta_{2}=1/4$), there are only two discrete wave numbers $k_{\text{DG},p}$, corresponding to the physical modes. This conclusion holds for arbitrary $p.$ Unlike the FD scheme, when we increase the order of accuracy of the scheme, the number of modes will not change when the dispersion relation is expressed by representing the discrete wave number as a function of the angular frequency. Unfortunately, we cannot obtain an analytical dispersion relation formula in closed form for general $p\geq0$. Instead, in the following, we will discuss the cases of $p=0, \ldots, 3$ based on the small wave number limit $K \rightarrow 0$, while for higher order cases the dispersion relation becomes more cumbersome and is not included in this paper. In the following, we write \begin{align} \label{eq:b} b=\omega \left( \beta_{1}\, \epsilon(\WH{\omega}; \mathbf{p}) +\beta_{2} \right), \quad \text{and} \quad B=b\, h. \end{align} Note that $b(\omega)=0$ if and only if $\beta_{1}=\beta_{2}=0$, and in the Taylor expansion, we assume $B\ll 1$ as well. The results are given as follows. \begin{itemize} \item When $\alpha=\beta_{1}=\beta_{2}=0$, there are four discrete wave numbers.
Two of them correspond to the physical modes \renewcommand{1}{1.5} \begin{align} \label{eq:dis_DG_semi1} k_{\text{DG}^{\text{phys}},p} =& \left\{ \begin{array}{ll} \pm k^{\text{ex}} \left( 1 +\frac{1}{6} K^2 +\mc{O}\left( K^4 \right) \right), & p=0, \\ \pm k^{\text{ex}} \left( 1 -\frac{1}{48} K^2 +\mc{O}\left( K^4 \right) \right), & p=1, \\ \pm k^{\text{ex}} \left( 1 +\frac{1}{16800} K^6 +\mc{O}\left( K^8 \right) \right), & p=2, \\ \pm k^{\text{ex}} \left( 1 -\frac{1}{806400} K^6 +\mc{O}\left( K^8 \right) \right), & p=3. \\ \mathrm{e}nd{array}\right. \mathrm{e}nd{align} \renewcommand{1}{1} The other two are the spurious modes \renewcommand{1}{1.5} \begin{align} k_{\text{DG}^{\text{spur}}, p} =& \left\{ \begin{array}{ll} \pm k^{\text{ex}} \left( -\frac{\pi}{K} + 1 +\frac{1}{6}K^2 +\mc{O}( K^4 ) \right) , & p=0, \\ \pm k^{\text{ex}} \left( \frac{1}{3} +\frac{5}{1296}K^2 + \mc{O}\left( K^4 \right) \right), & p=1, \\ \pm k^{\text{ex}} \left( -\frac{\pi}{K} + \frac{1}{5} +\frac{1}{375}K^2 +\mc{O}\left( K^4 \right) \right) , & p=2, \\ \pm k^{\text{ex}} \left( \frac{1}{7} +\frac{4}{5145}K^2 +\mc{O}\left( K^4 \right) \right), & p=3. \\ \mathrm{e}nd{array}\right. \mathrm{e}nd{align} \renewcommand{1}{1} These formulas show that, when using the central flux, the physical modes have a dispersion error with order \begin{align} \left\{\begin{array}{ll} 2p+2, & \text{if $p$ is even},\\ 2p, & \text{if $p$ is odd}.\\ \mathrm{e}nd{array} \right. \mathrm{e}nd{align} Moreover, the relative phase errors do not rely on other model parameters except $K$. When $p$ is odd, the spurious modes consists of two waves with wave length $(2p+1)$ times the actual wave length. And when $p$ is even, the spurious modes $k_{\text{DG}, s1, s2}$ will be inversely proportional to $h$, similar to the FD case. 
\mathrm{i}tem When $\alpha^2+\beta_{1}\beta_{2}=1/4$, we have two physical modes: \renewcommand{1}{1.5} \begin{align} \label{eq:dis_DG_semi2} k_{\text{DG}^{\text{phys}},p} =& \left\{ \begin{array}{ll} \pm k^{\text{ex}} \left( 1 +\frac{1}{2} i B +\frac{1}{24}\left( K^2-9B^2\right) +\mc{O}( i K^2 B + i B^3) \right), & p=0, \\ \pm k^{\text{ex}} \left( 1 +\frac{1}{72} i K^2 B +\frac{1}{1080} \left(K^4-5 K^2 B^2\right) +\mc{O}\left( i K^2 B^3 \right) \right), & p=1, \\ \pm k^{\text{ex}} \left( 1 +\frac{1}{7200} i K^4 B +\frac{1}{252000}\left( K^6-7 K^4 B^2 \right) +\mc{O}(i K^6 B + i K^4 B^3) \right), & p=2, \\ \pm k^{\text{ex}} \left( 1 +\frac{1}{1411200} i K^6 B +\frac{1}{88905600} \left( K^8-9 K^6 B^2\right) +\mc{O}(i K^8 B + i K^6 B^3) \right), & p=3. \\ \mathrm{e}nd{array}\right. \mathrm{e}nd{align} \renewcommand{1}{1} Therefore, when $B=0$, i.e. with the alternating fluxes, the scheme has a dispersion error of order $(2p+2)$. In particular, the dispersion errors for $\alpha=1/2$ and $\alpha=-1/2$ are the same. On the other hand, for the upwind flux ($B\neq0$), we can observe a $(2p+1)$-th order dispersion error, which is related to $K$ and $B$ at the same time. \mathrm{e}nd{itemize} It is clear that the order of dispersion error for DG scheme is higher than that of the $L^2$ convergence. This is an advantage of DG schemes, and differs from FD schemes significantly. To verify the results above, in Figure \ref{Fig:semi_DG}, we study the relative phase error of the physical modes of the semi-discrete DG scheme \mathrm{e}qref{eq:1d:sch} with parameters in \mathrm{e}qref{eq:parameter} using the alternating flux (DG-AL) (only the result of one version of the alternating fluxes is shown, because they are identical to each other), the central flux (DG-CE) and the upwind flux (DG-UP). The numerical wave number $k_{\text{DG},p}$ is obtained by solving $\det(\mathcal{A}_{\text{DG},p})=0$ exactly. 
First, we fix $\omega_{1} h=\pi/30$, and plot the relative phase error as a function of ${\WH{\omega}}$, see the first row of Figure \ref{Fig:semi_DG}. It is clear that DG-AL always gives the smallest error when the same discrete space is used. This can also be verified by comparing the orders and coefficients in \eqref{eq:dis_DG_semi1} and \eqref{eq:dis_DG_semi2}. All schemes have significantly larger errors around ${\WH{\omega}}=1$. For DG-AL and DG-CE, the phase errors approach zero near ${\WH{\omega}}=\sqrt{\epsilon_{s}/\epsilon_{\infty}}$ where $K$ is close to zero. For DG-UP with $p=0$, the error is dominated by $B$ (see equation \eqref{eq:dis_DG_semi2}). Therefore, the ``zero'' point would shift to the ``zero'' point of $B$, which is about $\sqrt{1+\epsilon_{d}/(2\epsilon_{\infty})}\approx1.291$. Comparing FD2 (Figure \ref{Fig:FD}) and DG-AL with $P^{0}$, they have the same performance. However, once we increase the order to $p=1$, DG-AL has significantly smaller error than FD4, resulting from the smaller coefficients in the leading error terms, see \eqref{eq:dis_FD_semi1}, \eqref{eq:dis_FD_semi2} and \eqref{eq:dis_DG_semi2}. In the second row of Figure \ref{Fig:semi_DG}, we present the errors at ${\WH{\omega}}_{1}=1$ with mesh refinement. Slopes indicate the order of accuracy for each scheme, which agree with our analysis in \eqref{eq:dis_DG_semi1} and \eqref{eq:dis_DG_semi2}.
\begin{figure}[htbp] \centering \includegraphics[scale= 0.28] {pics/Phase_Error_semi_AL-eps-converted-to.pdf} \includegraphics[scale= 0.28] {pics/Phase_Error_semi_CE-eps-converted-to.pdf} \includegraphics[scale= 0.28] {pics/Phase_Error_semi_UP-eps-converted-to.pdf}\\ \includegraphics[scale= 0.28] {pics/Phase_Error_semi_AL_order-eps-converted-to.pdf} \includegraphics[scale= 0.28] {pics/Phase_Error_semi_CE_order-eps-converted-to.pdf} \includegraphics[scale= 0.28] {pics/Phase_Error_semi_UP_order-eps-converted-to.pdf} \caption{The relative phase error of semi-discrete DG scheme for the physical modes. First row: fix $\omega_{1}h=\pi/30$ with ${\WH{\omega}}_{1}\in[0,3]$; second row: fix ${\WH{\omega}}_{1}=1$ with different $\omega_{1}h\in\{ \frac{\pi}{30},\frac{\pi}{60},\frac{\pi}{120},\frac{\pi}{240} \}$.} \label{Fig:semi_DG} \end{figure} \section{Fully discrete Discontinuous Galerkin Methods} \label{dgtd} Here, we consider the DG scheme \eqref{eq:1d:sch} coupled with the leap-frog time discretization \eqref{eq:LF} and the trapezoidal time discretization \eqref{eq:TP}. To analyze the dispersion relation for those fully discrete schemes, we assume numerical solutions in the form of \[ (\mathcal{X}^{m})^{n}_{j}=X^{m}_{0}e^{i(k_{\text{DG},p}^{\text{*}} j h -\omega n\Delta t)}, \quad m=0, \ldots, p. \] Then the fully discrete system will yield a linear system \begin{align} \mathcal{A}_{\text{DG},p}^{\text{*}}\textbf{U}_{\text{DG}}=0, \end{align} where $*$ can be LF or TP.
\subsection{Fully discrete dispersion analysis: leap-frog-DG schemes} From simple algebra, $\mathcal{A}^{\text{LF}}_{\text{DG},p}$ is given by \renewcommand{1}{1.5} \begin{align*} \begin{small} \begin{pmatrix} -i\sin(\frac{W}{2}) \mc{M}+\frac{\Delta t}{2}\cos(\frac{W}{2})\mc{R} & \frac{\Delta t}{2}\mc{P} & 0 & 0\\ \frac{\Delta t}{2}\widetilde{\mc{P}} & -i\mathrm{e}psilon_{\mathrm{i}nfty}\sin(\frac{W}{2})\mc{M}+\frac{\Delta t}{2} \cos(\frac{W}{2})\widetilde{\mc{R}} & -i\sin(\frac{W}{2})\mc{M} & 0\\ 0 & 0 & i\sin(\frac{W}{2}) \mathcal{I} & \frac{\Delta t}{2}\cos(\frac{W}{2})\mathcal{I}\\ 0 & \omega_{p}^{2}\frac{\Delta t}{2}\cos(\frac{W}{2})\mathcal{I} & -\omega_{1}^{2}\frac{\Delta t}{2}\cos(\frac{W}{2})\mathcal{I} & (i\sin(\frac{W}{2})-\gamma\Delta t \cos(\frac{W}{2})) \mathcal{I} \\ \mathrm{e}nd{pmatrix}. \mathrm{e}nd{small} \mathrm{e}nd{align*} \renewcommand{1}{1} Since $\mc{P}$, $\widetilde{\mc{P}}$, $\mc{R}$ and $\widetilde{\mc{R}}$ are multiplied by different factors, we can not cancel $e^{ik^{\text{LF}}_{\text{DG},p}h}$ or $e^{-i k^{\text{LF}}_{\text{DG},p}h}$ in $\mathcal{A}^{\text{LF}}_{\text{DG},p}$ as what we did in case 2 of the proof of Theorem \ref{thm1}. Hence, the dispersion relation satisfied by $k^{\text{LF}}_{\text{DG},p}$ will satisfy the following theorem which differs from Theorem \ref{thm1}. \begin{theorem} \label{thm2} Consider the fully discrete leap-frog-DG scheme with $V^{p}_{h}$ as the discrete space, if $\alpha=\pm\frac{1}{2}$ and $\beta_{1}=\beta_{2}=0$, then $k^{\text{LF}}_{\text{DG},p}$ are the roots of a quadratic polynomial equation in terms of $\xi=e^{ik^{\text{LF}}_{\text{DG},p}h}$. Otherwise, $k^{\text{LF}}_{\text{DG},p}$ are the roots of a quartic polynomial equation in terms of $\xi=e^{ik^{\text{LF}}_{\text{DG},p}h}$. \mathrm{e}nd{theorem} Below, we analyze numerical dispersion property of the physical modes when $W\ll 1$. 
Note that $\displaystyle B=bh=\frac{b/\omega}{\sqrt{\mathrm{e}psilon_{\mathrm{i}nfty}}\, \nu} W\ll 1$ with a fixed CFL number $\nu$, with $B$ given in \mathrm{e}qref{eq:b}. \begin{itemize} \mathrm{i}tem When using the central flux, i.e. $\alpha=\beta_{1}=\beta_{2}=0$, we have four solutions, and two of them correspond to the physical modes, \begin{align} \label{eq:disp_LF_DG_CE1} \renewcommand{1}{2} k^{\text{LF}}_{\text{DG}^{\text{ phys}},p} = \left\{\begin{array}{ll} \displaystyle \pm k^{\text{ex}}\left( 1 + \frac{1}{12}\left( \frac{\delta({\WH{\omega}};\mathbf{p})}{\mathrm{e}psilon({\WH{\omega}};\mathbf{p})} -\frac{1}{2}\right) W^2 +\frac{1}{6}K^2 +\mc{O}\left( W^4 + W^2 K^2 + K^4 \right)\right) , & p=0, \\ \displaystyle \pm k^{\text{ex}}\left( 1 + \frac{1}{12}\left( \frac{\delta({\WH{\omega}};\mathbf{p})}{\mathrm{e}psilon({\WH{\omega}};\mathbf{p})} -\frac{1}{2} \right) W^2 -\frac{1}{48}K^2 +\mc{O}\left( W^4 + W^2 K^2 + K^4\right)\right) , & p=1, \\ \displaystyle \pm k^{\text{ex}}\left( 1 + \frac{1}{12}\left( \frac{\delta({\WH{\omega}};\mathbf{p})}{\mathrm{e}psilon({\WH{\omega}};\mathbf{p})} -\frac{1}{2} \right) W^2 +\mc{O}\left( W^4 +K^{6}\right)\right) , & p=2,3,\\%p\geq2 \text{ \ and p is even},\\ \mathrm{e}nd{array} \right. \renewcommand{1}{1} \mathrm{e}nd{align} in the case of $K\ll 1$ and $W\ll 1$. 
They can be further written as \begin{align} \label{eq:disp_LF_DG_CE2} \renewcommand{1}{2} k^{\text{LF}}_{\text{DG}^{\text{ phys}},p} = \left\{\begin{array}{ll} \displaystyle \pm k^{\text{ex}}\left( 1 + \frac{1}{12}\left( \frac{\delta({\WH{\omega}};\mathbf{p})}{\mathrm{e}psilon({\WH{\omega}};\mathbf{p})} -\frac{1}{2} + \frac{2\mathrm{e}psilon({\WH{\omega}};\mathbf{p})}{\mathrm{e}psilon_{\mathrm{i}nfty}\nu^2} \right) W^2 +\mc{O}\left( W^4 \right)\right) , & p=0, \\ \displaystyle \pm k^{\text{ex}}\left( 1 + \frac{1}{12}\left( \frac{\delta({\WH{\omega}};\mathbf{p})}{\mathrm{e}psilon({\WH{\omega}};\mathbf{p})} -\frac{1}{2} - \frac{\mathrm{e}psilon({\WH{\omega}};\mathbf{p})}{4\mathrm{e}psilon_{\mathrm{i}nfty}\nu^2}\right) W^2 +\mc{O}\left( W^4 \right)\right) , & p=1, \\ \displaystyle \pm k^{\text{ex}}\left( 1 + \frac{1}{12}\left( \frac{\delta({\WH{\omega}};\mathbf{p})}{\mathrm{e}psilon({\WH{\omega}};\mathbf{p})} -\frac{1}{2} \right) W^2 +\mc{O}\left( W^4 \right)\right) , & p=2, 3, \\ \mathrm{e}nd{array} \right. \renewcommand{1}{1} \mathrm{e}nd{align} with $W\ll 1$ and a fixed CFL number $\nu$. \mathrm{i}tem When using the alternating flux, i.e. $\alpha=\pm1/2$ and $\beta_{1}=\beta_{2}=0$, there are only two solutions, corresponding to the physical modes, \begin{align} \label{eq:disp_LF_DG_AL1} \renewcommand{1}{2} k^{\text{LF}}_{\text{DG}^{\text{ phys}},p} = \left\{\begin{array}{ll} \displaystyle \pm k^{\text{ex}}\left( 1 + \frac{1}{12}\left( \frac{\delta({\WH{\omega}};\mathbf{p})}{\mathrm{e}psilon({\WH{\omega}};\mathbf{p})} -\frac{1}{2} \right) W^2 +\frac{1}{24}K^2 +\mc{O}\left( W^4 + W^2K^2 + K^4\right)\right) , & p=0, \\ \displaystyle \pm k^{\text{ex}}\left( 1 + \frac{1}{12}\left( \frac{\delta({\WH{\omega}};\mathbf{p})}{\mathrm{e}psilon({\WH{\omega}};\mathbf{p})} -\frac{1}{2} \right) W^2 +\mc{O}\left( W^4 + K^{2p+2}\right)\right) , & p=1, 2, 3,\\ \mathrm{e}nd{array} \right. 
\renewcommand{1}{1} \mathrm{e}nd{align} in the case of $K\ll 1$ and $W\ll 1$, or \begin{align} \label{eq:disp_LF_DG_AL2} \renewcommand{1}{2} k^{\text{LF}}_{\text{DG}^{\text{ phys}},p} = \left\{\begin{array}{ll} \displaystyle \pm k^{\text{ex}}\left( 1 + \frac{1}{12}\left( \frac{\delta({\WH{\omega}};\mathbf{p})}{\mathrm{e}psilon({\WH{\omega}};\mathbf{p})} -\frac{1}{2} + \frac{\mathrm{e}psilon({\WH{\omega}};\mathbf{p})}{2\mathrm{e}psilon_{\mathrm{i}nfty}\nu^2} \right) W^2 +\mc{O}\left( W^4 \right)\right) , & p=0, \\ \displaystyle \pm k^{\text{ex}}\left( 1 + \frac{1}{12}\left( \frac{\delta({\WH{\omega}};\mathbf{p})}{\mathrm{e}psilon({\WH{\omega}};\mathbf{p})} -\frac{1}{2} \right) W^2 +\mc{O}\left( W^4 \right)\right) , & p=1, 2, 3.\\ \mathrm{e}nd{array} \right. \renewcommand{1}{1} \mathrm{e}nd{align} with $W\ll 1$ and a fixed CFL number $\nu$. \mathrm{i}tem When using the upwind flux, i.e. $\alpha=0$, $\displaystyle \beta_{1}=\frac{1}{2\sqrt{\mathrm{e}psilon_{\mathrm{i}nfty}}}$ and $\displaystyle \beta_{2}=\frac{\sqrt{\mathrm{e}psilon_{\mathrm{i}nfty}}}{2}$, there are four solutions. The two physical modes are \begin{align} \label{eq:disp_LF_DG_UP1} \renewcommand{1}{2} k^{\text{LF}}_{\text{DG}^{\text{ phys}},p} = \left\{\begin{array}{ll} \displaystyle \pm k^{\text{ex}} \left( 1 +\frac{1}{2} i B +\mc{O}\left(W^2 + K^2 \right) \right) , & p=0, \\ \displaystyle \pm k^{\text{ex}} \left( 1 + \frac{1}{12}\left( \frac{\delta({\WH{\omega}};\mathbf{p})}{\mathrm{e}psilon({\WH{\omega}};\mathbf{p})} -\frac{1}{2} \right) W^2 +\mc{O}\left(W^4 +i K^{2p} B \right) \right), & p=1, 2, 3, \\ \mathrm{e}nd{array} \right. 
\renewcommand{1}{1} \mathrm{e}nd{align} which can also be written as \begin{align} \label{eq:disp_LF_DG_UP2} \renewcommand{1}{2} k^{\text{LF}}_{\text{DG}^{\text{ phys}},p} = \left\{\begin{array}{ll} \displaystyle \pm k^{\text{ex}} \left( 1 +i \frac{b/\omega}{2\sqrt{\mathrm{e}psilon_{\mathrm{i}nfty}}\,\nu} W +\mc{O}\left(W^2 \right) \right) , & p=0, \\ \displaystyle \pm k^{\text{ex}} \left( 1 + \frac{1}{12}\left( \frac{\delta({\WH{\omega}};\mathbf{p})}{\mathrm{e}psilon({\WH{\omega}};\mathbf{p})} -\frac{1}{2} \right) W^2 +\mc{O}\left( i W^3 \right) \right), & p=1, \\ \displaystyle \pm k^{\text{ex}} \left( 1+ \frac{1}{12}\left( \frac{\delta({\WH{\omega}};\mathbf{p})}{\mathrm{e}psilon({\WH{\omega}};\mathbf{p})} -\frac{1}{2} \right) W^2 +\mc{O}\left( W^4 \right) \right), & p=2, 3,\\ \mathrm{e}nd{array} \right. \renewcommand{1}{1} \mathrm{e}nd{align} with $W\ll 1$ and a fixed CFL number $\nu$. \mathrm{e}nd{itemize} The formulations above demonstrate that all fully discrete schemes are second order accurate in numerical dispersion, except for the upwind flux with $P^{0}$, for which the error is of first order. Comparing the leading error terms with the same flux but with different $p$ values, we can see that the temporal error would be dominant when $p\geq2$ by upwind flux or central flux and $p\geq1$ by alternating fluxes. Moreover, it is observed that the leading error terms for high order schemes, when the temporal error dominates, are the same with the FD schemes \mathrm{e}qref{eq:disp_FD_LF1} and \mathrm{e}qref{eq:disp_FD_LF2}, which come from the time discretization \mathrm{e}qref{eq:dis_LF}. In particular, the leading terms of DG scheme with alternating flux are the same as those of FD scheme. Hence, we can also have counterintuitive results that the lower order scheme performs better than higher order scheme when numerical dispersion is concerned for some given dispersive media and discretization parameters. 
Similar results are observed for other numerical fluxes as well as for the trapezoidal-DG schemes in the next section. \subsection{Fully discrete dispersion analysis: trapezoidal-DG schemes} We can deduce that $\mathcal{A}^{\text{TP}}_{\text{DG},p}$ is given by \renewcommand{\arraystretch}{1.5} \begin{align*} \begin{small} \begin{pmatrix} -i\sin(\frac{W}{2}) \mc{I}+\frac{\Delta t}{2}\cos(\frac{W}{2})\mc{R} & \frac{\Delta t}{2}\cos(\frac{W}{2})\mc{P} & 0 & 0\\ \frac{\Delta t}{2}\cos(\frac{W}{2})\widetilde{\mc{P}} & -i\epsilon_{\infty}\sin(\frac{W}{2})\mc{I}+\frac{\Delta t}{2} \cos(\frac{W}{2})\widetilde{\mc{R}} & -i\sin(\frac{W}{2})\mc{I} & 0\\ 0 & 0 & i\sin(\frac{W}{2}) \mathcal{I} & \frac{\Delta t}{2}\cos(\frac{W}{2})\mathcal{I}\\ 0 & \frac{\Delta t}{2}\omega_{p}^{2}\cos(\frac{W}{2})\mathcal{I} & -\frac{\Delta t}{2}\omega_{1}^{2}\cos(\frac{W}{2})\mathcal{I} & (i\sin(\frac{W}{2})-\gamma\Delta t \cos(\frac{W}{2})) \mathcal{I} \\ \end{pmatrix}. \end{small} \end{align*} \renewcommand{\arraystretch}{1} Because we multiply $\mc{P}$, $\widetilde{\mc{P}}$, $\mc{R}$ and $\widetilde{\mc{R}}$ by the same factor $\cos(\frac{W}{2})$, it is easy to check that a theorem similar to Theorem \ref{thm1} holds for this fully discrete scheme. \begin{theorem} \label{thm3} Consider the fully discrete trapezoidal-DG scheme with $V^{p}_{h}$ as the discrete space, then $k^{\text{TP}}_{\text{DG},p}$ are the roots of a quartic polynomial equation in terms of $\xi=e^{ik^{\text{TP}}_{\text{DG},p}h}$ if $\alpha^2+\beta_{1}\beta_{2}\ne1/4$, and $k^{\text{TP}}_{\text{DG},p}$ are the roots of a quadratic polynomial equation in terms of $\xi=e^{ik^{\text{TP}}_{\text{DG},p}h}$ when $\alpha^2+\beta_{1}\beta_{2}=1/4.$ \end{theorem} Below, we list the physical modes $k^{\text{TP}}_{\text{DG},p}$ for $p\geq0$, and perform an asymptotic analysis when $W\ll 1$ and $K\ll 1.$ \begin{itemize} \item When using the central flux, i.e.
$\alpha=\beta_{1}=\beta_{2}=0$, we have four solutions, and two of them correspond to the physical modes. When $W\ll1$ and $K\ll 1$, the physical solutions have the form as \begin{align} \label{eq:disp_TP_DG_CE1} \renewcommand{1}{2} k^{\text{TP}}_{\text{DG}^{\text{ phys}},p} = \left\{\begin{array}{ll} \displaystyle \pm k^{\text{ex}}\left( 1 + \frac{1}{12}\left( \frac{\delta({\WH{\omega}};\mathbf{p})}{\mathrm{e}psilon({\WH{\omega}};\mathbf{p})} +1 \right) W^2 +\frac{1}{6}K^2 +\mc{O}\left( W^4 + W^2K^2 + K^4\right)\right) , & p=0, \\ \displaystyle \pm k^{\text{ex}}\left( 1 + \frac{1}{12}\left( \frac{\delta({\WH{\omega}};\mathbf{p})}{\mathrm{e}psilon({\WH{\omega}};\mathbf{p})} +1 \right) W^2 - \frac{1}{48}K^2 +\mc{O}\left( W^4 + W^2K^2 + K^4 \right)\right) , & p=1, \\ \displaystyle \pm k^{\text{ex}}\left( 1 + \frac{1}{12}\left( \frac{\delta({\WH{\omega}};\mathbf{p})}{\mathrm{e}psilon({\WH{\omega}};\mathbf{p})} +1 \right) W^2 +\mc{O}\left( W^4 +K^{6}\right)\right) , & p=2, 3,\\% p\geq2 \text{ \ and p is even},\\ \mathrm{e}nd{array} \right. 
\renewcommand{1}{1} \mathrm{e}nd{align} and can be further rewritten into \begin{align} \label{eq:disp_TP_DG_CE2} \renewcommand{1}{2} k^{\text{TP}}_{\text{DG}^{\text{ phys}},p} = \left\{\begin{array}{ll} \displaystyle \pm k^{\text{ex}}\left( 1 + \frac{1}{12}\left( \frac{\delta({\WH{\omega}};\mathbf{p})}{\mathrm{e}psilon({\WH{\omega}};\mathbf{p})} +1 + \frac{2\mathrm{e}psilon({\WH{\omega}};\mathbf{p})}{\mathrm{e}psilon_{\mathrm{i}nfty}\nu^2} \right) W^2 +\mc{O}\left( W^4 \right)\right) , & p=0, \\ \displaystyle \pm k^{\text{ex}}\left( 1 + \frac{1}{12}\left( \frac{\delta({\WH{\omega}};\mathbf{p})}{\mathrm{e}psilon({\WH{\omega}};\mathbf{p})} +1 - \frac{\mathrm{e}psilon({\WH{\omega}};\mathbf{p})}{4\mathrm{e}psilon_{\mathrm{i}nfty}\nu^2} \right) W^2 +\mc{O}\left( W^4 \right)\right) , & p=1, \\ \displaystyle \pm k^{\text{ex}}\left( 1 + \frac{1}{12}\left( \frac{\delta({\WH{\omega}};\mathbf{p})}{\mathrm{e}psilon({\WH{\omega}};\mathbf{p})} +1 \right) W^2 +\mc{O}\left( W^4 \right)\right) , & p=2, 3.\\ \mathrm{e}nd{array} \right. \renewcommand{1}{1} \mathrm{e}nd{align} in the case of $W\ll 1$ and with a fixed CFL number $\nu.$ \mathrm{i}tem When using the alternating flux, i.e. $\alpha=\pm1/2$ and $\beta_{1}=\beta_{2}=0$, there are only two solutions corresponding to the physical modes, \renewcommand{1}{2} \begin{align} \label{eq:disp_TP_DG_AL1} k^{\text{TP}}_{\text{DG}^{\text{ phys}},p} = \left\{\begin{array}{ll} \displaystyle \pm k^{\text{ex}}\left( 1 + \frac{1}{12}\left( \frac{\delta({\WH{\omega}};\mathbf{p})}{\mathrm{e}psilon({\WH{\omega}};\mathbf{p})} +1 \right) W^2 +\frac{1}{24}K^2 +\mc{O}\left( W^4 + W^2 K^2 + K^4 \right)\right) , & p=0, \\ \displaystyle \pm k^{\text{ex}}\left( 1 + \frac{1}{12}\left( \frac{\delta({\WH{\omega}};\mathbf{p})}{\mathrm{e}psilon({\WH{\omega}};\mathbf{p})} +1 \right) W^2 +\mc{O}\left( W^4 + K^{2p+2}\right)\right) , & p=1, 2, 3,\\ \mathrm{e}nd{array} \right. 
\mathrm{e}nd{align} \renewcommand{1}{1} in the case of $W\ll 1$ and $K\ll 1$, or \renewcommand{1}{2} \begin{align} \label{eq:disp_TP_DG_AL2} k^{\text{TP}}_{\text{DG}^{\text{ phys}},p} = \left\{\begin{array}{ll} \displaystyle \pm k^{\text{ex}}\left( 1 + \frac{1}{12}\left( \frac{\delta({\WH{\omega}};\mathbf{p})}{\mathrm{e}psilon({\WH{\omega}};\mathbf{p})} +1 + \frac{\mathrm{e}psilon({\WH{\omega}};\mathbf{p})}{2\mathrm{e}psilon_{\mathrm{i}nfty}\nu^2} \right) W^2 +\mc{O}\left( W^4 \right)\right) , & p=0, \\ \displaystyle \pm k^{\text{ex}}\left( 1 + \frac{1}{12}\left( \frac{\delta({\WH{\omega}};\mathbf{p})}{\mathrm{e}psilon({\WH{\omega}};\mathbf{p})} +1 \right) W^2 +\mc{O}\left( W^4 \right)\right) , & p=1, 2, 3.\\ \mathrm{e}nd{array} \right. \mathrm{e}nd{align} \renewcommand{1}{1} in the case of $W\ll 1$ and with a fixed CFL number $\nu.$ \mathrm{i}tem When using the upwind flux, i.e. $\alpha=0$, $\beta_{1}=\frac{1}{2\sqrt{\mathrm{e}psilon_{\mathrm{i}nfty}}}$ and $\beta_{2}=\frac{\sqrt{\mathrm{e}psilon_{\mathrm{i}nfty}}}{2}$, there are only two solutions corresponding to the physical modes, \renewcommand{1}{2} \begin{align} \label{eq:disp_TP_DG_UP1} k^{\text{TP}}_{\text{DG}^{\text{ phys}},p} = \left\{\begin{array}{ll} \displaystyle \pm k^{\text{ex}} \left( 1 +\frac{1}{2} i B +\mc{O}\left(W^2 + K^2 \right) \right) , & p=0, \\ \displaystyle \pm k^{\text{ex}} \left( 1 + \frac{1}{12}\left( \frac{\delta({\WH{\omega}};\mathbf{p})}{\mathrm{e}psilon({\WH{\omega}};\mathbf{p})} +1 \right) W^2 +\mc{O}\left(W^4 +i K^{2p} B \right) \right), & p=1, 2, 3, \\ \mathrm{e}nd{array} \right. 
\mathrm{e}nd{align} \renewcommand{1}{1} which can be rewritten as \renewcommand{1}{2} \begin{align} \label{eq:disp_TP_DG_UP2} k^{\text{TP}}_{\text{DG}^{\text{ phys}},p} = \left\{\begin{array}{ll} \displaystyle \pm k^{\text{ex}} \left( 1 +i \frac{b/\omega}{2\sqrt{\mathrm{e}psilon_{\mathrm{i}nfty}}\nu} W +\mc{O}\left(W^2 \right) \right) , & p=0, \\ \displaystyle \pm k^{\text{ex}} \left( 1 + \frac{1}{12}\left( \frac{\delta({\WH{\omega}};\mathbf{p})}{\mathrm{e}psilon({\WH{\omega}};\mathbf{p})} +1 \right) W^2 +\mc{O}\left( i W^3 \right) \right), & p=1, \\ \displaystyle \pm k^{\text{ex}} \left( 1+ \frac{1}{12}\left( \frac{\delta({\WH{\omega}};\mathbf{p})}{\mathrm{e}psilon({\WH{\omega}};\mathbf{p})} +1 \right) W^2 +\mc{O}\left( W^4 \right) \right), & p=2, 3,\\ \mathrm{e}nd{array} \right. \mathrm{e}nd{align} \renewcommand{1}{1} in the case of $W\ll 1$ and with a fixed CFL number $\nu.$ \mathrm{e}nd{itemize} We have similar conclusions as those for the fully discrete leap-frog DG schemes, except that the leading error terms for high order schemes come from the fully implicit time discretization \mathrm{e}qref{eq:dis_TP}. \subsection{Comparison among fully discrete DG schemes} In our previous work \cite{bokil2017energy}, we have proved that the fully discrete DG schemes based on the trapezoidal rule is unconditionally stable and the leap-frog schemes are conditionally stable. Following the proof in \cite{bokil2017energy}, we can find $\nu^{p}_{max}$ such that under the condition $\nu\leq \nu^{p}_{max}$, the leap-frog schemes using $P^{p}$ space are stable . Those $\nu^{p}_{max}$ values for $p=0,\ldots,3, \mathrm{i}nfty$ are listed in Table 6.1. For comparison, we also list the CFL condition for FD scheme for various $M$ values in the same table. We can see that the CFL number for DG scheme is much smaller than that for FD scheme, particularly for high order case. 
\renewcommand{\arraystretch}{1.5} \begin{table}[H] \centering \caption{$\nu^{p}_{max}$ for DG scheme and $\nu^{2M}_{max}$ for FD scheme.} \label{tab:CFL}
\begin{tabular}{c|cccccc} & $p=0$ & $p=1$ & $p=2$ & $p=3$ & & $p\rightarrow\infty$ \\ \hline DG-CE & 1 & 0.211325 & 0.101287 & 0.0605268 & & 0 \\ DG-AL & 1 & 0.192450 & 0.089115 & 0.0521629 & & 0\\ DG-UP & 1 & 0.211325 & 0.101287 & 0.0605268 & & 0 \\\hline\hline & $M=1$ & $M=2$ & $M=3$ & $M=4$ & $M=5$ & $M\rightarrow\infty$ \\\hline FD & 1 & 0.857143 & 0.805369 & 0.777418 & 0.759479 & 0.636620 \\ \end{tabular} \end{table}
\renewcommand{\arraystretch}{1} In Figure \ref{Fig:Phase_Error_DG_fully1}, we plot the relative phase errors of fully discrete DG schemes with leap-frog discretization for $W_1= \pi/30, \pi/300$ using material parameters \eqref{eq:parameter}. We can observe that the overall behavior of the plot with $W_1= \pi/30$ is quite different from the FD plots and the plots obtained with $W_1= \pi/300$, and the magnitude of the errors is very large. This phenomenon results from $\displaystyle\omega_1 h = \frac{1}{\sqrt{\epsilon_{\infty}}\,\nu}W_1$ and the tiny CFL numbers imposed by the stability condition, which make the mesh size $h$ extra large. We conclude that the small CFL number is one disadvantage of high order DG schemes. When comparing the figures obtained with $W_1= \pi/300$ using the three numerical fluxes, it is clear that the alternating flux has the smallest error, while the error obtained by the upwind flux is the largest. The overall dependence of the error on $\WH{\omega}$ is very similar to that of the FD schemes.
\begin{figure}[h] \centering \mathrm{i}ncludegraphics[scale= 0.28] {pics/Phase_Error_LF_AL_30-eps-converted-to.pdf} \mathrm{i}ncludegraphics[scale= 0.28] {pics/Phase_Error_LF_CE_30-eps-converted-to.pdf} \mathrm{i}ncludegraphics[scale= 0.28] {pics/Phase_Error_LF_UP_30-eps-converted-to.pdf}\\ \mathrm{i}ncludegraphics[scale= 0.28] {pics/Phase_Error_LF_AL-eps-converted-to.pdf} \mathrm{i}ncludegraphics[scale= 0.28] {pics/Phase_Error_LF_CE-eps-converted-to.pdf} \mathrm{i}ncludegraphics[scale= 0.28] {pics/Phase_Error_LF_UP-eps-converted-to.pdf} \caption{The relative phase error in physical modes of the fully discrete DG schemes with leap-frog time discretization, using $\nu/\nu_{max}^{p}=0.7$. First row: $W_1=\pi/30$ ; second row: $W_1=\pi/300.$ } \label{Fig:Phase_Error_DG_fully1} \mathrm{e}nd{figure} Next, we consider the unconditionally stable DG scheme with trapezoidal rule and varying CFL numbers. Figure \ref{Fig:Phase_Error_DG_fully2} shows the contour plots of the dispersion error at $\WH{\omega}=1$ with $(W_1,\omega_{1} h) \mathrm{i}n[0.05,0.3]\times[0.01,0.1]$. It is observed that DG-AL with $p\geq1$ have horizontal contour lines, indicating dispersion errors are dominated by temporal ones. In comparison, DG-UP and DG-CE have horizontal contour lines when $p\geq2$. The values of numerical dispersion errors obtained by high order DG schemes are very similar, which also illustrates the dominant role of temporal errors. This observation is consistent with our theoretical analysis. 
\begin{figure} \centering \mathrm{i}ncludegraphics[scale= 0.18] {pics/Phase_Error_TP_AL_P02-eps-converted-to.pdf} \mathrm{i}ncludegraphics[scale= 0.18] {pics/Phase_Error_TP_AL_P12-eps-converted-to.pdf} \mathrm{i}ncludegraphics[scale= 0.18] {pics/Phase_Error_TP_AL_P22-eps-converted-to.pdf}\\ \mathrm{i}ncludegraphics[scale= 0.18] {pics/Phase_Error_TP_CE_P02-eps-converted-to.pdf} \mathrm{i}ncludegraphics[scale= 0.18] {pics/Phase_Error_TP_CE_P12-eps-converted-to.pdf} \mathrm{i}ncludegraphics[scale= 0.18] {pics/Phase_Error_TP_CE_P22-eps-converted-to.pdf}\\ \mathrm{i}ncludegraphics[scale= 0.18] {pics/Phase_Error_TP_UP_P02-eps-converted-to.pdf} \mathrm{i}ncludegraphics[scale= 0.18] {pics/Phase_Error_TP_UP_P12-eps-converted-to.pdf} \mathrm{i}ncludegraphics[scale= 0.18] {pics/Phase_Error_TP_UP_P22-eps-converted-to.pdf} \caption{The contour plot of relative phase error of fully discrete DG schemes for the physical modes with trapezoid rule. ${\WH{\omega}}=1$. First row: DG-AL; second row: DG-CE; third row: DG-UP.} \label{Fig:Phase_Error_DG_fully2} \mathrm{e}nd{figure} \section{Benchmark on Physical Quantities} \label{sec:numerical} In this section, we will verify the performance of the finite difference and discontinuous Galerkin methods by plotting quantities that are important for wave propagation such as the normalized ratio between the numerical and exact phase velocity (also refractive index); normalized attenuation constant; normalized energy velocity; and normalized group velocity, to validate the performance of the numerical methods (see \cite{gilles2000comparison}). The model parameters in \mathrm{e}qref{eq:parameter} are used in computing all the quantities and plots below. We first define $\psi$, given as \begin{align} \displaystyle \label{def:reflex} \psi = \frac{k}{\omega} = \sqrt{\mathrm{e}psilon({\WH{\omega}};\mathbf{p}) }. 
\mathrm{e}nd{align} We note that $\psi$ is the complex index of refraction of the medium, whose real part is the real refractive index of the medium, whereas the imaginary part is related to the absorption or extinction coefficient \cite{oughstun1988velocity}. We use $\Re$ and $\Im$ to denote the real and the imaginary parts of a complex number. Let the superscripts $E$ and $ N$ denote the value of a quantity related to the exact solution of system \mathrm{e}qref{eq:sys} and a numerical approximation, respectively. We have the following definitions (see \cite{gilles2000comparison}): \begin{itemize} \mathrm{i}tem \textbf{Normalized Phase Velocity}: We consider the ratio between the real parts of the exact and numerical phase velocities, with the phase velocity, $v_p$, defined as $v_{p}=\omega/k = 1/\psi $. We define \begin{align} \label{eq:pha_vel} \text{Normalized Phase Velocity} = \frac{\Re(v_p^N)}{\Re(v_p^E)}. \mathrm{e}nd{align} \mathrm{i}tem \textbf{Normalized Attenuation Constant}: We consider the ratio between the imaginary parts of the exact and numerical $\psi$, which is also the ratio between the imaginary parts of the exact and numerical indices of refraction. We define \begin{align} \label{eq:att_con} \text{Normalized Attenuation Constant} = \frac{\Im\left( \psi^N \right)}{\Im\left( \psi^E \right)}. \mathrm{e}nd{align} \mathrm{i}tem \textbf{Normalized Energy Velocity}: The {\mathrm{i}t velocity of energy transport} of a (monochromatic) plane-wave field is an important concept of wave propagation in a dispersive medium. In \cite{oughstun1988velocity} this velocity is defined as a ratio of the time-average value of the Poynting vector to the total time-average electromagnetic energy density stored in both the field and the medium. 
The normalized energy velocity is a quantity that is defined (see \cite{gilles2000comparison, oughstun1988velocity}) as a function of the real and imaginary parts of the quantity $\psi$ given as \[\displaystyle \text{Energy Velocity} = \left[ \Re(\psi) + \frac{\left( \Re\left( \psi^2\right) - \mathrm{e}psilon_s\right) \left( \Re\left( \psi^2\right) - \mathrm{e}psilon_\mathrm{i}nfty\right) + (\Im\left( \psi^2 \right))^2}{\left( \mathrm{e}psilon_s - \mathrm{e}psilon_\mathrm{i}nfty\right) \Re\left( \psi \right)} \right]^{-1}. \] Based on the definition of the energy transport velocity, we define the ratio between the exact and numerical energy transport velocity to be the normalized energy velocity as \begin{align} \label{eq:ene_vel} \text{Normalized Energy Velocity} = \frac{\text{Energy Velocity}\,^N }{\text{Energy Velocity}\,^E }. \mathrm{e}nd{align} \mathrm{i}tem \textbf{Normalized Group Velocity}: We define the normalized group velocity to be the real part of the ratio of group velocities of the exact and numerical solutions. We have \begin{align} \label{eq:grp_vel} \text{Normalized Group Velocity} = \Re \left( \frac{v_g^N}{v_g^E} \right), \mathrm{e}nd{align} where the group velocity is defined by $\displaystyle v_g = \frac{\partial \omega}{\partial k}$. 
Here, both $v^{E}_g$ and $v^{N}_g$ are obtained numerically by $$\displaystyle (v_g)^{-1} = \frac{\partial k}{\partial \WH{\omega}} \frac{\partial \WH{\omega}}{\partial \omega} \approx \frac{k(\WH{\omega}+0.001)-k(\WH{\omega})}{0.001} \frac{\partial \WH{\omega}}{\partial \omega}.$$ \mathrm{e}nd{itemize} In Figures \ref{Fig:phy_FD1} and \ref{Fig:FD_TP_phy}, we plot the four physical quantities defined in \mathrm{e}qref{eq:pha_vel}-\mathrm{e}qref{eq:grp_vel} for the leap-frog and trapezoidal FD schemes in various ranges of values for ${\WH{\omega}}$: below resonance (${\WH{\omega}} < 1$), near resonance (${\WH{\omega}} \approx 1$), at the upper edge of the medium absorption band (${\WH{\omega}} \approx 1.527$), and far above resonance (${\WH{\omega}} >3$). Figure \ref{Fig:phy_FD1} offers excellent agreement with the plots in \cite{gilles2000comparison} for the (2,2) Yee scheme (leap-frog FDTD scheme with $M=1$) and a (2,4) leap-frog FDTD scheme ($M=2$). Both schemes have large errors at the resonance frequency and the upper edge of the medium absorption band ${\WH{\omega}}=\sqrt{\mathrm{e}psilon_{s}/\mathrm{e}psilon_{\mathrm{i}nfty}}$. Higher order schemes have values for the physical quantities that are closer to 1, which indicates smaller dispersion error with increase in the spatial order of the scheme. We note that, while the increase in spatial order reduces the four physical quantities near resonance for both the leap-frog and trapezoidal FDTD methods, there is virtually no change with spatial order at the upper edge of the medium absorption band. This is also true for the DG schemes. A comparison between Figures \ref{Fig:phy_FD1} and \ref{Fig:FD_TP_phy} suggests that the main differences between the two temporal discretizations can be observed for frequencies below and far beyond resonance. For ${\WH{\omega}}<1,$ the plots obtained by the trapezoidal FDTD schemes are monotone. 
This is not the case for the leap-frog FDTD method as shown in Figure \ref{Fig:phy_FD1}. The results can be understood by comparing equations \mathrm{e}qref{eq:dis_LF} with \mathrm{e}qref{eq:dis_TP}. The leading error coefficients in the two time schemes are different, with one being monotone on ${\WH{\omega}}$ and the other not. For high frequencies, the leap-frog FDTD scheme can no longer resolve frequencies beyond $14.8$, when the fields start to decay exponentially and have an increasing phase velocity. This number changes to around $10$ for the trapezoidal scheme, which shows different resolution offered by the two temporal schemes. In Figure \ref{Fig:phy_DG_AL1}, we plot the four physical quantities defined in \mathrm{e}qref{eq:pha_vel}-\mathrm{e}qref{eq:grp_vel}, obtained by DG-AL scheme using trapezoidal time discretization with a fixed CFL number $\nu=0.7.$ This choice is made based on previous observations that the DG-AL performs the best among all three fluxes. The overall behaviors of the physical quantities for FD and DG schemes are very similar when comparing Figure \ref{Fig:FD_TP_phy} with Figure \ref{Fig:phy_DG_AL1}. The main difference lies in the last column for high frequencies. The increasing resolution in the higher order DG scheme is evident, while increasing order does not impact this much for FD schemes. Thus, high order DG schemes in space can have a better performance in resolving high frequencies when compared with the FD scheme using the same mesh size. Similar conclusion holds with leap frog time discretization, and the plots are omitted for brevity. 
\begin{figure} \centering \mathrm{i}ncludegraphics[scale=0.258]{pics/PhVelo_FullLF1} \mathrm{i}ncludegraphics[scale=0.258]{pics/PhVelo_FullLF2} \mathrm{i}ncludegraphics[scale=0.258]{pics/PhVelo_FullLF3} \mathrm{i}ncludegraphics[scale=0.258]{pics/PhVelo_FullLF4} \\ \mathrm{i}ncludegraphics[scale=0.258]{pics/AttVelo_FullLF1} \mathrm{i}ncludegraphics[scale=0.258]{pics/AttVelo_FullLF2} \mathrm{i}ncludegraphics[scale=0.258]{pics/AttVelo_FullLF3} \mathrm{i}ncludegraphics[scale=0.258]{pics/AttVelo_FullLF4} \\ \mathrm{i}ncludegraphics[scale=0.258]{pics/EnVelo_FullLF1} \mathrm{i}ncludegraphics[scale=0.258]{pics/EnVelo_FullLF2} \mathrm{i}ncludegraphics[scale=0.258]{pics/EnVelo_FullLF3} \mathrm{i}ncludegraphics[scale=0.258]{pics/EnVelo_FullLF4} \\ \mathrm{i}ncludegraphics[scale=0.258]{pics/GpVelo_FullLF1} \mathrm{i}ncludegraphics[scale=0.258]{pics/GpVelo_FullLF2} \mathrm{i}ncludegraphics[scale=0.258]{pics/GpVelo_FullLF3} \mathrm{i}ncludegraphics[scale=0.258]{pics/GpVelo_FullLF4} \caption{Results for the leap-frog time discretization and FD2M with CFL number $\nu/\nu^{2M}_{max}=0.7$. 
First row: Normalized phase velocity; Second row: normalized attenuation constants; Third row: normalized energy velocity; Fourth row: normalized group velocity.} \label{Fig:phy_FD1} \mathrm{e}nd{figure} \begin{figure} \centering \mathrm{i}ncludegraphics[scale=0.258]{pics/PhVelo_FullTP1} \mathrm{i}ncludegraphics[scale=0.258]{pics/PhVelo_FullTP2} \mathrm{i}ncludegraphics[scale=0.258]{pics/PhVelo_FullTP3} \mathrm{i}ncludegraphics[scale=0.258]{pics/PhVelo_FullTP4} \\ \mathrm{i}ncludegraphics[scale=0.258]{pics/AttVelo_FullTP1} \mathrm{i}ncludegraphics[scale=0.258]{pics/AttVelo_FullTP2} \mathrm{i}ncludegraphics[scale=0.258]{pics/AttVelo_FullTP3} \mathrm{i}ncludegraphics[scale=0.258]{pics/AttVelo_FullTP4} \\ \mathrm{i}ncludegraphics[scale=0.258]{pics/EnVelo_FullTP1} \mathrm{i}ncludegraphics[scale=0.258]{pics/EnVelo_FullTP2} \mathrm{i}ncludegraphics[scale=0.258]{pics/EnVelo_FullTP3} \mathrm{i}ncludegraphics[scale=0.258]{pics/EnVelo_FullTP4} \\ \mathrm{i}ncludegraphics[scale=0.258]{pics/GpVelo_FullTP1} \mathrm{i}ncludegraphics[scale=0.258]{pics/GpVelo_FullTP2} \mathrm{i}ncludegraphics[scale=0.258]{pics/GpVelo_FullTP3} \mathrm{i}ncludegraphics[scale=0.258]{pics/GpVelo_FullTP4} \caption{Results for the trapezoidal time discretization and FD2M with CFL number $\nu/\nu^{2M}_{\max}=0.7$. 
First row: Normalized phase velocity; Second row: normalized attenuation constants; Third row: normalized energy velocity; Fourth row: normalized group velocity.} \label{Fig:FD_TP_phy} \mathrm{e}nd{figure} \begin{figure} \centering \mathrm{i}ncludegraphics[scale= 0.258] {pics/Phase_Velo_TP_AL_21-eps-converted-to.pdf} \mathrm{i}ncludegraphics[scale= 0.258] {pics/Phase_Velo_TP_AL_22-eps-converted-to.pdf} \mathrm{i}ncludegraphics[scale= 0.258] {pics/Phase_Velo_TP_AL_23-eps-converted-to.pdf} \mathrm{i}ncludegraphics[scale= 0.258] {pics/Phase_Velo_TP_AL_24-eps-converted-to.pdf} \\ \mathrm{i}ncludegraphics[scale= 0.258] {pics/Att_Const_TP_AL_21-eps-converted-to.pdf} \mathrm{i}ncludegraphics[scale= 0.258] {pics/Att_Const_TP_AL_22-eps-converted-to.pdf} \mathrm{i}ncludegraphics[scale= 0.258] {pics/Att_Const_TP_AL_23-eps-converted-to.pdf} \mathrm{i}ncludegraphics[scale= 0.258] {pics/Att_Const_TP_AL_24-eps-converted-to.pdf}\\ \mathrm{i}ncludegraphics[scale= 0.258] {pics/Eng_Velo_TP_AL_21-eps-converted-to.pdf} \mathrm{i}ncludegraphics[scale= 0.258] {pics/Eng_Velo_TP_AL_22-eps-converted-to.pdf} \mathrm{i}ncludegraphics[scale= 0.258] {pics/Eng_Velo_TP_AL_23-eps-converted-to.pdf} \mathrm{i}ncludegraphics[scale= 0.258] {pics/Eng_Velo_TP_AL_24-eps-converted-to.pdf} \\ \mathrm{i}ncludegraphics[scale= 0.258] {pics/Grp_Velo_TP_AL_21-eps-converted-to.pdf} \mathrm{i}ncludegraphics[scale= 0.258] {pics/Grp_Velo_TP_AL_22-eps-converted-to.pdf} \mathrm{i}ncludegraphics[scale= 0.258] {pics/Grp_Velo_TP_AL_23-eps-converted-to.pdf} \mathrm{i}ncludegraphics[scale= 0.258] {pics/Grp_Velo_TP_AL_24-eps-converted-to.pdf} \\ \caption{Results for the trapezoidal time discretization and DG-AL with CFL number $\nu=0.7$. First row: Normalized phase velocity; Second row: normalized attenuation constants; Third row: normalized energy velocity; Fourth row: normalized group velocity. 
} \label{Fig:phy_DG_AL1} \mathrm{e}nd{figure} \section{Conclusions} \label{conclude} In this paper, we studied the exact and numerical dispersion relations of a one-dimensional Maxwell's equations in a linear dispersive material characterized by a single pole Lorentz model for electronic polarization with low loss (i.e. when $\WH{\gamma}$ is small). We consider two different high order spatial discretizations, the FD and DG methods, each coupled with two different second order temporal discretizations, leap-frog and trapezoidal integrators, to construct both semi-discrete and fully discrete schemes. In addition, for the DG schemes we have considered three different types of fluxes: central, upwind and alternating fluxes. Comparisons based on dispersion analysis are made of the FD and DG methods and the leap-frog and trapezoidal time discretizations. It is well known that the FD and DG (which are a class of finite element methods) schemes, both being very popular discretizations, differ quite a lot in how they simulate wave phenomenon in their discrete grids. For example, DG schemes work well for multi-dimensional problems and can be constructed on unstructured meshes for complicated geometries. The FD schemes are simpler to code, and are mostly defined on structured meshes. The extension of the FD methods to non-uniform and unstructured meshes are cumbersome. Both types of spatial discretizations can be designed with high spatial order accuracy. The FD scheme achieves this by extending the stencil of the discretization, while higher order polynomials are needed for the DG construction. When we express the dispersion relation for the discrete wavenumber as a function of the angular frequency $\omega$, the number of spurious modes will increase with $M$ (the accuracy order) of the FD scheme, while for DG schemes, the number of spurious modes is independent of $p$ (the polynomial order). 
However, as shown in the Appendix of the FD scheme, using an alternative description of phase error, when the discrete angular frequency $\omega$ is expressed as a function of the wave number $k$ the conclusions are reversed. Namely, there are no spurious modes for FD schemes, while more spurious modes will be present for higher order DG schemes, see \cite{cheng2017L2} for relevant discussions in free space. When comparing the order of numerical errors, the FD schemes manifest the same order of accuracy of the dispersion error and point-wise convergence error, while the DG schemes have higher order of accuracy in dispersion error than in the $L^2$ errors \cite{cohen1,cohen2} (superconvergence in dispersion error). The CFL numbers for the two methods when coupled with an explicit time stepping are also different. It is known that the CFL number will approach a constant other than zero when $M\rightarrow\mathrm{i}nfty$ for the FD scheme, but the CFL number will go to zero when $p\rightarrow\mathrm{i}nfty$ for the DG scheme. Therefore, high order DG schemes require much smaller time steps than high order FD schemes. Based on the numerical dispersion results in this paper, we observe that the physical dispersion of the material plays an important role in the numerical dispersion errors. For the low-loss materials considered, we can observe that the error is largest near the resonance frequency. This is no longer true for materials with high loss (i.e. when $\WH{\gamma}$ is not small). An interesting finding is that for some materials and discretization parameters, we observe counterintuitive results that the dispersion error of a low order scheme can be potentially smaller than that of high order schemes (see for example Figure \ref{Fig:coeff}). This demonstrates that the dispersion analysis conducted for free space may not be revealing for general dispersive media. 
We find that the second order accuracy of the temporal discretizations limits the accuracy of the numerical dispersion errors, and is a good motivator for considering high order temporal discretizations, which are non-trivial to construct for the case of dispersive Maxwell models \cite{Youngcold}. This limiting behavior in the medium absorption band is made clear by the difference in errors in the semi-discrete schemes versus the fully discrete schemes. In our future work we will investigate higher order temporal discretizations. \appendix \YJ{ change title} \begin{figure}[H] \centering \mathrm{i}ncludegraphics[scale=0.25]{pics/PhVelo_FullTP1} \mathrm{i}ncludegraphics[scale=0.25]{pics/PhVelo_FullTP2} \mathrm{i}ncludegraphics[scale=0.25]{pics/PhVelo_FullTP3} \mathrm{i}ncludegraphics[scale=0.25]{pics/PhVelo_FullTP4} \\ \mathrm{i}ncludegraphics[scale=0.25]{pics/AttVelo_FullTP1} \mathrm{i}ncludegraphics[scale=0.25]{pics/AttVelo_FullTP2} \mathrm{i}ncludegraphics[scale=0.25]{pics/AttVelo_FullTP3} \mathrm{i}ncludegraphics[scale=0.25]{pics/AttVelo_FullTP4} \\ \mathrm{i}ncludegraphics[scale=0.25]{pics/EnVelo_FullTP1} \mathrm{i}ncludegraphics[scale=0.25]{pics/EnVelo_FullTP2} \mathrm{i}ncludegraphics[scale=0.25]{pics/EnVelo_FullTP3} \mathrm{i}ncludegraphics[scale=0.25]{pics/EnVelo_FullTP4} \\ \mathrm{i}ncludegraphics[scale=0.25]{pics/GpVelo_FullTP1_v2} \mathrm{i}ncludegraphics[scale=0.25]{pics/GpVelo_FullTP2_v2} \mathrm{i}ncludegraphics[scale=0.25]{pics/GpVelo_FullTP3_v2} \mathrm{i}ncludegraphics[scale=0.25]{pics/GpVelo_FullTP4_v2} \\ \caption{Dispersion relations for trapezoidal scheme of FD with CFL number $\nu/\nu^{M}_{\max}=0.7$. 
First row: Normalized phase velocity, second row: normalized attenuation constants, third row: normalized energy velocity, fourth row: normalized group velocity.} \label{Fig:FD_TP_phy_app} \end{figure}
\begin{figure}[H] \centering
\includegraphics[scale=0.25]{pics/Phase_Velo_TP_AL_1.eps} \includegraphics[scale=0.25]{pics/Phase_Velo_TP_AL_2.eps} \includegraphics[scale=0.25]{pics/Phase_Velo_TP_AL_3.eps} \includegraphics[scale=0.25]{pics/Phase_Velo_TP_AL_4.eps}
\includegraphics[scale=0.25]{pics/Att_Const_TP_AL_1.eps} \includegraphics[scale=0.25]{pics/Att_Const_TP_AL_2.eps} \includegraphics[scale=0.25]{pics/Att_Const_TP_AL_3.eps} \includegraphics[scale=0.25]{pics/Att_Const_TP_AL_4.eps}
\includegraphics[scale=0.25]{pics/Eng_Velo_TP_AL_1.eps} \includegraphics[scale=0.25]{pics/Eng_Velo_TP_AL_2.eps} \includegraphics[scale=0.25]{pics/Eng_Velo_TP_AL_3.eps} \includegraphics[scale=0.25]{pics/Eng_Velo_TP_AL_4.eps}
\includegraphics[scale=0.25]{pics/Group_Velo_TP_AL_1.eps} \includegraphics[scale=0.25]{pics/Group_Velo_TP_AL_2.eps} \includegraphics[scale=0.25]{pics/Group_Velo_TP_AL_3.eps} \includegraphics[scale=0.25]{pics/Group_Velo_TP_AL_4.eps}
\caption{The physical quantities for the trapezoidal scheme of DG-AL with CFL number $\nu/\nu^{M}_{\max}=0.7$.
First row: Normalized phase velocity, second row: normalized attenuation constants, third row: normalized energy velocity, fourth row: normalized group velocity.} \label{Fig:AL_TP_phy} \end{figure}
\begin{figure}[H] \centering
\includegraphics[scale=0.25]{pics/Phase_Velo_TP_CE_1.eps} \includegraphics[scale=0.25]{pics/Phase_Velo_TP_CE_2.eps} \includegraphics[scale=0.25]{pics/Phase_Velo_TP_CE_3.eps} \includegraphics[scale=0.25]{pics/Phase_Velo_TP_CE_4.eps}
\includegraphics[scale=0.25]{pics/Att_Const_TP_CE_1.eps} \includegraphics[scale=0.25]{pics/Att_Const_TP_CE_2.eps} \includegraphics[scale=0.25]{pics/Att_Const_TP_CE_3.eps} \includegraphics[scale=0.25]{pics/Att_Const_TP_CE_4.eps}
\includegraphics[scale=0.25]{pics/Eng_Velo_TP_CE_1.eps} \includegraphics[scale=0.25]{pics/Eng_Velo_TP_CE_2.eps} \includegraphics[scale=0.25]{pics/Eng_Velo_TP_CE_3.eps} \includegraphics[scale=0.25]{pics/Eng_Velo_TP_CE_4.eps}
\includegraphics[scale=0.25]{pics/Group_Velo_TP_CE_1.eps} \includegraphics[scale=0.25]{pics/Group_Velo_TP_CE_2.eps} \includegraphics[scale=0.25]{pics/Group_Velo_TP_CE_3.eps} \includegraphics[scale=0.25]{pics/Group_Velo_TP_CE_4.eps}
\caption{The physical quantities for the trapezoidal scheme of DG-CE with CFL number $\nu/\nu^{M}_{\max}=0.7$. First row: Normalized phase velocity, second row: normalized attenuation constants, third row: normalized energy velocity, fourth row: normalized group velocity.} \label{Fig:CE_TP_phy} \end{figure}
\section{An Alternative Dispersion Analysis for Semi-Discrete Finite Difference Schemes} In this appendix, we provide an alternative method of analyzing the dispersion error of the semi-discrete-in-space high order FD schemes (FD2M).
We express the discrete angular frequency $\omega$ as a function of the continuous wavenumber $k\in\mathbb{R}$, and measure the relative errors that result for different $M \in \mathbb{N}$, with $2M$ being the spatial accuracy of the schemes. We introduce the following definitions:
\begin{align} \displaystyle \label{Nota} \WH{k}:= kh, \qquad F_{2M}(\WH{k}):= 2\sum_{p=1}^M \frac{[(2p-3)!!]^2}{(2p-1)!} \sin^{2p-1}\left( \frac{\WH{k}}{2} \right). \end{align}
\noindent For the exact dispersion relation of Maxwell's equations in a one spatial dimensional Lorentz dielectric, by solving $\det(\mathcal{A})=0$ with $\mathcal{A}$ given by \eqref{DisEx2}, we get the following quartic equation for the continuous angular frequency $\WH{\omega}^{\text{ex}} = \omega^{\text{ex}}/\omega_{1}$,
\begin{align} \displaystyle (\WH{\omega}^{\text{ex}})^4 + 2i\,\WH{\gamma}\,(\WH{\omega}^{\text{ex}})^3 - \frac{1}{\epsilon_\infty} \left( \epsilon_s + \frac{\WH{k}^2}{(\omega_1h)^2} \right) (\WH{\omega}^{\text{ex}})^2 - \frac{2i}{\epsilon_\infty} \WH{\gamma}\, \frac{\WH{k}^2}{(\omega_1h)^2} \, \WH{\omega}^{\text{ex}} + \frac{1}{\epsilon_\infty}\frac{\WH{k}^2}{(\omega_1h)^2} = 0. \label{qq6} \end{align}
\noindent Similarly, considering the dispersion relation of the semi-discrete FD2M scheme \eqref{Dissemi4}, we have
\begin{align} \displaystyle \label{qq9} (\WH{\omega}^{\text{FD},2M})^4 + 2i\,\WH{\gamma}\,(\WH{\omega}^{\text{FD},2M})^3 - \frac{1}{\epsilon_\infty} \left( \epsilon_s + \frac{F_{2M}(\WH{k})^2}{(\omega_1 h)^2} \right) (\WH{\omega}^{\text{FD},2M})^2 - \frac{2i}{\epsilon_\infty} \WH{\gamma}\, \frac{F_{2M}(\WH{k})^2}{(\omega_1 h)^2}\, \WH{\omega}^{\text{FD},2M} + \frac{1}{\epsilon_\infty} \frac{F_{2M}(\WH{k})^2}{(\omega_1 h)^2}= 0.
\mathrm{e}nd{align} Clearly, both \mathrm{e}qref{qq6} and \mathrm{e}qref{qq9} have four (complex) roots each. Therefore, the FD scheme has no spurious modes for the discrete angular frequency. To better understand the errors, similar to previous sections, we first consider the lossless material ($\WH{\gamma}=0$) as an example. In this case, only even order terms appear in \mathrm{e}qref{qq6} and \mathrm{e}qref{qq9}, and we can get \begin{subequations} \label{QAex} \begin{align} \displaystyle \WH{\omega}^{\text{ex}}_{1,2}( \WH{k} ) &= \pm \frac{1}{\sqrt{2}} \left[ \frac{\mathrm{e}psilon_s}{\mathrm{e}psilon_\mathrm{i}nfty} + \frac{\WH{k}^2}{\mathrm{e}psilon_\mathrm{i}nfty(\omega_1h)^2} - \sqrt{ \left( \frac{\mathrm{e}psilon_s}{\mathrm{e}psilon_\mathrm{i}nfty} + \frac{\WH{k}^2}{\mathrm{e}psilon_\mathrm{i}nfty(\omega_1h)^2} \right)^2 - \frac{4\WH{k}^2}{\mathrm{e}psilon_\mathrm{i}nfty(\omega_1h)^2} }\, \right]^{1/2} , \label{QAex1} \\ \WH{\omega}^{\text{ex}}_{3,4} (\WH{k}) &= \pm \frac{1}{\sqrt{2}} \left[ \frac{\mathrm{e}psilon_s}{\mathrm{e}psilon_\mathrm{i}nfty} + \frac{\WH{k}^2}{\mathrm{e}psilon_\mathrm{i}nfty(\omega_1h)^2} + \sqrt{ \left( \frac{\mathrm{e}psilon_s}{\mathrm{e}psilon_\mathrm{i}nfty} + \frac{\WH{k}^2}{\mathrm{e}psilon_\mathrm{i}nfty(\omega_1h)^2} \right)^2 - \frac{4\WH{k}^2}{\mathrm{e}psilon_\mathrm{i}nfty(\omega_1h)^2} }\, \right]^{1/2} , \label{QAex2} \mathrm{e}nd{align} \mathrm{e}nd{subequations} and $\WH{\omega}^{\text{FD},2M}_{1,2}( \WH{k} )=\WH{\omega}^{\text{ex}}_{1,2}(F_{2M}(\WH{k})), \quad \WH{\omega}^{\text{FD},2M}_{3,4}( \WH{k} )=\WH{\omega}^{\text{ex}}_{3,4}(F_{2M}(\WH{k})).$ In Figure \ref{Fig: AA2}, we present the relative dispersion errors with $\WH{k}\mathrm{i}n[0,2\pi]$ and the parameter values $$\mathrm{e}psilon_s = 5.25, \quad \mathrm{e}psilon_\mathrm{i}nfty = 2.25, \quad \omega_1 h = \frac{\pi}{30}.$$ In this figure, we can observe the decrease of error when $M$ (order of the scheme) increases. 
The numerical errors in the first and second solutions of the discrete angular frequency are smaller than those of the third and fourth solutions, which can be understood if we consider the small wavenumber limit. In this case, we can derive expressions for the relative phase error as
\begin{align} \renewcommand{\arraystretch}{2.5} \WH{\Psi}^{\,i}_{\text{FD},2M}(\WH{k}) := \left| \frac{\WH{\omega}^{\text{ex}}_{i}(\WH{k}) - \WH{\omega}^{\text{FD},2M}_{i}(\WH{k})}{\WH{\omega}^{\text{ex}}_{i}(\WH{k})} \right| = \left\{ \begin{array}{ll} \displaystyle \frac{[(2M-1)!!]^2}{2^{2M}(2M+1)!}\, \WH{k}^{2M} + \mathcal{O}(\WH{k}^{2M+2}), & i = 1, 2, \\ \displaystyle \frac{[(2M-1)!!]^2}{2^{2M}(2M+1)!} \,\frac{\epsilon_d}{\epsilon_{s}^2 } \,\frac{k^2}{\omega_1^2} \, \WH{k}^{2M} + \mathcal{O}(\WH{k}^{2M+2}), & i = 3, 4, \\ \end{array} \right. \renewcommand{\arraystretch}{1} \label{A5} \end{align}
which indicates a dispersion error of order $2M$ and is consistent with our previous conclusion (see Theorem \ref{thm5}). By comparing the coefficients, we verify that, for the parameters we consider, the leading error coefficient corresponding to $\WH{\omega}^{\text{FD},2M}_{3,4}( \WH{k} )$ is indeed much larger than that for $\WH{\omega}^{\text{FD},2M}_{1,2}( \WH{k} )$.
\begin{figure}[h] \centering \includegraphics[scale=0.3]{pics/RelE1_g0} \includegraphics[scale=0.3]{pics/RelE2_g0} \caption{Relative phase error \eqref{A5} for the spatial discretization FD2M with $\WH{\gamma}=0$. $\WH{k}\in[0,2\pi]$. Left: $i=1,2$; the inset in the left plot displays a zoomed-in region of the relative phase error for low values of $\WH{k}$; Right: $i=3,4$. } \label{Fig: AA2} \end{figure}
For a low-loss material, e.g.\ $\WH{\gamma}= 0.01,$ the conclusions are very similar. The error plots show no visible difference from the no-loss case, and are thus omitted. \end{document}
\begin{document} \draft \title{Absorption by cold Fermi atoms in a harmonic trap} \author{Gediminas Juzeli\={u}nas and Marius Ma\v{s}alas} \address{Institute of Theoretical Physics and Astronomy, A. Go\v{s}tauto 12, \\ Vilnius 2600, Lithuania } \date{\today} \maketitle \begin{abstract} We study the absorption spectrum for a strongly degenerate Fermi gas confined in a harmonic trap. The spectrum is calculated using both the exact summation and also the Thomas-Fermi (TF) approximation. In the latter case, relatively simple analytical expressions are obtained for the absorption lineshape at large number of trapped atoms. At zero temperature, the approximated lineshape is characterized by a $\left(1-z^2\right)^{5/2}$ dependence which agrees well with the exact numerical calculations. At non-zero temperature, the spectrum becomes broader, although remains non-Gaussian as long as the fermion gas is degenerate. The changes in the trap frequency for an electronically excited atom can introduce an additional line broadening. \end{abstract} \pacs{32.70.Jz, 42.50.Fx, 32.80.-t} In recent years there has been a great deal of interest in the dilute gas of trapped atoms cooled to temperatures below $1$ $\mu {\rm K}$. At such low temperatures, an important role is played by the quantum statistics of atoms. Bosons tend to occupy the lowest translational level of the trap to form the Bose - Einstein condensate \cite{anders:95,Ketterle,Hulet}. On the other hand, fermions can still occupy many trap levels (predominantly up to the Fermi level), as it was observed recently \cite{demarco:99}. The physical properties of a Fermi gas (such as the specific heat) depend on the number of atoms in the system \cite{Butts:97,Wallis}, as well as on the trap anisotropy \cite{Wallis}. The optical spectroscopy has proven to be useful in getting information about cold atomic gases \cite {anders:95,Ketterle,Hulet,demarco:98,Lewenst:94,Lewenst:99,javan}. 
The Fermi-Dirac (FD) statistics is known to change the optical response of the system compared to the classical one. The signatures of quantum degeneracy emerge in the scattering spectra of homogeneous\ \cite{javan} and trapped \cite{demarco:98} Fermi gases. Effects of quantum statistics are also featured in the scattering of short laser pulses from a trapped Fermi gas \cite {Lewenst:99}. Furthermore, the spontaneous emission appears to be inhibited in a cold Fermi gas \cite{Busch}. The effects of quantum degeneracy should manifest in the absorption spectra as well. The aim of the present paper is to investigate absorption by a cold Fermi gas confined in a harmonic trap. The analysis concentrates on the degenerate Fermi gas (i.e. very low temperatures), for which the quantum statistics of the atoms plays an important role. The theory involves exact calculations, as well as the Thomas - Fermi (TF) approximation. Consequently, the absorption spectrum is analyzed both for small and large numbers of trapped atoms. Consider a gas of Fermi atoms confined in a harmonic trap. The harmonic approximation is relevant for the traps used in recent experiments \cite {demarco:99}. We shall neglect atomic collisions, since the s-wave collisions are forbidden between the spin - polarized fermions. Consequently, one can make use of the following one - atom Hamiltonian: \begin{equation} H_{1-at}=\left| g\right\rangle H_{g}\left\langle g\right| +\sum_{ex}\left| ex\right\rangle \left( \hbar \omega _{0,ex}+H_{ex}\right) \left\langle ex\right| , \label{eq:1} \end{equation} where $\left| g\right\rangle $ and $\left| ex\right\rangle $ represent the ground and an excited electronic state of an atom, $\hbar \omega _{0,ex}$ {\em \ } is the excitation energy. It is noteworthy that the ground electronic level of the fermion atom has a number of magnetic sublevels over which the summation is to be carried out in the Hamiltonian (\ref{eq:1}). 
However, such a summation is not necessary if the atoms are spin - polarized, as it is the case in the experiment by DeMarco and Jin on $^{40}$K atoms \cite{demarco:99}. Here also $H_{g}$ $\left( H_{ex}\right) $ is the Hamiltonian for the translational motion of a trapped atom in the ground (excited) electronic{\em \ } state: \begin{equation} H_{g,ex}\left({\bf r},{\bf p}\right)=\frac{p^{2}}{2M}+\frac{M\Omega _{g,ex}^{2}\left( x^{2}+\lambda _{y}^{2}y^{2}+\lambda _{z}^{2}z^{2}\right) }{2}, \label{eq:2} \end{equation} where ${\bf p}=-i\hbar {\bf \nabla }$ is the momentum operator, $M$ is the atomic mass, $\Omega _{g}$ $\left( \Omega _{ex}\right) $ is the frequency of the translational motion along the x axis if the atom is in the ground (excited) electronic state, and the dimensionless parameters $\lambda _{y}$ and $ \lambda _{z}$ describe the extent of anisotropy of the trap. Note that the frequency $\Omega _{g}$\ can be generally different from $\Omega _{ex}$ due to the changes in the magnetic moment of the atom following its transition to an excited electronic state. The effects related to this fact will be explored using the TF approximation. The lineshape of the absorption spectrum is given by \begin{equation} I\left( \omega \right) =\sum_{i,f}\rho _{i}\left| \left\langle f\right| V\left| i\right\rangle \right| ^{2}\delta \left( \omega -\omega _{fi}\right) . 
\label{eq:3} \end{equation} Here $\left| i\right\rangle \equiv \left| g\right\rangle \left| {\bf n} \right\rangle _{g}$ and $\left| f\right\rangle \equiv \left| ex\right\rangle \left| {\bf m}\right\rangle _{ex}$ are the initial and final states of an atom, $\left| {\bf n}\right\rangle _{g}\equiv \left| n_{x},n_{y},n_{z}\right\rangle _{g}$ and $\left| {\bf m}\right\rangle _{ex}\equiv \left| m_{x},m_{y},m_{z}\right\rangle _{ex}$ are the atomic translational states characterized by the energies \begin{equation} \varepsilon _{{\bf n}}^{g,ex}=\hbar \Omega _{g,ex}\left( n_{x}+\lambda _{y}n_{y}+\lambda _{z}n_{z}\right) +\varepsilon _{{\bf 0}}^{g,ex}, \end{equation} where $\varepsilon _{{\bf 0}}^{g,ex}=\hbar \Omega _{g,ex}\left( 1+\lambda _{y}+\lambda _{z}\right) /2$, and $\omega _{fi}=\omega _{0ex}+\left( \varepsilon _{{\bf m}}^{ex}-\varepsilon _{{\bf n}}^{g}\right) /\hbar $ is the transition frequency. Here also $\rho _{i}\equiv \rho \left( \varepsilon _{{\bf n}}^{g}\right) =\left[ \exp \left( \beta \varepsilon _{{\bf n}}^{g}-\beta \mu \right) +1\right] ^{-1}$ is the FD distribution function for the trapped atoms, $\mu $ is the chemical potential and $\beta =1/k_{B}T$. The operator \begin{equation} V=\sum_{ex}\left\{ \left| ex\right\rangle d_{ex}\exp \left( i{\kappa x} \right) \left\langle g\right| +h.c.\right\} \label{eq:5} \end{equation} describes the interaction between an atom and the electromagnetic field propagating along the x axis, ${\kappa }$ is the wave number of the light, and $ d_{ex}$ is the atomic transition dipole moment along the polarization of the light. Consider first the absorption spectrum using the exact summation over the translational levels. At this stage, it is assumed that $\Omega _{g}=\Omega _{ex}=\Omega $, yet the trap can still be anisotropic. At zero temperature, only the levels with $n_{x}+\lambda _{y}n_{y}+\lambda _{z}n_{z}\leq n_{F}=E_{F}/\hbar \Omega $ are occupied by the atoms, where $E_{F}\equiv \left. 
\mu \right| _{T=0}$ is the Fermi energy. In such a situation, the absorption lineshape takes the form: \begin{eqnarray} I\left( \omega \right) &=&\sum_{ex}\left| d_{ex}\right| ^{2}\sum_{n_{x}=0}^{n_{F}}\sum_{m=-n_{x}}^{\infty }K_{x}\delta \left( \omega -\omega _{0,ex}-m\Omega \right) \nonumber \\ &&\times n_{x}!\left( n_{x}+m\right) !e^{-\alpha ^{2}} \left( \alpha ^{2}\right) ^{2n_{x}+m} \nonumber \\ &&\times \left( \sum_{j=0}^{j_x } \frac{\left( -\alpha ^{-2}\right)^{j}}{ j!\left( n_{x}-j\right) !\left( n_{x}+m-j\right) !}\right) ^{2} \label{I-tiksl}, \end{eqnarray} with $j_x =\min \left(n_{x},n_{x}+m\right)$, where $\alpha =\kappa \left( \hbar /2M\Omega \right) ^{1/2}$, and the factor $K_{x}=\sum_{n_{y}=0}^{\left[ \frac{1}{\lambda _{y}}\left( n_{F}-n_{x}\right) \right] }\sum_{n_{z}=0}^{\left[ \frac{1}{\lambda _{z}} \left( n_{F}-n_{x}-\lambda _{y}n_{y}\right) \right] }1$ represents a number of occupied translational states $\left| n_{x},n_{y},n_{z}\right\rangle $ for a fixed value of $n_{x}$, the brackets $ \lbrack ...\rbrack $ labeling the integer part of a number. The Fermi number $n_{F}$ is determined by the condition $ \sum_{n_{x}=0}^{n_{F}}K_{x}=N$, where $N$ is the number of trapped atoms. The factor $K_{x}$ reflects the trap geometry. For traps with a cylindric symmetry $\left( \lambda _{y}=\lambda _{z}=\lambda \right) $, one finds $ K_{x}=\left(\left[ q_x \right] +1\right) \left(\left[ q_x \right] + 2\right) /2$, where $q_x =\left( n_{F}-n_{x}\right) /\lambda $. For an anisotropic trap of a cigar shape $\left( \lambda_{y},\lambda _{z}\gg 1 \right) $, one has $K_{x}=1$ provided the number of trapped atoms is small enough $\left(n_{F} < \lambda_{y},\lambda_{z}\right)$. In such a situation, the trap becomes one-dimensional (1D), giving $n_{F}=N-1$. On the other hand, for an isotropic three-dimensional (3D) trap $\left( \lambda _{y}=\lambda _{z}=1\right) $, one arrives at $K_{x}=\left( n_{F}-n_{x}+1\right) \left( n_{F}-n_{x}+2\right) /2$. 
If the number of atoms is sufficiently large ($n_{F}>\lambda _{y},\lambda_{z}$), the anisotropic traps of cigar shape $\left( \lambda _{y},\lambda _{z}\gg 1\right) $ are no longer one-dimensional, since the Fermi energy is then greater than the energy of the translational quanta in the $y$ and $z$ directions. Such a situation corresponds to the recent experiment by DeMarco and Jin \cite{demarco:99}. Figure \ref{fig:1} shows the absorption lineshapes for various degrees of the trap anisotropy in the case where $\lambda _{y}=\lambda _{z}=\lambda $. A single excited electronic state $\left| ex\right\rangle $ has been taken into account in these and the subsequent figures. Oscillations are seen clearly in the thick solid line representing a purely 1D case ($\lambda =20$), as well as in the thin one corresponding to an anisotropic 3D case ($\lambda =5$). This can be related to the oscillations of the density of Fermi atoms in the one-dimensional \cite{Gleis:00} and anisotropic three-dimensional traps \cite {Wallis} at a sufficiently small number of the trapped atoms. Oscillations do not appear in the lineshape of an isotropic 3D trap ($\lambda =1$). Note that in contrast to a single trapped atom \cite{Gajda:99}, the zero-temperature lineshape of the trapped Fermi gas has a cutoff at the frequencies smaller than $\omega _{0,ex}$. This can be explained by the fact that fermions occupy excited translational levels of the trap at $T=0$ (up to the Fermi level), so that optical absorption can be accompanied by a decrease in the translational energy of the atoms. \begin{figure} \caption{Absorption lineshape calculated using Eq.~(\ref{I-tiksl}).} \label{fig:1} \end{figure} As $N$ or $T$ increases, the behavior of a quantum system becomes more similar to that of the classical one. To get analytical formulas for the absorption lineshape at arbitrarily large values of $N$ and $T$, we shall make use of the semiclassical Thomas-Fermi (TF) approximation. 
In the TF approximation, the state of an atom is labeled by the radius-vector ${\bf r}$ and wave vector ${\bf k =\bf p /\hbar }$ (see e.g. refs.\cite{Butts:97,Wallis}). The density of such states in the six-dimensional phase space $\left( {\bf r}, {\bf k}\right) $ is $\left( 2\pi \right) ^{-3}$. The number density of the fermion atoms in the phase space is: \begin{equation} \rho \left( {\bf r},{\bf k},T\right) =\frac{1}{\left( 2\pi \right) ^{3}}\frac{1}{ \exp \left\{ \beta H_g \left( {\bf r},{\hbar \bf k}\right) -\beta \mu \right\} +1}, \label{eq:7} \end{equation} where the chemical potential $\mu $ is related to the number of trapped atoms via the normalization condition $\int {\rm d}^{3}{\bf r} {\rm d}^{3}{\bf k}\rho \left( {\bf r},{\bf k} ,T\right)=N $. Applying the TF approximation, the lineshape reads: \begin{equation} I\left( \omega \right) =\sum_{ex}\left| d_{ex}\right| ^{2}\int {\rm d}^{3}{\bf r}{\rm d}^{3}{\bf k}\rho \left({\bf r}, {\bf k},T\right) \delta \left( \omega - \omega _{{\bf r}, {\bf k}} \right) \label{eq:8}, \end{equation} where $\omega _{{\bf r}, {\bf k}}= M \left( \Omega_{ex}^{2} - \Omega _{g}^{2} \right) \left( x^{2}+\lambda _{y}^{2}y^{2}+\lambda _{z}^{2}z^{2} \right)/2\hbar +\hbar k_x \kappa/M +\hbar \kappa^2 /M + \omega _{0,ex}$ is the transition frequency. If $\Omega _{g} =\Omega _{ex}$, the frequency $\omega _{{\bf r}, {\bf k}}$ does not depend on the atomic position ${\bf r}$, so the lineshape is determined exclusively by the momentum distribution function: \begin{equation} n\left( {\bf k},T\right) =\int {\rm d}^{3}{\bf r}\rho\left( {\bf r},{\bf k} ,T\right). 
\label{eq:9} \end{equation} For $T=0$ the distribution function is given by \cite{Butts:97}: \begin{equation} n\left( {\bf k},0\right) =\frac{8N}{\pi ^{2}K_{F}^{3}}\left( 1-\frac{k^{2} }{K_{F}^{2}}\right) ^{3/2}, \label{eq:10} \end{equation} where $K_{F}=\left( 2ME_{F}/\hbar ^{2}\right) ^{1/2}$ is the maximum momentum of the trapped Fermi atoms at zero temperature, and $E_{F}=\hbar \Omega _{g}\left( 6 \lambda _{y}\lambda _{z}N\right) ^{1/3}$ is the Fermi energy. Note that the momentum distribution is isotropic even though the trap is anisotropic \cite{Butts:97}. This leads to an isotropic absorption lineshape in the case where $\Omega_{ex}=\Omega_{g}$. Applying the distribution function (\ref{eq:10}), one arrives at the $ \left(1-z^2\right)^{5/2}$ behavior of the lineshape if $\Omega_{ex}=\Omega_{g}=\Omega $: \begin{equation} I\left( \omega \right) =\frac{16N}{5\pi \Delta }\sum_{ex}\left| d_{ex}\right| ^{2}\left[ 1-\left( \omega -\omega _{max}\right) ^{2}/\Delta ^{2}\right] ^{5/2}, \label{eq:11} \end{equation} where the central frequency $\omega _{max }=\omega _{0,ex}+\alpha^2 \Omega$ is shifted by the recoil frequency $\omega_{rec}=\alpha^2\Omega$ as compared to $\omega _{0,ex}$, \begin{equation} \Delta =\alpha \Omega \left( 6N\lambda _{y}\lambda _{z}\right) ^{1/6} \label{eq:12} \end{equation} being the spectral half-width. In the experiment by DeMarco and Jin \cite{demarco:99} using trapped $^{40}$K atoms, $\Omega =2\pi \times 19$ Hz, $N=7\times 10^{5}$ and $ \lambda \approx 7$, giving $\alpha \approx 36$ for $\omega=4\times 10^{16}$ Hz. Consequently one has $\Delta \approx 6\times 10^{5}$ Hz. This is less than the typical radiative linewidths for free atoms. Yet, for trapped fermions the spontaneous emission is suppressed \cite{Busch}, so the Doppler broadening can be dominant. 
The approximated lineshape (\ref{eq:11}) depends on the trap anisotropy exclusively through the characteristic frequency $\Omega _{char}=\Omega \left( \lambda _{y}\lambda _{z}\right) ^{1/3}$ which is a measure of the trap hardness. The bigger $\Omega _{char}$ is, the tighter is the trap, and the broader is the absorption spectrum. In fact, the maximum momentum of the atoms is larger in tighter traps (for the same number of trapped atoms) leading to the increase in the Doppler broadening. For instance, compared to an isotropic trap $\left( \lambda _{y}=\lambda _{z}=1\right) $, the spectrum of a squeezed trap ($\lambda _{y}$, $\lambda _{z}>1$) is broader. Exact and approximated lineshapes are presented in Figs.~\ref{fig:2} and \ref{fig:3}. For an isotropic trap (Fig.~\ref{fig:2}), the agreement appears to be very good, even though the number of atoms $N$ is rather small. Deviations are seen only in the tails of the spectrum corresponding to the periphery of the fermion cloud. In such an area, the fermion density becomes small and the TF approximation fails \cite{Butts:97}. For anisotropic traps, the exact spectrum undergoes some oscillations about the approximated one even for relatively large values of $N$ (see Fig.~\ref{fig:3}). In fact, the energy of translational quanta depends now on the specific directions of atomic motion, so a larger number of trapped atoms is needed to populate substantially the translational levels in all three directions. \begin{figure} \caption{Absorption lineshape at $T=0$ calculated using the exact summation (dashed line) and the TF approximation (solid line) for $\alpha=9$, $N=35$ and $\lambda_y=\lambda_z=\lambda =1$.} \label{fig:2} \end{figure} \begin{figure} \caption{The same as in Fig.~\ref{fig:2}, but for an anisotropic trap.} \label{fig:3} \end{figure} Consider next the situation where $T\neq 0$ and the frequencies $\Omega _{g}$ and $\Omega _{ex}$ are not necessarily equal. 
The lineshape (\ref {eq:8}) then takes the following form for an isotropic trap: \begin{eqnarray} I\left( \omega ,T\right) &=&\sum_{ex}\frac{\left| d_{ex}\right| ^{2}}{16\pi\alpha^6\Omega^6 p^{5/2}}\int_{0}^{\infty }y^{2}{\rm d}y \ln \left\{ 1+\exp \left[\beta \mu \right.\right. \nonumber \\ &&\left. \left. -y^{2}-p\left( \omega -\omega _{max} + m_{ex}y^2/\beta\hbar\right)^{2}\right] \right\}, \label{eq:13} \end{eqnarray} where $m_{ex}=1-\Omega _{ex}^{2}/\Omega _{g}^{2}$, \quad $p=\beta \hbar /4\alpha ^2\Omega $ and $\Omega\equiv \Omega _{g}$. If $\Omega_{g} = \Omega _{ex}$, the result (\ref{eq:13}) can be extended readily to anisotropic traps. In such a case, the lineshape (\ref{eq:13}) acquires an extra factor $1/ \lambda_y \lambda_z $, and the chemical potential $\mu $ depends on $\lambda_y \lambda_z $, in addition to $T$ and $N$. We are interested primarily in the strongly degenerate Fermi gas $\left( {\beta }{E_{F}}\gg 1\right) $, for which the Sommerfeld expansion holds for the chemical potential: \begin{equation} \mu =E_{F}\left( 1-\frac{\pi ^{2}}{3}\left( \frac{1}{\beta E_{F}}\right) ^{2}\right) . \label{eq:14} \end{equation} In the opposite (non-degenerate gas) limit $\left( {\beta }{E_{F}}\longrightarrow 0\right) $, one has $\mu =\beta ^{-1} \ln \left[ \left( \beta E_F\right) ^{3}/6\right] $, and the lineshape (\ref{eq:13}) reduces to the Gaussian form if $\Omega_{g} = \Omega _{ex}$. \begin{figure} \caption{Absorption lineshape at various temperatures for an isotropic trap with $\Omega _{g}=\Omega _{ex}$.} \label{fig:4} \end{figure} \begin{figure} \caption{Absorption lineshape for an isotropic trap with $N=10667$, $\alpha=9$, $k_{B}T/E_{F}=0.1$ and various values of $m_{ex}=1-\Omega _{ex}^{2}/\Omega _{g}^{2}$.} \label{fig:5} \end{figure} Figure \ref{fig:4} shows the temperature dependence of the lineshapes for $\Omega _{g}=\Omega _{ex}$. At very low temperature ($k_{B}T/E_{F}=0.1$), the absorption spectrum is seen to be close to the zero-temperature limit. 
As the temperature increases, the absorption lineshape becomes broader and is no longer characterized by the $\left( 1-z^{2}\right) ^{5/2}$ behavior. Yet, the lineshape is still non-Gaussian, since the gas is strongly degenerate. Figure \ref{fig:5} shows the absorption spectrum for various values of $m_{ex}=1-\Omega _{ex}^{2}/\Omega _{g}^{2}$. For $\Omega _{ex}>\Omega _{g}$, the maximum position of the spectrum is shifted to larger frequencies. For $\Omega _{ex}<\Omega _{g}$, one has the opposite. Furthermore, one can see the obvious increase in the spectrum width if $\Omega _{ex}>\Omega _{g}$. This is due to the fact that an increase in the translational frequency of the electronically excited atoms leads to an increase and broader distribution of frequencies of the optical transitions. In summary, we have studied the absorption spectrum by a cold gas of Fermi atoms using both the exact summation and also the Thomas - Fermi approximation. Oscillations have been obtained in the absorption spectrum calculated numerically for one-dimensional and anisotropic three-dimensional traps at a sufficiently small number of trapped atoms and $T=0$. No such oscillations appear for the isotropic three-dimensional traps. Applying the TF approximation, relatively simple analytical expressions have been obtained for the lineshape of three dimensional traps at a sufficiently large number of trapped particles. At zero temperature, the approximated spectrum is characterized by a $\left( 1-z^{2}\right) ^{5/2}$ dependence. At non-zero temperature, the spectrum becomes broader, although remains non-Gaussian as long as the fermion gas is degenerate. The changes in the trap frequency for an electronically excited atom can introduce an additional line broadening. The authors would like to express their appreciation for the useful discussions with H. Carmichael and T. Mossberg. This work has been completed during the Fulbright Scholarship by G. J. at the University of Oregon. 
\begin{references} \bibitem{anders:95} M.H. Anderson, J.R. Ensher, M.R. Matthews, C.E. Wieman and E.A. Cornell, Science {\bf 269}, 198 (1995). \bibitem{Ketterle} K.B. Davis, M.O. Mewes, M.R. Andrews, N.J. van Druten, D.S. Durfee, D.M. Kurn and W. Ketterle, Phys. Rev. Lett. {\bf 75}, 3969 (1995). \bibitem{Hulet} C.C. Bradley, C.A. Sackett, J.J. Tollett and R.G. Hulet, Phys. Rev. Lett. {\bf 75}, 1687 (1995). \bibitem{demarco:99} B. DeMarco and D.S. Jin, Science {\bf 285}, 1703 (1999). \bibitem{Butts:97} D.A. Butts and D.S. Rokhsar, Phys. Rev. A {\bf 55}, 4346 (1997). \bibitem{Wallis} J. Schneider and H. Wallis, Phys. Rev. A {\bf 57}, 1253 (1998). \bibitem{demarco:98} B. DeMarco and D.S. Jin, Phys. Rev. A {\bf 58}, R4267 (1998). \bibitem{Lewenst:94} M. Lewenstein, L. You, J. Cooper and K. Burnett, Phys. Rev. A {\bf 50}, 2207 (1994). \bibitem{Lewenst:99} T. Wong, \"{O}. M\"{u}stecaplio\~{g}lu, L. You and M. Lewenstein, Phys. Rev. A {\bf 62}, 033608 (2000). \bibitem{javan} J. Javanainen and J. Ruostekoski, Phys. Rev. A {\bf 52}, 3033 (1995). \bibitem{Busch} T. Busch, J.R. Anglin, J.I. Cirac and P. Zoller, Europhys. Lett. {\bf 44}, 1 (1998). \bibitem{Gleis:00} F. Gleisberg, W. Wonneberger, U. Schl\"{o}der and C. Zimmermann, e-print quant-ph/0009085 (2000). \bibitem{Gajda:99} A. Orlowskij, M. Gajda, P. Krekora, R.J. Glauber, J. Mostowski, Opt. and Spectrosc. {\bf 87}, 645 (1999). \end{references} \end{document}
\begin{document} \title{Quantum-enhanced interferometry with large heralded photon-number states} \author{G.S. Thekkadath} \affiliation{Clarendon Laboratory, University of Oxford, Parks Road, Oxford OX1 3PU, UK} \email{[email protected]} \author{M.E. Mycroft} \affiliation{Faculty of Physics, University of Warsaw, ul. Pasteura 5, 02-093 Warsaw, Poland} \author{B.A. Bell} \affiliation{Clarendon Laboratory, University of Oxford, Parks Road, Oxford OX1 3PU, UK} \author{C.G. Wade} \affiliation{Clarendon Laboratory, University of Oxford, Parks Road, Oxford OX1 3PU, UK} \author{A. Eckstein} \affiliation{Clarendon Laboratory, University of Oxford, Parks Road, Oxford OX1 3PU, UK} \author{D.S. Phillips} \affiliation{Clarendon Laboratory, University of Oxford, Parks Road, Oxford OX1 3PU, UK} \author{R.B. Patel} \affiliation{Clarendon Laboratory, University of Oxford, Parks Road, Oxford OX1 3PU, UK} \author{A. Buraczewski} \affiliation{Faculty of Physics, University of Warsaw, ul. Pasteura 5, 02-093 Warsaw, Poland} \author{A.E. Lita} \affiliation{National Institute of Standards and Technology, 325 Broadway, Boulder, Colorado 80305, USA} \author{T. Gerrits} \affiliation{National Institute of Standards and Technology, 325 Broadway, Boulder, Colorado 80305, USA} \affiliation{National Institute of Standards and Technology, 100 Bureau Drive, Gaithersburg, Maryland 20899, USA} \author{S.W. Nam} \affiliation{National Institute of Standards and Technology, 325 Broadway, Boulder, Colorado 80305, USA} \author{M. Stobi\'{n}ska} \affiliation{Faculty of Physics, University of Warsaw, ul. Pasteura 5, 02-093 Warsaw, Poland} \author{A.I. Lvovsky} \affiliation{Clarendon Laboratory, University of Oxford, Parks Road, Oxford OX1 3PU, UK} \author{I.A. 
Walmsley} \affiliation{Clarendon Laboratory, University of Oxford, Parks Road, Oxford OX1 3PU, UK} \affiliation{Department of Physics, Imperial College London, Prince Consort Rd, London SW7 2AZ, UK} \begin{abstract} Quantum phenomena such as entanglement can improve fundamental limits on the sensitivity of a measurement probe. In optical interferometry, a probe consisting of $N$ entangled photons provides up to a $\sqrt{N}$ enhancement in phase sensitivity compared to a classical probe of the same energy. Here, we employ high-gain parametric down-conversion sources and photon-number-resolving detectors to perform interferometry with heralded quantum probes of sizes up to $N=8$ (i.e. measuring up to 16-photon coincidences). Our probes are created by injecting heralded photon-number states into an interferometer, and in principle provide quantum-enhanced phase sensitivity even in the presence of significant optical loss. Our work paves the way towards quantum-enhanced interferometry using large entangled photonic states. \end{abstract} \maketitle \section*{Introduction} Optical interferometry provides a means to sense very small changes in the path of a light beam. These changes may be induced by a wide range of phenomena, from pressure and temperature variations that impact refractive index, to modifications of the space-time metric that characterize gravitational waves. In its simplest form, interferometry measures distortions via the phase difference $\phi$ between the two paths of the interferometer. The uncertainty $\Delta \phi$ in a measurement of this phase difference is limited fundamentally by the quantum noise of the illuminating light beams. This noise can be reduced by employing light exhibiting nonclassical properties such as entanglement and squeezing in order to improve the sensitivity of an interferometer beyond classical limits~\cite{RDD2015review}. 
Quantum states of light are most effective when it is desirable to maximize the phase sensitivity per photon inside an interferometer, such as in gravitational wave detectors~\cite{caves1981quantum,tse2019quantum} or when characterizing delicate photosensitive samples~\cite{crespi2012measuring,wolfgramm2013entanglement,taylor2013biological,taylor2016quantum,cimini2019adaptive}. In principle, $N$-photon quantum states of light such as the highly entangled N00N state can provide up to a $\sqrt{N}$ precision enhancement over a classical state of equal energy~\cite{bollinger1996noon,mitchell2004super,walther2004broglie,nagata2007beating,kim2009three,afek2010high,matthews2011heralding,ulanov2016loss,slussarenko2017unconditional}. Unfortunately these highly entangled states are vulnerable to decoherence, especially at large photon numbers. In practice, their enhanced sensitivity disappears in the presence of loss which may originate from interactions inside the interferometer (e.g. absorption in a sample) as well as external losses in the state preparation and detection~\cite{datta2011quantum}. Although a $\sqrt{N}$ enhancement is not achievable in the presence of loss, one can engineer states that trade-away sensitivity for loss-tolerance in order to still achieve some advantage over classical limits~\cite{dorner2009optimal,kacprowicz2010experimental}. For example, squeezed light~\cite{demkowicz2013fundamental,lang2013optimal,yonezawa2012quantum,berni2015ab} and non-maximally entangled states such as Holland-Burnett states~\cite{holland1993interferometric,sun2008experimental,xiang2011entanglement,thomas2011realworld,xiang2013optimal,jin2016detection,matthews2016towards} can surpass classical limits despite some losses. Importantly, the precision enhancement achievable with such states can grow with $N$, even in the presence of loss~\cite{dorner2009optimal}. 
Experimental demonstrations have prepared unheralded $N=6$~\cite{xiang2013optimal,jin2016detection} (or heralded $N=2$~\cite{thomas2011realworld}) Holland-Burnett states, but further increase of $N$ is constrained by source brightness as well as detector efficiency and number-resolution. This motivates developing experimental protocols that can produce and detect loss-tolerant states with larger photon numbers. In this work, we address a number of key challenges in order to scale-up quantum-enhanced interferometry using definite photon-number states of light. Firstly, we introduce probe states that are prepared by combining two photon-number states on a beam splitter similarly to Holland-Burnett states. However, unlike the latter, we allow the initial photon-number states to be unequal. We show that these generalized Holland-Burnett states are more sensitive than both Holland-Burnett and N00N states in the presence of loss and approximate the performance of the optimal probe~\cite{dorner2009optimal}. Secondly, we experimentally implement our scheme using high-gain parametric down-conversion sources~\cite{eckstein2011highly,harder2016mesoscopic} and state-of-the-art photon-number-resolving detectors~\cite{lita2008counting} in order to access a large photon-number regime. We herald entangled probes of sizes up to $N=8$ and measure up to 16-photon coincidences, thereby further increasing the scale of experimental multiphoton quantum technologies~\cite{gao2010experimental,wang2016experimental,wang2019boson}. \begin{figure} \caption{ \textbf{Interferometric scheme} \label{fig:Scheme} \end{figure} The idea is illustrated in Fig.~\ref{fig:Scheme}(a). Two type-II parametric down-conversion (PDC) sources each produce pairs of light beams that are quantum-correlated in photon number, i.e. a two-mode squeezed vacuum state \begin{equation} \ket{\chi} = \sqrt{1-\lambda^2}\sum_{n=0}^\infty \lambda^n \ket{n,n}. 
\label{eq:tmsv} \end{equation} Here, $\lambda$ is a parameter that determines the average number of photons in each beam, $\braket{n} = \lambda^2/ (1-\lambda^2)$. Measuring one of the beams with an ideal lossless photon-number-resolving detector projects the second beam onto a known photon-number state $\ket{h_1}$. Duplicating this procedure with a second independent source and detector, we herald pairs of photon-number states that are not necessarily identical, i.e. the probe $\ket{h_1,h_2}$. When these states are combined on the first beam splitter, multiphoton interference generates a path-entangled probe inside the interferometer~\cite{stobinska2019quantum}. We quantify the phase-sensitivity of the probe inside the interferometer by calculating the quantum Fisher information $\mathcal{Q}$. The quantity $\mathcal{Q}$ provides a lower limit on the best achievable phase uncertainty via the quantum Cram\'{e}r--Rao bound, $\Delta \phi \geq 1/\sqrt{\mathcal{Q}}$. The bound can be saturated using the optimal measurement strategy, which in the absence of loss is photon counting for the probes considered here~\cite{hofmann2009all,zhong2017optimal}. In Fig.~\ref{fig:Scheme}(b), we plot $\mathcal{Q}$ for several probes with the same total photon number $N=h_1+h_2=8$, but different $\Delta= |h_1 - h_2|$, as a function of the signal transmissivity $\eta_s$ which we assume to be equal in both interferometer modes. Probes with a small $\Delta$ provide a greater advantage over the classical shot-noise limit but are more sensitive to losses. Since the probe is heralded in our scheme, one can choose the optimal $\Delta$ for a given $\eta_s$. Also shown in Fig.~\ref{fig:Scheme}(b) is $\mathcal{Q}$ for the optimal state that maximizes this parameter for a given $N$ and $\eta_s$. This state has been found in Ref.~\cite{dorner2009optimal}; the derivation is reproduced in the Supplementary Method 1. For the loss-free case ($\eta_s=1$), the optimal state is the N00N state. 
However, for efficiencies below $\sim 90 \%$, our probes significantly surpass the N00N state in terms of $\mathcal{Q}$, exhibiting performance close to optimal. Moreover, in contrast to the N00N and Holland-Burnett states, our probe performs at least as well as the shot-noise limit for any amount of loss. We now turn to the experiment. Both PDC sources are periodically poled potassium titanyl phosphate (ppKTP) waveguides pumped with $\sim 0.5$ ps long pulses from a mode-locked laser at a repetition rate of 100 kHz. The four detectors are superconducting transition edge sensors which we use to count up to 10 photons with a detection efficiency exceeding 95\%~\cite{lita2008counting}. The interferometer is a fiber-based device in which we can control the distance between two evanescently-coupled fibers using a micrometer to vary $\phi$, much like changing the path length difference between two arms of an interferometer. Further details on the experimental setup can be found in the Methods. \begin{figure*} \caption{ \textbf{The weak gain regime} \label{fig:lowSq} \end{figure*} We measure interference fringes given by $\mathrm{pr}_{s_1,s_2,h_1,h_2}(\phi)$, the joint photon-number probability per pump pulse to obtain the herald outcome $(h_1, h_2)$ and measure $(s_1,s_2)$ at the output of the interferometer when the phase difference is $\phi$. We will refer to this as the $(s_1,s_2,h_1,h_2)$ rate. To quantify the phase sensitivity of the rates measured with a particular herald outcome $(h_1, h_2)$, we calculate the Fisher information: \begin{equation} \mathcal{F}_{h_1,h_2}(\phi) = \sum_{s_1,s_2} \frac{ \left[ \partial_\phi \tilde{\mathrm{pr}}_{s_1,s_2,h_1,h_2}(\phi)\right]^2}{\tilde{\mathrm{pr}}_{s_1,s_2,h_1,h_2}(\phi)}, \label{eqn:cfi} \end{equation} where $\partial_\phi$ denotes the partial derivative with respect to $\phi$, and $\tilde{\mathrm{pr}}_{s_1,s_2,h_1,h_2}(\phi)$ is a model fitted to the measured rates (see Supplementary Method 2). 
Note that $\mathcal{F}_{h_1,h_2}(\phi)$ quantifies the amount of information about $\phi$ in our measurement results, i.e. for a specific measurement strategy, and so $\mathcal{F}_{h_1,h_2}(\phi) \leq \mathcal{Q}$. We compare the performance of our photon counting strategy to the optimal measurement strategy in the Supplementary Discussion 1. Our primary figure of merit is the Fisher information per detected signal photon conditioned on measuring $(h_1,h_2)$ at the heralding detectors, \[ \tilde{\mathcal{F}}_{h_1,h_2}(\phi) = \mathcal{F}_{h_1,h_2}(\phi) / \braket{\tilde{n}}, \] where \begin{equation} \braket{\tilde{n}} = \sum_{s_1,s_2} (s_1+s_2)\tilde{\mathrm{pr}}_{s_1,s_2,h_1,h_2}(\phi) \label{eqn:avg_n} \end{equation} is the total number of detected signal photons. Injecting a coherent state into our interferometer would in principle yield the Fisher information $\mathcal{F}=\braket{\tilde{n}}$ when the detected mean photon number is $\braket{\tilde{n}}$~\cite{datta2011quantum}. Thus, our figure of merit can be easily compared to the shot-noise limit which corresponds to $\tilde{\mathcal{F}}_{h_1,h_2}(\phi)=1$. We measured the total efficiency of both the heralding and signal modes to be between $47\%$ and $55\%$ (see Supplementary Method 3). This includes $\sim 90\%$ waveguide transmission, $\sim 70\%$ mode coupling efficiency into fibers, 90\% interferometer transmission, and $\gtrsim 95\%$ detector efficiency. Due to the latter two losses, the detected $\braket{\tilde{n}}$ is 10--15\% smaller than the mean photon number inside the interferometer. As such, the Fisher information per photon inside the interferometer (which is the relevant resource when e.g.\ probing a delicate sample) is about 10--15\% smaller than $\tilde{\mathcal{F}}_{h_1,h_2}(\phi)$. \section*{Results} We begin with low pump power to test our setup in the weak gain regime ($\lambda \sim 0.25$, $10~\mu$W per source). 
In Fig.~\ref{fig:lowSq}, we show results for two different probes, (a) $\ket{1,1}$, the well-studied $N=2$ N00N or Holland-Burnett state, and (b) $\ket{2,1}$, a probe studied here for the first time. We calculate $\tilde{\mathcal{F}}_{h_1,h_2}(\phi)$ using two methods. In the first, we discard events in which we know photons were lost by only including rates where $s_1+s_2 = h_1+h_2$ in the sums of Eqs.~\eqref{eqn:cfi} and~\eqref{eqn:avg_n}. These rates are shown in the top panels of Fig.~\ref{fig:lowSq}. Using this first method, $\tilde{\mathcal{F}}_{h_1,h_2}(\phi)$ [green curves] surpasses the shot-noise limit by $0.09 \pm 0.01$ for $\ket{1,1}$ and $0.10 \pm 0.04$ for $\ket{2,1}$ at its highest point. In the second method, we include all measured events. Note that this may include events where $s_1 + s_2 < h_1+h_2$ due to loss in the signal modes, but also $s_1 + s_2 > h_1+h_2$ due to loss in the herald modes. Conditioned on obtaining the herald outcome $(h_1,h_2)$, the probability of the latter occurring can be minimized by reducing the pump power and hence $\lambda$. This increases the purity of the probe at the cost of reducing its heralding rate. Without post-selection, $\tilde{\mathcal{F}}(\phi)$ [red curves] drops below the shot-noise limit mainly due to losses. \begin{figure*} \caption{ \textbf{The high gain regime} \label{fig:highSqCFI} \end{figure*} In addition to loss, the spectral purity and distinguishability of our photons are also sources of imperfection that reduce the contrast of the fringes and hence diminish $\tilde{\mathcal{F}}_{h_1,h_2}(\phi)$~\cite{birchall2016beating}. Consider the probe $\ket{1,1}$, for example. For $\phi = \pm \pi/2$, the whole interferometer acts as a balanced beam splitter, in which case Hong-Ou-Mandel interference should lead to a complete suppression in coincidences at its output. However, as can be seen in the orange $(1,1,1,1)$ fit in Fig.~\ref{fig:lowSq}(a), the visibility of this interference effect is $\sim 75\%$. 
This visibility exceeds $\sqrt{0.5}$, which is the minimum required for demonstrating post-selected quantum-enhanced sensitivity with the probe $\ket{1,1}$~\cite{nagata2007beating,resch2008timereversal,thomas2011realworld}. In addition to spectral mismatch between the signal modes, the visibility is degraded by uncorrelated background photons ($\sim 5\%$ of detected photons) and the slight multi-mode nature of our sources, both of which reduce the purity of our heralded photons. We discuss source imperfections in more detail in the Supplementary Discussion 2. The finite detector energy resolution also plays a small role as the detectors have a $\sim 1 \%$ chance to mislabel an event by $\pm 1$ photon~\cite{humphreys2015tomography}. Next, we increase the pump power to reach a high-gain regime ($\lambda \sim 0.75$, $135~\mu$W per source) in which we can herald large photon numbers. We detect 16-photon events at a rate of roughly $7$ per second, which is much higher than the state-of-the-art achievable with bulk crystal PDC sources~\cite{wang2016experimental} or quantum dots~\cite{wang2019boson}. In Fig.~\ref{fig:highSqCFI}(a), we plot $\tilde{\mathcal{F}}_{h_1,h_2}(\phi)$ calculated without post-selection for all probes with $N=8$. As expected given the amount of loss in our experiment, probes with larger $\Delta$ are more phase sensitive due to their increased robustness to loss [Fig.~\ref{fig:Scheme}(b)]. In particular, the sensitivity of the $\Delta=N$ probe should be shot-noise limited regardless of losses~\cite{pezze2007phase}. However in practice, the heralded detection of $0$ photons could occur due to photon loss in the corresponding herald mode, resulting in the contamination of the signal with states for which $\Delta\ne N$. This degrades the performance of the $\Delta=8$ probe [orange curve]. In the Supplementary Discussion 3, we show that shot-noise limited performance with the $\Delta=N$ probe is recovered by blocking one of the sources. 
\section*{Discussion} The fringes produced by our probes exhibit a number of different features compared to those measured with N00N or Holland-Burnett states. For example, with these two states, the expected signature of $N$-photon interference is fringe oscillations that vary as $\cos(N\phi)$. While our measured fringes do not exhibit such oscillations in the high gain regime, they do exhibit sharper features than classical fringes. We show this explicitly by comparing our rates to those measured with distinguishable photons. This is achieved by temporally delaying photons coming from the top source with respect to photons coming from the bottom source by more than their coherence time. As an example, we consider the probe $\ket{3,2}$ in Fig.~\ref{fig:effect_of_distinguishability}. When the photons are injected inside the interferometer at the same time, the fringe contrast is significantly higher than when they are temporally delayed [Fig.~\ref{fig:effect_of_distinguishability}(a)]. Likewise, when we calculate $\tilde{\mathcal{F}}_{3,2}(\phi)$ without post-selection, we find an improvement in the probe's sensitivity in the former case [Fig.~\ref{fig:effect_of_distinguishability}(b)]. This demonstrates that the probe sensitivity derives from multiphoton interference even at high photon numbers. With any finite amount of loss, $\tilde{\mathcal{F}}_{h_1,h_2}(\phi)$ vanishes when all fringes share a common turning point such as at $\phi = 0$. In the case of Holland-Burnett ($\Delta = 0$) and N00N states, there are also common turning points at $\phi = \pm \pi/2$ which cause the reduction in $\tilde{\mathcal{F}}_{h_1,h_2}(\phi)$ around these phase values [Fig.~\ref{fig:highSqCFI}(c)]. In contrast, the probes with $\Delta=4,6,8$ do not have a dip in $\tilde{\mathcal{F}}_{h_1,h_2}(\pm \pi/2)$. The origin of this effect for $\Delta = 8$ can be seen directly in the rates shown in Fig.~\ref{fig:highSqCFI}(b).
The region of the fringe with high sensitivity to $\phi$ (i.e. large gradient) is different for different values of $|s_1-s_2|$. This feature of $\tilde{\mathcal{F}}_{h_1,h_2}(\phi)$ allows estimating $\phi$ without prior knowledge of the range in which it lies, as is required for N00N or Holland-Burnett states, and thus provides a means for global phase estimation without using an adaptive protocol~\cite{xiang2011entanglement,daryanoosh2018adaptive}. \begin{figure} \caption{ \textbf{Testing multiphoton interference} \label{fig:effect_of_distinguishability} \end{figure} Finally, we briefly compare our results to other works reporting Fisher information per detected photon. The highest achieved here is $\sim 1.1$ using the herald outcome $(2,1)$, i.e. an $N=3$ probe. Ref.~\cite{matthews2016towards} and Ref.~\cite{slussarenko2017unconditional} respectively report $\sim 1.25$ and $\sim 1.2$ using an $N=2$ probe. The latter work also achieves a Fisher information per photon inside the interferometer (i.e. accounting for undetected photons) of $\sim 1.15$; thus far, this is the only experiment demonstrating an unconditional improvement to the shot-noise limit. In the Supplementary Discussion 4, we estimate that an efficiency of $80\%$ (in all four modes) and quantum interference visibility of $85\%$ would be sufficient to demonstrate an improvement to the shot-noise limit with $N=8$ photons without post-selection. Although we do not attain these parameters in our experiment, our results do demonstrate the robustness of our probes to losses despite their large size. For example, the Fisher information per photon calculated without post-selection for the $N=8$ probe with $\Delta=6$ [Fig.~\ref{fig:highSqCFI}(a)] is slightly higher than that of the $N=2$ N00N state [Fig.~\ref{fig:lowSq}(a)]. This contradicts the usual expectation that large entangled probes will necessarily be more fragile to noise and loss.
In summary, we proposed and experimentally demonstrated a scheme for quantum-enhanced interferometry that exploits bright two-mode squeezed vacuum sources and photon-number-resolving detectors. We measured interference fringes involving up to 16 photons which is significantly higher than the previous state-of-the-art~\cite{gao2010experimental,wang2016experimental}. Crucially, our scheme prepares probes that are nearly optimally robust to losses and hence addresses one of the principal challenges when scaling-up to large entangled photonic states. With further improvements in the quality (e.g. coupling efficiency into optical fiber and purity) of bright two-mode squeezed vacuum sources compatible with transition edge sensors~\cite{harder2016mesoscopic,vaidya2019broadband}, we believe our loss-tolerant scheme provides a promising route towards achieving quantum-enhanced resolution using large entangled photonic states. \section*{Methods} \subsection*{Sources} We pick 150-fs pulses from a mode-locked Ti:Sapphire laser (Coherent Mira-HP) at a rate of 100 kHz using a Pockels-cell-based pulse picker having a 50 dB extinction ratio. This repetition rate is chosen to accommodate the recovery time of the transition edge sensor detectors. The pump pulses are filtered to $783$ $\pm~2$ nm [full-width at half maximum] using a pair of angle-tuned bandpass filters. We split the pulses into two paths that are matched in length using a translation stage. In each path, we pump a 8 mm long ppKTP waveguide that is phase-matched for type-II parametric down-conversion. At the exit of the waveguide, the pump light is rejected with a longpass filter, and the orthogonally-polarized down-converted modes are separated using a polarizing beam splitter. Each down-converted mode is filtered with a bandpass filter whose bandwidth is chosen to transmit the main feature of the down-converted spectrum but reject its side-lobes. 
The herald modes ($1566$ $\pm~7$ nm) are coupled into single-mode fibers and sent directly to the detectors. The signal modes ($1567$ $\pm~7$ nm) are coupled into polarization-maintaining single-mode fibers and sent into the interferometer. Details on the coupling efficiency and the spectral indistinguishability of the signal modes are provided in the Supplementary Discussion 2. \subsection*{Interferometer} The interferometer is a fiber-based variable beam splitter (Newport F-CPL-1550-P-FP). The splitting ratio is adjusted by controlling the distance between two evanescently-coupled fibers using a micrometer, which is analogous to changing the path length difference between two arms of an interferometer. In fact, any variable beam splitter that coherently splits light into two modes can be described by the same transformation as a Mach-Zehnder-type interferometer~\cite{florez2018variable}. During data acquisition, we scan the distance $x$ between the two evanescently-coupled fibers. To display our data as a function of the interferometer phase, we first calculate the transmission coefficient $T(x)$ of the variable beam splitter using the measured $(1,0,1,0)$ and $(0,1,1,0)$ rates: \begin{equation} T(x) = \frac{\mathrm{pr}_{1,0,1,0}(x)}{\mathrm{pr}_{1,0,1,0}(x) + \mathrm{pr}_{0,1,1,0}(x)}. \end{equation} At low powers, we find that the quantity $T(x)$ typically varies within $[0.02, 0.98]$. To obtain the corresponding phase, we correct for the imperfect visibility: \begin{equation} T_{\mathrm{corr}}(x) = \frac{T(x) - \min{[T(x)]}}{\max{[T(x)]}-\min{[T(x)]}} \end{equation} such that $T_{\mathrm{corr}}(x)$ varies between $[0,1]$. For a single photon injected into a Mach-Zehnder-type interferometer with phase difference $\phi$ between its two arms, one expects $T_{\mathrm{corr}}(x) = [1-\cos{(\phi)}]/2$. Solving for $\phi$, we find: \begin{equation} \phi(x) = \arccos{\left(2T_{\mathrm{corr}}(x)-1\right)}.
\end{equation} \subsection*{Detectors} Our detectors are superconducting transition edge sensor detectors that operate at a temperature of 85 mK inside a dilution refrigerator. Details on their physical operation can be found in Ref.~\cite{lita2008counting}. An electrical trigger signal from the pump laser begins a 6 $\mu s$ time window of data acquisition during which the detector outputs are amplified and recorded with an analogue-to-digital converter. We use a matched-filter technique in real-time to convert each detector's output trace into a scalar value~\cite{figueroa2000optimal}. The scalar value is then converted into a photon number using bins that are set during an initial calibration run prior to data acquisition. \section*{Data availability} The data sets generated and/or analyzed during this study are available from the corresponding author on reasonable request. Correspondence and requests for materials should be addressed to G.S.T.$^{*}$ \section*{Competing interests} The authors declare no competing interests. \section*{Author contributions} Both G.S.T. and M.E.M. contributed equally. G.S.T. performed the experiment with assistance from B.A.B, C.G.W, A.E, D.S.P.; M.E.M. and G.S.T. performed numerical calculations with assistance from A.B.; A.E.L., T.G., and S.W.N. developed the detectors; R.B.P., M.S., A.I.L., and I.A.W. initiated and/or supervised the project; G.S.T. and M.E.M. wrote the manuscript with input from all authors. \onecolumngrid \section*{Supplementary information} \renewcommand{S\arabic{equation}}{S\arabic{equation}} \renewcommand{S\arabic{figure}}{S\arabic{figure}} \subsection*{Supplementary Note 1: Quantum Fisher information of generalized Holland-Burnett states} Here we derive the quantum Fisher information of our generalized Holland-Burnett states. We first consider the ideal lossless case. 
In general, the quantum Fisher information of a pure state $\ket{\Psi(\phi)}$ that depends on some parameter $\phi$ is given by~\cite{braunstein1994statistical}: \begin{equation} \mathcal{Q} = 4\left( \braket{\partial_\phi\Psi(\phi)|\partial_\phi\Psi(\phi)} - \left| \braket{\partial_\phi\Psi(\phi)|\Psi(\phi)} \right|^2 \right) \end{equation} where $\ket{\partial_\phi\Psi(\phi)} \equiv \partial_\phi \ket{\Psi(\phi)}$. In our case, $\ket{\Psi}$ is the two-mode state inside the interferometer (before the phase shift) and $\ket{\Psi(\phi)}=e^{i\hat{c}^\dagger\hat{c}\phi}\ket{\Psi}$ is the state after the phase shift $\phi$ is applied in the upper interferometer mode $c$. After some simple algebra, one finds that $\mathcal{Q}$ is independent of $\phi$ and is determined by: \begin{equation} \mathcal{Q} = 4 \left[\braket{\Psi | (\hat{c}^\dagger\hat{c})^2 | \Psi} - \braket{\Psi | \hat{c}^\dagger\hat{c}| \Psi}^2\right]. \label{eqn:QFI} \end{equation} We wish to calculate $\mathcal{Q}$ for the particular probe $\ket{\Psi}=\hat{U}_{\mathrm{BS}}\ket{h_1,h_2}$ where $\hat{U}_{\mathrm{BS}}$ is the balanced beam splitter unitary transformation. The second term in Eq.~\eqref{eqn:QFI} is given by: \begin{align} \braket{\Psi| \hat{c}^\dagger\hat{c} |\Psi} &= \braket{h_1,h_2| \hat{U}_{\mathrm{BS}}^{\dagger}c^{\dagger}c \, \hat{U}_{\mathrm{BS}} |h_1,h_2}\\ &= \braket{h_1,h_2| \hat{U}_{\mathrm{BS}}^{\dagger}c^{\dagger}\hat{U}_{\mathrm{BS}}\,\hat{U}^{\dagger}_{\mathrm{BS}}c \, \hat{U}_{\mathrm{BS}} |h_1,h_2}\label{unitary}\\ &= \braket{h_1,h_2| \left(\frac{a^{\dagger} + b^{\dagger}}{\sqrt{2}}\right) \left(\frac{a+b}{\sqrt{2}}\right) |h_1,h_2}\label{BS_transformation}\\ &= \frac{1}{2}\braket{h_1,h_2| a^{\dagger}a + b^{\dagger}b |h_1,h_2}\\ &= \tfrac{1}{2} (h_1+h_2), \end{align} where in line~\eqref{unitary} we used the fact that $\hat{U}_{\mathrm{BS}}$ is unitary and in line~\eqref{BS_transformation} we transformed mode $c$ to the input modes $a$ and $b$. 
The first term in equation~\eqref{eqn:QFI} is calculated in a similar manner: \begin{align} \braket{\Psi| (\hat{c}^\dagger\hat{c})^2 |\Psi} &= \braket{h_1,h_2| \hat{U}_{\mathrm{BS}}^{\dagger}c^{\dagger}c c^{\dagger}c \, \hat{U}_{\mathrm{BS}} |h_1,h_2}\\ &= \braket{h_1,h_2|\left( \hat{U}_{\mathrm{BS}}^{\dagger}c^{\dagger}\hat{U}_{\mathrm{BS}}\,\hat{U}^{\dagger}_{\mathrm{BS}}c \, \hat{U}_{\mathrm{BS}}\right)^2 |h_1,h_2}\\ &= \braket{h_1,h_2| \left[\left(\frac{a^{\dagger} + b^{\dagger}}{\sqrt{2}}\right) \left(\frac{a+b}{\sqrt{2}}\right)\right]^2 |h_1,h_2}\\ &= \frac{1}{4}\braket{h_1,h_2| (a^{\dagger} + b^{\dagger})^2 (a+b)^2 + 2(a^{\dagger} + b^{\dagger})(a+b) |h_1,h_2}\\ &= \tfrac{1}{4}\left[h_1(h_1-1) + h_2(h_2-1) + 4h_1h_2 + 2(h_1+h_2)\right]\\ &= \tfrac{1}{4}(h_1^2 + h_2^2 + 4h_1h_2 + h_1 + h_2), \end{align} where in the fourth line we used the commutation relation $(a+b)(a^{\dagger}+b^{\dagger}) = (a^{\dagger}+b^{\dagger})(a+b) + 2$ to normal-order the operators. Therefore $\mathcal{Q}$ is given by \begin{equation} \mathcal{Q} = (h_1^2 + h_2^2 + 4h_1h_2 + h_1 + h_2) - (h_1+h_2)^2 = 2h_1h_2 + h_1 + h_2. \label{eqn:QFI_gHB} \end{equation} Eq.~\eqref{eqn:QFI_gHB} only applies when there are no losses in the system. In the presence of losses, the probe $\ket{\Psi}$ is transformed to a mixed state $\hat{\rho}$. Then, $\mathcal{Q}$ is calculated using \begin{equation} \mathcal{Q} = \mathrm{Tr}\Big\{\hat{\rho}(\phi)\hat{\Lambda}^2[\hat{\rho}(\phi)]\Big\} \label{eqn:QFI_general} \end{equation} where $\hat{\rho}(\phi) = e^{-i\hat{c}^\dagger\hat{c}\phi}\hat{\rho}e^{i\hat{c}^\dagger\hat{c}\phi}$ is the probe state after the phase shift $\phi$ and $\hat{\Lambda}[\hat{\rho}(\phi)]$ is a Hermitian operator called the ``symmetric logarithmic derivative'' defined implicitly via \begin{equation} \partial_\phi \hat{\rho}(\phi) = \frac{1}{2}\Big\{\hat{\Lambda}[\hat{\rho}(\phi)]\hat{\rho}(\phi) + \hat{\rho}(\phi)\hat{\Lambda}[\hat{\rho}(\phi)]\Big\}.
\label{eqn:implicit_SLD} \end{equation} We notice that by combining Eq.~\eqref{eqn:implicit_SLD} with Eq.~\eqref{eqn:QFI_general} we obtain an alternative equation for the QFI \begin{equation} \mathcal{Q} = \mathrm{Tr}\Big\{\partial_{\phi}\hat{\rho}(\phi)\Lambda[\hat{\rho}(\phi)]\Big\}. \end{equation} By writing $\hat{\rho}$ in its eigenbasis, $\hat{\rho}=\sum_i p_i\ket{e_i}\bra{e_i}$ and writing out the derivative $\partial_{\phi}\hat{\rho}(\phi) = ie^{-i\hat{c}^\dagger\hat{c}\phi}[\hat{\rho}, \hat{c}^\dagger\hat{c}]e^{i\hat{c}^\dagger\hat{c}\phi}$, it can be shown that $\mathcal{Q}$ is given by~\cite{RDD2015review} \begin{equation} \mathcal{Q} = \sum_{i,j} \frac{2\left|\braket{e_i|\hat{c}^\dagger\hat{c}|e_j}\right|^2(p_i-p_j)^2}{p_i+p_j} \label{eqn:qfi_mixed_state} \end{equation} which is independent of $\phi$. The sum is taken over all terms with a non-vanishing denominator. \subsection*{Supplementary Method 1: Optimal states} In the main text we compare the performance of our probes to the ``optimal states" which provide the largest possible quantum Fisher information given some amount of loss~\cite{dorner2009optimal,RDD2009optimal}. A general $N$-photon pure state inside the interferometer can be written in the Fock basis as \begin{equation} \ket{\Psi} = \sum_{n=0}^N \alpha_n \ket{n, N-n}. \label{eq:general_state} \end{equation} In the absence of loss, the optimal state is found by optimizing the coefficients $\{ \alpha_n \}$ to maximize the quantum Fisher information $\mathcal{Q} = 4 \left[\braket{\Psi | (\hat{c}^\dagger\hat{c})^2 | \Psi} - \braket{\Psi | \hat{c}^\dagger\hat{c}| \Psi}^2\right]$. In the presence of loss, $\ket{\Psi}$ turns into a mixture $\hat{\rho}$ which can be written in the following form \begin{equation} \hat{\rho} = \sum_j p_j \ket{\Psi_j}\bra{\Psi_j}, \end{equation} where $\ket{\Psi_j}$ do not have to be orthogonal. 
Due to the convexity of quantum Fisher information, $\mathcal{Q}'$ of $\hat{\rho}$ is upper bounded by \begin{equation} \mathcal{Q}' \leq \mathcal{Q} = 4 \sum_j p_j \left(\braket{\Psi_j| (\hat{c}^\dagger\hat{c})^2 | \Psi_j} - \braket{\Psi_j |\hat{c}^\dagger\hat{c} | \Psi_j}^2\right). \label{eq:QFI_bound} \end{equation} The bound is attained if the kets $\ket{\Psi_j}$ are orthogonal, which is the case for e.g. N00N states or if photon losses are present in only one interferometer mode. Applying Eq.~\eqref{eq:QFI_bound} to Eq.~\eqref{eq:general_state}, we obtain \begin{equation} \mathcal{Q} = 4 \bigg( \sum_{n=0}^N n^2 x_n - \sum_{l=0}^N \sum_{m=0}^{N-l} \frac{\big(\sum_{n=l}^{N-m} x_n n B_{lm}^n\big)^2}{\sum_{n=l}^{N-m} x_n B_{lm}^n}\bigg), \label{eqn:optimal} \end{equation} where $x_n = |\alpha_n|^2$, $B_{lm}^n \equiv \binom{n}{l}\binom{N-n}{m}\eta_{s_1}^n(\eta_{s_1}^{-1} - 1)^l \eta_{s_2}^{N-n}(\eta_{s_2}^{-1} - 1)^m$ and $\eta_{s_1}, \eta_{s_2}$ denote the transmittances in the signal modes. The optimal states are found by numerically maximizing $\mathcal{Q}$ over the probabilities $\{x_n\}$. Since $\mathcal{Q}$ is a concave function of $\{x_n\}$~\cite{RDD2009optimal}, any maximum is global. Although $\mathcal{Q}' < \mathcal{Q}$ [Eq.~\eqref{eq:QFI_bound}] when losses are present in both modes, the difference between the two quantities is small relative to the difference between the shot-noise limit and the Heisenberg limit~\cite{RDD2009optimal}. Due to this approximation, the optimized $\mathcal{Q}$ is a slight over-estimate of the true quantum Fisher information $\mathcal{Q}'$ of the optimal states. Fig.~1(b) in the main text shows $\mathcal{Q}$ of our probes and the optimal state as a function of equal transmissivity in the signal modes $\eta_{s_1} = \eta_{s_2} = \eta_s$ which varied from 0 to 1 in steps of 0.01.
The optimal state was calculated in \textsc{Mathematica} by maximizing over coefficients $\{x_n\}$ in Eq.~\eqref{eqn:optimal}, assuming they all sum up to 1 and are real and positive. We computed $\mathcal{Q}$ of our probes in Python using the following method. We started with two copies of the state in Eq.~(1) in the main text, inserted a beam splitter in each signal mode, and traced over the reflected port to model signal transmissivities $\eta_s$. The two matrices were then combined on the first interferometer beam splitter forming a four mode density matrix, which was then reduced to two modes by projectively measuring the two herald modes. Eigenvalues and eigenvectors were found for the two-mode density matrix inside the interferometer which were then used to calculate $\mathcal{Q}$ via Eq.~\eqref{eqn:qfi_mixed_state}. To compare the ideal performance of our probes with the optimal state, we exclude the effect of imperfect heralding on the former by using $\eta_{h_1}=\eta_{h_2}=1$ in the calculation of $\mathcal{Q}$. From the calculations described above we show that for $\eta_s \in (0, 0.5 \rangle$ the best approximation to the optimal state is given by the probe with $\Delta = 8$; for $\eta_s \in (0.5,0.58 \rangle$ by the probe with $\Delta = 6$; for $\eta_s \in (0.58,0.66 \rangle$ by the probe with $\Delta = 4$; for $\eta_s \in (0.66,0.69 \rangle$ by the probe with $\Delta = 2$; and for $\eta_s > 0.69$ by the probe with $\Delta = 0$, as shown by the colored line in Fig.~1(b). \subsection*{Supplementary Method 2: Modelling the measured rates} Here we describe the model $\tilde{\mathrm{pr}}(s_1,s_2,h_1,h_2,\phi)$ used to fit the experimentally measured rates. We model optical loss by placing fictitious beam splitters (see Fig.~\ref{fig:schematic_loss}) and tracing over the reflected modes. For now, we assume $\eta_{d_1}=\eta_{d_2}=1$. We will treat the effect of these detection losses at the end. 
The sources produce two-mode squeezed vacuum states: \begin{equation} \ket{\chi_i} = \sqrt{1-\lambda_i^2}\sum_{n=0}^{\infty}\lambda_i^n\ket{n,n}, \end{equation} where $i=1,2$ represent sources 1 and 2, respectively. The joint photon-number distribution of this two-mode squeezed vacuum state after the losses is given by: \begin{equation} \tilde{\mathrm{pr}}_i(x,y) = (1-\lambda_i^2)\sum_{n=\max{(x,y)}}^{\infty}{n \choose x}{n \choose y}\lambda^{2n}_i \eta_{h_i}^x \eta_{s_i}^y (1-\eta_{h_i})^{n-x} (1-\eta_{s_i})^{n-y}. \label{eqn:joint_photon_tmsv} \end{equation} The intuition for the expression above is as follows. Imagine that there are two detectors after the fictitious beam splitters that give the detection outcome $(x,y)$. The source must have produced at least $\max{(x,y)}$ photon pairs and perhaps some photons were lost (i.e. reflected at the beam splitters). The probability to produce $n$ pairs of photons is $(1-\lambda_i^2)\lambda^{2n}_i$. Having produced $n$ pairs, the probability to reflect $n-x$ [$n-y$] photons and transmit $x$ [$y$] photons in the herald [signal] mode is ${n \choose x}\eta_{h_i}^x(1-\eta_{h_i})^{n-x}$ [${n \choose y}\eta_{s_i}^y(1-\eta_{s_i})^{n-y}$]. In principle, $n$ can range up to $\infty$, but in practice it suffices to truncate this sum at some value where $(1-\lambda_i^2)\lambda^{2n}_i$ becomes small. In our numerics, we truncate the sum at $n=50$. \begin{figure} \caption{ Losses are modelled by placing fictitious beam splitters in all four modes before the interferometer and just before the signal detectors. Coefficients show the transmission of the beam splitters. } \label{fig:schematic_loss} \end{figure} If we obtain the herald outcome $(h_1,h_2)$, then the (unnormalized) state that is injected into the interferometer is given by: \begin{equation} \hat{\rho} = \sum_{m,n=0}^{\infty} \tilde{\mathrm{pr}}_1(h_1,m)\tilde{\mathrm{pr}}_2(h_2,n)\ket{m,n}\bra{m,n}.
\label{eqn:input_state_interf} \end{equation} Losses occurring inside the interferometer can be absorbed into $\eta_s$ or $\eta_d$ if they are equal in both interferometer modes, which was approximately the case in our experiment. Thus, the interferometer transformation can be described by a unitary operator $\hat{U}(\phi)$ which depends on the phase difference $\phi$ between both arms. The probability that we wish to calculate is given by: \begin{equation} \tilde{\mathrm{pr}}(s_1,s_2,h_1,h_2,\phi) = \braket{s_1,s_2|\hat{U}(\phi)\hat{\rho}\hat{U}^\dagger(\phi)|s_1,s_2}. \end{equation} Knowing that there are a total of $s_1+s_2$ photons before the interferometer, we can constrain $n=s_1+s_2-m$ and truncate the sum at $s_1+s_2$ in Eq.~\eqref{eqn:input_state_interf}. Thus, we obtain: \begin{equation} \tilde{\mathrm{pr}}(s_1,s_2,h_1,h_2,\phi) = \sum_{m=0}^{s_1+s_2}\tilde{\mathrm{pr}}_1(h_1,m)\tilde{\mathrm{pr}}_2(h_2,s_1+s_2-m) \left|\braket{s_1,s_2|\hat{U}(\phi)|m,s_1+s_2-m}\right|^2. \label{eqn:joint_prob_final} \end{equation} The matrix element $\left|\braket{s_1,s_2|\hat{U}(\phi)|m,s_1+s_2-m}\right|^2$ is derived in Ref.~\cite{leonhardt2010essential} and is given by: \begin{equation} \begin{split} \left|\braket{s_1,s_2|\hat{U}(\phi)|m,s_1+s_2-m}\right|^2 &= \frac{m!(s_1+s_2-m)!}{s_1!s_2!}(\sin{[\phi/2]})^{2(s_1+m)}(\cos{[\phi/2]})^{2(s_2-m)} \\ &\qquad{}\times \left(\sum_{k=0}^{s_1} {s_1 \choose k} {s_2 \choose s_2+k-m} (-1)^{k} \tan{[\phi/2]}^{-2k} \right)^2. \end{split} \end{equation} Alternatively, the matrix element can also be evaluated using Kravchuk polynomials~\cite{stobinska2019quantum}. The model for temporally distinguishable photons follows the same approach as above. While the derivation below focuses on temporal distinguishability, the same equations are valid to describe distinguishability in any other degree of freedom. We adopt a heuristic approach (e.g. 
as in Ref~\cite{birchall2016beating}) in which the temporal mode of the photons produced in the top source is decomposed into a component completely indistinguishable ($\parallel$) to the temporal mode of the bottom source photons as well as a component completely distinguishable ($\perp$). With this decomposition, Eq.~\eqref{eqn:input_state_interf} becomes: \begin{equation} \hat{\rho}^{dist} = \sum_{m,n=0}^{\infty} \sum_{l=0}^{m} {m \choose l} \mathcal{M}^{l} (1-\mathcal{M})^{m-l} \tilde{\mathrm{pr}}_1(h_1,m)\tilde{\mathrm{pr}}_2(h_2,n)\ket{l,n}_\parallel\bra{l,n}_\parallel \otimes \ket{m-l,0}_\perp\bra{m-l,0}_\perp. \label{eqn:input_state_interf_dist} \end{equation} where $\mathcal{M} \in [0,1]$ is a mode overlap parameter characterizing the distinguishability of the photons. For $\mathcal{M} = 0$ ($\mathcal{M} = 1$), the photons from top and bottom sources are completely distinguishable (indistinguishable). Since our detectors cannot resolve the time difference between $\perp$ and $\parallel$, they convolve the probabilities for the photons to have originated from either temporal mode. This measurement is described by the following incoherent sum of projectors: \begin{equation} \hat{\Pi} = \sum_{x=0}^{s_1}\sum_{y=0}^{s_2} \ket{s_1-x,s_2-y}_\parallel \bra{s_1-x,s_2-y}_\parallel \otimes \ket{x,y}_\perp \bra{x,y}_\perp. \label{eqn:povm_convolute} \end{equation} Many of the terms in the sum of Eq.~\eqref{eqn:povm_convolute} can be eliminated due to constraints on the photon numbers. For example, a total of $m-l$ photons are produced in mode $\perp$ and so $x+y=m-l$. Moreover, $s_1+s_2=m+n$. 
After applying these constraints, the final joint probability is given by: \begin{equation} \begin{split} \tilde{\mathrm{pr}}^{dist}(s_1,s_2,h_1,h_2,\phi) &= \mathrm{Tr}\left( \hat{\Pi} \hat{U}(\phi) \hat{\rho}^{dist} \hat{U}^\dagger(\phi) \right) \\ &= \sum_{m=0}^{s_1+s_2} \sum_{l=0}^m \sum_{x=\max(0, m-s_2)}^{\min(s_1,m-l)} \binom{m}{l} \mathcal{M}^l(1-\mathcal{M})^{m-l} \tilde{\mathrm{pr}}_1(h_1,m)\tilde{\mathrm{pr}}_2(h_2,s_1+s_2-m) \\ &\qquad{}\times \left|\braket{s_1-x,l+s_2-m+x|\hat{U}(\phi)|l,s_1+s_2-m}\right|^2 \\ &\qquad{}\times \left|\braket{x,m-l-x|\hat{U}(\phi)|m-l,0}\right|^2. \end{split} \label{eqn:joint_prob_final_dist} \end{equation} Finally, we can now consider the effect of the losses just before the detectors. These losses can be modelled with a transformation analogous to Eq.~\eqref{eqn:joint_photon_tmsv}. Applying this transformation on Eq.~\eqref{eqn:joint_prob_final}, we obtain: \begin{equation} \tilde{\mathrm{pr}}(s_1,s_2,h_1,h_2,\phi; \eta_{d_1}, \eta_{d_2}) = \sum_{j=s_1}^\infty\sum_{k=s_2}^\infty {j \choose s_1}{k \choose s_2} \eta_{d_1}^{s_1} \eta_{d_2}^{s_2} (1-\eta_{d_1})^{j-s_1} (1-\eta_{d_2})^{k-s_2} \tilde{\mathrm{pr}}(j,k,h_1,h_2,\phi). \label{eqn:final_joint_prob_wLoss} \end{equation} The same method is used for the distinguishable photons model, i.e. replace $ \tilde{\mathrm{pr}}(j,k,h_1,h_2,\phi)$ with $\tilde{\mathrm{pr}}^{dist}(j,k,h_1,h_2,\phi)$ in Eq.~\eqref{eqn:final_joint_prob_wLoss}. In our numerics, we truncate the sums in Eq.~\eqref{eqn:final_joint_prob_wLoss} to only include the effect of losing a few photons, which is a good approximation given the high efficiency of our number-resolving detectors. The equations above are evaluated numerically and fitted to the experimentally measured $\mathrm{pr}(s_1,s_2,h_1,h_2,\phi)$ by varying the fit parameters $\eta_{h_1},\eta_{h_2},\eta_{s_1},\eta_{s_2},\eta_{d_1},\eta_{d_2},\lambda_1,\lambda_2$.
Fitting is performed using the Python package \textsc{lmfit} with a least squares method. Note that, for the sake of increasing the speed of the fitting, we used $\mathcal{M}=1$ for all data except for the dashed lines in Fig.~4 of the main text where we used $\mathcal{M}=0$. Thus, the fit parameters generally did not correspond to the measured efficiencies and squeezing parameters (see below). Instead, the fitting procedure converged on larger $\lambda$ values and smaller $\eta$ values to emulate the effect of imperfect interference (i.e. reduced fringe visibility). We tested the full model (i.e. including $\mathcal{M}$) by fitting a subset of rates measured in the high gain regime and found the fit parameters: $\eta_{h_1} = 0.50$, $\eta_{h_2} = 0.50$, $\eta_{s_1} = 0.61$, $\eta_{s_2} = 0.50$, $\eta_{d_1} = 0.9$, $\eta_{d_2} = 0.99$, $\lambda_1 = 0.68$, $\lambda_2 = 0.68$, and $\mathcal{M}=0.73$. These efficiency values are within error to the measured values (see below), and $\mathcal{M}=0.73$ is roughly consistent with the measured $\sim 75 \%$ quantum interference visibility of the $(1,1,1,1)$ rate. \subsection*{Supplementary Method 3: Estimating efficiencies} We characterize the efficiency of our setup using a Klyshko-like method that is generalized to photon-number-resolving detection~\cite{worsley2009absolute}. We set the variable beam splitter to maximize reflection and measure the joint photon-number distribution $\mathrm{pr}_i(x,y)$ pumping one source at a time. We fit the measured $\mathrm{pr}_i(x,y)$ to $\tilde{\mathrm{pr}}_i(x,y)$ [see Eq.~\eqref{eqn:joint_photon_tmsv}] using three parameters: the PDC gain $\lambda_i$ and the total efficiency of the herald mode ($\eta_{h_i}$) and the signal mode ($\eta_{s_i}$). Note that the latter will also include the detection efficiency $\eta_{d_i}$. 
By repeating the procedure with five different pump powers, we find that $\eta_{s_1}\eta_{d_1} = 56 \pm 3 \%$ and $\eta_{h_1} = 47 \pm 1 \%$ for the first source, and $\eta_{s_2}\eta_{d_2} = 52 \pm 4 \%$ and $\eta_{h_2} = 51 \pm 1 \%$ for the second source. \subsection*{Supplementary Discussion 1: Comparing the performance of photon counting and the optimal measurement} \begin{figure} \caption{ Quantum Fisher information per photon, $\mathcal{Q}/\eta_s N$ [dashed lines], and the maximum classical Fisher information per photon, $\mathrm{max}[\tilde{\mathcal{F}}(\phi)]$ [continuous lines], for various $N=8$ probes. } \label{fig:comparing_qfi_and_cfi} \end{figure} Here we compare the performance of our measurement strategy, photon counting, to the optimal measurement strategy. In Fig.~\ref{fig:comparing_qfi_and_cfi}, we plot the classical Fisher information per photon obtained with photon counting for various $N=8$ probes, assuming ideal heralding ($\eta_{h_1}=\eta_{h_2}=1$) and balanced loss inside the interferometer ($\eta_{s_1}=\eta_{s_2}=\eta_s$). Since this quantity generally depends on $\phi$ when $\eta_s < 1$, we focus on the region with the largest phase sensitivity, i.e. $\mathrm{max}[\tilde{\mathcal{F}}(\phi)]$ [continuous lines]. In the same plot, we reproduce the quantum Fisher information curves from Fig.~1(b) normalized by the number of detected photons, i.e. $\mathcal{Q}/\eta_s N$ [dashed lines]. For $\eta_s =1$, the quantum and classical Fisher information are equal, which means that photon counting is the optimal measurement strategy, as expected~\cite{hofmann2009all}. For $\eta_s < 1$, photon counting is no longer optimal (except for $\Delta = N$) but still provides quantum-enhanced phase sensitivity for $\eta_s \gtrsim 0.55$. To the best of our knowledge, the optimal measurement strategy for lossy Holland-Burnett interferometry is not known. We note that homodyne and weak-field homodyne (i.e. 
combining a signal with local oscillator then performing photon counting) have been shown to be more loss-tolerant than photon counting using Gaussian probes~\cite{ono2010effects,oh2017practical}. However, it remains an open question whether these measurement strategies would be advantageous using our non-Gaussian probes in the presence of loss. \subsection*{Supplementary Discussion 2: Source imperfections} \begin{figure} \caption{ Characterization of the sources. (a) Typical intensity distribution of the waveguide spatial mode measured at 1550 nm. Its non-Gaussian features limit the fiber coupling efficiency to 70\%. Scale bar shows dimension in the object plane, i.e. at facet of waveguide. (b) and (c) Joint spectral intensities of sources 1 and 2, respectively. (d) Spectra of the four modes in the experiment. } \label{fig:source_charac} \end{figure} The main imperfections limiting the estimation precision are (i) photon loss and (ii) incomplete interference of the input photons. The main contribution to photon loss ($\sim$50\% end-to-end, see Supplementary Method 3) is the inefficiency caused by the spatial overlap between the waveguide mode and the fiber mode. The diffusion process used to produce KTP waveguides leads to non-Gaussian and asymmetric features in the waveguide spatial mode [Fig.~\ref{fig:source_charac}(a)] which limit the fiber coupling efficiency to $\sim$70\%. It may be possible to improve the spatial mode of the waveguide by optimizing the diffusion process~\cite{padberg2020characterisation} or employing ridge waveguides~\cite{volk2018fabrication}. The quantum interference visibility ($\sim 75\%$) is mainly limited by spectral mode mismatch between the signal modes as well as the spectral purity of the sources. We show the joint spectral intensities [Fig.~\ref{fig:source_charac}(b-c)] and marginal spectra [Fig.~\ref{fig:source_charac}(d)] of the sources measured using a time-of-flight spectrometer (resolution $\pm 0.1$nm). 
Although the joint spectral intensities appear decorrelated and the signal spectra are well overlapped, we suspect that non-uniform spectral phase (perhaps due to pump chirp or dispersion through optical elements) may have reduced the quantum interference visibility. Moreover, the spectral purity is degraded by uncorrelated background photons ($\sim 5\%$ of detected photons). These background photons are generated in a continuum of spectral modes and likely originate from processes where one photon from a down-converted pair is generated in an unguided waveguide mode~\cite{ecksteinthesis}, in which case their contribution could be reduced by minimizing propagation losses inside the waveguide. \subsection*{Supplementary Discussion 3: Recovering shot-noise limited performance} \begin{figure} \caption{ $\tilde{\mathcal{F}}_{5,0}(\phi)$ for the $\Delta = N = 5$ probe, measured with a single source blocked and with both sources unblocked. } \label{fig:SNL_limited} \end{figure} Using the $\Delta = N$ probe, all photons injected into the interferometer originate from one source. As such, imperfections such as spectral purity and mode matching should not affect the performance of the probe. The $\Delta = N$ probe is prepared by considering trials where e.g. $(h_1,h_2)=(N,0)$. However, even when $h_2=0$, this second source can still inject unwanted light into the interferometer due to losses in the herald modes. This generally degrades the performance of the $\Delta=N$ probe. Here we show that shot-noise limited performance is recovered by blocking one of the sources. In Fig.~\ref{fig:SNL_limited}, we plot $\tilde{\mathcal{F}}_{5,0}(\phi)$ for the $\Delta = N = 5$ probe calculated without post-selection. We performed the measurement with a single source blocked and with both sources unblocked. In the former case, we find that $\tilde{\mathcal{F}}_{5,0}(\phi)$ reaches $0.991\pm0.001$ at its highest point, demonstrating shot-noise limited performance. Ideally, $\tilde{\mathcal{F}}_{5,0}(\phi)$ should be flat with $\phi$. 
However, experimental imperfections such as imbalanced detector efficiency, detector dark counts ($\sim 1\%$), and imperfect interferometer visibility cause the dips in $\tilde{\mathcal{F}}_{5,0}(0)$ and $\tilde{\mathcal{F}}_{5,0}(\pm \pi)$ where the photons should ideally always exit the interferometer from one port. \subsection*{Supplementary Discussion 4: Parameters required to surpass shot-noise limit without post-selection} Here we provide an analysis on estimating the efficiency and quality of the two-mode squeezed vacuum sources required to surpass the shot-noise limit without post-selection. We focus on the $\Delta=6$ [i.e. $(h_1,h_2)=(7,1)$] probe as this is the most loss-tolerant $N=8$ probe that can surpass the shot-noise limit in our scheme. For simplicity, we assume equal efficiency $\eta$ in all four modes of the experiment ($\eta = \eta_{h_1} = \eta_{h_2} = \eta_{s_1} = \eta_{s_2}$) and equal PDC gain parameters $\lambda$. There are three main experimental parameters to consider: (i) the efficiency $\eta$, (ii) the distinguishability $\mathcal{M}$ of photons between the top and bottom sources, (iii) the PDC gain $\lambda$. Given these parameters, we estimate the sensitivity of the probe by calculating the classical Fisher information $\tilde{\mathcal{F}}_{7,1}(\phi)$ (per detected signal photon). Since this quantity generally depends on $\phi$, we focus on the region in phase with the largest possible sensitivity, i.e. $\max[\tilde{\mathcal{F}}_{7,1}(\phi)]$. \begin{figure} \caption{ (a) Effect of squeezing strength on imperfect heralding. We plot $\mathrm{max}[\tilde{\mathcal{F}}_{7,1}(\phi)]$ for different values of the PDC gain $\lambda$, assuming $\mathcal{M}=1$. (b), (c) $\mathrm{max}[\tilde{\mathcal{F}}_{7,1}(\phi)]$ as a function of $\eta$ and $\mathcal{M}$ for $\lambda = 0.75$ and $\lambda = 0.35$, respectively. } \label{fig:proximity_to_snl} \end{figure} We begin by focusing on the effect of the PDC gain $\lambda$ and assume $\mathcal{M}=1$ for now. As shown in Fig.~\ref{fig:proximity_to_snl}(a), a smaller $\lambda$ provides a larger $\mathrm{max}[\tilde{\mathcal{F}}_{7,1}(\phi)]$. 
This is because lowering $\lambda$ increases the photon-number purity of the heralded probe in the presence of loss in the heralding arms, i.e. it reduces the probability that the herald detectors under-counted the true number of photon pairs produced by the sources. As a reference, we include the perfect heralding case which is shown by the black line. While reducing $\lambda$ minimizes the detrimental effects of imperfect heralding, it also drastically decreases the heralding rate. For example, assuming $\eta=0.5$ and a 100 kHz laser repetition rate, $\lambda=0.75$ would produce a $N=8$ probe roughly once per second whereas $\lambda=0.35$ would produce such a probe only about once per day. Next we consider the combined effect of imperfect distinguishability and efficiency. In Fig.~\ref{fig:proximity_to_snl}(b) [(c)], we plot $\mathrm{max}[\tilde{\mathcal{F}}_{7,1}(\phi)]$ as a function of $\eta$ and $\mathcal{M}$ for $\lambda = 0.75$ [$\lambda=0.35$]. The approximate region achieved in our experiment is shown in yellow. Improvements in both $\eta$ and $\mathcal{M}$ are necessary to unconditionally surpass the shot-noise limit. As a reference point, a distinguishability of $\mathcal{M}\sim0.85$ was achieved in Ref.~\cite{stobinska2019quantum} using the same type of high-gain PDC sources as used in our experiment. With such a distinguishability, the efficiency would need to be improved to $\sim 80\%$ [$\sim 70\%$] when using $\lambda=0.75$ [$\lambda=0.35$]. \end{document}
\begin{document} \title{Integer programming for weakly coupled stochastic dynamic programs with partial information} \begin{abstract} This paper introduces algorithms for problems where a decision maker has to control a system composed of several components and has access to only partial information on the state of each component. Such problems are difficult because of the partial observations, and because of the curse of dimensionality that appears when the number of components increases. Partially observable Markov decision processes (POMDPs) have been introduced to deal with the first challenge, while weakly coupled stochastic dynamic programs address the second. Drawing from these two branches of the literature, we introduce the notion of weakly coupled POMDPs. The objective is to find a policy maximizing the total expected reward over a finite horizon. Our algorithms rely on two ingredients. The first, which can be used independently, is a mixed integer linear formulation for generic POMDPs that computes an optimal memoryless policy. The formulation is strengthened with valid cuts based on a probabilistic interpretation of the dependence between random variables, and its linear relaxation provides a practically tight upper bound on the value of an optimal history-dependent policy. The second is a collection of mathematical programming formulations and algorithms which provide tractable policies and upper bounds for weakly coupled POMDPs. Lagrangian relaxations, fluid approximations, and almost sure constraints relaxations enable us to break the curse of dimensionality. We test our generic POMDP formulations on benchmark instances from the literature, and our weakly coupled POMDP algorithms on a maintenance problem. Numerical experiments show the efficiency of our approach. 
\end{abstract} \keywords{Partially Observable Markov Decision Process, Mixed integer linear program, Probability distribution, Marginal probabilities, Weakly coupled dynamic programs} \section{Introduction} \label{sec:intro} Many real world situations involve the control of a stochastic system composed of $M$ components on which the decision maker has only partial information. Typically, at each time-step, a component $m$ in $[M]$ is in a state $s^m$ in a state space $\calX_S^m$. The decision maker does not know the exact state $s^m$ of component $m$, but has access to a noisy observation $o^m$ providing only partial information on $s^m$. Based on these observations, the decision maker chooses the actions $a^m$ that should be performed on each component of the system. This decision typically involves allocating some limited resources between the different components. Each component then transitions to the new state $s'^m$ with a given probability, and the decision maker receives a reward $\sum_{m} r^m(s^m,a^m,s'^m)$ where $r^m$ is the individual reward of component $m$. The goal of the decision maker is to find a policy $\bfdelta$ that prescribes which actions $(a^m)_{m \in [M]}$ to take given observations $(o^m)_{m \in [M]}$ in order to maximize the expected total reward over a finite horizon. Typical applications are (1) predictive maintenance problems, (2) multi-armed restless bandits and their applications to clinical trials, (3) inventory problems with inventory records inaccuracy, and (4) nurse assignment problems. See Appendix~\ref{app:examples} for a detailed treatment of these applications. As an illustration, consider the case of airplane predictive maintenance. An airplane regularly undergoes maintenance at slots known in advance. At the beginning of each maintenance slot, the decision maker must decide which equipment it will maintain given its limited maintenance resources (machines, skilled technicians, etc.). 
On recent generations of airplanes, sensor signals are recorded during flights, and a score that gives a noisy evaluation of the equipment wear is deduced from these signals. The reward is in terms of avoided costs. Each maintenance has a given cost. And if a component fails between two maintenance slots, the plane cannot take-off, and its flights of the day must be canceled, leading to huge client refunding costs. Based on the scores observed on each component, its maintenance resources, and an evaluation of the failure risks, the decision maker chooses which equipment to maintain. When the system has only a single component whose state is observed by the decision maker, the problem is naturally modeled as a \emph{Markov Decision Process} (MDP) and well solved using dynamic programming algorithms. Controlling a partially observed multi-equipment system is more difficult because (1) the decision maker has only access to partial information, and (2) the system is composed of several components. Let us briefly recap how these difficulties have been addressed in the literature. The problem of controlling a system with only partial information is naturally modeled as a \emph{Partially Observable Markov Decision Process} (POMDP) over a finite horizon. As detailed in the survey of~\citet{Cassandra2003ASO}, a wide range of applications have been modeled as POMDPs, among which maintenance problems~\citep{Eckles1968} or clinical decision making~\citep{Denton2018}. On such partially-observed systems, POMDP approaches typically provide lower cost policies than MDP approaches. This performance comes from the statistical models used to describe reality: Markov chains for MDP, versus hidden Markov models (HMM) for POMDP. When data is scarce and only small dimensional model can be learned, HMM provide a much needed additional flexibility. Finding an optimal policy of the POMDP problem over a finite horizon is known to be PSPACE-complete \citep{Tsitsiklis1987}. 
State-of-the-art approaches rely on the fact that a POMDP is equivalent to a continuous state MDP in the \emph{belief state} space \citep[Theorem 4]{Eckles1968}. The belief state is the posterior probability distribution on the state space given all the past decisions and observations. Leveraging this result, several dynamic programming algorithms have been derived for POMDPs with finite \citep{Sondik1973} or infinite horizon \citep{Sondik1978}. More details about these kinds of algorithms can be found for instance in the survey of \citet{POMDPMonahan1982}, or more recently the book of \citet{Krishnamurthy2016}. While the exact algorithms become quickly intractable when the size of the state and observation spaces grows, some approximate algorithms have shown significant improvements. \citet{Hauskrecht2000} and \citet{Pineau2008} survey various approximation methods for POMDPs. For POMDP problems with a finite horizon, \citet{Walraven2019} recently proposed an effective algorithm with guarantees. \citet{Aras2007} propose a mixed integer programming approach to exactly solve POMDP problems over a finite horizon. However, solving such a program is computationally expensive even for small instances. Part of this difficulty comes from the fact that an optimal POMDP policy depends on the whole history of observations and actions. Using a memoryless policy leads to a more tractable problem, still NP-hard \citep{Littman94memoryless} but no longer PSPACE-hard. And there is a broad class of systems where memoryless policies perform well \citep{Barto1983,Li2011}, even if it is not the case on some pathological cases \citep{Littman94memoryless}. Furthermore, our problem suffers from the \emph{curse of dimensionality} (like MDPs) since the size of the spaces grows exponentially with the number of components. MDPs are in theory well-solved by dynamic programming algorithms, but the curse of dimensionality makes such approaches impractical to control systems with several components. 
Approximate dynamic programming algorithms \citep{Bertsekas2007,Farias01thelinear,Powell2011,Meuleau1998} provide generic methodologies to address this curse. In another paradigm, using reinforcement learning approaches \citep{Sutton1998} to build such algorithms is an active research area in the machine learning community. In the operations research literature, the notion of weakly coupled dynamic programs \citep{Meuleau1998,hawkins2003langrangian,Adelman2008} and decomposable Markov decision processes \citep{bertsimas2016decomposable} have been introduced in order to capture the specific structure of multi-component systems, where component-specific actions on each component must be coordinated, or a single action affects all components, respectively. And \citet{Walraven2018} recently introduced a different POMDP problem with multiple components where the resource constraints enforce policies to induce a maximum expected resource consumption over the finite time horizon. Such models have been applied to stochastic inventory routing with limited vehicle capacity \citep{Kleywegt2002}, stochastic multi-product dispatch problems \citep{Papadaki2003}, scheduling problems \citep{Whittle1988}, resource allocation \citep{Gittins1979}, revenue management \citep{Topaloglu2009} among others \citep{hawkins2003langrangian}. The specific structure of these problems can then be leveraged in mathematical programming formulations that use approximate value functions \citep{de2003linear} or approximate moments \citep{bertsimas2016decomposable} as variables, notably using the Lagrangian relaxation of non-anticipativity constraints \citep{Brown2010} or of the linking constraints \citep{Ye2014}. All the mathematical programs in this paper can be formulated either using moment variables (or marginal probabilities) or value function variables. Since they lead to better numerical results, we include in the paper the formulations with moment variables. 
The value-function formulations can be found in the PhD dissertation of the first author~\citep{VCohenThesis2020}. When the decision maker has only access to a partial observation for each component, each subsystem is a POMDP and all the subsystems are linked by resource constraints on the actions taken on each component. This structure requires extending the notion of weakly coupled stochastic dynamic program to the notion of \emph{weakly coupled POMDP}, which has been introduced by \citet{Parizi2019} on infinite horizon problems. \citet{Parizi2019,Abbou2019} both propose approximate policies based on the MDP relaxations. Numerical experiments on finite horizon problems in Appendix~\ref{sub:app_nums:implicit_policy} show that taking into account the fact that observations are partial improves the performance of such algorithms. For multi-armed bandits, the optimization problem has been introduced by \citet{MeshramMG18} as \emph{restless partially observable multi-armed bandit} and new index policies have been proposed recently to solve it \citep{MeshramMG18,KazaMMM2019}. However, the algorithms and results proposed hold only when the state spaces and observation spaces contain at most two elements, and the algorithms do not scale to larger spaces. There is therefore a need for more efficient algorithms for weakly coupled POMDPs that address both the partially observable aspect and the curse of dimensionality. To the best of our knowledge, mathematical programming techniques have not been exploited for the control of partially observed systems with multiple components. The purpose of this paper is to show that such techniques lead to practically efficient algorithms for such systems, both in the single and the multi-component cases. Our first contributions are mathematical programming formulations for generic POMDPs. 
\begin{enumerate} \item We propose an exact Non-Linear Program (NLP) formulation and an exact Mixed Integer Linear Program (MILP) formulation for POMDP problems with memoryless policies. When solved with off-the-shelf solvers, they provide a practically efficient solution approach. Numerical experiments on instances from the literature show that, on several classes of problems over a finite horizon among which maintenance problems, our MILP approach provides better solutions than a state-of-the-art POMDP algorithm such as SARSOP (which is not restricted to memoryless policies). \item We introduce an extended formulation with valid inequalities that improve the resolution of our MILP. Such inequalities come from a probabilistic interpretation of the dependence between random variables. Numerical experiments show their efficiency. \item We prove that the linear relaxation of our MILP is equivalent to the MDP relaxation of the POMDP, which corresponds to the case where the action depends on the current state. In addition, we show that the strengthened linear relaxation is a (practically tight) upper bound of the optimal value of POMDP with history-dependent policies. \end{enumerate} We then show how, as for the fully observed cases, mathematical programming techniques can successfully exploit the specific structure of multi-component systems to address the curse of dimensionality that affects them. More precisely, we introduce several formulations and algorithms for weakly coupled POMDPs with memoryless policies that leverage our formulations and valid inequalities for generic POMDPs. \begin{enumerate}[resume] \item Our main contribution is a history-dependent policy for weakly coupled POMDPs, each value of which is defined through the resolution of a MILP approximation. 
To the best of our knowledge, it is the first algorithm that can address large scale instances over finite horizon: While approaches in the literature considered instances with $10$ components, each of them with $2$ states, we can provide policies with at most $10\%$ optimality gap on instances with $20$ components and $5$ states within a reasonable computation time. \item We introduce tractable upper bounds on the value of an optimal memoryless policy and on the value of an optimal history-dependent policy. The first one is a Lagrangian relaxation bound computed thanks to a column generation algorithm, and the second comes from an extended linear programming formulation. These bounds enable us to compute the optimality gaps mentioned in the previous point. \item We provide a shared hierarchy of lower and upper bounds on the value of optimal memoryless and history-dependent policies, decomposable policies, the value of our formulations, and the value of natural information relaxations. \item Detailed numerical experiments on maintenance and multi-armed bandits problems evaluate the different algorithms and bounds proposed. \end{enumerate} The paper is organized as follows. Section~\ref{sec:problem} recalls the formal definition of a POMDP and introduces the notion of weakly coupled POMDP. Section~\ref{sec:pomdp} introduces our mathematical programming formulations (NLP and MILP) for POMDP with memoryless policies, the valid inequalities and the theoretical study of the linear relaxations. Section~\ref{sec:wkpomdp} introduces our heuristic, which is based on an approximate integer formulation, and also describes different mathematical programming formulations, including the Lagrangian relaxation, that give useful bounds for weakly coupled POMDPs. Finally, Section~\ref{sec:num} summarizes our numerical experiments. 
The proofs of all the theorems in Sections~\ref{sec:pomdp} and~\ref{sec:wkpomdp} are respectively available in Appendices~\ref{app:pomdp} and~\ref{app:wkpomdp:proofs}. \section{Weakly coupled POMDP} \label{sec:problem} \subsection{Background on POMDPs} \label{sub:problem:background_pomdp} A POMDP is a multi-stage stochastic optimization problem defined as follows. It models on a horizon $T$ in $\bbZ_+$ the evolution of a system. At each time $t$ in $[T]$, the system is in a random state $S_t$, which takes value in a finite state space $\calX_S$. The system starts in state $s$ in $\calX_S$ with probability $p(s) := \bbP\left(S_1 = s \right)$. At time $t$, the decision maker does not have access to $S_t$, but observes $O_t$, whose value belongs to a finite state space $\calX_{O}$. When the system is in state $S_t=s$, it emits an observation $O_t=o$ with probability $\bbP(O_t=o | S_t=s) := p(o|s)$. Then, the decision maker takes an action $A_t$, which belongs to a finite space $\calX_A$. Given an action $A_t=a$, the system transitions from state $S_t=s$ to state $S_{t+1} = s'$ with probability $\bbP\left(S_{t+1}=s' | S_t=s, A_t=a \right) := p(s'|s,a)$, and the decision maker receives the immediate reward $r(s,a,s')$, where the reward function is defined as a real valued function $r \colon \calX_S \times \calX_A \times \calX_S \rightarrow \bbR$, which we will also view as a vector $\bfr = \left(r(s,a,s')\right) \in \bbR^{\calX_S\times\calX_A\times\calX_S}$. We denote by $\pfrak$ the vector of probabilities $\pfrak = \left(p(s), p(o|s), p(s'|s,a) \right)_{\substack{s,s' \in \calX_S, o\in \calX_O \\ a \in \calX_A}}$. A POMDP is parametrized by the tuple $\left(\calX_S,\calX_O,\calX_A, \pfrak,\bfr \right)$. \paragraph{POMDP problem.} Let $\left(\calX_S,\calX_O,\calX_A, \pfrak,\bfr \right)$ be a POMDP. 
Given a finite horizon $T\in \bbZ_{+}$, the choices made by the decision maker are modeled using a policy $\bfdelta=(\delta^1, \ldots, \delta^T)$, where $\delta^t$ is the conditional probability distribution of taking action $A_t$ at time $t$ given the history of observations and actions $H_t = \left(O_{1},A_{1},\ldots,A_{t-1},O_t\right)$ in $\calX_{H}^{t}:= \left(\calX_O \times \calX_A \right)^{t-1} \times \calX_O$, i.e., $\delta_{a|h}^t := \bbP(A_t=a|H_t=h),$ for any $a$ in $\calX_A$ and $h$ in $\calX_H^{t}$. We denote by $\Deltahis$ the set of policies \begin{align*} \displaystyle \Deltahis = \bigg\{ \bfdelta \in \bbR^{\sum_{t=1}^T\calX_H^t \times \calX_A} \colon \sum_{a \in \calX_A} \delta^t_{a|h} = 1 \ \mathrm{and} \ \delta^t_{a|h} \geq 0, \forall h \in \calX_H^t, a \in \calX_A, t \in [T] \bigg\}. \end{align*} In $\Delta_{\mathrm{his}}$, ``his'' refers to policies that take into account the history of observations and actions, as opposed to memoryless policies which will be introduced below. A policy $\bfdelta \in \Deltahis$ leads to the probability distribution $\bbP_{\bfdelta}$ on $\left(\calX_S \times \calX_O \times \calX_A \right)^T \times \calX_S$ such that \begin{align*} \bbP_{\bfdelta} \left((S_t = s_t, O_t=o_t, A_t=a_t)_{1 \leq t \leq T}, S_{T+1} = s_{T+1}\right) = p(s_1) \prod_{t=1}^T p(o_t | s_t) p(s_{t+1} \vert s_t, a_t ) \delta^t_{a_t|h_t}, \end{align*} where $h_t = (o_1,a_1,\ldots,a_{t-1},o_t)$. We denote by $\bbE_{\bfdelta}$ the expectation according to $\bbP_{\bfdelta}$. The goal of the decision maker is to find a policy $\bfdelta$ in $\Deltahis$ maximizing the expected total reward over the finite horizon $T$. The POMDP problem is: \begin{equation}\label{pb:POMDP_perfectRecall} \max_{\bfdelta \in \Deltahis} \bbE_{\bfdelta} \bigg[ \sum_{t=1}^{T}r(S_t,A_t,S_{t+1})\bigg] \tag{$\rm{P}_{\rm{his}}$} \end{equation} It is known that~\ref{pb:POMDP_perfectRecall} is PSPACE-hard \citep{Tsitsiklis1987}. 
\paragraph{POMDP problem with memoryless policies.} Let $\left(\calX_S,\calX_O,\calX_A, \pfrak,\bfr \right)$ be a POMDP. Given a finite horizon $T \in \bbZ_{+}$, a memoryless policy is a vector $\bfdelta=(\delta^1, \ldots, \delta^T)$, where $\delta^t$ is the conditional probability distribution at time $t$ of action $A_t$ given observation $O_t$, i.e., $\delta_{a|o}^t := \bbP(A_t=a|O_t=o)$ for any $a$ in $\calX_A$ and $o$ in $\calX_O$. Such policies are said to be memoryless because the choice of $A_t$ only depends on the current observation $O_t$, in contrast with the history of observations and actions $H_t$. We denote by $\Deltaml$ the set of memoryless policies, where ``ml'' refers to memoryless \begin{align}\label{eq:problem:def_policy_set} \displaystyle \Deltaml = \bigg\{ \bfdelta \in \bbR^{T \times \calX_A \times \calX_O } \colon \sum_{a \in \calX_A} \delta^t_{a|o} = 1 \ \mathrm{and} \ \delta^t_{a|o} \geq 0,\enskip \forall o \in \calX_O, \enskip a \in \calX_A \bigg\}. \end{align} With an abuse of notation, the definition of $\Deltaml$ ensures that $\Deltaml \subseteq \Deltahis$ in the sense that for any policy $\bfdelta$ in $\Deltaml$, we can define a policy $\tilde{\bfdelta}$ in $\Deltahis$ with the same values, i.e., such that $\tilde{\delta}_{a|h}^t := \delta_{a|o}^t$ for any $a$ in $\calX_A$, $o$ in $\calX_O$, $h$ in $\calX_H^t$ and $t$ in $[T]$. This time, the policy $\bfdelta \in \Deltaml$ endows $\left(\calX_S \times \calX_O \times \calX_A \right)^T \times \calX_S$ with the probability distribution $\bbP_{\bfdelta}$ characterized by \begin{align}\label{eq:problem:probability_distrib} \bbP_{\bfdelta} \left((S_t = s_t, O_t=o_t, A_t=a_t)_{1 \leq t \leq T}, S_{T+1} = s_{T+1}\right) = p(s_1) \prod_{t=1}^T p(o_t | s_t) p(s_{t+1} \vert s_t, a_t ) \delta^t_{a_t|o_t}, \end{align} and the decision maker now seeks a memoryless policy $\bfdelta$ in $\Deltaml$ maximizing the expected total reward over the finite horizon $T$. 
The POMDP problem with memoryless policies is therefore \begin{equation}\label{pb:POMDP} \max_{\bfdelta \in \Deltaml} \bbE_{\bfdelta} \bigg[ \sum_{t=1}^{T}r(S_t,A_t,S_{t+1})\bigg]. \tag{$\rm{P}_{\rm{ml}}$} \end{equation} Problem~\ref{pb:POMDP} is NP-hard \citep{Littman94memoryless}. In Section~\ref{sec:num}, we provide numerical experiments showing that memoryless policies perform well on different kinds of problems modeled as POMDPs. \subsection{Problem definition of weakly coupled POMDP} \label{sub:problem:wkpomdp} A weakly coupled POMDP models a system composed of $M$ components, each of them evolving independently as a POMDP. Let $S_t^m$ and $O_t^m$ be random variables that represent respectively the state and the observation of component $m$ at time $t$, and that belong respectively to the state space $\calX_S^m$ and the observation space $\calX_O^m$ of component $m$. Each component is assumed to evolve individually as a POMDP. We denote by $\pfrak^m$ and $\bfr^m$ respectively the probability distributions and the immediate reward functions of component $m$. We denote by $\bfS_t = \big( S_t^1, \ldots, S_t^M \big)$ and $\bfO_t = \big( O_t^1, \ldots, O_t^M \big)$ the state and the observation of the full system at time $t$, which lie respectively in the \emph{state space} $\calX_S = \calX_S^1 \times \cdots \times \calX_S^M$ and the \emph{observation space} $\calX_O = \calX_O^1 \times \cdots \times \calX_O^M$. The spaces $\calX_S$ and $\calX_O$ represent the state space and the observation space of the full system. 
We assume that the action space $\calX_A$ can be written \begin{align}\label{eq:problem:def_form_action_space} \calX_A = \left\{\mathbf{a} \in \calX_A^1 \times \cdots \times \calX_A^M \colon \sum_{m =1}^M \mathbf{D}^m(a^m) \leq \mathbf{b} \right\}, \end{align} where $\calX_A^m$ is the individual action space of component $m$, and $\mathbf{D}^m : \calX_A^m \rightarrow \bbR^q$ is a given function for each component $m$ in $[M]$, and $\mathbf{b} \in \bbR^q$ is a given vector for some finite integer $q$. We assume in the rest of the paper that the vector $\bfb$ is non-negative. This is without loss of generality since an instance with a generic $\bfb$ can be turned into an equivalent one with non-negative $\bfb$: It suffices to set $\bfD'^m := \bfD^m-\frac{k}{M}$ for every $m$ in $[M]$, $\bfb' := \bfb - k$ where $k=\min_{i \in [q]} b_i$ and $b_i$ is the $i$-th coordinate of vector $\bfb$, and $\calX_A = \left\{\bfa \in \calX_A^1 \times \cdots \times \calX_A^M \colon \sum_{m=1}^M \bfD'^m(a^m) \leq \bfb' \right\}$. Each component is assumed to evolve independently, hence the joint probability of emission factorizes as \begin{equation}\label{eq:problem:factor_emission} \bbP(\bfO_t = \bfo | \bfS_t = \bfs) =\prod_{m=1}^M p^m(o^m | s^m), \end{equation} and the joint probability of transition factorizes as \begin{equation}\label{eq:problem:factor_transition} \bbP(\bfS_{1} = \bfs) = \prod_{m=1}^M p^m(s^m) \quad \text{and} \quad \bbP(\bfS_{t+1} = \bfs' | \bfS_t=\bfs, \bfA_t=\bfa) = \prod_{m=1}^M p^m(s'^m |s^m, a^m), \end{equation} for all $t$ in $[T]$. In addition, the reward is assumed to decompose additively \begin{equation}\label{eq:problem:factor_rewards} r(\bfs,\bfa,\bfs') = \sum_{m=1}^M r^m(s^m, a^m, s'^m). 
\end{equation} Hence, the weakly coupled POMDP problem with memoryless policies is the following: \begin{equation}\label{pb:decPOMDP_wc} \max_{\bfdelta \in \Deltaml} \bbE_{\bfdelta} \bigg[ \sum_{t=1}^{T}r(\bfS_t,\bfA_t,\bfS_{t+1}) \bigg] \tag{{$\rm{P}_{\rm{ml}}^{\rm{wc}}$}} \end{equation} where the expectation is taken according to $\bbP_{\bfdelta}$ defined in~\eqref{eq:problem:probability_distrib}. Similarly, we define the weakly coupled POMDP problem with history-dependent policies $\rm{P}_{\rm{his}}^{\rm{wc}}$ by replacing $\Deltaml$ with $\Deltahis$. Note that unless $\calX_A = \emptyset$ there always exists a feasible policy of~\ref{pb:decPOMDP_wc}. A weakly coupled POMDP is fully parametrized by $\left((\calX_S^m,\calX_O^m,\calX_A^m,\pfrak^m,\bfr^m,\bfD^m)_{m\in [M]}, \bfb\right)$. Remark that a weakly coupled POMDP~\ref{pb:decPOMDP_wc} is a POMDP~\ref{pb:POMDP} with state space $\calX_S = \calX_S^1 \times \cdots \times \calX_S^M$, observation space $\calX_O = \calX_O^1 \times \cdots \times \calX_O^M$, and the action space $\calX_A$ defined in~\eqref{eq:problem:def_form_action_space}. \begin{rem}\label{rem:problem:def_emission_proba} In the definition of POMDP, we could have considered a variant where the observation $O_t$ may depend on $A_{t-1}$ given $S_t$ and the emission probability distribution becomes $\bbP(O_t=o|A_{t-1}=a',S_t=s):= p(o|a',s)$. All the mathematical programming formulations and theoretical results in this paper can be extended to this case. We choose to consider the case above to lighten the notation. \end{rem} \begin{rem}\label{rem:problem:decomposablePOMDP} \citet{bertsimas2016decomposable} consider the notion of \emph{decomposable MDP} as an alternative to weakly coupled MDP. The only difference is that the action space is generic and does not decompose along the components. We can define a similar notion of decomposable POMDP. 
It turns out that using a transformation similar to the one introduced by \citet[Sec 4.3]{bertsimas2016decomposable}, we can prove that the frameworks of weakly coupled and decomposable POMDPs are equivalent. Indeed, given a generic action space $\calX_A$, it suffices to define the set of individual action spaces as $\calX_A^m = \calX_A$ for each component $m$ in $[M]$. For any $(a^1,\ldots,a^M) \in \calX_A^1 \times \cdots \times \calX_A^M$, we enforce the following linking constraints $a^m = a^{m+1}$ for all $m$ in $[M-1]$. Therefore, the action space can be written $\big\{\bfa \in \calX_A^1 \times \cdots \times \calX_A^M \colon a^m = a^{m+1}, \ \forall m \in [M-1] \big\}$, which has the requested form~\eqref{eq:problem:def_form_action_space}. In this paper, we choose the weakly coupled POMDP framework because we want to exploit in our algorithms the explicit structure it presupposes on the action space~\eqref{eq:problem:def_form_action_space}. \end{rem} \subsection{Example of application to a maintenance problem} \label{sub:problem:example} We now illustrate how the maintenance problem mentioned in the introduction can be cast as a weakly coupled POMDP. As mentioned in the introduction, Appendix~\ref{app:examples} describes three more examples, and more POMDPs applications can be found for instance in the survey of~\citet{Cassandra2003ASO}. The system is composed of $M$ components. The time discretization $t \in [T]$ corresponds to the different maintenance slots. The decision maker chooses which equipment to maintain at the beginning of each of these slots. We model the degradation of component $m$ using a state $S_t^m$, which belongs to a finite state space $\calX_S^m$ and is not observed by the decision maker. We assume that there is a \emph{failure state} $s^{m,F}$ in $\calX_S^m$ for each component $m$ in $[M]$, corresponding to its most critical degradation state. Component $m$ starts in state $s$ with probability $p^m(s)$. 
At each time $t$, component $m$ is in state $S_t^m = s$, and it emits an observation $O_t^m=o$ with probability $p^m(o|s)$. Then, the decision maker takes an action $\mathbf{A}_t$ in $\{0,1\}^M$ where $A_t^m$ is a binary variable equal to $1$ when component $m$ is maintained. At each maintenance slot, the decision maker can maintain at most $K$ components. Hence, we write the action space $\calX_A$ as follows \begin{align}\label{eq:int_prog_weakPOMDP:problem:pred_maint:def_action_space} \calX_A = \left\{\bfa=(a^1,\ldots,a^M) \in \{0,1\}^M \colon \sum_{m=1}^M a^m\leq K \right\}. \end{align} Therefore, $\calX_A$ contains only one scalar constraint ($q=1$) and satisfies~\eqref{eq:problem:def_form_action_space} by setting $D^m(a) = a$ for every $a \in \{0,1\}$ and $m \in [M]$, and $b = K$. We assume that each component $m$ evolves independently from state $S_t^m =s$ to state $S_{t+1}^{m}=s'$ with probability $p^m(s'|s,a)$, and the decision maker receives reward $r^m(s, a, s')$. In addition, we assume that when a component is maintained, it behaves like a new one, i.e., $p^m(s'|s,1) =p^m(s')$, for any $s,s'$ in $\calX_S^m$, and the conditional probabilities factorize as~\eqref{eq:problem:factor_emission} and~\eqref{eq:problem:factor_transition}. Each component $m$ has a maintenance cost $C_R^m$ and a failure cost $C_F^m$. The individual immediate reward function can be written $r^m(s,a,s') = -\mathds{1}_{s^{m,F}}(s') C_F^m - \mathds{1}_{1}(a) C_R^m$, for any $s,s' \in \calX_S^m$ and $a \in \{0,1\}$. We assume that the reward decomposes additively as~\eqref{eq:problem:factor_rewards}. Given a finite horizon $T$, the predictive maintenance problem with capacity constraints consists in finding a policy in $\Deltaml$ that solves~\ref{pb:decPOMDP_wc} with $\calX_A$, $(\pfrak^m)_{m \in[M]}$ and $(\bfr^m)_{m\in [M]}$. 
\section{Integer programming for POMDPs} \label{sec:pomdp} In this section, we provide an integer formulation that gives an optimal memoryless policy for~\ref{pb:POMDP} as well as valid inequalities. We are given a POMDP $(\calX_S, \calX_O, \calX_A, \pfrak, \bfr)$ and a finite horizon $T \in \bbZ_{+}$. We denote by $v_{\rm{ml}}^*$ the optimal value of \ref{pb:POMDP}. \subsection{An exact Nonlinear Program} \label{sub:pomdp:NLP} We introduce the following nonlinear program (NLP) with a collection of variables\\ $\bfmu = \left((\mu_s^1)_{s}, \left((\mu_{soa}^t)_{s,o,a},(\mu_{sas'}^t)_{s,a,s'}\right)_{t}\right)$, $\bfdelta = \left( \left(\delta_{a|o}^t\right)_{a,o} \right)_{t}$. \begin{subequations}\label{pb:pomdp:NLP_pomdp} \begin{alignat}{2} \max_{\bfmu, \bfdelta} \enskip & \sum_{t=1}^T \sum_{\substack{s,s' \in \calX_S \\ a \in \calX_A}} r(s,a,s') \mu_{sas'}^t & \quad & \label{eq:pomdp:NLP_obj_function}\\ \mathrm{s.t.} \enskip & \sum_{o \in \calX_O} \mu_{soa}^t = \sum_{s' \in \calX_S} \mu_{sas'}^t & \quad \forall s \in \calX_S, a \in \calX_A, t \in [T] \label{eq:pomdp:NLP_consistency_sa}\\ & \sum_{s \in \calX_S, a \in \calX_A} \mu_{sas'}^t = \sum_{o \in \calX_O, a \in \calX_A} \mu_{s'oa}^{t+1} & \quad \forall s' \in \calX_S, t \in [T] \label{eq:pomdp:NLP_consistency_s} \\ & \mu_s^1 = \sum_{o \in \calX_O, a \in \calX_A} \mu_{soa}^{1} & \quad \forall s \in \calX_S \label{eq:pomdp:NLP_consistency_initial} \\ & \mu_s^1 = p(s) & \quad \forall s \in \calX_S \label{eq:pomdp:NLP_initial2} \\ & \mu_{sas'}^{t} = p(s'|s,a) \sum_{s'' \in \calX_S} \mu_{sas''}^t & \quad \forall s,s' \in \calX_S, a \in \calX_A, t \in [T] \label{eq:pomdp:NLP_indep_state}\\ &\mu_{soa}^t = \delta^t_{a|o} p(o|s) \sum_{o' \in \calX_O,a' \in \calX_A} \mu_{so'a'}^t & \quad \forall s \in \calX_S, o \in \calX_O, a \in \calX_A, t \in [T] \label{eq:pomdp:NLP_indep_action} \\ & \bfdelta \in \Deltaml, \bfmu \geq 0 \label{eq:pomdp:NLP_constraint_policy} \end{alignat} \end{subequations} Given a 
policy $\bfdelta \in \Deltaml$, we say that $\bfmu$ is the vector of \emph{moments} of the probability distribution $\bbP_{\bfdelta}$ induced by $\bfdelta$ when \begin{subequations}\label{eq:pomdp:def_moments} \begin{alignat}{2} &\mu_s^1 = \bbP_{\bfdelta}(S_1=s), & & \quad \forall s \in \calX_S \\ &\mu_{soa}^t = \bbP_{\bfdelta}(S_t=s, O_t=o, A_t=a), & & \quad \forall s \in \calX_S, o \in \calX_O, a \in \calX_A, \forall t \in [T] \\ &\mu_{sas'}^t = \bbP_{\bfdelta}(S_t=s, A_t=a, S_{t+1}=s'), & & \quad \forall s,s' \in \calX_S, a \in \calX_A, \forall t \in [T] \end{alignat} \end{subequations} Thanks to the properties of probability distributions, such a vector of moments~\eqref{eq:pomdp:def_moments} of $\bbP_{\bfdelta}$ satisfies the constraints of Problem~\eqref{pb:pomdp:NLP_pomdp}. Conversely, given a feasible solution of Problem~\eqref{pb:pomdp:NLP_pomdp}, Theorem~\ref{theo:pomdp:NLP_optimal_solution} ensures that $\bfmu$ is the vector of moments of $\bbP_{\bfdelta}$. We denote by $z^*$ the optimal value of Problem~\eqref{pb:pomdp:NLP_pomdp}. \begin{theo}\label{theo:pomdp:NLP_optimal_solution} Let $(\bfmu, \bfdelta)$ be a feasible solution of NLP~\eqref{pb:pomdp:NLP_pomdp}. Then $\bfmu$ is the vector of moments of the probability distribution $\bbP_{\bfdelta}$ induced by $\bfdelta$, and $(\bfmu, \bfdelta)$ is an optimal solution of NLP~\eqref{pb:pomdp:NLP_pomdp} if and only if $\bfdelta$ is an optimal policy of~\ref{pb:POMDP}. In particular, $v_{\rm{ml}}^* = z^*$. \end{theo} \begin{rem}\label{rem:pomdp:with_observation} \emph{Taking into account initial observations.} Suppose that the decision maker has access to an initial observation $\ovo$ in $\calX_O$. 
Hence, for any policy $\bfdelta$ in $\Deltaml$ we have $\bbP_{\bfdelta} (O_1=\ovo) = 1.$ Taking into account the initial observation requires to slightly modify the constraints of Problem~\eqref{pb:pomdp:NLP_pomdp}: We replace constraints~\eqref{eq:pomdp:NLP_initial2} and \eqref{eq:pomdp:NLP_indep_action} in Problem~\eqref{pb:pomdp:NLP_pomdp} at time $t=1$ by \begin{subequations} \begin{alignat}{2} &\mu_s^1 = \bbP_{\bfdelta}(S_1=s|O_1=\ovo), & \quad \forall s \in \calX_S, \\ &\mu_{soa}^1 = \delta_{a|o}^1 \mathds{1}_{\ovo}(o) \sum_{o' \in \calX_O, a' \in \calX_A}\mu_{so'a'}^1, & \quad \forall s \in \calX_S, o \in \calX_O, a \in \calX_A \label{eq:pomdp:NLP_init_obs_indep}, \end{alignat} \end{subequations} where the probability distribution $\bbP_{\bfdelta}(S_1|O_1)$ can be computed using Bayes' formula. This remark will be useful in Section~\ref{sec:wkpomdp}. \end{rem} \subsection{Turning the nonlinear program into an MILP} \label{sub:pomdp:MILP} We define the set of \emph{deterministic memoryless policies} $\Deltaml^{\rm{d}}$ as \begin{equation}\label{eq:pomdp:def_deterministic_policy} \Deltaml^{\rm{d}} = \bigg\{ \bfdelta \in \Deltaml \colon \delta^t_{a|o} \in \{0,1\},\ \forall o \in \calX_O,\ \forall a \in \calX_A,\ \forall t \in [T] \bigg\}. \end{equation} The following proposition states that we can restrict our policy search in~\ref{pb:POMDP} to the set of deterministic memoryless policies. \begin{prop}{\cite[Proposition 1]{Bagnell2004}}\label{prop:pomdp:det_policies} There always exists an optimal policy for~\ref{pb:POMDP} that is deterministic, i.e., \begin{equation}\label{eq:pomdp:prop:det_policies} \max_{\bfdelta \in \Deltaml} \bbE_{\bfdelta} \bigg[ \sum_{t=1}^{T} r(S_t,A_t, S_{t+1}) \bigg] = \max_{\bfdelta \in \Deltaml^{\rm{d}}} \bbE_{\bfdelta} \bigg[ \sum_{t=1}^{T} r(S_t,A_t, S_{t+1})\bigg]. 
\end{equation} \end{prop} Theorem~\ref{theo:pomdp:NLP_optimal_solution} ensures that~\ref{pb:POMDP} and Problem~\eqref{pb:pomdp:NLP_pomdp} are equivalent, and in particular admit the same optimal solution in terms of $\bfdelta$. However, Problem~\eqref{pb:pomdp:NLP_pomdp} is hard to solve due to the nonlinear constraints~\eqref{eq:pomdp:NLP_indep_action}. By Proposition~\ref{prop:pomdp:det_policies}, we can add the integrality constraints of $\Deltaml^{\rm{d}}$ in~\eqref{pb:pomdp:NLP_pomdp}, and, by a classical result in integer programming, we can turn Problem~\eqref{pb:pomdp:NLP_pomdp} into an equivalent MILP: It suffices to replace constraint~\eqref{eq:pomdp:NLP_indep_action} by the following McCormick inequalities~\citep{Mccormick1976}. \begin{subequations}\label{eq:pomdp:McCormick_linearization} \begin{alignat}{2} &\mu_{soa}^t \leq p(o|s) \sum_{o' \in \calX_O,a' \in \calX_A} \mu_{so'a'}^t & \quad \forall s \in \calX_S, o \in \calX_O, a \in \calX_A, t \in [T] \label{eq:pomdp:MILP_McCormick_1} \\ &\mu_{soa}^t \leq \delta^t_{a|o} & \quad \forall s \in \calX_S, o \in \calX_O, a \in \calX_A, t \in [T] \label{eq:pomdp:MILP_McCormick_2} \\ &\mu_{soa}^t \geq p(o|s)\sum_{o' \in \calX_O,a' \in \calX_A} \mu_{so'a'}^t + \delta^t_{a|o} - 1 & \quad \forall s \in \calX_S, o \in \calX_O, a \in \calX_A, t \in [T]. \label{eq:pomdp:MILP_McCormick_3} \end{alignat} \end{subequations} For convenience, we denote by $\mathrm{McCormick}\left(\bfmu, \bfdelta \right)$ the set of McCormick linear inequalities~\eqref{eq:pomdp:McCormick_linearization}. 
Thus, by using McCormick's linearization on constraints~\eqref{eq:pomdp:NLP_indep_action}, we get that~\ref{pb:POMDP} is equivalent to the following MILP: \begin{equation} \begin{aligned}\label{pb:pomdp:MILP_pomdp} \max_{\bfmu, \bfdelta} \enskip & \sum_{t=1}^T \sum_{\substack{s,s' \in \calX_S \\ a \in \calX_A}} r(s,a,s') \mu_{sas'}^t & \quad & \\ \mathrm{s.t.} \enskip & \bfmu \ \mathrm{satisfies}~\eqref{eq:pomdp:NLP_consistency_sa}-\eqref{eq:pomdp:NLP_indep_state} \\ & \mathrm{McCormick}\big(\bfmu,\bfdelta\big) \\ & \bfdelta \in \Deltaml^{\rm{d}}, \bfmu \geq 0. \end{aligned} \end{equation} For convenience, given a POMDP $\left(\calX_S,\calX_O,\calX_A,\pfrak,\bfr \right)$, we define respectively the feasible sets of Problem~\eqref{pb:pomdp:NLP_pomdp} and MILP~\eqref{pb:pomdp:MILP_pomdp} as $\calQ\left(T,\calX_S,\calX_O,\calX_A,\pfrak \right)$ and $\calQ^{\mathrm{d}}\left(T,\calX_S,\calX_O,\calX_A,\pfrak \right).$ We write respectively $\calQ$ and $\calQ^{\mathrm{d}}$ when $\left(T,\calX_S,\calX_O,\calX_A,\pfrak\right)$ is clear from the context. \subsection{Valid inequalities} \label{sub:pomdp:valid_cuts} Before introducing our valid inequalities, we start by explaining why the linear relaxation of MILP~\eqref{pb:pomdp:MILP_pomdp} is not sufficient to define a feasible solution of Problem~\eqref{pb:pomdp:NLP_pomdp}. It turns out that given a feasible solution $(\bfmu,\bfdelta)$ of the linear relaxation of MILP~\eqref{pb:pomdp:MILP_pomdp}, the vector $\bfmu$ is not necessarily the vector of moments of the probability distribution $\bbP_{\bfdelta}$ induced by $\bfdelta$. Indeed, when the coordinates of the vector $\bfdelta$ are continuous variables, the McCormick's constraints~\eqref{eq:pomdp:McCormick_linearization} are, in general, no longer equivalent to bilinear constraints~\eqref{eq:pomdp:NLP_indep_action}. 
Then, $(\bfmu,\bfdelta)$ is not necessarily a feasible solution of Problem~\eqref{pb:pomdp:NLP_pomdp} anymore, which implies that $\bfmu$ is not necessarily the vector of moments of the probability distribution $\bbP_{\bfdelta}$. Intuitively, it means that the feasible set of the linear relaxation of MILP~\eqref{pb:pomdp:MILP_pomdp} is too large. Actually, we can reduce the feasible set of the linear relaxation of MILP~\eqref{pb:pomdp:MILP_pomdp} by adding valid inequalities. To do so, we introduce new variables $\left((\mu_{s'a'soa}^t)_{s',a',s,o,a}\right)_{t}$ and the inequalities \begin{subequations}\label{eq:pomdp:Valid_cuts_pomdp} \begin{alignat}{2} &\sum_{s'\in \calX_S, a' \in \calX_A} \mu_{s'a'soa}^t = \mu_{soa}^{t}, \quad &\forall s \in \calX_S, o \in \calX_O, a \in \calX_A, \label{eq:pomdp:Valid_cuts_pomdp_consistency1}\\ &\sum_{a \in \calX_A} \mu_{s'a'soa}^t = p(o|s)\mu_{s'a's}^{t-1}, \quad &\forall s',s \in \calX_S, o \in \calX_O, a' \in \calX_A, \label{eq:pomdp:Valid_cuts_pomdp_consistency2}\\ &\mu_{s'a'soa}^t = p(s|s',a',o)\sum_{\ovs \in \calX_S} \mu_{s'a'\ovs oa}^t, \quad &\forall s',s \in \calX_S, o \in \calX_O, a',a \in \calX_A, \label{eq:pomdp:Valid_cuts_pomdp_main} \end{alignat} \end{subequations} \noindent where we use the constants $$p(s|s',a',o) = \bbP(S_t=s | S_{t-1}=s',A_{t-1}=a',O_t=o),$$ for any $s,s' \in \calX_S$, $a'\in \calX_A$ and $o \in \calX_O$. Note that $p(s|s',a',o)$ does not depend on the policy $\bfdelta$ and can be easily computed during a preprocessing using Bayes' rule. Therefore, constraints in~\eqref{eq:pomdp:Valid_cuts_pomdp} are linear. \begin{prop}\label{prop:pomdp:valid_cuts_pomdp} Inequalities \eqref{eq:pomdp:Valid_cuts_pomdp} are valid for MILP~\eqref{pb:pomdp:MILP_pomdp}, and there exists a solution $\bfmu$ of the linear relaxation of~\eqref{pb:pomdp:MILP_pomdp} that does not satisfy constraints~\eqref{eq:pomdp:Valid_cuts_pomdp}. 
\end{prop} The MILP formulation obtained by adding inequalities~\eqref{eq:pomdp:Valid_cuts_pomdp} in MILP~\eqref{pb:pomdp:MILP_pomdp} is an extended formulation, and has many more constraints than the initial MILP~\eqref{pb:pomdp:MILP_pomdp}. Its linear relaxation therefore takes longer to solve. Inequalities~\eqref{eq:pomdp:Valid_cuts_pomdp} strengthen the linear relaxation, and numerical experiments in Section \ref{sec:num} show that these inequalities speed up the resolution of MILP~\eqref{pb:pomdp:MILP_pomdp}. \paragraph{Probabilistic interpretation.} Given a feasible solution $(\bfmu,\bfdelta)$ of the linear relaxation of~\eqref{pb:pomdp:MILP_pomdp}, $\bfmu$ can still be interpreted as the vector of moments of a probability distribution $\bbQ_{\bfmu}$ over $\left(\calX_S \times \calX_O \times \calX_A\right)^T \times \calX_S$. However, as it has been mentioned above, the vector $\bfmu$ does not necessarily correspond to the vector of moments of $\bbP_{\bfdelta}$, which is due to the fact that $(\bfmu,\bfdelta)$ does not necessarily satisfy the nonlinear constraints~\eqref{eq:pomdp:NLP_indep_action}. Besides, constraints~\eqref{eq:pomdp:NLP_indep_action} are equivalent to the property that, \begin{equation}\label{eq:pomdp:strongIndep} \text{according to $\bbQ_{\bfmu}$, action $A_t$ is independent from state $S_t$ given observation $O_t$.} \end{equation} Hence, given a feasible solution $(\bfmu,\bfdelta)$ of the linear relaxation of MILP~\eqref{pb:pomdp:MILP_pomdp}, the distribution $\bbQ_{\bfmu}$ does not necessarily satisfy the conditional independences~\eqref{eq:pomdp:strongIndep}. 
Remark that~\eqref{eq:pomdp:strongIndep} implies the weaker result that, \begin{equation}\label{eq:pomdp:weakIndep} \text{according to $\bbQ_{\bfmu}$, $A_t$ is independent from $S_t$ given $O_t$, $A_{t-1}$ and $S_{t-1}$.} \end{equation} Proposition~\ref{prop:pomdp:valid_cuts_pomdp} says that the independences in~\eqref{eq:pomdp:weakIndep} are not satisfied in general by a feasible solution $(\bfmu,\bfdelta)$ of the linear relaxation of MILP~\eqref{pb:pomdp:MILP_pomdp}, but that we can enforce them using linear inequalities~\eqref{eq:pomdp:Valid_cuts_pomdp} on $(\bfmu,\bfdelta)$ in an extended formulation. \subsection{Strengths of the relaxations} \label{sub:pomdp:inf_relax} When the decision maker directly observes the state of the system, the POMDP problem becomes an MDP problem and the resulting optimization problem is called the \emph{MDP approximation} \citep{Hauskrecht2000}. It turns out that the linear relaxation of MILP~\eqref{pb:pomdp:MILP_pomdp} is related to the MDP approximation. Consider a collection of variables $\bfmu = \left((\mu_s^1)_{s},\left((\mu_{sas'}^t)_{s,a,s'}\right)_{t}\right)$ and the following linear program, which is known to solve an MDP exactly (e.g., \citet{Epenoux1963}). 
\begin{subequations}\label{pb:pomdp:LP_MDP} \begin{alignat}{2} \max_{\bfmu} \enskip & \sum_{t=1}^T \sum_{\substack{s,s' \in \calX_S \\ a \in \calX_A}} r(s,a,s')\mu_{sas'}^t & \quad &\\ \mathrm{s.t.} \enskip & \mu_{s}^1 = \sum_{a' \in \calX_A, s' \in \calX_S} \mu_{sa's'}^{1} & \forall s \in \calX_S \label{eq:pomdp:LP_MDP_consistent_action_initial}\\ & \sum_{s'\in \calX_S,a' \in \calX_A} \mu_{s'a's}^t = \sum_{a' \in \calX_A, s' \in \calX_S} \mu_{sa's'}^{t+1} & \forall s \in \calX_S, t \in [T] \label{eq:pomdp:LP_MDP_consistent_action}\\ & \mu_{s}^1 = p(s) & \forall s \in \calX_S \label{eq:pomdp:LP_MDP_initial} \\ & \mu_{sas'}^{t} = p(s'|s,a) \sum_{s'' \in \calX_S }\mu_{sas''}^t & \forall s \in \calX_S, a \in \calX_A, t \in [T] \label{eq:pomdp:LP_MDP_consistent_state} \end{alignat} \end{subequations} In Problem~\eqref{pb:pomdp:LP_MDP}, the variables $(\mu_s^1)_s$ and $(\mu_{sas'}^t)_{sas'}$ respectively represent the probability distribution of $S_1$ and $\left(S_t,A_t,S_{t+1}\right)$ for any $t$ in $[T]$. Theorem~\ref{theo:pomdp:MDP_approx_equivalence} below states that the linear relaxation of MILP~\eqref{pb:pomdp:MILP_pomdp} is equivalent to the MDP approximation of~\ref{pb:POMDP}. We introduce the following quantities: \begin{enumerate}[label={--}] \item $z_{\rm{R}}^*$: the optimal value of the linear relaxation of MILP~\eqref{pb:pomdp:MILP_pomdp}. \item $z_{\rm{R}^{\rm{c}}}^*$: the optimal value of the linear relaxation of MILP~\eqref{pb:pomdp:MILP_pomdp} with inequalities~\eqref{eq:pomdp:Valid_cuts_pomdp}. \item $v_{\rm{his}}^*$: the optimal value of~\ref{pb:POMDP_perfectRecall}. \item $v_{\rm{MDP}}^*$: the optimal value of linear program~\eqref{pb:pomdp:LP_MDP}, which is the optimal value of the MDP approximation. \end{enumerate} \begin{theo}\label{theo:pomdp:MDP_approx_equivalence} Let $(\bfmu,\bfdelta)$ be a feasible solution of the linear relaxation of MILP~\eqref{pb:pomdp:MILP_pomdp}. 
Then $(\bfmu,\bfdelta)$ is an optimal solution of the linear relaxation of MILP~\eqref{pb:pomdp:MILP_pomdp} if and only if $\bfmu$ is an optimal solution of linear program~\eqref{pb:pomdp:LP_MDP}. In particular, $z_{\rm{R}}^* = v_{\rm{MDP}}^*$. In addition, the following inequalities hold: \begin{equation}\label{eq:pomdp:inequality_information} z^* \leq v_{\rm{his}}^* \leq z_{\rm{R}^{\rm{c}}}^* \leq z_{\rm{R}}^*. \end{equation} \end{theo} Inequality~\eqref{eq:pomdp:inequality_information} ensures that by solving MILP~\eqref{pb:pomdp:MILP_pomdp}, we obtain an integrality gap $z_{\rm{R}}^*-z^*$ that bounds the approximation error $v_{\rm{his}}^* - z^*$ due to the choice of a memoryless policy instead of a policy that depends on the whole history of observations and actions. In addition, Theorem~\ref{theo:pomdp:MDP_approx_equivalence} ensures that the integrality gap $z_{\rm{R}^{\rm{c}}}^*-z^*$ obtained using valid inequalities~\eqref{eq:pomdp:Valid_cuts_pomdp} gives a tighter bound on the approximation error. \section{Integer programming for weakly coupled POMDPs} \label{sec:wkpomdp} We now focus on the problem~\ref{pb:decPOMDP_wc} of finding a high/maximum expected reward policy for a weakly coupled POMDP $\left((\calX_S^m,\calX_O^m,\calX_A^m,\pfrak^m,\bfr^m,\bfD^m)_{m\in[M]},\bfb\right)$. Based on the results of Section~\ref{sec:pomdp}, a naive approach is to use the mathematical programs of the previous section on the POMDP $\left(\calX_S,\calX_O,\calX_A',\pfrak',\bfr' \right),$ where $\calX_S = \calX_S^1 \times \cdots \times \calX_S^M,$ $\calX_O=\calX_O^1 \times \cdots \times \calX_O^M,$ and, $\calX_A'=\calX_A^1 \times \cdots \times \calX_A^M,$ $\pfrak',$ and $\bfr'$ are respectively defined by~\eqref{eq:problem:def_form_action_space}-\eqref{eq:problem:factor_rewards}. 
More precisely, NLP~\eqref{pb:pomdp:NLP_pomdp} becomes \begin{subequations}\label{pb:wkpomdp:POMDP_NLP} \begin{alignat}{2} \max_{\bfmu,\bfdelta} \enskip & \sum_{t=1}^T \sum_{\substack{\bfs,\bfs' \in \calX_S \\ \bfa \in \calX_A}} \mu_{\bfs \bfa \bfs'}^{t}\sum_{m=1}^M r^m(s^m,a^m,s'^m) & \quad &\\ \mathrm{s.t.} \enskip & \left(\bfmu,\bfdelta\right) \in \calQ \left(T,\calX_S, \calX_O, \calX_A',\pfrak'\right) & \label{eq:wkpomdp:POMDP_NLP:per_pomdp} \\ & \left( \sum_{m=1}^M \bfD^m(a^m) - \bfb \right)\delta_{\bfa|\bfo}^t \leq 0 & \forall \bfo \in \calX_O, \bfa \in \calX_A', t \in [T] \label{eq:wkpomdp:POMDP_NLP:link} \end{alignat} \end{subequations} where constraints~\eqref{eq:wkpomdp:POMDP_NLP:link} model the linking constraints $\sum_{m=1}^M \bfD^m(A_t^m) \leq \bfb$, which are not included in $\calQ \left(T,\calX_S, \calX_O, \calX_A',\pfrak'\right)$. Indeed, given a solution $(\bfmu,\bfdelta)$ of NLP~\eqref{pb:wkpomdp:POMDP_NLP}, if $\bfa \in \calX_A' \backslash \calX_A$, then $\delta_{\bfa|\bfo}^t = 0$ for all $t$ in $[T]$ and $\bfo$ in $\calX_O$. This result ensures that $\bbP_{\bfdelta} \left(\left\{ \bfA_t \in \calX_A, \enskip \forall t \in [T] \right\} \right) = 1$, which shows that the linking constraint is satisfied almost surely. NLP~\eqref{pb:wkpomdp:POMDP_NLP} is intractable for at least three reasons. (A) The number of variables required to encode a policy $\bfdelta$ and its moments $\bfmu$ is exponential (the vector $(\mu_{\bfs \bfa \bfs'})_{\substack{\bfs,\bfs' \in \calX_S \\ \bfa \in \calX_A'}}$ has $|\calX_A| \prod_{m=1}^M |\calX_S^m| |\calX_O^m|$ coordinates) and the number of constraints required to ensure that $\bfmu$ corresponds to the moment of the distribution induced by $\bfdelta$ is exponential. (B) The latter constraints are nonlinear. And (C) the number of inequalities required to ensure that the linking constraints are satisfied is exponential. 
If the second difficulty can be addressed using the approach developed in Section~\ref{sub:pomdp:MILP}, the two others are specific to weakly coupled POMDP. \subsection{An approximate integer program} \label{sub:wkpomdp:approx} Consider the variables $\bftau^m = \Big( (\tau_s^{1,m})_{s}, (\tau_{sas'}^{t,m})_{s,a,s'}, (\tau_{soa}^{t,m})_{s,o,a}, (\tau_a^{t,m})_{a} \Big)_{t \in [T]},$ $\bfdelta^m =(\delta^{t,m})_{t \in [T]}$ and the following MILP, \begin{subequations}\label{pb:wkpomdp:decPOMDP_MILP} \begin{alignat}{2} \max_{\bftau,\bfdelta} \enskip & \sum_{t=1}^T \sum_{m=1}^M \sum_{\substack{s,s' \in \calX_S^m \\ a \in \calX_A^m}} r^m(s,a,s')\tau_{sas'}^{t,m} & \quad &\\ \mathrm{s.t.} \enskip & \left(\bftau^m,\bfdelta^m\right) \in \calQ^{\mathrm{d}}\left(T,\calX_S^m, \calX_O^m, \calX_A^m,\pfrak^m\right) & \forall m \in [M] \label{eq:wkpomdp:decPOMDP_MILP_per_pomdp} \\ &\sum_{s \in \calX_S^m, o \in \calX_O^m}\tau_{soa}^{t,m} = \tau_a^{t,m} & \forall a \in \calX_A^m, m \in [M], t \in [T] \label{eq:wkpomdp:decPOMDP_MILP_consistent_action} \\ &\sum_{m =1}^M\sum_{a \in \calX_A^m} \bfD^m(a) \tau_a^{t,m} \leq \bfb & \forall t \in [T] \label{eq:wkpomdp:decPOMDP_MILP_linking_cons} \end{alignat} \end{subequations} In order to obtain~\eqref{pb:wkpomdp:decPOMDP_MILP}, we modify NLP~\eqref{pb:wkpomdp:POMDP_NLP} in three ways. \begin{itemize} \item[\textup{(A)}] We reduce the number of variables and constraints required to encode a policy and its moments by considering ``local'' variables $(\bftau^m,\bfdelta^m)$ in $\calQ\left(T,\calX_S^m, \calX_O^m, \calX_A^m,\pfrak^m\right)$ for each component $m$ instead of $(\bfmu,\bfdelta)$. \item[\textup{(B)}] We consider deterministic policies $\bfdelta^m \in \Delta^{\mathrm{d},m}$ and linearize constraints using McCormick inequalities: We replace $\calQ$ by $\calQ^{\mathrm{d}}$. 
\item[\textup{(C)}] We replace the almost sure linking constraint $\sum_{m=1}^M \bfD^m(A_t^m) \leq \bfb,$ by the constraint in expectation $\sum_{m =1}^M \bbE_{\bfdelta^m} \left[ \bfD^m(A_t^m) \right] \leq \bfb$, which enables to reduce the number of inequalities~\eqref{eq:wkpomdp:POMDP_NLP:link} required to encode it. \end{itemize} The reader is already familiar with \textup{(B)}, which we used in Section~\ref{sub:pomdp:MILP} to turn the NLP~\eqref{pb:pomdp:NLP_pomdp} into the MILP~\eqref{pb:pomdp:MILP_pomdp}. Let us now focus on \textup{(A)} and \textup{(C)}. (A) Moving from $\bfmu$ and $\bfdelta$ to $\bftau^m$ and $\bfdelta^m =(\delta^{t,m})_{t \in [T]}$ enables to obtain a MILP with $O\bigg(\sum_{m=1}^M\vert \calX_S^m\vert \vert \calX_A^m \vert\Big(\vert \calX_O^m\vert + \vert \calX_S^m\vert \Big)\bigg)$ variables and constraints when NLP~\eqref{pb:wkpomdp:POMDP_NLP} had $O\bigg(\prod_{m=1}^M\vert \calX_S^m\vert \vert \calX_A^m \vert\Big(\prod_{m=1}^M\vert \calX_O^m\vert + \prod_{m=1}^M \vert \calX_S^m\vert \Big)\bigg)$ variables and constraints. Furthermore, by Theorem~\ref{theo:pomdp:NLP_optimal_solution}, constraints~\eqref{eq:wkpomdp:decPOMDP_MILP_per_pomdp} and ~\eqref{eq:wkpomdp:decPOMDP_MILP_consistent_action} ensure that the variables $\bftau^m$ can still be interpreted as the vector of moments of the probability distribution $\bbP_{\bfdelta^m}$ induced by the deterministic policy $\bfdelta^m$ for each component $m$ in $[M].$ However, given a feasible solution $(\bftau^m,\bfdelta^m)_{m\in [M]}$ there is no guarantee that there exists a policy $\bfdelta$ of~\ref{pb:decPOMDP_wc} such that the variables $(\bftau^m)_{m \in [M]}$ represent the moments of the distribution $\bbP_{\bfdelta}$ on the whole system. It is the reason why, following the conventions in the graphical model literature \citep{wainwright2008graphical}, we denote by $\bftau$ the approximate vector of moments (or pseudomarginals) instead of $\bfmu$, which we reserve for exact vector of moments. 
(C) If we leverage the interpretation of $(\bfdelta^m)$ as ``local'' policies for each component, the linking constraints $\sum_{m=1}^M \bfD^m(A_t^m) \leq \bfb$ are satisfied almost surely by the collection of constraints \begin{equation}\label{eq:wkpomdp:decPOMDP_link_almost_surely} \sum_{m =1}^M \delta^{t,m}_{a^m|o^m}\bfD^m(a^m) \leq \bfb, \quad \forall \bfo \in \calX_O, \enskip \bfa \in \calX_A', \enskip t \in [T]. \end{equation} Indeed, since we use deterministic ``local'' policies for each component, the latter constraint ensures that $\sum_{m \colon \delta^{t,m}_{a^m|o^m} = 1} \bfD^m(a^m) \leq \bfb$. However, the collection of constraints~\eqref{eq:wkpomdp:decPOMDP_link_almost_surely} still has an exponential number of elements. When we replace the almost sure constraint $\sum_{m=1}^M \bfD^m(A_t^m) \leq \bfb$ by the (weaker) constraint in expectation $\bbE_{\bfdelta}\left[\sum_{m=1}^M \bfD^m(A_t^m)\right] \leq \bfb$, the linearity of expectation enables to decouple the different components and obtain the family of constraints~\eqref{eq:wkpomdp:decPOMDP_MILP_linking_cons}, which has a polynomial number of elements. In addition to being tractable, MILP~\eqref{pb:wkpomdp:decPOMDP_MILP} has the advantage that it can be solved as a subroutine in the algorithms of Sections~\ref{sub:wkpomdp:first_policy} and~\ref{sub:wkpomdp:matheuristic}, which compute practically efficient policies for large-scale weakly coupled POMDPs. But its optimal value is neither a lower bound nor an upper bound on $v_{\rm{ml}}^*$ (see Appendix~\ref{app:counter_example} for examples of this fact). We will see in the next section that it shares lower and upper bounds with~\ref{pb:decPOMDP_wc}. As it was the case for MILP~\eqref{pb:pomdp:MILP_pomdp}, we can strengthen MILP~\eqref{pb:wkpomdp:decPOMDP_MILP} with the valid inequalities of Section~\ref{sub:pomdp:valid_cuts}, which are now reformulated as follows, and whose analysis is deferred to Section~\ref{sub:wkpomdp:valid_ineq}. 
\begin{subequations}\label{eq:wkpomdp:dec_Valid_cuts} \begin{alignat}{2} &\sum_{s'\in \calX_S^m, a' \in \calX_A^m} \tau_{s'a'soa}^{t,m} = \tau_{soa}^{t,m},& \forall s \in \calX_S^m, o \in \calX_O^m, a \in \calX_A^m, \label{eq:wkpomdp:dec_Valid_cuts_consistency1} \\ &\sum_{a \in \calX_A^m} \tau_{s'a'soa}^{t,m} = p^m(o|s)p^m(s|s',a')\tau_{s'a'}^{t-1,m}, & \forall s,s' \in \calX_S^m, o \in \calX_O^m, a' \in \calX_A^m, \label{eq:wkpomdp:dec_Valid_cuts_consistency2} \\ &\tau_{s'a'soa}^{t,m} = p^m(s|s',a',o)\sum_{\overline{s} \in \calX_S^m} \tau_{s'a'\overline{s} oa}^{t,m}, \quad \quad &\forall s,s' \in \calX_S^m, o \in \calX_O^m,a,a' \in \calX_A^m. \label{eq:wkpomdp:dec_Valid_cuts_main} \end{alignat} \end{subequations} \subsection{Strengths of the linear relaxation} \label{sub:wkpomdp:strengths_linear_relaxation} While in Section~\ref{sub:pomdp:inf_relax} we showed that the linear relaxation of MILP~\eqref{pb:pomdp:MILP_pomdp} is equivalent to the MDP approximation, one may ask the question: How do we relate the linear relaxation of MILP~\eqref{pb:wkpomdp:decPOMDP_MILP} with the MDP approximation of a weakly coupled POMDP? As stated in the theorem below, we are able to link the value of the linear relaxation of MILP~\eqref{pb:wkpomdp:decPOMDP_MILP} (with and without valid inequalities~\eqref{eq:wkpomdp:dec_Valid_cuts}) with the optimal values $v_{\rmml}^*$ and $v_{\rmhis}^*$. We denote respectively by $z_{\rmIP}$, $z_{\rmRc}$ and $z_{\rmR}$ the optimal values of MILP~\eqref{pb:wkpomdp:decPOMDP_MILP} and its linear relaxation with and without valid inequalities~\eqref{eq:wkpomdp:dec_Valid_cuts}. \begin{theo}\label{theo:wkpomdp:ineq_decPOMDP} The linear relaxation of MILP~\eqref{pb:wkpomdp:decPOMDP_MILP} is a relaxation of the MDP approximation of the weakly coupled POMDP. Furthermore, the inequalities $v_{\rmMDP}^* \leq z_{\rmR}$ and $v_{\rmhis}^* \leq z_{\rmRc} \leq z_{\rmR}$ hold. 
\end{theo} It turns out that the linear relaxation of MILP~\eqref{pb:wkpomdp:decPOMDP_MILP} is equivalent to the fluid formulation of \citet{bertsimas2016decomposable}, which is a relaxation of the MDP approximation of a weakly coupled POMDP. We show this result in Appendix~\ref{app:linksBertsimas}. \subsection{An upper bound and a lower bound} \label{sub:wkpomdp:lb_ub} In this section we introduce bounds $z_{\rmLB}$, $z_{\rmUB}$, and $z_{\rmLR}$ that enable us to quantify the distance between the optimal values $v_{\rm{ml}}^*$ and $z_{\rmIP}$ of~\ref{pb:decPOMDP_wc} and MILP~\eqref{pb:wkpomdp:decPOMDP_MILP}. More precisely, we prove \begin{equation*} \begin{array}{rcl} z_{\rm{LB}} \leq& v_{\rm{ml}}^* &\leq z_{\rm{UB}} \\ z_{\rm{LB}} \leq& z_{\rmIP} &\leq z_{\rm{UB}} \end{array} \quad \text{and} \quad z_{\rm{UB}} \leq z_{\rm{LR}} \leq z_{\rmRc} \leq z_{\rmR} \end{equation*} where $z_{\rmR}$ and $z_{\rmRc}$ denote the optimal values of the linear relaxation of MILP~\eqref{pb:wkpomdp:decPOMDP_MILP} and the linear relaxation of MILP~\eqref{pb:wkpomdp:decPOMDP_MILP} with valid inequalities~\eqref{eq:wkpomdp:dec_Valid_cuts}, respectively. Bounds $z_{\rmLB}$, $z_{\rmUB}$, and $z_{\rmLR}$ are defined as optimal values of mathematical programs obtained by playing with approximations~(A), (B), and~(C). Since $z_{\rmLB}$ and~$z_{\rmUB}$ are hard to compute, their interest is mainly theoretical: They enable us to bound the difference between $v_{\rm{ml}}^*$ and~$z_{\rmIP}$. We also introduce the tractable upper bound~$z_{\rmLR}$ to evaluate the quality of the policy we use in the numerical experiments. In this section, we need to compare MILP formulations that do not share the same set of variables. We therefore say that a problem P is a \emph{relaxation} of problem P' when given a feasible solution of P' we can build a feasible solution of P with the same value.
\subsubsection{The lower bound from decomposable policies} A policy $\bfdelta$ in $\Delta$, defined on $\calX_A$, is \emph{decomposable} if there exist policies $\bfdelta^m$ in $\Delta^m$, defined on $\calX_A^m$, such that $\bfdelta = \prod_{m=1}^M \bfdelta^m.$ The main advantage of decomposable policies is that they decouple the components: given such a $\bfdelta$, the random variables of each component are independent under $\bbP_{\bfdelta}$. We can therefore compute exact probabilities using the local moments $\bftau^m$. An optimal policy can then be computed using the following MILP, whose optimal value we use as the lower bound $z_{\rm{LB}}$. \begin{subequations}\label{pb:wkpomdp:decPOMDP_MILP_LB} \begin{alignat}{2} z_{\rm{LB}}:= \max_{\bftau,\bfdelta} \enskip & \sum_{t=1}^T \sum_{m=1}^M \sum_{\substack{s,s' \in \calX_S^m \\ a \in \calX_A^m}} r_m(s,a,s')\tau_{sas'}^{t,m} & \quad &\\ \mathrm{s.t.} \enskip &\left(\bftau^m,\bfdelta^m\right) \in \calQ^{\mathrm{d}}\left(T, \calX_S^m, \calX_O^m, \calX_A^m,\pfrak^m\right) & \forall m \in [M] \label{eq:wkpomdp:MILP_per_POMDP_Lb} \\ &\sum_{m =1}^M \sum_{a \in \calX_A^m} \bfD^m(a)\delta^{t,m}_{a|o^m} \leq \bfb & \forall \bfo \in \calX_O, t \in [T] \label{eq:wkpomdp:decPOMDP_link_constraints_Lb} \end{alignat} \end{subequations} \begin{theo}\label{theo:wkpomdp:lower_bound_MILP} Let $(\bftau^m,\bfdelta^m)$ be an optimal solution of~\eqref{pb:wkpomdp:decPOMDP_MILP_LB}. Then $\bfdelta = \prod_{m=1}^M \bfdelta^m$ is an optimal deterministic decomposable policy for~\ref{pb:decPOMDP_wc}. Furthermore, MILP~\eqref{pb:wkpomdp:decPOMDP_MILP} is also a relaxation of MILP~\eqref{pb:wkpomdp:decPOMDP_MILP_LB}. In particular, $z_{\rmLB} \leq v_{\rm{ml}}^*$ and $z_{\rm{LB}} \leq z_{\rmIP}.$ \end{theo} Note that MILP~\eqref{pb:wkpomdp:decPOMDP_MILP_LB} is obtained from NLP~\eqref{pb:wkpomdp:POMDP_NLP} by using modifications~(A) and (B), but not (C).
Indeed, the almost sure constraint~\eqref{eq:wkpomdp:decPOMDP_link_constraints_Lb} is required to ensure that the decomposable policy we obtain is feasible. Since there is an exponential number of such constraints, a constraint generation approach is required to solve this problem in practice. \subsubsection{An upper bound through a nonlinear formulation} At first sight, it may seem that MILP~\eqref{pb:wkpomdp:decPOMDP_MILP} is a relaxation of NLP~\eqref{pb:wkpomdp:POMDP_NLP}. Indeed, modification (A) relaxes the problem: Given a solution $(\bfmu,\bfdelta)$ of NLP~\eqref{pb:wkpomdp:POMDP_NLP}, and defining $\bftau^m$ and $\bfdelta^m$ respectively as the marginal moments and marginal policy as follows \begin{align*} &\left(\tau_s^1,\tau_{soa}^{t,m},\tau_{sas'}^{t,m}\right) = \left( \sum_{s^{-m}} \mu_{\bfs}^1,\sum_{\substack{s^{-m},o^{-m} \\ a^{-m}}} \mu_{\bfs\bfo\bfa}^t, \sum_{\substack{s^{-m},a^{-m} \\ s'^{-m}}} \mu_{\bfs\bfa\bfs'}^t \right), & \delta_{a|o}^{t,m} = \sum_{\substack{s^{-m},o^{-m} \\ a^{-m}}} \delta^t_{\bfa|\bfo} \prod_{m' \neq m} p^{m'}(o^{m'}|s^{m'})\tau_{s^{m'}}^{t,m'}, \end{align*} where the sums over $s^{-m}$, $o^{-m}$ and $a^{-m}$ indicate respectively the sums over all the vectors in $\prod_{m'\neq m} \calX_S^{m'}$, $\prod_{m'\neq m} \calX_O^{m'}$ and $\prod_{m'\neq m} \calX_A^{m'}$, we obtain a solution that satisfies the constraints of $\calQ\left(T,\calX_S^m,\calX_O^m,\calX_A^m,\pfrak^m\right)$. Modification (C) further relaxes the problem by turning an almost sure constraint into a constraint in expectation. However, if there is an optimal policy $\bfdelta$ that is deterministic, the marginal policies $\bfdelta^m$ defined above are not necessarily deterministic anymore. This is the reason why MILP~\eqref{pb:wkpomdp:decPOMDP_MILP}, which forces the $\bfdelta^m$ to be integer, is not a relaxation of NLP~\eqref{pb:wkpomdp:POMDP_NLP}.
But the following nonlinear program, \begin{subequations}\label{pb:wkpomdp:decPOMDP_NLP} \begin{alignat}{2} z_{\rmUB}:=\max_{\bftau,\bfdelta} \enskip & \sum_{t=1}^T \sum_{m=1}^M \sum_{\substack{s,s' \in \calX_S^m \\ a \in \calX_A^m}} r_m(s,a,s')\tau_{sas'}^{t,m} & \quad &\\ \mathrm{s.t.} \enskip & \left(\bftau^m,\bfdelta^m\right) \in \calQ\left(T,\calX_S^m,\calX_O^m,\calX_A^m,\pfrak^m \right) & \quad\forall m \in [M] \label{eq:wkpomdp:NLP_per_POMDP_v1} \\ &\sum_{s \in \calX_S^m, o \in \calX_O^m}\tau_{soa}^{t,m} = \tau_a^{t,m} & \forall a \in \calX_A^m, m \in [M], t \in [T] \label{eq:wkpomdp:decPOMDP_NLP_consistent_action} \\ &\sum_{m=1}^M\sum_{a \in \calX_A^m} \bfD^m(a) \tau_a^{t,m} \leq \bfb & \forall t \in [T] \label{eq:wkpomdp:decPOMDP_NLP_linking_cons} \end{alignat} \end{subequations} which is obtained by using only modifications (A) and (C), is a relaxation. We denote by $z_{\rmUB}$ the optimal value of the nonlinear program~\eqref{pb:wkpomdp:decPOMDP_NLP}. \begin{theo}\label{theo:wkpomdp:upper_bound_NLP} The nonlinear program~\eqref{pb:wkpomdp:decPOMDP_NLP} is a relaxation of~\ref{pb:decPOMDP_wc} and of MILP~\eqref{pb:wkpomdp:decPOMDP_MILP}. In particular, $v_{\rm{ml}}^* \leq z_{\rm{UB}}$ and $z_{\rmIP} \leq z_{\rm{UB}}.$ \end{theo} Once again, the variables $\bftau^m$ can be interpreted as the vector of moments of the probability distribution $\bbP_{\bfdelta^m}$ on component $m$, but there is no guarantee that they define a joint probability distribution over the whole system. Problem~\eqref{pb:wkpomdp:decPOMDP_NLP} is a Quadratically Constrained Quadratic Program (QCQP) due to constraints~\eqref{eq:wkpomdp:NLP_per_POMDP_v1} and is in general non-convex. Recent QCQP solvers such as \texttt{Gurobi 9.0} \citep{gurobi} are able to solve small instances of Problem~\eqref{pb:wkpomdp:decPOMDP_NLP} to optimality, but they cannot address large instances due to the limits of Spatial Branch-and-Bound \citep{Liberti2008}.
\subsubsection{A tractable upper bound through Lagrangian relaxation} \label{sub:wkpomdp:lagrangian_relaxations} The main computational difficulty in~\eqref{pb:wkpomdp:decPOMDP_NLP} comes from the nonlinear constraints $\tau_{soa}^{t,m} = \delta_{a|o}^{t,m}\sum_{\substack{o' \in \calX_O^m \\ a' \in \calX_A^m}} \tau_{soa'}^{t,m}$, which we cannot linearize using McCormick inequalities as in Section~\ref{sub:pomdp:MILP} because we no longer assume that $\bfdelta^m$ is integer. However, if we perform a Lagrangian relaxation where we relax linking constraint~\eqref{eq:wkpomdp:decPOMDP_NLP_linking_cons}, then we obtain a subproblem for each component that is a POMDP, which we can now reformulate as a MILP using the approach of Section~\ref{sub:pomdp:MILP}. Let $z_{\rmLR}$ denote the value of this Lagrangian relaxation of NLP~\eqref{pb:wkpomdp:decPOMDP_NLP}. \begin{restatable}{prop}{lagrangian}\label{prop:wkpomdp:lagrangian_relax} The values of the Lagrangian relaxations of MILP~\eqref{pb:wkpomdp:decPOMDP_MILP} and NLP~\eqref{pb:wkpomdp:decPOMDP_NLP} are equal and the following inequalities hold: \begin{align}\label{eq:wkpomdp:weak_duality} z_{\rmUB} \leq z_{\rmLR} \leq z_{\rmRc} \leq z_{\rmR} \end{align} \end{restatable} Proposition~\ref{prop:wkpomdp:lagrangian_relax} ensures that $z_{\rmLR}$ can be computed using the Lagrangian relaxation of MILP~\eqref{pb:wkpomdp:decPOMDP_MILP}. Geoffrion's theorem \citep{Geoffrion1974} ensures that we can compute $z_{\rmLR}$ by solving the Dantzig-Wolfe reformulation of MILP~\eqref{pb:wkpomdp:decPOMDP_MILP} within a column generation approach. The proof of Proposition~\ref{prop:wkpomdp:lagrangian_relax} and the full details of the column generation algorithm are available in Appendix~\ref{app:ColGen}. Note that Lagrangian relaxation has already been used in the literature on weakly coupled stochastic dynamic programs to compute upper bounds \citep{Adelman2008,Ye2014,hawkins2003langrangian}.
\subsubsection{Benefits and drawbacks of the formulations} \label{sub:wkpomdp:lb_ub:size_formulations} \begin{figure} % NOTE(review): the graphic content of this figure is missing from the source — restore \includegraphics here. \caption{The relaxations of Section~\ref{sub:wkpomdp:lb_ub}.} \label{fig:wkpomdp:interpret_bound} \end{figure} Figure~\ref{fig:wkpomdp:interpret_bound} summarizes the links between the feasible sets of the different formulations, as established in the theorems above. Table~\ref{tab:wkpomdp:comparison_size_formulations} highlights the benefits and the drawbacks of the different formulations. It reports the behavior of the formulations regarding several criteria formulated as questions: Are the numbers of variables (Pol. variables) and constraints (Pol. constraints) polynomial? Does the formulation have linking constraints between the components (Link. constraints)? Is the formulation linear (Linearity)? Are there integer variables in the formulation (Int. variables)? Does the formulation provide a feasible policy (Feas. pol.)? Is the optimal value an upper bound or a lower bound with respect to $v_{\mathrm{ml}}^*$? Is the formulation tractable regarding the size of the instances (small with $\vert \calX_S \vert \leq 10^2$, medium with $10^2 \leq \vert \calX_S \vert \leq 10^4$ and large with $\vert \calX_S \vert \geq 10^4$)? The tractability criteria should be understood as advice on the formulation to choose and the scale order is only one indicator among others. \begin{table}[h!] \begin{center} \resizebox{16cm}{!}{ \begin{tabular}{lccccccccccc} Formulations & \rot{Pol. variables} & \rot{Pol. constraints} & \rot{Link. constraints} & \rot{Linearity} & \rot{Int. variables} & \rot{Feas.
policy} & \rot{Lower bound} & \rot{Upper bound} & Small & \multicolumn{1}{b{1.5cm}}{Tractability \newline Medium} & Large \\ 1.5line NLP~\eqref{pb:wkpomdp:POMDP_NLP} & & & \checkmark & & & \checkmark &$-$ & $-$ & & & \\ MILP~\eqref{pb:wkpomdp:decPOMDP_MILP} & \checkmark & \checkmark & \checkmark & \checkmark & \checkmark & & & & \checkmark & \checkmark& \\ Lower bound~\eqref{pb:wkpomdp:decPOMDP_MILP_LB} & \checkmark & & \checkmark & \checkmark & \checkmark & \checkmark & \checkmark & & \checkmark & & \\ Upper bound~\eqref{pb:wkpomdp:decPOMDP_NLP} & \checkmark & \checkmark & \checkmark & & & & & \checkmark & \checkmark & & \\ Lagrangian Relaxation & \checkmark & \checkmark & & \checkmark & \checkmark & & & \checkmark & \checkmark& \checkmark& \checkmark \\ Linear Relaxation of~\eqref{pb:wkpomdp:decPOMDP_MILP} & \checkmark & \checkmark & \checkmark & \checkmark & & & & \checkmark &\checkmark & \checkmark & \checkmark\\ 1.5line \end{tabular}} \end{center} \caption{Comparison of the properties of the formulations.} 1.5abel{tab:wkpomdp:comparison_size_formulations} \end{table} \subsection{Valid inequalities} 1.5abel{sub:wkpomdp:valid_ineq} Since Inequalities~\eqref{eq:pomdp:Valid_cuts_pomdp} are valid for MILP~\eqref{pb:pomdp:MILP_pomdp}, their local counterpart~\eqref{eq:wkpomdp:dec_Valid_cuts} are valid for all the MILPs introduced in this section. \begin{prop}1.5abel{prop:wkpomdp:decPOMDP_valid_cuts} Inequalities~\eqref{eq:wkpomdp:dec_Valid_cuts} are valid for MILP~\eqref{pb:wkpomdp:decPOMDP_MILP}, MILP~\eqref{pb:wkpomdp:decPOMDP_MILP_LB} and Problem~\eqref{pb:wkpomdp:decPOMDP_NLP}, and there exists a solution of the linear relaxation of~\eqref{pb:wkpomdp:decPOMDP_MILP} that does not satisfy constraints~\eqref{eq:wkpomdp:dec_Valid_cuts}. \end{prop} As in the generic POMDP case, inequalities~\eqref{eq:wkpomdp:dec_Valid_cuts} help the resolution of MILP~\eqref{pb:wkpomdp:decPOMDP_MILP} in practice. 
However, since the extended formulation obtained by adding inequalities~\eqref{eq:wkpomdp:dec_Valid_cuts} in MILP~\eqref{pb:wkpomdp:decPOMDP_MILP} has a large number of variables and constraints when the number of components is large ($M \geq 15$), the linear relaxation takes longer to solve. \subsection{Deducing a history-dependent policy from MILP~\eqref{pb:wkpomdp:decPOMDP_MILP}} \label{sub:wkpomdp:first_policy} In MILP~\eqref{pb:wkpomdp:decPOMDP_MILP}, we consider ``local'' policies $\bfdelta^m$ on each component $m$ in $[M].$ However, in general, given a vector of ``local'' policies $(\bfdelta^m)_{m\in[M]},$ there is no guarantee that there exists a policy $\bfdelta$ that coincides with $\bfdelta^m$ for every component $m$ in $[M].$ In this section we describe how we can use MILP~\eqref{pb:wkpomdp:decPOMDP_MILP} to deduce a history-dependent policy for weakly coupled POMDPs. Consider a history $\bfh = (\bfo_1,\bfa_1,\ldots,\bfo_{t-1},\bfa_{t-1}, \bfo_t)$ available at time $t$. Conditionally on $\bfh$, the vectors of state components $\left(S_{t'}^m\right)_{1\leq t'\leq t}$ for all $m$ in $[M]$ become independent, i.e., \[\bbP_{\bfdelta} \left(\bfS_t =\bfs \vert \bfH_t=\bfh \right) = \prod_{m=1}^M \overbrace{\bbP_{\bfdelta}\left(S_t^m=s^m \vert H_t^m=h^m \right)}^{p^m(s^m|h^m)}.\] In the POMDP literature, the probability distribution $\bbP_{\bfdelta}\left(S_t^m\vert H_t^m \right)$ is called the \emph{belief state} of component $m$. We can use the \emph{belief state update} (see Appendix~\ref{sub:app_wkpomdp_heuristic:belief_state}) on each of the components to compute the belief state $p^m(s^m|h^m)$. We introduce the following algorithm: \begin{algorithm}[H] \caption{History-dependent policy $\rm{Act}_{\rmIP,T}^t(\bfh)$} \label{alg:wkpomdp:heuristic_individual} \begin{algorithmic}[1] \STATE \textbf{Input} A history of observations and actions $\bfh \in (\calX_O \times \calX_A)^{t-1}\times \calX_O$.
\STATE \textbf{Output} An action $\bfa \in \calX_A.$ \STATE Compute the belief state $p^m(s|h^m)$ according to the belief state update (see Appendix.~\ref{sub:app_wkpomdp_heuristic:belief_state}) for every state $s$ in $\calX_S^m$ and every component $m$. \STATE Remove constraints and variables indexed by $t'<t$ in MILP~\eqref{pb:wkpomdp:decPOMDP_MILP} and solve the resulting problem with horizon $T - t$, initial probability distributions $1.5eft(p^m(s|h^m)\right)_{s \in \calX_S^m}$ for every component $m$ in $[M]$ and initial observation $\bfo_t$ (see Remark~\ref{rem:pomdp:with_observation}) to obtain an optimal solution $(\bftau^m,\bfdelta^m)_{m\in [M]}.$ 1.5abel{alg:wkpomdp:modify_constraints} \STATE Return $\bfa = (a^{1},1.5dots,a^{M})$ for which $\delta^{t,m}_{a^{m}|o^m} = 1$ for all $m$ in $[M].$ 1.5abel{alg:wkpomdp:take_action} \end{algorithmic} \end{algorithm} Then we define the \emph{implicit} policy $\bfdelta^{\rmIP}$ as follows: \begin{align*} \delta_{\bfa|\bfh}^{\rmIP,t}= \begin{cases} 1, & \text{if}\ \bfa=\mathrm{Act}_{T}^{\rmIP,t}(\bfh) \\ 0, & \text{otherwise} \end{cases}, & \quad \forall \bfh \in \calX_H^t, \enskip \bfa \in \calX_A, \enskip t \in [T], \end{align*} where implicit means that each value of the policy is obtained by solving a mathematical programming formulation. It is not clear at first sight that policy $\bfdelta^{\rmIP}$ is a feasible policy in $\rm{P}_{\rm{his}}^{\rm{wc}}$ because it is not immediate to see that the action returned by Algorithm~\ref{alg:wkpomdp:heuristic_individual} belongs to $\calX_A.$ The theorem below ensures that the implicit policy $\bfdelta^{\rmIP}$ is a feasible policy of $\rm{P}_{\rm{his}}^{\rm{wc}}$, and that the belief updates can only improve the total expected reward. We denote by $\nu_{\rmIP}$ the total expected reward induced by policy $\bfdelta^{\rmIP}$. 
\begin{theo}\label{theo:wkpomdp:heuristic} The implicit policy $\bfdelta^{\rmIP}$ is a feasible policy of $\rm{P}_{\rm{his}}^{\rm{wc}}$ and the inequality $z_{\rmIP} \leq \nu_{\rmIP} \leq v_{\rmhis}^*$ holds. \end{theo} It turns out that we can also use lower bound~\eqref{pb:wkpomdp:decPOMDP_MILP_LB} instead of MILP~\eqref{pb:wkpomdp:decPOMDP_MILP} at step~\ref{alg:wkpomdp:modify_constraints}. We denote by $\rm{Act}_{T}^{\rmLB,t}(\bfh)$ such an algorithm and by $\bfdelta^{\rmLB}$ the induced policy. Thanks to Theorem~\ref{theo:wkpomdp:lower_bound_MILP}, Theorem~\ref{theo:wkpomdp:heuristic} remains true when we use the implicit policy induced by $\rm{Act}_{T}^{\rmLB,t}(\bfh)$. However, we cannot use our other formulations because the resulting actions would not necessarily belong to $\calX_A$. Appendix~\ref{app:LPpolicy} introduces a variant of Algorithm~\ref{alg:wkpomdp:heuristic_individual} which can be used with these formulations, as well as numerical experiments showing that the resulting policies are not as efficient as $\bfdelta^{\rmIP}$. \subsection{Rolling horizon matheuristic} \label{sub:wkpomdp:matheuristic} When the horizon $T$ is long, it is computationally interesting to embed the implicit policy $\bfdelta^{\rmIP}$ in a rolling horizon heuristic, which consists in repeatedly solving an optimization problem with a smaller horizon $T_{\rmr} < T$ at each time step and taking the action at the current time. This type of rolling horizon heuristic is commonly used for multistage stochastic optimization problems in operations research~\citep{Alistair2005}. This approach is also called \emph{Model Predictive Control}~\citep{Bertsekas} in the optimal control literature. See Appendix~\ref{sub:app_wkpomdp_heuristic:rolling_horizon} for more details. \section{Numerical experiments} \label{sec:num} In this section we provide numerical experiments on the mathematical programming formulations for POMDPs and weakly coupled POMDPs.
First, we illustrate the efficiency of our integer formulation~\eqref{pb:pomdp:MILP_pomdp} and the valid inequalities~\eqref{eq:pomdp:Valid_cuts_pomdp} for POMDP on random instances, and then we show the relevance of using our memoryless policies on different kinds of instances from the literature. Second, we show the efficiency of using our rolling horizon matheuristic of Section~\ref{sub:wkpomdp:matheuristic} on a maintenance problem. Full details of the experiments and additional numerical results can be found in Appendix~\ref{app:nums}. In particular, it also includes numerical results on multi-armed bandit problems similar to the ones of~\citet{bertsimas2016decomposable}. All the mathematical programs have been written in \texttt{Julia} \citep{bezanson2017julia} with the \texttt{JuMP} \citep{DunningHuchetteLubin2017} interface and solved using \texttt{Gurobi} 9.0 \citep{gurobi} with the default settings. Experiments have been run on a server with 192Gb of RAM and 32 cores at 3.30GHz. \subsection{Generic POMDPs: Random instances} \label{sub:num:random_instances} In this section, we provide numerical experiments on generic POMDPs showing the efficiency of the valid inequalities~\eqref{eq:pomdp:Valid_cuts_pomdp} for our MILP~\eqref{pb:pomdp:MILP_pomdp}. We solve instances of POMDP over different finite horizons $T \in \{10,20\}$. For each triplet $(\calX_S,\calX_O,\calX_A)$, we generate randomly $30$ instances. We refer the reader to Appendix~\ref{app:nums} for more details about how we generate the instances. The average results over the $30$ instances are reported in Table~\ref{tab:num:random_instances_results}. The first four columns indicate the size of the state space $\big\vert \calX_{S}\big\vert$, observation space $\big\vert \calX_{O}\big\vert$, action space $\big\vert \calX_{A}\big\vert$ and the time horizon $T$.
The fifth column indicates the mathematical program used to solve Problem~\eqref{pb:POMDP} with (strengthened) or without (basic)valid inequalities~\eqref{eq:pomdp:Valid_cuts_pomdp}. In the last three columns, we report the integrity gap $g_{\rm{i}}$, the final gap $g_{\rm{f}}$, the percentage of instances solved to optimality $\mathrm{Opt}$ and the computation time ($\mathrm{Time}$). Table~\ref{tab:num:random_instances_results} shows that the valid inequalities introduced for MILP~\eqref{pb:pomdp:MILP_pomdp} are efficient. Indeed, the integrity gap is always significantly lower with the strengthened formulation, which shows the tightening of linear relaxation and it greatly reduces the computation time. In addition, Inequality~\ref{eq:pomdp:inequality_information} ensures that the integrality gaps of~\eqref{pb:pomdp:MILP_pomdp} reported in Table~\ref{tab:num:random_instances_results} are also bounds of the relative gap between $v_{\mathrm{ml}}^*$ and $v_{\rm{his}}^*.$ \begin{table} \begin{minipage}{0.5\textwidth} \begin{center} \resizebox{8.0cm}{!}{ \begin{tabular}{|c|cc|c|SScc|} 1.5line \multirow{3}{*}{$(\vert\calX_S\vert,\vert\calX_O\vert,\vert\calX_A\vert)$} & \multirow{3}{*}{$T$} & \multirow{3}{*}{$\big\vert\Delta_{\mathrm{ml}}^{\mathrm{d}}\big\vert$} & \multirow{3}{*}{Formulation} & \multicolumn{4}{c}{\textbf{MILP~\eqref{pb:pomdp:MILP_pomdp}}} \vline \\ & & & & {$g_{\mathrm{i}}$} & {$g_{\mathrm{f}}$} & {Opt} & {Time} \\ & & & & {(\%)} & {(\%)} & {(\%)} & (s) \\ 1.5line \text{$(3,3,3)$} & 10 & \text{${10^{14}}$} & Basic & 6.02 & Opt & 100 & 1.49 \\ \cline{5-8} & & & Strengthened & 1.70 & Opt & 100 & 0.62 \\ \cline{2-8} & 20 & \text{${10^{28}}$} & Basic & 6.04 & Opt & 100 & 664 \\ \cline{5-8} & & & Strengthened & 1.52 & Opt & 100 & 466 \\ \cline{2-8} 1.5line \text{$(3,4,4)$} & 10 & \text{${10^{24}}$} &Basic & 9.51 & 0.34 & 87 & 512 \\ \cline{5-8} & & & Strengthened & 3.16 & 0.18 & 87 & 514.4 \\ \cline{2-8} & 20 & \text{${10^{48}}$} & Basic & 9.64 & 1.96 & 43 & 
2221 \\ \cline{5-8} & & & Strengthened & 2.86 & 1.13 & 61 & 1731\\ \cline{2-8} 1.5line \text{$(3,5,5)$} & 10 & \text{${10^{34}}$} &Basic & 9.33 & 0.83 & 57 & 1591 \\ \cline{5-8} & & & Strengthened & 2.35 & 0.38 & 70 & 1113 \\ \cline{2-8} & 20 & \text{${10^{69}}$} & Basic & 9.60 & 3.30 & 26 & 2702 \\ \cline{5-8} & & & Strengthened & 2.14 & 1.14 & 52 & 1879 \\ \cline{2-8} 1.5line \end{tabular}} \end{center} \end{minipage} \begin{minipage}{0.5\textwidth} \begin{center} \resizebox{8.0cm}{!}{ \begin{tabular}{|c|cc|c|SScc|} 1.5line \multirow{3}{*}{$(\vert\calX_S\vert,\vert\calX_O\vert,\vert\calX_A\vert)$} & \multirow{3}{*}{$T$} & \multirow{3}{*}{$\big\vert\Delta_{\mathrm{ml}}^{\mathrm{d}}\big\vert$} & \multirow{3}{*}{Formulation} & \multicolumn{4}{c}{\textbf{MILP~\eqref{pb:pomdp:MILP_pomdp}}} \vline \\ & & & & {$g_{\mathrm{i}}$} & {$g_{\mathrm{f}}$} & {Opt} & {Time} \\ & & & & {(\%)} & {(\%)} & {(\%)} & (s) \\ 1.5line \text{$(4,3,3)$} & 10 & \text{${10^{14}}$} &Basic & 7.39 & Opt & 100 & 26 \\ \cline{5-8} & & & Strengthened& 2.28 & Opt & 100 & 9.16 \\ \cline{2-8} & 20 & \text{${10^{28}}$} & Basic & 6.01 & 1.01 & 60 & 1598 \\ \cline{5-8} & & & Strengthened & 2.03 & 0.32 & 80 & 987 \\ \cline{2-8} 1.5line \text{$(4,4,4)$} & 10 & \text{${10^{24}}$} & Basic & 12.19 & 0.98 & 65 & 1477 \\ \cline{5-8} & & & Strengthened & 3.44 & 0.27 & 80 & 967 \\ \cline{2-8} & 20 & \text{${10^{48}}$} & Basic & 12.29 & 4.66 & 20 & 2900 \\ \cline{5-8} & & & Strengthened & 3.05 & 1.48 & 30 & 2651 \\ \cline{2-8} 1.5line \text{$(4,5,5)$} & 10 & \text{${10^{34}}$} &Basic & 11.64 & 1.76 & 35 & 2427 \\ \cline{5-8} & & & Strengthened & 3.09 & 0.62 & 65 & 1345 \\ \cline{2-8} & 20 & \text{${10^{69}}$} & Basic & 12.04 & 5.46 & 5 & 3413 \\ \cline{5-8} & & & Strengthened & 3.20 & 1.67 & 32 & 2490\\ \cline{2-8} 1.5line \end{tabular}} \end{center} \end{minipage} \caption{Efficiency of the valid inequalities~\eqref{eq:pomdp:Valid_cuts_pomdp} for MILP~\eqref{pb:pomdp:MILP_pomdp} on random instances of generic 
POMDPs, with a time limit of $3600$s} 1.5abel{tab:num:random_instances_results} \end{table} \subsection{Generic POMDP: Instances from the literature} 1.5abel{sub:num:literature} In this section, we evaluate the efficiency of MILP~\eqref{pb:pomdp:MILP_pomdp} on instances of POMDP drawn from the literature and we compare its performances with one of the state-of-the-art POMDP solver SARSOP of~\citet{Kurniawati08sarsop}. In particular, it shows how the memoryless policy provided by MILP~\eqref{pb:pomdp:MILP_pomdp} performs on instances from the literature. In fact, SARSOP solver gives an approximate history-dependent policy for the discounted infinite horizon POMDP problem. To adapt this policy for the finite horizon POMDP problem, we proceed as~\citet{DujardinDC15}: We compute a policy using SARSOP solver with a discount factor $\gamma = 0.999$ and we compute the expected sum of rewards over the $T$ time steps by simulation of the history-dependent policy. We perform $10^4$ simulations to compute the expectation. By definition, the objective value $z_{\mathrm{SARSOP}}$ obtained by using this policy is a lower bound of $v_{\mathrm{his}}^*.$ We run the SARSOP algorithm using the Julia library \texttt{POMDPs.jl} of \citet{EgorovSBWGK17}. All the instances can be found at the link~\url{http://pomdp.org/examples/} and further descriptions of each instance are available in the indicated literature on the same website. In particular, it contains two instances \texttt{bridge-repair} and \texttt{machine} that model maintenance problems. For each instance, we report the objective functions $z^*$ and $z_{\rm{SARSOP}}$, and the relative gaps $g(z) = \frac{z_{\rmRc}^* - z}{z_{\rmRc}^*}$ for any $z$ belonging to $\{z^*, z_{\mathrm{SARSOP}}\}.$ The lower the value of $g(z)$, the closer the value of $z$ is to $v_{\rmhis}^*$. All the results are reported in Table~\ref{tab:pomdp:benchmark_instances_results}. The first column indicates the instance considered. 
The three next columns indicate respectively the cardinality of $\calX_S,$ $\calX_O$ and $\calX_A$ of the instance. The fifth column indicates the algorithm used. The last six columns indicate the total expected reward (Obj.) and the gap values for different finite horizons $T \in \{5,10,20\}.$ One may observe that in most cases the optimal value obtained with our MILP is close to the upper bound $z_{\rmRc}^*.$ Thanks to Theorem~\ref{theo:pomdp:MDP_approx_equivalence}, this says that memoryless policies perform well on finite horizon for these instances. In particular, the gap is noticeably small on the instances of maintenance problems. However, as mentioned in the introduction, one can observe that the memoryless policies fail on instances of navigation problems \cite{Littman94memoryless}. We observe this phenomenon on instances of navigation problems, where the goal is to find a target in a maze, and there are a large number of states relative to a small number of observations. It is fairly natural: using a memoryless policy in a maze is misleading because if the decision maker meets a wall, he will act as if it were the first time he meets a wall, and will then always take the same actions. It seems that on these instances the SARSOP policies work best on larger horizons, which is expected since the SARSOP policy is built for an infinite horizon problem. The results in Table~\ref{tab:pomdp:benchmark_instances_results} support the remark of~\citet[Section 3]{Walraven2019} saying that the point-based algorithms for infinite discounted POMDP, such as SARSOP, produce policies that can be inefficient on finite horizon.
\begin{table} \begin{center} \resizebox{14cm}{!}{ \begin{tabular}{|c|ccc|c|cccccc|} 1.5line \multirow{3}{*}{Instances} & \multicolumn{3}{c}{Size} \vline& \multirow{3}{*}{Algorithms} & \multicolumn{6}{c}{Horizon} \vline\\ & {$\vert \calX_S \vert$} & {$\vert \calX_O \vert$} & {$\vert \calX_A \vert$}& & \multicolumn{2}{c}{$T=5$} & \multicolumn{2}{c}{$T=10$} & \multicolumn{2}{c}{$T=20$} \vline\\ & & & & & Obj. & Gap({$\%$}) & Obj. & Gap({$\%$}) & Obj. & Gap({$\%$}) \\ 1.5line \texttt{1d.noisy} & 4 & 2 & 2 & MILP & \textbf{1.56} & \textbf{18.73} & \textbf{2.97} & \textbf{19.18} & \textbf{5.82} & \textbf{18.73}\\ & & & & SARSOP & 0.57 & 70.12 & 0.67 & 81.76 & 0.81 & 88.71 \\ \texttt{4x5x2}$^{*}$ & 39 & 4 & 4 & MILP & \textbf{0.37} & \textbf{58.13} & \textbf{0.75} & \textbf{57.45} & \textbf{1.86} & \textbf{47.58}\\ & & & & SARSOP & 0.08 & 90.87 & 0.08 & 95.28 & 0.08 & 97.50 \\ \texttt{aircraftID} & 12 & 5 & 6 & MILP & \textbf{5.69} & \textbf{0.00} & \textbf{10.10} & \textbf{0.00} & \textbf{19.76} & \textbf{0.00}\\ & & & & SARSOP & 3.39 & 40.46 & 7.63 & 24.46 & 17.32 & 12.41 \\ \texttt{aloha.10} & 30 & 3 & 9 & MILP & 38.04 & 0.56 & 62.74 & 1.66 & 84.92 & 13.84\\ & & & & SARSOP & \textbf{38.15} & \textbf{0.25} & \textbf{63.74} & \textbf{0.20} & \textbf{89.09} & \textbf{9.61} \\ \texttt{cheng.D3-1} & 3 & 3 & 3 & MILP & \textbf{32.29} & \textbf{1.87} & \textbf{64.38} & \textbf{1.11} & \textbf{128.55} & \textbf{0.72}\\ & & & & SARSOP & 32.04 & 2.65 & 64.16 & 1.45 & 128.28 & 0.93 \\ \texttt{cheng.D4-1} & 4 & 4 & 4 & MILP & \textbf{33.83} & \textbf{5.20} & \textbf{67.37} & \textbf{4.10} & \textbf{134.45} & \textbf{3.54}\\ & & & & SARSOP & 32.40 & 9.1 & 65.90 & 6.19 & 133.05 & 4.54 \\ \texttt{cheng.D5-1} & 5 & 5 & 5 & MILP & \textbf{32.89} & \textbf{3.28} & \textbf{65.64} & \textbf{2.25} & \textbf{131.12} & \textbf{1.73}\\ & & & & SARSOP & 32.47 & 4.50 & 65.23 & 2.86 & 130.81 & 1.96 \\ \texttt{learning.c3} & 24 & 3 & 12 & MILP & \textbf{1.63} & \textbf{45.3} & \textbf{2.20} & 
\textbf{26.76} & \textbf{2.33} & \textbf{22.48}\\ & & & & SARSOP & 0.33 & 88.89 & 0.33 & 89.00 & 0.34 & 88.67 \\ \texttt{milos-aaai97}$^{*}$ & 20 & 8 & 6 & MILP & \textbf{26.83} & \textbf{10.28} & \textbf{53.41} & \textbf{36.06} & 92.09 & 55.06\\ & & & & SARSOP & 12.62 & 57.79 & 39.52 & 52.69 & \textbf{97.73} & \textbf{52.31} \\ \texttt{network}$^{*}$ & 7 & 2 & 4 & MILP & 20.30 & 2.49 & 95.06 & 22.85 & 203.87 & 36.02\\ & & & & SARSOP & \textbf{20.88} & \textbf{0.00} & \textbf{95.78} & \textbf{22.26} & \textbf{245.88} & \textbf{22.98} \\ \texttt{bridge-repair}$^{**}$ & 5 & 5 & 10 & MILP & \textbf{1992.77} & \textbf{0.15} & \textbf{7801.56} & \textbf{0.44} & \textbf{27937.93} & \textbf{0.13}\\ & & & & SARSOP & 1514.15 & 24.13 & 6832.99 & 12.80 & 26568.42 & 5.03 \\ \texttt{query.s2} & 9 & 3 & 2 & MILP & \textbf{21.54} & \textbf{0.95} & \textbf{46.25} & \textbf{0.10} & \textbf{96.50} & \textbf{0.11}\\ & & & & SARSOP & 15.77 & 27.50 & 31.68 & 31.56 & 64.91& 30.66 \\ \texttt{machine}$^{**}$ & 256 & 16 & 4 & MILP & \textbf{4.90} & \textbf{0.00} & \textbf{9.50} & \textbf{0.81} & \textbf{17.98} & \textbf{0.05}\\ & & & & SARSOP & 4.90 & 0.00 & 9.35 & 2.38 & 15.69 & 12.79 \\ 1.5line \end{tabular} } \\ \footnotesize{$^{*}$ Instances of navigation problem, $^{**}$ Instances of maintenance problem} \end{center} \caption{Performances of our memoryless policy on benchmark instances. The results written in bold indicate the best value obtained for each instance.} 1.5abel{tab:pomdp:benchmark_instances_results} \end{table} \subsection{Weakly coupled POMDP: Performances of the matheuristic on a maintenance problem} 1.5abel{sub:nums:implicit_policy} The aim of this section is to show how close $\nu_{\rmIP}$ is to the optimal value $v_{\rmhis}^*$, and that policy $\bfdelta^{\rmIP}$ can be computed in a reasonable amount of time on large-scale instances of a practical problem. 
We evaluate the performances of the history-dependent policy $\bfdelta^{\rmIP}$ by running Algorithm~\ref{alg:wkpomdp:matheuristic} on a maintenance problem taken from the literature. Like \citet[Section 5.2]{Walraven2018}, we consider a road authority that performs maintenance on $M$ bridges, each of them evolving independently over a finite horizon $H$. Each bridge is modeled as a POMDP \citep{Ellis1995} with $5$ possible states and observations, and the authority must choose at most $K$ bridges to maintain at each decision time. As mentioned in Example~\ref{sub:problem:example}, this problem can be modeled as a weakly coupled POMDP. In Appendix~\ref{sub:app_nums:implicit_policy} we describe the maintenance problem and the settings. We consider instances with different values of $M$ in $\{3,4,5,10,15,20\}$. We choose a maintenance capacity $K = \max(\floor*{\gamma \times M},1)$, where $\gamma$ is a scalar belonging to $\left\{ 0.2, 0.4, 0.6, 0.8\right\}$ (when $M=3$, then $K$ belongs to $\{1,2\}$) and we solve the problem over the finite horizon $T=24$. We evaluate the performances of matheuristic~\ref{alg:wkpomdp:matheuristic} with formulation $\rmIP$ for rolling horizon $T_{\rmr}$ in $\{2,5\}$. For each instance $(M,K,(\pfrak^m)_{m\in [M]})$, we perform $10^3$ runs of matheuristic~\ref{alg:wkpomdp:matheuristic}. We compute the average total cost $\vert \nu_{\rmIP} \vert$ and the average number of failures $\rmF_{\rmIP}$ over the $10^3$ simulations. We compare $\nu_{\rmIP}$ with the upper bound $z_{\rmRc}$ and the Lagrangian bound $z_{\rmLR}$ by evaluating the average gaps $\rmG_{\rmIP}^{\rmRc} = \frac{z_{\rmRc} - \nu_{\rmIP}}{\vert z_{\rmRc} \vert}$ and $\rmG_{\rmIP}^{\rmLR} = \frac{z_{\rmLR} - \nu_{\rmIP}}{\vert z_{\rmLR} \vert}$. Thanks to Theorem~\ref{theo:wkpomdp:ineq_decPOMDP}, the value of $\rmG_{\rmIP}^{\rmRc}$ indicates how far $\nu_{\rmIP}$ is from $v_{\rmhis}^*$ because $\nu_{\rmIP} \leq v_{\rmhis}^* \leq z_{\rmRc}$.
The lower the value of $\rmG_{\rmIP}^{\rmRc}$, the better the performance of policy $\bfdelta^{\rmIP}$. In addition, for each simulation, we compute the average computation time in seconds of the underlying formulation over all steps of the simulation. We then consider the average value over all the $N$ simulations. For the quantities $\vert \nu_{\rmIP} \vert$ and $\rmF_{\rmIP}$ we also report the standard deviations over all simulations. Table~\ref{tab:wkpomdp:maintenance} displays several results (see Appendix~\ref{sub:app_nums:implicit_policy} for more results). For all the mathematical programs, we set the computation time limit to $3600$ seconds and a final gap tolerance (\texttt{MIPGap} parameter in \texttt{Gurobi}) of $1 \%$, which is enough for the use of our matheuristic. If the resolution has not terminated before this time limit, then we keep the best feasible solution at the end of the resolution. \begin{table} \centering \resizebox{!}{5.0cm}{ \begin{tabular}{|c|cc|ccccccc|} \hline \multirow{2}{*}{$M$} & \multirow{2}{*}{$\gamma$} & \multirow{2}{*}{$T_\rmr$} & $\vert \nu_{\rmIP} \vert$ & Std. err. & $\rmF_{\rmIP}$ & Std. err.
& $\rmG_{\rmIP}^{\rmLR}$ & $\rmG_{\rmIP}^{\rmRc}$ & Time \\ & & & {($\times10^3$)} & {($\times10^3$)} & & & $(\%)$ & $(\%)$ & ($\si{s}$) \\ \hline $10$ & 0.2 & 2 & 22.63 & 5.45 & 18.0 & 5.5 & -34.20 & 17.71 & 0.012 \\ & & 5 & 22.06 & 5.24 & 17.5 & 5.2 & \textbf{-35.85} & \textbf{14.74} & \textbf{0.384}\\\cline{2-10} & 0.4 & 2 & 19.19 & 3.38 & 11.5 & 3.3 & 1.32 & 3.07 & 0.012 \\ & & 5 & 18.91 & 3.26 & 10.6 & 3.2 & \textbf{-0.16} & \textbf{1.57} & \textbf{0.196} \\ \cline{2-10} & 0.6 & 2 & 19.10 & 3.28 & 10.8 & 3.1 & 0.92 & 2.60 & 0.011 \\ & & 5 & 18.81 & 3.06 & 9.7 & 2.9 & \textbf{-0.62} & \textbf{1.03} & \textbf{0.138} \\\cline{2-10} & 0.8 & 2 & 19.09 & 3.27 & 10.8 &3.1 & 0.86 & 2.53 & 0.011 \\ & & 5 & 18.82 & 3.08 & 9.6 & 2.9 & \textbf{-0.56} & \textbf{1.09} & \textbf{0.137} \\ \hline 15 & 0.2 & 2 & 31.54 & 6.03 & 25.0 & 6.0 & -22.56 & 12.73 & 0.017 \\ & & 5 & 30.89 & 5.88 & 24.0 & 5.9 & \textbf{-24.14} & \textbf{10.43} & \textbf{0.591} \\\cline{2-10} & 0.4 & 2 & 28.18 & 4.22 & 19.1 & 4.1 & 0.45 & 1.93 & 0.016 \\ & & 5 & 27.67 & 3.97 & 16.8 & 3.9 & \textbf{-1.39} & \textbf{0.06} & \textbf{0.232} \\ \cline{2-10} & 0.6 & 2 & 28.12 & 4.16 & 18.8 & 4.0 & 0.10 & 1.70 & 0.016 \\ & & 5 & 27.67 & 3.84 & 16.3 & 3.7 & \textbf{-1.51} & \textbf{0.05} & \textbf{0.225} \\\cline{2-10} & 0.8 & 2 & 28.12 & 4.16 & 18.8 & 4.0 & 0.11 & 1.71 & 0.015 \\ & & 5 & 27.65 & 3.86 & 16.2 & 3.7 & \textbf{-1.57} & \textbf{-0.01} &\textbf{0.226} \\ \hline 20 & 0.2& 2 & 45.06 & 7.01 & 35.9 & 7.1 & -20.37 & 8.67 & 0.022 \\ & & 5 & 44.28 & 6.90 & 35.1 & 6.9 & \textbf{-21.74} & \textbf{6.80} & \textbf{0.660} \\ \cline{2-10} & 0.4 & 2 & 41.18 & 4.72 & 23.0 & 4.7 & 0.35 & 1.50 & 0.020 \\ & & 5 & 40.83 & 4.66 & 22.7 & 4.7 & \textbf{-0.49} & \textbf{0.66} & \textbf{0.469} \\ \cline{2-10} & 0.6 & 2 & 40.96 & 4.25 & 18.4 & 4.1 & 0.32 & 1.22 & 0.020 \\ & & 5 & 40.72 & 4.19 & 17.9 & 4.0 & \textbf{-0.28} & \textbf{0.62} & \textbf{0.316} \\ \cline{2-10} & 0.8 & 2 & 40.96 & 4.24 & 18.3 & 4.0 & 0.29 & 1.21 &
0.019 \\ & & 5 & 40.76 & 4.09 & 17.8 & 3.9 & \textbf{-0.19} & \textbf{0.73} & \textbf{0.313} \\ \hline \end{tabular}} \caption{Performances of the matheuristic on different rolling horizons $T_{\rmr} \in \{2,5\}$: Numerical values of $ \vert \nu_{\rmIP} \vert$, $\rmF_{\rmIP}$ (and the corresponding standard errors), $\rmG_{\rmIP}^{\rmLR}$ and $\rmG_{\rmIP}^{\rmRc}$ obtained on an instance $(M,\gamma)$ with $M \in \{10,15,20\}$ and $\gamma \in \{0.2, 0.4, 0.6, 0.8\}$. The values written in bold indicate the best performances of policy $\bfdelta^{\rmIP}$ regarding optimality and scalability (computation time).} \label{tab:wkpomdp:maintenance} \end{table} One may observe that for all instances, the matheuristic involving our MILP~\eqref{pb:wkpomdp:decPOMDP_MILP} delivers promising results even in the most challenging instance ($M=20$). In particular, the values of $\rmG_{\rmIP}^{\rmRc}$ show that the policy $\bfdelta^{\rmIP}$ gives an optimality gap (in the set of history-dependent policies) of at most $10\%$ on the large-scale instance, which is satisfying regarding the complexity of the optimization problem ($\vert \calX_S \vert = \vert \calX_O \vert \approx 10^{14}$). In Table~\ref{tab:wkpomdp:maintenance}, the negative values of $\rmG_{\rmIP}^{\rmRc}$ result from approximation errors due to the Monte-Carlo simulations. It can also be noted that the gap $\rmG_{\rmIP}^{\rmLR}$ takes negative values for some instances, which shows that $\nu_{\rmIP}$ can take larger values than the Lagrangian relaxation for some instances. It highlights the benefit of using the belief state updates in the definition of $\bfdelta^{\rmIP}$. In addition, even for the largest instances ($M = 15$ or $M=20$) and for $T_{\rmr}=5$, the average time per action of $\rmAct_{T_\rmr}^{\rmIP,t}(\bfh_t)$ is on the order of $1.0$ second; this amount of time is still feasible even if the $24$ decision times are close together.
In Appendix~\ref{sub:app_nums:implicit_policy}, the numerical results show that our policy involving MILP~\eqref{pb:wkpomdp:decPOMDP_MILP} gives better performances than other formulations $\{ \rmLB, \rmRc, \rmR \}$. It would seem that using the Lagrangian relaxation within our matheuristic yields competitive results in terms of performances. However, it takes more time to compute the actions and the sampling leads to higher standard errors. \section{Conclusion} \label{sec:conclusion} In this paper, we have considered several mathematical programming formulations for POMDPs with memoryless policies. Valid cuts based on properties of independences strengthen these formulations. We have also leveraged these formulations to build practically efficient history-dependent policies. Furthermore, in order to break the curse of dimensionality that impedes the design of efficient policies for POMDPs modeling systems with several components, we have introduced the notion of weakly coupled POMDPs, which generalizes the weakly coupled stochastic dynamic programs of~\citet{Adelman2008}. We have also provided mathematical programming formulations and policies tailored for these weakly coupled POMDPs. Numerical experiments show their efficiency, notably on some maintenance problems. Our history-dependent policies are designed to be very efficient on POMDPs where memoryless policies are rather efficient, but not on POMDPs where only tiny bits of information are observed, such as maze problems, on which memoryless policies lead to poor decisions. Future directions include the design of efficient policies for such weakly coupled POMDPs with very few observations. \section*{Acknowledgments} We are grateful to Prof.~Fr\'ed\'eric Meunier for his helpful remarks. The authors gratefully acknowledge the financial support of the Operations Research and Machine Learning chaire between Ecole des Ponts Paristech and Air France.
\appendix \section{Examples of weakly coupled POMDP applications} \label{app:examples} In this section we describe three examples of multi-stage stochastic optimization problems that can be formalized as a weakly coupled POMDP. \begin{ex}\label{ex:multi_armed_bandit} \emph{Multi-armed and Restless Bandit} problems are classical resource allocation problems where there are several arms, each of them evolving independently as an MDP, and at each time the decision maker has to activate a subset of arms so as to maximize its expected discounted reward over an infinite horizon. We can consider the \emph{regular} multi-armed bandit problem, where only the activated arm states transit randomly and give an immediate reward, or the \emph{restless} multi-armed bandit problem, where all the arm states transit randomly and give an immediate reward. When the decision maker has only access to a partial observation on each arm instead of the arm state, the problem becomes a partially observable multi-armed bandit problem \citep{Krishnamurthy09}. In this case, each arm evolves individually and independently as a POMDP. Such a problem makes it possible to model practical applications such as clinical trials. In this setting, each component represents a medical treatment and activating a component corresponds to testing the treatment. The state of a medical treatment corresponds to its efficiency and the observation corresponds to a noisy measure of the efficiency of a medical treatment. We can formalize the partially observable multi-armed bandit problem within our weakly coupled POMDP framework. Let $M$ be the number of arms. At each time $t$, the decision maker has to activate $K < M$ arms. Since each component evolves as a POMDP, we use the same notation as in Section~\ref{sub:problem:wkpomdp} to represent the state and the observation.
We define the individual action space $\calX_A^m =\{0,1\}$ of arm $m$ and the full action space is $$\displaystyle \calX_A = \big\{\bfa \in \calX_A^1\times \cdots \times \calX_A^M \colon \sum_{m = 1}^M a^m = K\big\},$$ which has the form~\eqref{eq:problem:def_form_action_space} by setting $\bfD^m(a)=\begin{pmatrix}a\\-a\end{pmatrix}$ for all $m$ in $[M]$ and $\bfb = \begin{pmatrix}K\\-K\end{pmatrix}$. In the case of the regular bandit problems, the immediate reward of component $m$ satisfies $r^m(s,0,s') = 0$, and the transition probabilities satisfy $p^m(s'|s,0)$ equals $1$ if $s=s'$ and $0$ otherwise, for every $s,s' \in \calX_S^m$. The goal of the decision maker is to find a policy $\bfdelta$ in $\Deltaml$ (or $\Deltahis$) maximizing the total expected discounted reward $\bbE_{\bfdelta} \left[ \sum_{t=1}^{T}r\left(\bfS_t,\bfA_t,\bfS_{t+1}\right)\right]$, where $T$ is a finite horizon. \end{ex} \begin{ex}\label{ex:inventory_control} Consider a supplier that delivers a product to $M$ stores. At each time $t$, we denote by $S_t^m$ the inventory level of store $m$. Unfortunately, due to ``inventory records inaccuracy'' \citep{Mersereau2013} from various uncertainties, the supplier does not directly observe this inventory level. Instead, it only has access to a noisy observation $O_t^m$ of the inventory level of store $m$. We assume that the inventory level of store $m$ has a known limited capacity $C^m$. Hence, we set $\calX_S^m:= \{0,\ldots,C^m\}$. Then $O_t^m=o$ is a noisy observation of the current inventory level, whose value belongs to $\calX_O^m:=\calX_S^m$ and is randomly emitted given a current state $S_t^m=s$ according to a known probability $p^m(o|s) = \bbP\left(O_t^m=o|S_t^m=s \right)$. At each time, the supplier has to decide the quantity to produce and to deliver automatically to each store.
We denote by $A_t^m$ the quantity of product delivered to store $m$, which belongs to the individual action space $\calX_A^m := \calX_S^m$. The production has to satisfy resource constraints (raw materials, staff, etc.). Hence, the set of feasible actions has the form $$\calX_A := \left\{\bfa \in \calX_A^1\times \cdots \times \calX_A^M \colon \sum_{m=1}^M h^m a^m \leq H \right\},$$ where $h^m$ is the given number of resources used per unit produced and delivered for store $m$ and $H$ is the given available amount of resource. This action space has the form~\eqref{eq:problem:def_form_action_space} by setting $\bfD^m(a^m) = h^m a^m$ for all $m$ in $[M]$ and $\bfb=H$. The quantity of products in store $m$ cannot exceed capacity $C^m$. Hence, the quantity $\max(S_t^m+A_t^m - C^m, 0)$ is wasted and it induces a waste cost. We denote by $D_t^m$ the random variable representing the demand at store $m$ between time $t$ and $t+1$. The vector of demand is exogenous and independent identically distributed in each store with a known probability distribution $\bbP_D^m$ for store $m$. The inventory level of store $m$ follows the dynamic $$S_{t+1}^m = \max \left(\min\left(S_t^m + A_t^m,C^m\right) - D_t^m, 0 \right),$$ which gives the transition probability distribution $\bbP(S_{t+1}|S_{t},A_t).$ Now we can define the immediate reward function $$r^m(s,a,s') = \mathrm{price}^m(s+a-s') - \mathrm{waste}^m \max\left(s+a - C^m, 0 \right) -\mathrm{shortage}^m\bbE_{\bbP_D^m} \left[\max\left(D^m - (s+a), 0\right) \right],$$ where $\mathrm{price}^m$ is the selling price per unit, $\mathrm{waste}^m$ is the wastage cost per unit and $\mathrm{shortage}^m$ is the shortage cost per unit. It leads us to model this problem as a weakly coupled POMDP. The goal of the supplier is to find a policy $\bfdelta$ in $\Deltaml$ (or $\Deltahis$) maximizing the total expected reward over a finite horizon $T$.
This example has been introduced by \citet{Kleywegt2002} for fully observable inventory levels and \citet{Mersereau2013} justifies the relevance of the POMDP framework for the stochastic inventory control problem. \end{ex} \begin{ex}\label{ex:medical_center} Consider a nurse assignment problem for home health care. A medical center follows $M$ patients at home on a daily basis over a given period of time $T$. On day $t$, we denote by $S_t^m$ the health state of patient $m$, whose value belongs to a finite state space $\calX_S^m$. The medical center does not directly observe the health state of each patient. However, at each time $t$, the medical center has access to a partial observation $O_t^m$ corresponding to a signal sent by a machine which diagnoses patient $m$. We assume that this signal is discrete and noisy. Hence, $\calX_O^m$ is a finite space and an observation $o$ is randomly emitted given a state $s \in \calX_S^m$ according to the probability $p^m(o|s)$. At each time, the medical center has to assign nurses to patients. There are $K_1$ available nurses with skill $1$ and $K_2$ available nurses with skill $2$. On day $t$, we denote by $A_t^m$ the action taken by the medical center on patient $m$, whose value belongs to $\calX_A^m = \{0,1,2,3\}$, with the following meaning. \begin{align*} A_t^m = \begin{cases} & 0 \quad \text{if no nurse is sent to patient $m$} \\ & 1 \quad \text{if a nurse with skill $1$ is sent to patient $m$} \\ & 2 \quad \text{if a nurse with skill $2$ is sent to patient $m$} \\ & 3 \quad \text{if two nurses, one with each skill, are sent to patient $m$} \\ \end{cases} \end{align*} Depending on the skill of the nurses sent to the patient, the health state of each patient evolves randomly according to a transition probability $p^m(s'|s,a)$, for any $s,s' \in \calX_S^m$, $a \in \calX_A^m$ and $m \in [M]$.
Hence, the set of feasible actions is $$\calX_A = \bigg\{\bfa \in \calX_A^1 \times \cdots \times \calX_A^M \colon \sum_{m=1}^M \mathds{1}_{1}(a^m) + \mathds{1}_{3}(a^m) \leq K_1 \ \text{and} \ \sum_{m =1}^M \mathds{1}_{2}(a^m) + \mathds{1}_{3}(a^m) \leq K_2 \bigg\},$$ which has the form~\eqref{eq:problem:def_form_action_space} by setting $\bfD^m(a)= \begin{pmatrix}\mathds{1}_{1}(a) + \mathds{1}_{3}(a)\\ \mathds{1}_{2}(a) + \mathds{1}_{3}(a)\end{pmatrix}$ for all $m$ in $[M]$, and $\bfb = \begin{pmatrix} K_1 \\ K_2 \end{pmatrix}$. Now we can define the immediate reward function $$r^m(s,a,s') = -\mathrm{cost}_1^m(\mathds{1}_{1}(a)+\mathds{1}_{3}(a)) - \mathrm{cost}_2^m(\mathds{1}_{2}(a)+\mathds{1}_{3}(a)) - \mathrm{emergency}^m \mathds{1}_{s_{\mathrm{critic}}^m}(s'),$$ where $\mathrm{cost}_i^m$ is the cost induced by sending a nurse with skill $i \in \{1,2\}$ to patient $m$, $s_{\mathrm{critic}}^m$ is the critical health state of patient $m$ and $\mathrm{emergency}^m$ is the cost induced by an emergency because patient $m$ reaches its critical health state. It leads us to model this problem as a weakly coupled POMDP. The goal of the medical center is to find a policy $\bfdelta$ in $\Deltaml$ (or $\Deltahis$) maximizing its total expected reward over a finite horizon $T$. \end{ex} \section{Proofs of Section~\ref{sec:pomdp}} \label{app:pomdp} \proof[Proof of Theorem~\ref{theo:pomdp:NLP_optimal_solution}] Let $(\bfmu, \bfdelta)$ be a feasible solution of Problem~\eqref{pb:pomdp:NLP_pomdp}. We prove by induction on $t$ that $\mu_s^1 = \bbP_{\bfdelta}\big(S_1 = s\big)$, $\mu_{soa}^t=\bbP_{\bfdelta}\big(S_t = s, O_t=o, A_t=a \big)$ and $\mu_{sas'}^t =\bbP_{\bfdelta}\big(S_t = s, A_t=a, S_{t+1} = s' \big)$. At time $t=1$, the statement is immediate. Suppose that it holds up to $t-1$.
Constraints~\eqref{eq:pomdp:NLP_indep_action}, \eqref{eq:pomdp:NLP_consistency_s} and the induction hypothesis ensure that \begin{align*} \mu_{soa}^{t} = \delta_{a|o}^t p(o|s) \sum_{o',a'} \mu_{so'a'}^t = \delta_{a|o}^t p(o|s) \sum_{s',a'} \mu_{s'a's}^{t-1} &= \delta_{a|o}^t p(o|s) \sum_{s',a'} \bbP_{\bfdelta}\left(S_{t-1}=s', A_{t-1} = a', S_t=s \right) \\ & = \delta_{a|o}^t p(o|s) \bbP_{\bfdelta}\left(S_t=s \right) \\ &= \bbP_{\bfdelta}\left(S_t=s, O_t=o, A_t=a \right), \end{align*} where the last equality comes from the conditional independences and the law of total probability. Constraints~\eqref{eq:pomdp:NLP_consistency_sa}, \eqref{eq:pomdp:NLP_indep_state} and the induction hypothesis ensure that: \begin{align*} \mu_{sas'}^t = p(s'|s,a)\sum_{\ovs} \mu_{sa\ovs}^t = p(s'|s,a) \sum_{o} \mu_{soa}^t &= p(s'|s,a) \sum_{o} \bbP_{\bfdelta} \left( S_t=s, O_t=o, A_t=a \right) \\ &= \bbP_{\bfdelta}(S_t=s, A_t=a,S_{t+1}=s') \end{align*} where the last equality comes from the conditional independences and the law of total probability. Consequently, $$\sum_{t=1}^T \sum_{\substack{s,s' \in \calX_S \\ a \in \calX_A}} r(s,a,s') \bbP_{\bfdelta}\left(S_t=s,A_t=a,S_{t+1}=s' \right) = \bbE_{\bfdelta} \bigg[ \sum_{t=1}^{T}r(S_t,A_t,S_{t+1})\bigg],$$ which implies that $\bfdelta$ is optimal if and only if $(\bfmu,\bfdelta)$ is optimal for Problem~\eqref{pb:pomdp:NLP_pomdp} and $v_{\rm{ml}}^* = z^*$. This completes the proof. \qed \proof[Proof of Proposition~\ref{prop:pomdp:valid_cuts_pomdp}] Let $(\bfmu,\bfdelta)$ be a feasible solution of Problem~\eqref{pb:pomdp:MILP_pomdp}. We define $$\mu_{s'a'soa}^t = \delta^t_{a|o}p(o|s)\mu_{s'a's}^{t-1}$$ for all $(s',a',s,o,a) \in \calX_S \times \calX_A \times \calX_S \times \calX_O \times \calX_A$, $t \in [T]$.
These new variables satisfy constraints in \eqref{eq:pomdp:Valid_cuts_pomdp}: \begin{align*} \sum_{a \in \calX_A} \mu_{s'a'soa}^t &= \left(\sum_{a \in \calX_A} \delta^t_{a|o}\right)p(o|s)\mu_{s'a's}^{t-1} = p(o|s)\mu_{s'a's}^{t-1}\\ \sum_{a' \in \calX_A, s' \in \calX_S} \mu_{s'a'soa}^t &= \left(\sum_{a' \in \calX_A, s' \in \calX_S} \mu_{s'a's}^{t-1}\right) \delta^t_{a|o} p(o|s) = \delta^t_{a|o} p(o|s) \sum_{o' \in \calX_O,a' \in \calX_A} \mu_{so'a'}^{t} = \mu_{soa}^t \end{align*} \noindent The remaining constraint \eqref{eq:pomdp:Valid_cuts_pomdp_main} is obtained using the following observation: \begin{align*} \frac{\mu_{s'a'soa}^t}{\sum_{s'' \in \calX_S} \mu_{s'a's''oa}^t} = \frac{p(o|s)\mu_{s'a's}^{t-1}}{\sum_{s'' \in \calX_S} p(o|s'')\mu_{s'a's''}^{t-1}} = \frac{p(o|s)p(s|s',a')\sum_{\ovs} \mu_{s'a'\ovs}^{t-1}}{\sum_{s'' \in \calX_S} p(o|s'')p(s''|s',a')\sum_{\ovs} \mu_{s'a'\ovs}^{t-1}} = \frac{\displaystyle p(o|s)p(s|s',a')}{\displaystyle \sum_{s'' \in \calX_S} p(o|s'')p(s''|s',a')} \end{align*} By setting $p(s|s',a',o) = \frac{\displaystyle p(o|s)p(s|s',a')}{\displaystyle \sum_{\overline{s} \in \calX_S} p(o|\overline{s})p(\overline{s}|s',a')}$, equality \eqref{eq:pomdp:Valid_cuts_pomdp_main} holds. If $\sum_{s'' \in \calX_S} \mu_{s'a's''oa}^t = 0$, then $\mu_{s'a'soa}^t=0$ and constraint~\eqref{eq:pomdp:Valid_cuts_pomdp_main} is satisfied. Now we prove that there exists a solution $\bfmu$ of the linear relaxation of MILP~\eqref{pb:pomdp:MILP_pomdp} that does not satisfy inequalities~\eqref{eq:pomdp:Valid_cuts_pomdp}. We define such a solution $(\bfmu,\bfdelta)$.
We set $\mu_s^1 = p(s)$ for all $s$ in $\calX_S$, and for all $t$ in $[T]$: \begin{align} &\mu_{soa}^1 = \begin{cases} &p(o|s)\mu_s^1, \ \text{if} \ a = \phi(s) \\ &0,\ \text{otherwise} \end{cases}, & \text{if \ $t=1$}, \\ &\mu_{sas'}^{t} = p(s'|s,a) \sum_{o \in\calX_O} \mu_{soa}^t & \\ &\mu_{soa}^t = \begin{cases} &p(o|s)\sum_{s'\in \calX_S,a' \in \calX_A} \mu_{s'a's}^{t-1}, \ \text{if} \ a = \phi(s) \label{eq:pomdp:proof_mu_soa}\\ &0,\ \text{otherwise} \end{cases}, & \text{if \ $t\geq 2$}, \\ &\delta_{a|o}^t = \begin{cases} &\frac{\sum_{s \in \calX_S}\mu_{soa}^t}{\sum_{s \in \calX_S, a \in \calX_A} \mu_{soa}^{t}} \ \text{if} \ \sum_{s \in \calX_S, a \in \calX_A} \mu_{soa}^{t} \neq 0 \label{eq:pomdp:proof_delta}\\ &\mathds{1}_{\tilde{a}}(a),\ \text{otherwise} \end{cases} & \end{align} where $\phi : \calX_S \rightarrow \calX_A$ is an arbitrary mapping and $\tilde{a}$ is an arbitrary element in $\calX_A$. We prove that $\bfmu$ is a feasible solution of the linear relaxation of MILP~\eqref{pb:pomdp:MILP_pomdp}. First, it is easy to see constraints~\eqref{eq:pomdp:NLP_initial2}--\eqref{eq:pomdp:NLP_indep_state} are satisfied. It remains to prove that constraints~\eqref{eq:pomdp:MILP_McCormick_1}, \eqref{eq:pomdp:MILP_McCormick_2}, \eqref{eq:pomdp:MILP_McCormick_3} are satisfied. First, \eqref{eq:pomdp:MILP_McCormick_1} holds because \begin{align*} \mu_{soa}^t \leq \max\left(0,p(o|s) \sum_{s' \in \calX_S, a' \in \calX_A} \mu_{s'a's}^{t-1}\right) \leq p(o|s) \sum_{s' \in \calX_S, a' \in \calX_A} \mu_{s'a's}^{t-1}, \end{align*} Second, \eqref{eq:pomdp:MILP_McCormick_2} holds because \begin{align*} \mu_{soa}^t \leq \sum_{s' \in \calX_S}\mu_{s'oa}^t = \delta_{a|o}^t \sum_{s' \in \calX_S}p(o|s')\sum_{s'' \in \calX_S, a'' \in \calX_A} \mu_{s''a''s'}^{t-1} \leq \delta_{a|o}^t, \end{align*} where we used definition~\eqref{eq:pomdp:proof_delta} from the first to second line.
Third, \eqref{eq:pomdp:MILP_McCormick_3} holds because \begin{align*} \mu_{soa}^t - p(o|s)\sum_{\substack{s' \in \calX_S\\ a' \in \calX_A}} \mu_{s'a's}^{t-1} & \geq \sum_{s'' \in \calX_S} \overbrace{\mu_{s''oa}^t - p(o|s'')\sum_{s' \in \calX_S, a' \in \calX_A} \mu_{s'a's''}^{t-1}}^{\leq 0} \\ &= \sum_{s'',s' \in \calX_S, a' \in \calX_A} p(o|s'')\mu_{s'a's''}^{t-1}(\delta_{a|o}^t - 1) \\ &\geq \delta_{a|o}^t - 1, \end{align*} which yields~\eqref{eq:pomdp:MILP_McCormick_3}. Therefore, $(\bfmu,\bfdelta)$ is a solution of the linear relaxation of MILP~\eqref{pb:pomdp:MILP_pomdp}. Now, we prove that such a solution does not satisfy inequalities~\eqref{eq:pomdp:Valid_cuts_pomdp}. We define the new variables: \begin{align*} \mu_{s'a'soa}^t = \begin{cases} & \mu_{s'a's}^{t-1}\frac{\mu_{soa}^t}{\sum_{o'\in \calX_O,a' \in \calX_A}\mu_{so'a'}^t} \ \text{if} \ \sum_{o'\in \calX_O,a' \in \calX_A}\mu_{so'a'}^t \neq 0 \\ & 0 \ \text{otherwise} \end{cases} \end{align*} Hence, $\bfmu$ satisfies constraints~\eqref{eq:pomdp:Valid_cuts_pomdp_consistency1} and \eqref{eq:pomdp:Valid_cuts_pomdp_consistency2}. However, constraint~\eqref{eq:pomdp:Valid_cuts_pomdp_main} is not satisfied in general. Indeed, since the mapping $\phi$ is arbitrary, we can set $\phi$ such that $p(s|s',a',o) >0$ and $\mu_{s'a'soa}^t = 0$. Therefore, there exists a solution $\bfmu$ of the linear relaxation of MILP~\eqref{pb:pomdp:MILP_pomdp} that does not satisfy inequalities~\eqref{eq:pomdp:Valid_cuts_pomdp}. This completes the proof. \qed \proof[Proof of Theorem~\ref{theo:pomdp:MDP_approx_equivalence}] We first prove the equivalence between the linear relaxation of our MILP~\eqref{pb:pomdp:MILP_pomdp} and its MDP approximation. Note that the two objective functions are the same. Hence, we only need to prove that we can construct a feasible solution from one problem to the other. Let $(\bfmu,\bfdelta)$ be a feasible solution of the linear relaxation of Problem~\eqref{pb:pomdp:MILP_pomdp}.
Constraints~\eqref{eq:pomdp:NLP_consistency_sa}--\eqref{eq:pomdp:NLP_indep_state} ensure that $(\mu_s^1,\mu_{sas'}^t)_{t \in [T]}$ is a feasible solution of Problem~\eqref{pb:pomdp:LP_MDP}. Let $\bfmu$ be a feasible solution of Problem~\eqref{pb:pomdp:LP_MDP}. It suffices to define variables $\delta_{a|o}^t$ and $\mu_{soa}^t$ for all $a$ in $\calX_A$, $o$ in $\calX_O$, $s$ in $\calX_S$, and $t$ in $[T]$. We define these variables using~\eqref{eq:pomdp:proof_mu_soa} and~\eqref{eq:pomdp:proof_delta}. In the proof of Proposition~\ref{prop:pomdp:valid_cuts_pomdp}, we proved that $(\bfmu,\bfdelta)$ is a feasible solution of the linear relaxation of MILP~\eqref{pb:pomdp:MILP_pomdp}. Consequently, the equivalence holds and $z_{\rm{R}}^* = v_{\rm{MDP}}^*$. Now we prove that inequalities~\eqref{eq:pomdp:inequality_information} hold. Note that Proposition~\ref{prop:pomdp:valid_cuts_pomdp} ensures that $$z^* \leq z_{\rm{R}^{\rm{c}}}^* \leq z_{\rm{R}}^*.$$ It remains to prove the two following inequalities. \begin{align} &z^* \leq v_{\rm{his}}^* \label{eq:pomdp:ineq1}\\ &v_{\rm{his}}^* \leq z_{\rm{R}^{\rm{c}}}^* \label{eq:pomdp:ineq2} \end{align} First, we prove Inequality~\eqref{eq:pomdp:ineq1}. By definition, we have $\Deltaml \subseteq \Deltahis$. Hence, we obtain $v_{\rm{ml}}^* \leq v_{\rm{his}}^*$. Using Theorem~\ref{theo:pomdp:NLP_optimal_solution}, we deduce that $z^* \leq v_{\rm{his}}^*$. Therefore the inequality $v_{\rm{ml}}^* \leq v_{\rm{his}}^* \leq z_{\rm{R}}^*$ holds. Now we prove Inequality~\eqref{eq:pomdp:ineq2}. The proof is based on a probabilistic interpretation of the valid inequalities~\eqref{eq:pomdp:Valid_cuts_pomdp}. It suffices to prove that for any policy $\bfdelta$ in $\Deltahis$, the probability distribution $\bbP_{\bfdelta}$ satisfies the weak conditional independences~\eqref{eq:pomdp:weakIndep}. Let $\bfdelta \in \Deltahis$.
The probability distribution $\bbP_{\bfdelta}$ over the random variables $(S_t,A_t,O_t)_{1\leq t \leq T}$ according to $\bfdelta$ is exactly \begin{align}\label{eq:pomdp:proof_distrib} \bbP_{\bfdelta} (\left(S_t=s_t,O_t=o_t,A_t=a_t \right)_{1\leq t \leq T}) &= \bbP_{\bfdelta}(S_1=s_1)\prod_{t=1}^T \bbP_{\bfdelta}(S_{t+1}=s_{t+1}|S_t=s_t,A_t=a_t) \notag \\ &\bbP_{\bfdelta}(O_t=o_t|S_t=s_t) \delta^t_{a_t|h_t} \end{align} where $h_t = \{O_1=o_1,A_1=a_1,O_2=o_2,\ldots, O_t=o_t\}$ is the history of observations and actions. Note that the policy at time $t$ is the conditional probability $\delta^t_{a_t|h_t} = \bbP_{\bfdelta}(A_t=a_t|H_t=h_t)$. We define: \begin{align*} &\mu_{s}^1 = \bbP_{\bfdelta}(S_1=s)\\ &\mu_{soa}^t = \bbP_{\bfdelta}(S_t=s,O_t=o,A_t=a)\\ &\mu_{sas'}^t = \bbP_{\bfdelta}(S_t=s,A_t=a,S_{t+1} = s')\\ &\mu_{s'a'soa}^t = \bbP_{\bfdelta}(S_{t-1}=s',A_{t-1}=a',S_t=s,O_t=o,A_t=a) \end{align*} We define the policy $\tilde{\bfdelta}$ using~\eqref{eq:pomdp:proof_delta}. It is easy to see that constraints of~\eqref{pb:pomdp:MILP_pomdp} are satisfied. Furthermore, we have $\tilde{\bfdelta} \in \Deltaml$. It remains to prove that inequalities~\eqref{eq:pomdp:Valid_cuts_pomdp} are satisfied. By definition of a probability distribution, we directly see that constraints~\eqref{eq:pomdp:Valid_cuts_pomdp_consistency1} are satisfied. We prove \eqref{eq:pomdp:Valid_cuts_pomdp_consistency2} and \eqref{eq:pomdp:Valid_cuts_pomdp_main}.
We compute the left-hand side of \eqref{eq:pomdp:Valid_cuts_pomdp_consistency2}: \begin{align*} &\sum_{a \in \calX_A} \mu_{s'a'soa}^t = \sum_{a \in \calX_A} \bbP_{\bfdelta}(S_{t-1}=s',A_{t-1}=a',S_t=s,O_t=o,A_t=a)\\ &= \sum_{a \in \calX_A} \sum_{\substack{s_1,\ldots,s_{t-2} \\ h_{t-1}}} \\ &\bbP_{\bfdelta}((S_i=s_i,O_i=o_i,A_i=a_i)_{1\leq i\leq t-2},S_{t-1}=s',O_{t-1}=o_{t-1},A_{t-1}=a',S_t=s,O_t=o,A_t=a)\\ &= p(o|s)p(s|s',a')\sum_{\substack{s_1,\ldots,s_{t-2} \\ h_{t-1}}} \bbP_{\bfdelta}((S_i=s_i,O_i=o_i,A_i=a_i)_{1\leq i\leq t-2},S_{t-1}=s',O_{t-1}=o_{t-1},A_{t-1}=a') \\ & \sum_{a \in \calX_A} \delta^t_{a|h_t} \\ &= p(o|s)p(s|s',a')\sum_{\substack{s_1,\ldots,s_{t-2} \\ h_{t-1}}} \bbP_{\bfdelta}((S_i=s_i,O_i=o_i,A_i=a_i)_{1\leq i\leq t-2},S_{t-1}=s',O_{t-1}=o_{t-1},A_{t-1}=a')\\ &= p(o|s)p(s|s',a') \bbP_{\bfdelta}(S_{t-1}=s',A_{t-1}=a')\\ &= p(o|s)\mu_{s'a's}^{t-1} \end{align*} where we used the definition of the probability distribution~\eqref{eq:pomdp:proof_distrib} at the third equation. Therefore, constraints~\eqref{eq:pomdp:Valid_cuts_pomdp_consistency2} are satisfied by $\bfmu$.
To prove that constraints~\eqref{eq:pomdp:Valid_cuts_pomdp_main} are satisfied, we prove that $$\bbP_{\bfdelta}(S_t=s_t| S_{t-1}=s_{t-1},A_{t-1}=a_{t-1},O_t=o_t,A_t=a_t) = \bbP_{\bfdelta}(S_t=s_t| S_{t-1}=s_{t-1},A_{t-1}=a_{t-1},O_t=o_t)$$ We compute $\bbP_{\bfdelta}(S_t=s_t| S_{t-1}=s_{t-1},A_{t-1}=a_{t-1},O_t=o_t,A_t=a_t)$: \begin{align*} &\bbP_{\bfdelta}(S_t=s_t| S_{t-1}=s_{t-1},A_{t-1}=a_{t-1},O_t=o_t,A_t=a_t) \\ &= \frac{\bbP_{\bfdelta}(S_{t-1}=s_{t-1},A_{t-1}=a_{t-1},S_t=s_t,O_t=o_t,A_t=a_t)}{\bbP_{\bfdelta}(S_{t-1}=s_{t-1},A_{t-1}=a_{t-1},O_t=o_t,A_t=a_t)}\\ &= \frac{\sum_{\substack{s_1,\ldots,s_{t-2} \\ h_{t-1}}} \bbP_{\bfdelta}((S_i=s_i,O_i=o_i,A_i=a_i)_{1\leq i\leq t})}{\sum_{\substack{s_1,\ldots,s_{t-2},s_t \\ h_{t-1}}} \bbP_{\bfdelta}((S_i=s_i,O_i=o_i,A_i=a_i)_{1\leq i\leq t})} \\ &= \frac{\sum_{\substack{s_1,\ldots,s_{t-2} \\ h_{t-1}}} \delta_{a_t|h_t}^t p(o_t|s_t)p(s_{t}|s_{t-1},a_{t-1}) \bbP_{\bfdelta}((S_i=s_i,O_i=o_i,A_i=a_i)_{1\leq i\leq t-1})}{\sum_{\substack{s_1,\ldots,s_{t-2},s_t' \\ h_{t-1}}} \delta_{a_t|h_t}^t p(o_t|s_t)p(s_{t}|s_{t-1},a_{t-1}) \bbP_{\bfdelta}((S_i=s_i,O_i=o_i,A_i=a_i)_{1\leq i\leq t-1})} \end{align*} \begin{align*} &= \frac{p(o_t|s_t)p(s_{t}|s_{t-1},a_{t-1}) \sum_{\substack{s_1,\ldots,s_{t-2} \\ h_{t-1}}} \delta_{a_t|h_t}^t \bbP_{\bfdelta}((S_i=s_i,O_i=o_i,A_i=a_i)_{1\leq i\leq t-1})}{\sum_{s_t'} p(o_t|s_t')p(s_{t}'|s_{t-1},a_{t-1})\sum_{\substack{s_1,\ldots,s_{t-2} \\ h_{t-1}}} \delta_{a_t|h_t}^t \bbP_{\bfdelta}((S_i=s_i,O_i=o_i,A_i=a_i)_{1\leq i\leq t-1})} \\ &= \frac{p(o_t|s_t)p(s_{t}|s_{t-1},a_{t-1})}{\sum_{s_t'} p(o_t|s_t')p(s_{t}'|s_{t-1},a_{t-1})} \end{align*} where the last line follows from the fact that the term $\delta_{a_t|h_t}^t \bbP_{\bfdelta}((S_i=s_i,O_i=o_i,A_i=a_i)_{1\leq i\leq t-1})$ does not depend on $s_t$. Hence, constraints~\eqref{eq:pomdp:Valid_cuts_pomdp_main} are satisfied by $\bfmu$.
We deduce that $\bfmu$ is a feasible solution of MILP~\eqref{pb:pomdp:MILP_pomdp} satisfying the valid inequalities~\eqref{eq:pomdp:Valid_cuts_pomdp}. Therefore, \begin{align*} \bbE_{\bfdelta} \left[ \sum_{t=1}^T r(S_t,A_t,S_{t+1}) \right] &= \sum_{t=1}^T\sum_{s,a,s'}\bbP_{\bfdelta}(S_t=s,A_t=a,S_{t+1}=s')r(s,a,s') \leq z_{\rm{R}^{\rm{c}}}^* \end{align*} By maximizing over $\bfdelta$ the left-hand side, we obtain $v_{\rm{his}}^* \leq z_{\rm{R}^{\rm{c}}}^*$. This completes the proof. \qed \section{The rolling horizon matheuristic} \label{app:wkpomdp:heuristic} In this section, we provide more details on the belief state update used in Algorithm $\rmAct_{T}^{\rmp,t}(\bfh)$ (Section~\ref{sub:wkpomdp:first_policy}) and the rolling horizon matheuristic (Section~\ref{sub:wkpomdp:matheuristic}). \subsection{Belief state update} \label{sub:app_wkpomdp_heuristic:belief_state} Given a POMDP $\left(\calX_S,\calX_O,\calX_A,\pfrak,\bfr \right)$, at each time $t$, the belief state $(p(s|H_t))_{s \in \calX_S}$ is a sufficient statistic of the history of actions and observations $H_t$ \cite[Theorem 4]{Eckles1968}. Given the action $a_t$ taken at time $t$ and the observation $o_{t+1}$ received at time $t+1$, the belief state can be easily updated over time according to the belief state update \citep[Eq. (1)]{Littman1994}: \begin{align}\label{eq:belief_state_update} p(s_{t+1}|H_{t+1}) = p(s_{t+1}|H_{t}, a_t, o_{t+1}) = \frac{p(o_{t+1}|s_{t+1})\sum_{s \in \calX_S} p(s_{t+1}|s, a_t)p(s| H_t)}{\sum_{s' \in \calX_S} p(o_{t+1}|s')\sum_{s \in \calX_S} p(s'|s, a_t)p(s| H_t)} \end{align} \subsection{Matheuristic with a rolling horizon} \label{sub:app_wkpomdp_heuristic:rolling_horizon} We denote by $T_{\rmr} < T$ such a rolling horizon. It leads to a matheuristic with formulation $\rmp$.
\begin{algorithm}[H] \caption{Rolling horizon matheuristic with formulation $\rmp$.} \label{alg:wkpomdp:matheuristic} \begin{algorithmic}[1] \STATE \textbf{Input}: $T$, $T_{\rm{r}}$, $\big((\calX_S^m,\calX_O^m,\calX_A^m,\pfrak^m,\bfr^m,\bfD^m)_{m\in[M]},\bfb\big)$ \FOR{$t=1,\ldots,T$} \STATE Receive observation $\bfo_t$ \STATE Take action $\mathrm{Act}_{t+T_\rmr}^{\rmp,t}(\bfh_t)$ \label{alg:wkpomdp:matheuristic:take_action} \ENDFOR \end{algorithmic} \end{algorithm} Figure~\ref{fig:wkpomdp:scheme_implicite_policy} illustrates two consecutive iterations of Algorithm~\ref{alg:wkpomdp:matheuristic} with a rolling horizon $T_{\mathrm{r}}=5.$
\begin{figure}
  \centering
  % NOTE(review): the \includegraphics commands of the two subfigures were lost
  % during extraction; restore them before compiling.
  \begin{subfigure}{\linewidth}
    \centering
    \caption{Iteration $t=3$ of Algorithm~\ref{alg:wkpomdp:matheuristic}.}
  \end{subfigure}
  \begin{subfigure}{\linewidth}
    \centering
    \caption{Iteration $t=4$ of Algorithm~\ref{alg:wkpomdp:matheuristic}.}
  \end{subfigure}
  \caption{Scheme of the evaluation of our implicit policy $\bfdelta^{\rmIP}$.}
  \label{fig:wkpomdp:scheme_implicite_policy}
\end{figure}
\section{Proofs of Section~\ref{sec:wkpomdp}} \label{app:wkpomdp:proofs} \proof[Proof of Theorem~\ref{theo:wkpomdp:ineq_decPOMDP}] First, we prove that the linear relaxation of MILP~\eqref{pb:wkpomdp:decPOMDP_MILP} is a relaxation of the MDP approximation. Let $\bfmu$ be a feasible solution of the linear program~\eqref{pb:pomdp:LP_MDP}. The variable $\mu_{\bfs\bfa\bfs'}^t$ is defined for $\bfa \in \calX_A = \{ \bfa \in \calX_A^1\times\cdots\times\calX_A^M \colon \sum_{m=1}^M \bfD^m(a^m) \leq \bfb \}$. We extend its definition on $\calX_A' = \calX_A^1\times\cdots\times\calX_A^M$ by setting $\mu_{\bfs\bfa\bfs'}^t:=0$ for $\bfa \in \calX_A' \backslash \calX_A$. 
Now we define the variables $(\tau_{s}^{1,m},\tau_{soa}^{t,m},\tau_{sas'}^{t,m})_{s,o,a,t,m}$ as follows \begin{align*} &\tau_s^{1,m} = \sum_{s^{-m} \in \calX_S^{-m}} \mu_{\bfs}^1,& \quad &\tau_{sas'}^{t,m} = \sum_{\substack{s^{-m} \in \calX_S^{-m} \\ a^{-m} \in \calX_A^{-m} \\ s'^{-m} \in \calX_S^{-m}}} \mu_{\bfs\bfa\bfs'}^{t}, \end{align*} and, for each component $m$, we define the variables $\delta_{a|o}^{t,m}$ and $\tau_{soa}^{t,m}$ using~\eqref{eq:pomdp:proof_mu_soa} and~\eqref{eq:pomdp:proof_delta}. We proved that this solution is a feasible solution of the linear relaxation on each component. It remains to prove that $\sum_{m=1}^M \sum_{a \in \calX_A^m} \tau_{a}^{t,m} \bfD^m(a) \leq \bfb$. It comes from the following computation: \begin{align*} \sum_{m=1}^M \sum_{a \in \calX_A^m} \tau_{a}^{t,m} \bfD^m(a) &= \sum_{m=1}^M \sum_{\bfa \in \calX_A'} \mu_{\bfa}^t\bfD^m(a^m) \\ &= \sum_{\bfa \in \calX_A'} \mu_{\bfa}^t \overbrace{\sum_{m=1}^M\bfD^m(a^m)}^{\leq \bfb} \leq \bfb \end{align*} Therefore, $(\bftau^m,\bfdelta^m)_{m\in [M]}$ is a feasible solution of the linear relaxation of MILP~\eqref{pb:wkpomdp:decPOMDP_MILP}. In addition, the objective value induced by variable $\bfmu$ is \begin{align*} \sum_{t=1}^T \sum_{\bfs,\bfs',\bfa} \mu_{\bfs\bfa\bfs'}^t\sum_{m=1}^M r^m(s^m,a^m,s'^m) &= \sum_{t=1}^T \sum_{m=1}^M \sum_{\bfs,\bfs',\bfa} \mu_{\bfs\bfa\bfs'}^t r^m(s^m,a^m,s'^m) \\ &= \sum_{t=1}^T \sum_{m=1}^M \sum_{s^m,a^m,s'^m} \tau_{s^m a^m s'^m}^{t,m} r^m(s^m,a^m,s'^m) \leq z_{\rmR}. \end{align*} It shows that $v_{\rmMDP}^* \leq z_{\rmR}$. Now we prove that $v_{\rmhis}^* \leq z_{\rmRc}$. To prove this result, it suffices to observe that the linear relaxation of MILP~\eqref{pb:wkpomdp:decPOMDP_MILP} with valid inequalities~\eqref{eq:wkpomdp:dec_Valid_cuts} is a relaxation of the linear relaxation of MILP~\eqref{pb:pomdp:MILP_pomdp} with valid inequalities~\eqref{eq:pomdp:Valid_cuts_pomdp}. 
This result appears using the previous technique that defines the ``local'' marginals $\tau_{sas'o'a'}^{t,m} = \sum_{\substack{s^{-m},a^{-m},s'^{-m} \\ o'^{-m},a'^{-m}}} \mu_{\bfs\bfa\bfs'\bfo'\bfa'}^t$ on each component $m \in [M]$. Therefore, we obtain that $z_{\rmRc}^* \leq z_{\rmRc}$. On the other hand, Theorem~\ref{theo:pomdp:MDP_approx_equivalence} ensures that $v_{\rmhis}^* \leq z_{\rmRc}^*$, which achieves the proof. \qed \proof[Proof of Theorem~\ref{theo:wkpomdp:lower_bound_MILP}] Let $(\bftau^m,\bfdelta^m)_{m\in [M]}$ be a feasible solution of MILP~\eqref{pb:wkpomdp:decPOMDP_MILP_LB}. We prove that $(\bftau^m,\bfdelta^m)_{m\in [M]}$ is a feasible solution of MILP~\eqref{pb:wkpomdp:decPOMDP_MILP} and~\ref{pb:decPOMDP_wc}. First, we show that $(\bftau^m,\bfdelta^m)_{m\in [M]}$ is a feasible solution of MILP~\eqref{pb:wkpomdp:decPOMDP_MILP}. We define the variables $\tau_a^{t,m}$ for any $a \in \calX_A^m,$ $m \in [M],$ and $t \in [T]$ such that $\sum_{s \in \calX_S^m, o \in \calX_O^m} \tau_{soa}^{t,m} = \tau_a^{t,m}.$ In addition, we introduce the variables $\tau_o^{t,m} = \sum_{s \in \calX_S^m, a \in \calX_A^m} \tau_{soa}^{t,m}$ for any $o \in \calX_O^m,$ any $m \in [M]$ and $t \in [T].$ It suffices to show that inequality~\eqref{eq:wkpomdp:decPOMDP_MILP_linking_cons} holds. We compute the left-hand side of~\eqref{eq:wkpomdp:decPOMDP_MILP_linking_cons}: \begin{align*} \sum_{m =1}^M \sum_{a \in \calX_A^m} \bfD^m(a)\tau_{a}^{t,m} = \sum_{m =1}^M\sum_{a \in \calX_A^m,o \in \calX_O^m} \bfD^m(a) \delta_{a|o}^{t,m} \tau_{o}^{t,m} = \sum_{m =1}^M \sum_{a \in \calX_A^m} \bfD^m(a) \bbE_{\tau^{t,m}} [\delta_{a|O_t^m}^{t,m}] \leq \bfb \end{align*} The first equality is a consequence of the tightness of the McCormick constraints~\eqref{eq:pomdp:MILP_McCormick_1}-\eqref{eq:pomdp:MILP_McCormick_3}. 
The second equality comes from the fact that the variables $(\tau_{o}^{t,m})_{o \in \calX_O^m}$ define a probability distribution over $\calX_O^m.$ Finally, the last inequality results from \[\sum_{m=1}^M \sum_{a \in \calX_A^m} \bfD^m(a) \bbE_{\tau^{t,m}} [\delta_{a|O_t^m}^{t,m}] \leq \sum_{m =1}^M \sum_{a \in \calX_A^m} \bfD^m(a)\max_{o \in \calX_O^m}(\delta_{a|o}^{t,m}) \leq \bfb.\] Therefore, inequality~\eqref{eq:wkpomdp:decPOMDP_MILP_linking_cons} holds and MILP~\eqref{pb:wkpomdp:decPOMDP_MILP} is a relaxation of MILP~\eqref{pb:wkpomdp:decPOMDP_MILP_LB}. Second, we show that $(\bftau^m,\bfdelta^m)_{m\in [M]}$ is a feasible solution of~\ref{pb:decPOMDP_wc}. We define a policy over $\calX_A \times \calX_O$: \begin{align} \delta_{\bfa|\bfo}^t = \prod_{m=1}^M \delta_{a^m|o^m}^{t,m} \end{align} for all $\bfa \in \calX_A$, $\bfo \in \calX_O$ and $t \in [T]$. It suffices to prove that $\bfdelta$ belongs to $\Delta$. Let $\bfo \in \calX_O$ and $t \in [T]$. \begin{align*} \sum_{\bfa \in \calX_A} \delta_{\bfa | \bfo}^t &= \sum_{\bfa \in \calX_A} \prod_{m=1}^M \delta_{a^m|o^m}^{t,m} = \sum_{\bfa \in \calX_A^1\times\cdots\times\calX_A^M} \prod_{m=1}^M \delta_{a^m|o^m}^{t,m} = 1 \end{align*} The second equality comes from the fact that for any $\bfa \in \calX_A^1\times\cdots\times\calX_A^M$ such that $\sum_{m =1}^M \bfD^m(a^m) > \bfb,$ we have $\prod_{m=1}^M \delta_{a^m|o^m}^{t,m} = 0$ because of Constraints~\eqref{eq:wkpomdp:decPOMDP_link_constraints_Lb}. Therefore, $\bfdelta$ is a feasible policy of~\ref{pb:decPOMDP_wc}. Since $\bfdelta= \prod_{m=1}^M \bfdelta^m$, all the components are independent and the marginal probabilities $\bftau^m$ are exact in the sense that they derive from policy $\bfdelta$. It follows that the objective functions are the same and the inequalities $z_{\rmLB} \leq v_{\rmml}^{*}$ and $z_{\rmLB} \leq z_{\rmIP}$ hold. 
\qed \proof[Proof of Theorem~\ref{theo:wkpomdp:upper_bound_NLP}] First, we prove that the nonlinear Program~\eqref{pb:wkpomdp:decPOMDP_NLP} is a relaxation of MILP~\eqref{pb:wkpomdp:decPOMDP_MILP}. Let $(\bftau^m,\bfdelta^m)_{m\in [M]}$ be a feasible solution of MILP~\eqref{pb:wkpomdp:decPOMDP_MILP}. We prove that $(\bftau^m,\bfdelta^m)_{m\in [M]}$ is a feasible solution of the nonlinear Program~\eqref{pb:wkpomdp:decPOMDP_NLP}. It suffices to prove that for all $m \in [M],$ $(\bftau^m,\bfdelta^m)$ satisfies constraints~\eqref{eq:pomdp:NLP_indep_action}. It comes from the tightness of the McCormick inequalities~\eqref{eq:pomdp:MILP_McCormick_1}-\eqref{eq:pomdp:MILP_McCormick_3} when the policy is deterministic. Hence, it is a relaxation with the same objective function. Therefore, the inequality $z_{\rmIP} \leq z_{\rmUB}$ holds. Second, we prove that the nonlinear Program~\eqref{pb:wkpomdp:decPOMDP_NLP} is a relaxation of~\ref{pb:decPOMDP_wc}. Let $\bfdelta$ be a feasible policy of~\ref{pb:decPOMDP_wc}. We want to define a solution of the nonlinear program~\eqref{pb:wkpomdp:decPOMDP_NLP}. We extend the domain of $\bfdelta$ to $\calX_A^1\times\cdots\times\calX_A^M$ by setting $\delta_{\bfa|\bfo}^t = 0$ when $\sum_{m \in [M]} \bfD^m(a^m) > \bfb,$ for all $\bfo \in \calX_O.$ It is easy to see that $\bfdelta$ is still a policy on $\calX_A^1\times\cdots\times\calX_A^M.$ Theorem~\ref{theo:pomdp:NLP_optimal_solution} ensures that there exists $\bfmu$ such that $(\bfmu,\bfdelta)$ is a feasible solution of MILP~\eqref{pb:pomdp:MILP_pomdp}. 
We define the variables $\bftau^m$ and $\bfdelta^m$ on component $m \in [M]$ by induction \begin{align*} &\tau_s^{1,m} = \sum_{s^{-m} \in \calX_S^{-m}} \mu_{\bfs}^1,& \quad &\tau_{soa}^{t,m} = \sum_{\substack{s^{-m} \in \calX_S^{-m} \\ o^{-m} \in \calX_O^{-m} \\ a^{-m} \in \calX_A^{-m}}} \mu_{\bfs\bfo\bfa}^t, \\ &\tau_{sas'}^{t,m} = \sum_{\substack{s^{-m} \in \calX_S^{-m} \\ a^{-m} \in \calX_A^{-m} \\ s'^{-m} \in \calX_S^{-m}}} \mu_{\bfs\bfa\bfs'}^{t}, & \quad & \delta_{a|o}^{t,m} = \sum_{\substack{s^{-m} \in \calX_S^{-m} \\ o^{-m} \in \calX_O^{-m} \\ a^{-m} \in \calX_A^{-m}}} \delta^t_{\bfa|\bfo} \prod_{m' \neq m} p^{m'}(o^{m'}|s^{m'})\tau_{s^{m'}}^{t,m'} \end{align*} for all $s \in \calX_S^m$, $o \in \calX_O^m$, $a \in \calX_A^m$ and $t \in [T].$ By definition of $\bftau^m,$ if $\bfdelta^m$ is in $\Delta^m,$ then the constraints~\eqref{eq:wkpomdp:NLP_per_POMDP_v1} are satisfied by $(\bftau^m,\bfdelta^m)_{m\in [M]}.$ We prove that $\bfdelta^m$ is in $\Delta_{\mathrm{ml}}^m.$ \begin{align*} \sum_{a \in \calX_A^m} \delta_{a|o}^{t,m} &= \sum_{a \in \calX_A^m} \sum_{\substack{s^{-m} \in \calX_S^{-m} \\ o^{-m} \in \calX_O^{-m} \\ a^{-m} \in \calX_A^{-m}}} \delta^t_{\bfa|\bfo} \prod_{m' \neq m} p^{m'}(o^{m'}|s^{m'})\tau_{s^{m'}}^{t,m'} = \sum_{\substack{s^{-m} \in \calX_S^{-m} \\ o^{-m} \in \calX_O^{-m}}} \prod_{m' \neq m} p^{m'}(o^{m'}|s^{m'})\tau_{s^{m'}}^{t,m'} =1 \end{align*} for all $o \in \calX_O^m,$ $m \in [M]$ and $t \in [T].$ The last equality comes from the fact that by induction we have that $\sum_{s \in \calX_S^m} \tau_s^{t,m} = 1.$ Therefore, $\bfdelta^m \in \Deltaml^m.$ It remains to prove that constraints~\eqref{eq:wkpomdp:decPOMDP_NLP_linking_cons} are satisfied by $(\bftau^m,\bfdelta^m)_{m\in [M]}.$ We compute the left-hand side of constraint~\eqref{eq:wkpomdp:decPOMDP_NLP_linking_cons}. 
\begin{align*} \sum_{m=1}^M \sum_{a \in \calX_A^m} \bfD^m(a) \tau_{a}^{t,m} &= \sum_{m=1}^M \sum_{a \in \calX_A^m} \bfD^m(a)\bigg(\sum_{\substack{\bfs \in \calX_S, \bfo \in \calX_O \\ \bfa' \in \calX_A:a'^m=a^m}} \delta^t_{\bfa'|\bfo} \prod_{m' =1}^M p^{m'}(o^{m'}|s^{m'})\tau_{s^{m'}}^{t,m'}\bigg) \\ &= \sum_{\substack{\bfs \in \calX_S, \bfo \in \calX_O \\ \bfa' \in \calX_A}} \delta^t_{\bfa'|\bfo} \prod_{m'=1}^M p^{m'}(o^{m'}|s^{m'})\tau_{s^{m'}}^{t,m'} \sum_{m =1}^M \sum_{a \in \calX_A^m} \bfD^m(a^m)\\ &= \overbrace{\sum_{\substack{\bfs \in \calX_S, \bfo \in \calX_O \\ \bfa \in \calX_A}} \delta^t_{\bfa|\bfo} \prod_{m'=1}^M p^{m'}(o^{m'}|s^{m'})\tau_{s^{m'}}^{t,m'}}^{=1} \underbrace{\sum_{m=1}^M \bfD^m(a^m)}_{1.5eq \bfb}\\ &1.5eq \bfb \end{align*} Therefore, constraints~\eqref{eq:wkpomdp:decPOMDP_NLP_linking_cons} are satisfied. Consequently, the nonlinear program~\eqref{pb:wkpomdp:decPOMDP_NLP} is a relaxation of~\ref{pb:decPOMDP_wc}. In addition, the objective functions are equal. We deduce that $v_{\rmml}^* 1.5eq z_{\rmUB}.$ \qed \proof[Proof of Proposition~\ref{prop:wkpomdp:decPOMDP_valid_cuts}] Proposition~\ref{prop:pomdp:valid_cuts_pomdp} ensures that inequalities~\eqref{eq:wkpomdp:dec_Valid_cuts} are valid on each component. Hence, these inequalities are valid for MILP~\eqref{pb:wkpomdp:decPOMDP_MILP}, MILP~\eqref{pb:wkpomdp:decPOMDP_MILP_LB} and Problem~\eqref{pb:wkpomdp:decPOMDP_NLP}. Proposition~\ref{prop:pomdp:valid_cuts_pomdp} also ensures that there are solutions of the linear relaxation of~\eqref{pb:pomdp:MILP_pomdp} that do not satisfy constraints~\eqref{eq:pomdp:Valid_cuts_pomdp} on each component. 
\qed \proof[Proof of Theorem~\ref{theo:wkpomdp:heuristic}] First, we prove that at each time $t \in [T]$, for every history $\bfh\in\calX_H^t$ the element $\rmAct^t(\bfh)$ belongs to $\calX_A,$ i.e., $\sum_{m=1}^M \bfD^m\left(\rmAct^{t,m}(\bfh)\right) \leq \bfb.$ Let $(\bftau^m,\bfdelta^m)_{m \in [M]}$ be a feasible solution of MILP~\eqref{pb:wkpomdp:decPOMDP_MILP} at step~\ref{alg:wkpomdp:modify_constraints}. Since $O_t^m= o_t^m$ almost surely, $\tau_{soa}^{t,m}$ is equal to $0$ when $o \neq o_t^m$. Hence, we obtain \begin{align*} \tau_a^{t,m} &= \sum_{s \in \calX_S^m, o \in \calX_O^m} \tau_{soa}^{t,m} = \sum_{s \in \calX_S^m} \tau_{s o_t^{m}a}^{t,m} = \delta_{a|o_t^m}^{t,m} \end{align*} It ensures that $\tau_{a}^{t,m} \in \{0,1\}$ for any $a \in \calX_A^m$ and $m \in [M]$. Let $\bfa^*$ be the action taken at step~\ref{alg:wkpomdp:take_action}. Therefore, $\tau_{a}^{t,m} = 1$ when $a = a^{m*}$ and $0$ otherwise. Now we compute the linking constraint of~\eqref{eq:problem:def_form_action_space}: \begin{align*} \sum_{m =1}^M \bfD^m(a^{m*}) = \sum_{m =1}^M \bfD^m(a^{m*})\tau_{a^{m*}}^{t,m} = \sum_{m=1}^M\sum_{a \in \calX_A^m} \bfD^m(a)\tau_{a}^{t,m} \leq \bfb \end{align*} The last inequality comes from the fact that $(\bftau^m,\bfdelta^m)_{m \in [M]}$ satisfies constraint~\eqref{eq:wkpomdp:decPOMDP_MILP_linking_cons}. Now we prove the inequalities. The inequality $\nu_{\rmIP}\leq v_{\rmhis}^*$ holds because $\bfdelta^{\rmIP} \in \Delta_{\rmhis}$. It remains to show that $z_{\rmIP} \leq \nu_{\rmIP}$. We do it using a backward induction. Let $(\bftau^{*m},\bfdelta^{*m})_{m \in [M]}$ be an optimal solution of Problem~\eqref{pb:wkpomdp:decPOMDP_MILP}. We denote by $\rmP_t(\bfh_t)$ the feasible set of the optimization problem solved in $\rmAct_T^{\rmIP,t}(\bfh_t)$, for every $t$ in $[T]$. 
We consider the following induction hypothesis at time $t$: \[\max_{(\bftau^m,\bfdelta^m)_{m\in [M]} \in \rmP_t(\bfh_t)}\sum_{m=1}^M\bbE_{\bfdelta^{m}}\left[\sum_{t'=t}^T r^m(S_{t'}^m,A_{t'}^m,S_{t'+1}^m) | H_{t}^m=h_{t}^m \right] \leq \bbE_{\bfdelta^{\rmIP}}\left[ \sum_{m=1}^M\sum_{t'=t}^T r^m(S_{t'}^m,A_{t'}^m,S_{t'+1}^m) | \bfH_{t}=\bfh_{t} \right]\] If $t=T$, then the left-hand side is exactly equal to the right-hand side: \begin{align*} &\max_{(\bftau^m,\bfdelta^m)_{m\in [M]} \in \rmP_T(\bfh_T) } \sum_{m=1}^M\bbE_{\bfdelta^m}\left[ r^m(S_T^m,A_T^m,S_{T+1}^m) | H_T^m=h_T^m \right] \\ &= \max_{(\bftau^m,\bfdelta^m)_{m\in [M]} \in \rmP_T(\bfh_T) } \sum_{m=1}^M\bbE_{\bfdelta^m}\left[ r^m(S_T^m,A_T^m,S_{T+1}^m) | S_T^m \sim p^m(\cdot|h_T^m) \right] = \bbE_{\bfdelta^{\rmIP}} \left[\sum_{m=1}^M r^m(S_T^m,A_T^m,S_{T+1}^m) | \bfH_{T}=\bfh_{T}\right] \end{align*} The first equality comes from the fact that the belief state is a sufficient statistic of the history. It proves the induction hypothesis for $t=T$. Suppose that the induction hypothesis holds at time $t+1$. 
We compute the term in $t$: \begin{align*} &\max_{(\bftau^m,\bfdelta^m)_{m \in [M]} \in \rmP_t(\bfh_t)} \sum_{m=1}^M \bbE_{\bfdelta^{m}}1.5eft[\sum_{t'=t}^T r^m(S_{t'}^m,A_{t'}^m,S_{t'+1}^m) | H_{t}^m=h_{t}^m\right] \\ &= \max_{(\bftau^m,\bfdelta^m)_{m \in [M]} \in \rmP_t(\bfh_t)} \sum_{m=1}^M \bbE_{\bfdelta^{t,m}} 1.5eft[r^m(S_{t}^m,A_{t}^m,S_{t+1}^m) |H_t^m = h_t^m \right] \\ &+ \sum_{m=1}^M \sum_{a_t^m,o_{t+1}^m} \Bigg(\bbP_{\bfdelta{t,m}}1.5eft(O_{t+1}^m=o_{t+1}^m,A_{t}^m=a_t^m|H_t^m=h_t^m \right) \\ &\times \overbrace{\bbE_{\bfdelta^m} 1.5eft[\sum_{t'=t+1}^T r^m(S_{t'}^m,A_{t'}^m,S_{t'+1}^m) |\underbrace{H_{t}^m = h_{t}^m, A_t^m=a_t^m,O_{t+1}^m=o_{t+1}^m}_{H_{t+1}^m=h_{t+1}^m} \right]}^{\text{does not depend on } \bfdelta^{t,m}} \Bigg)\\ &1.5eq \bbE_{\bfdelta^{t,\rmIP}} 1.5eft[\sum_{m=1}^M r^m(S_{t}^m,A_{t}^m,S_{t+1}^m) |\bfH_t = \bfh_t \right] + \bigg(\sum_{\bfa_t,\bfo_{t+1}}\bbP_{\bfdelta{t,\rmIP}}1.5eft(\bfO_{t+1}=\bfo_{t+1},\bfA_{t}=\bfa_t|\bfH_t=\bfh_t \right) \\ &\times\overbrace{\max_{(\bftau^m,\bfdelta^m) \in \rmP_{t+1}(\bfh_{t+1})} \sum_{m=1}^M \bbE_{\bfdelta^m} 1.5eft[\sum_{t'=t+1}^T r^m(S_{t'}^m,A_{t'}^m,S_{t'+1}^m) |H_{t+1}^m = h_{t+1}^m \right]}^{\text{induction hypothesis}} \bigg) \\ &1.5eq \sum_{m=1}^M \bbE_{\bfdelta^{m,\rmIP}}1.5eft[ r^m(S_t^m,A_t^m,S_{t+1}^m) | H_{t}^m=h_{t}^m\right] \end{align*} The first inequality above comes from the fact that there exists an optimal solution where $\bfdelta^{t,\rmIP}$ is the policy at time $t$ by definition of $\bfdelta^{\rmIP}$ and by decomposing the maximum operator in the sum of the second term. This latter operation can be done since $\bbE_{\bfdelta^m} 1.5eft[\sum_{t'=t+1}^T r^m(S_{t'}^m,A_{t'}^m,S_{t'+1}^m) |H_{t+1}^m = h_{t+1}^m \right]$ does not depend on the policy $\bfdelta^{t',m}$ for $t' < t+1$. It proves the backward induction. 
Finally, given an optimal feasible solution $(\bftau^{*m},\bfdelta^{*m})_{m\in [M]}$ of MILP~\eqref{pb:wkpomdp:decPOMDP_MILP}, we get that: \begin{align*} z_{\rmIP} = \sum_{m=1}^M \bbE_{\bfdelta^{*m}}\left[ \sum_{t=1}^T r^m(S_t^m,A_t^m,S_{t+1}^m) \right] &= \bbE\left[ \bbE_{\bfdelta^{*m}}\left[ \sum_{m=1}^M \sum_{t=1}^T r^m(S_t^m,A_t^m,S_{t+1}^m) |O_1^m=o_1^m \right] \right] \\ &\leq \bbE\left[ \max_{(\bftau^m,\bfdelta^m)_{m \in [M]} \in \rmP_1(\bfO_1)} \sum_{m=1}^M \bbE_{\bfdelta^m}\left[ \sum_{t=1}^T r^m(S_t^m,A_t^m,S_{t+1}^m) |O_1^m=o_1^m \right] \right]\\ &\leq \sum_{m=1}^M \bbE_{\bfdelta^{m,\rmIP}}\left[ \sum_{t=1}^T r^m(S_t^m,A_t^m,S_{t+1}^m) \right] = \nu_{\rmIP} \end{align*} The first inequality comes from the inversion of the maximum operator and the expectation. It achieves the proof. \qed \section{Lagrangian relaxations and column generation approach} \label{app:ColGen} In this section, we detail the proof of Proposition~\ref{prop:wkpomdp:lagrangian_relax} and we explain how we compute the value of the Lagrangian relaxation $z_{\rmLR}$ using a column generation approach. \subsection{Proof of Proposition~\ref{prop:wkpomdp:lagrangian_relax}} \label{sub:app_ColGen:Lagrangian_relax} We denote by $\bfbeta=(\beta^t)_{t \in [T]}$ the dual variables associated with constraints~\eqref{eq:wkpomdp:decPOMDP_NLP_linking_cons}. 
If we relax constraints~\eqref{eq:wkpomdp:decPOMDP_NLP_linking_cons}, then we obtain the Lagrangian function \begin{align}1.5abel{eq:app_ColGen:lagrangian_function} \calL1.5eft( \bftau,\bfdelta,\bfbeta \right) = \sum_{t=1}^T\sum_{m=1}^M \sum_{\substack{s,s' \in \calX_S^m \\ a \in \calX_A^m}} r^m(s,a,s') \tau_{sas'}^{t,m} + \sum_{t=1}^T (\beta^t)^{\bfT} 1.5eft(\bfb - \sum_{m=1}^M \sum_{a \in \calX_A^m}\bfD^m(a)\tau_a^{t,m} \right), \end{align} for any $(\bftau^m,\bfdelta^m)_{m \in [M]}.$ Then, we introduce the dual function $\calG : \bbR_{+}^{T\times q} \rightarrow \bbR,$ with values \begin{subequations}1.5abel{pb:app_ColGen:dual_function} \begin{alignat}{2} \calG 1.5eft(\bfbeta \right):= \max_{\bftau,\bfdelta} \enskip & \sum_{t=1}^T \sum_{m=1}^M \sum_{\substack{s,s' \in \calX_S^m \\ a \in \calX_A^m}} r^m(s,a,s')\tau_{sas'}^{t,m} + \sum_{t=1}^T (\beta^t)^{\bfT} 1.5eft(\bfb - \sum_{m=1}^M \sum_{a \in \calX_A^m}\bfD^m(a)\tau_a^{t,m} \right) & \\ \mathrm{s.t.} \enskip & 1.5eft(\bftau^m,\bfdelta^m\right) \in \calQ1.5eft(T,\calX_S^m,\calX_O^m,\calX_A^m,\pfrak^m \right) \quad \forall m \in [M] \end{alignat} \end{subequations} By weak duality, for any $\bfbeta,$ the dual function~\eqref{pb:app_ColGen:dual_function} provides an upper bound obtained by using Approximation (A). We now explain how to compute the dual function. As it is usually the case for Lagrangian relaxation, for every $\bfbeta \in \bbR_{+}^{T\times q},$ the maximum in the computation of $\calG(\bfbeta)$ decomposes over the sum of the maximum over each component. However, the formulations obtained for each component are still nonlinear. Fortunately, the following proposition ensures that we can linearize the formulation without changing the value of the dual function. 
\begin{prop}1.5abel{prop:wkpomdp:dual_function} For all $\bfbeta \in \bbR_{+}^{T\times q},$ the dual function can be written as \begin{align}1.5abel{pb:wkpomdp:MILP_dual_function} \calG1.5eft( \bfbeta \right) = \sum_{t=1}^T (\beta^t)^{\bfT} \bfb + \sum_{m=1}^M \calG^m(\bfbeta) \end{align} where $\calG^m(\bfbeta)$ is the quantity \begin{subequations} \begin{alignat*}{2} \calG^m 1.5eft(\bfbeta \right):= \max_{\bftau^m,\bfdelta^m} \enskip & \sum_{t=1}^T \sum_{\substack{s,s' \in \calX_S^m \\ a \in \calX_A^m}} 1.5eft(r^m(s,a,s') - (\bfbeta^t)^{\bfT}\bfD^m(a) \right)\tau_{sas'}^{t,m} & \quad &\\ \mathrm{s.t.} \enskip & 1.5eft(\bftau^m,\bfdelta^m\right) \in \calQ^{\mathrm{d}}1.5eft(T,\calX_S^m,\calX_O^m,\calX_A^m,\pfrak^m \right) & \end{alignat*} \end{subequations} \end{prop} It follows from Proposition~\ref{prop:wkpomdp:dual_function} that the dual function can be computed by solving MILP~\eqref{pb:pomdp:MILP_pomdp} on each component of the system, which is in general easier than solving Problem~\eqref{pb:wkpomdp:decPOMDP_NLP}. \proof[Proof of Proposition~\ref{prop:wkpomdp:dual_function}] Let $\bfbeta \in \bbR_{+}^{Tq}.$ Then, the value function $\calG$ in $\bfbeta$ can be written: \begin{subequations} \begin{alignat*}{2} \calG 1.5eft(\bfbeta \right)= \max_{1.5eft(\bftau^m,\bfdelta^m\right)_{m\in [M]}} \enskip & \sum_{t=1}^T \sum_{m=1}^M \sum_{\substack{s,s' \in \calX_S^m \\ a \in \calX_A^m}} 1.5eft(r^m(s,a,s') - (\bfbeta^t)^{\bfT}\bfD^m(a) \right)\tau_{sas'}^{t,m} + \sum_{t=1}^T (\beta^t)^{\bfT} \bfb & \quad &\\ \mathrm{s.t.} \enskip & 1.5eft(\bftau^m,\bfdelta^m\right) \in \calQ1.5eft(T,\calX_S^m,\calX_O^m,\calX_A^m,\pfrak^m \right) & \quad \forall m \in [M] \end{alignat*} \end{subequations} Since the second term does not depend on $(\bftau^m,\bfdelta^m)_{m\in [M]},$ we only consider the maximization on the first term. 
In such a problem, there are no linking constraints between the components, which enables to decompose the maximization operator along the components as follows. \begin{subequations} \begin{alignat*}{2} \calG 1.5eft(\bfbeta \right)= \sum_{m=1}^M\max_{\bftau^m,\bfdelta^m} \enskip & \sum_{t=1}^T \sum_{\substack{s,s' \in \calX_S^m \\ a \in \calX_A^m}} 1.5eft(r^m(s,a,s') - (\bfbeta^t)^{\bfT}\bfD^m(a) \right)\tau_{sas'}^{t,m} + \sum_{t=1}^T (\beta^t)^{\bfT} \bfb & \quad &\\ \mathrm{s.t.} \enskip & 1.5eft(\bftau^m,\bfdelta^m\right) \in \calQ1.5eft(T,\calX_S^m,\calX_O^m,\calX_A^m,\pfrak^m \right) & \end{alignat*} \end{subequations} Theorem~\ref{theo:pomdp:NLP_optimal_solution} ensures that the optimization subproblem above on component $m$ corresponds to a POMDP problem with memoryless policies of POMDP $1.5eft(\calX_S^m,\calX_O^m,\calX_A^m,\pfrak^m,\tilde{\bfr}\right)$ where $\tilde{r}^m(s,a,s') = r^m(s,a,s')- (\bfbeta^t)^{\bfT}\bfD^m(a)$ for any $s,s' \in \calX_S^m$ and $a \in \calX_A^m.$ Thanks to Proposition~\ref{prop:pomdp:det_policies}, the subproblem on component $m$ can be solved using deterministic policies. Therefore, we can replace $\calQ1.5eft(T,\calX_S^m,\calX_O^m,\calX_A^m,\pfrak^m \right)$ by $\calQ^{\mathrm{d}}1.5eft(T,\calX_S^m,\calX_O^m,\calX_A^m,\pfrak^m \right)$ for any component $m$ and we obtain \begin{align*} \calG1.5eft( \bfbeta \right) = \sum_{t=1}^T (\beta^t)^{\bfT} \bfb + \sum_{m=1}^M \calG^m(\bfbeta), \end{align*} which achieves the proof. \qed By definition of the Lagrangian relaxation, we have $z_{\rmLR}=\min_{\bfbeta \in \bbR_{+}^{Tq}} \calG(\bfbeta)$. We are now able to prove Proposition~\ref{prop:wkpomdp:lagrangian_relax}, which we recall here: 1.5agrangian* \proof Thanks to Proposition~\ref{prop:wkpomdp:dual_function}, the dual functions of MILP~\eqref{pb:wkpomdp:decPOMDP_MILP} and NLP~\eqref{pb:wkpomdp:decPOMDP_NLP} are equal. It follows that the value of the Lagrangian relaxations are equal. 
Now we prove inequality~\eqref{eq:wkpomdp:weak_duality}. First, the inequality $z_{\rmUB} \leq z_{\rmLR}$ comes from weak duality (see e.g.~\citet[Proposition 5.1.3]{Bertsekas99}). Second, to show the second inequality $z_{\rmLR} \leq z_{\rmR},$ it suffices to observe that the dual function $\calG(\bfbeta)$ of Problem~\eqref{pb:wkpomdp:decPOMDP_NLP} is also the dual function of Problem~\eqref{pb:wkpomdp:decPOMDP_MILP}. Indeed, in the expression of $\calG^m(\bfbeta)$ we can replace $\calQ\left(T,\calX_S^m,\calX_O^m,\calX_A^m,\pfrak^m \right)$ by $\calQ^{\mathrm{d}}\left(T,\calX_S^m,\calX_O^m,\calX_A^m,\pfrak^m \right)$ because there always exists an optimal policy that is deterministic on the POMDP $\left(\calX_S^m,\calX_O^m,\calX_A^m,\pfrak^m,\bfr^m-\bfbeta^{\bfT}\bfD^m \right)$. A classical result in operations research (see e.g. \citet[Theorem 1]{Geoffrion1974}) states that the bound of the Lagrangian relaxation of an integer program is not worse than the bound of the linear relaxation. It shows that $z_{\rmLR} \leq z_{\rmR}.$ It remains to prove that $z_{\rmLR} \leq z_{\rmRc}$ and $z_{\rmRc} \leq z_{\rmR}.$ The second one comes from the fact that we have a smaller feasible set in the linear relaxation by adding the valid inequalities. The first one comes by adding valid inequalities~\eqref{eq:wkpomdp:dec_Valid_cuts} in the expression of $\calG^m(\bfbeta)$, which is possible since the inequalities are valid, and by using the same arguments (weak duality and Geoffrion's Theorem) we conclude that $z_{\rmLR} \leq z_{\rmRc}.$ \qed \subsection{Column generation approach to compute $z_{\rmLR}$} \label{sub:app_ColGen:col_gen} In this section, we explain how we apply a column generation algorithm combined with a Dantzig-Wolfe decomposition to compute $z_{\rmLR}$. Proposition~\ref{prop:wkpomdp:lagrangian_relax} ensures that $z_{\rmLR}$ is also the value of the Lagrangian relaxation of MILP~\eqref{pb:wkpomdp:decPOMDP_MILP}. 
Thanks to Geoffrion's Theorem \citep[Theorem 8.2]{Conforti:2014:IP:2765770}, the value of the Lagrangian relaxation MILP~\eqref{pb:wkpomdp:decPOMDP_MILP} satisfies: \begin{equation} \begin{aligned}1.5abel{pb:app_ColGen:decPOMDP_MILP_DW} z_{\rmLR} = \max_{(\bftau^m,\bfdelta^m)_{m \in [M]}} \enskip & \sum_{m=1}^M \sum_{t=1}^T \sum_{\substack{s,s' \in \calX_S^m \\ a \in \calX_A^m}} \tau_{sas'}^{t,m} r^m(s,a,s') &\quad &\\ \mathrm{s.t.} \enskip &(\bftau^m,\bfdelta^m) \in \rmConv1.5eft( \calQ^{\rmd}(T,\calX_S^m,\calX_O^m,\calX_A^m, \pfrak^m) \right), & \forall m \in [M] \\ & \sum_{m=1}^M \sum_{a \in \calX_A^m} \bfD^m(a)\tau_{a}^{t,m} 1.5eq \bfb, & \forall t \in [T], \end{aligned} \end{equation} where we included the constraints $\sum_{s \in \calX_S^m, o \in \calX_A^m}\tau_{soa}^{t,m} = \tau_a^{t,m}$ in the set $\calQ^{\rmd}(T,\calX_S^m,\calX_O^m,\calX_A^m, \pfrak^m)$ and $\rmConv(X)$ denotes the convex hull of a set $X$. Using the definition of the convex hull, we can reformulate Problem~\eqref{pb:app_ColGen:decPOMDP_MILP_DW} as the following master problem: \begin{subequations}1.5abel{pb:app_ColGen:master} \begin{alignat}{2} z_{\rmLR}=\max_{(1.5ambda^m,\bftau^m,\bfdelta^m)_{m\in [M]}} \enskip & \sum_{m=1}^M \sum_{\bftau \in \calM^m} 1.5ambda_{\bftau}^m \bbE_{\bftau} 1.5eft[ \sum_{t=1}^T r^m(S_t^m,A_t^m,S_{t+1}^m) \right] &\quad &\\ \mathrm{s.t.} \enskip &(\bftau^m,\bfdelta^m)= \sum_{(\bftau,\bfdelta)\in \calQ^{\rmd,m}} 1.5ambda_{\bftau,\bfdelta}^m (\bftau,\bfdelta) & \quad \forall m \in [M] \\ &\sum_{(\bftau,\bfdelta) \in \calQ'^m} 1.5ambda_{\bftau,\bfdelta}^m = 1 & \quad\forall m \in [M] 1.5abel{eq:app_ColGen:master_select_lambda} \\ & \sum_{m=1}^M \bbE_{\bftau^m} 1.5eft[\bfD^m(A_t^m) \right] 1.5eq \bfb & \forall t \in [T] 1.5abel{eq:app_ColGen:master_linking_contraints} \\ & 1.5ambda_{\bftau,\bfdelta}^m \geq 0 & \forall (\bftau,\bfdelta) \in \calQ^{\rmd,m}, \forall m \in [M] \end{alignat} \end{subequations} where $\calQ^{\rmd,m} = 
\calQ^{\rmd}(T,\calX_S^m,\calX_O^m,\calX_A^m, \pfrak^m)$ for every $m$ in $[M]$. It follows that the pricing subproblem on component $m$ writes down: \begin{equation}\label{pb:app_ColGen:Subproblem} \begin{aligned} z^m:=\max_{(\bftau,\bfdelta)} \enskip & \sum_{t=1}^T\sum_{\substack{s,s' \in\calX_S^m \\ a \in \calX_A^m}} \tau_{sas'}^{t}\left(r^m(s,a,s') - \beta_t^{\bfT}\bfD^m(a) \right) &\quad &\\ \mathrm{s.t.} \enskip & (\bftau,\bfdelta) \in \calQ^{\rmd}(T,\calX_S^m,\calX_O^m,\calX_A^m,\pfrak^m), & \end{aligned} \end{equation} where $\beta_t \in \bbR_{+}^q$ is the vector of dual variables of linking constraint~\eqref{eq:app_ColGen:master_linking_contraints}. It follows that the reduced cost can be written $c=\sum_{m=1}^M (z^m + \pi^m)$, where $(\pi^m)_{m\in[M]}$ is the vector of dual variables of constraints~\eqref{eq:app_ColGen:master_select_lambda}. Now we are able to derive the column generation algorithm. We assume that $\calX_A \neq \emptyset$ (otherwise the decision maker cannot choose any action). Hence, there exists at least one element $\bfa \in \calX_A^1 \times \cdots \times \calX_A^M$ such that $\sum_{m=1}^M \bfD^m(a^m) \leq \bfb$. Let $\bfa_{\rme}$ be such an element in $\calX_A$. \begin{algorithm}[H] \caption{Column generation to compute $z_{\rmLR}$.} \label{alg:app_ColGen:ColGen} \begin{algorithmic}[1] \STATE \textbf{Input}: $T$, weakly coupled POMDP $\big((\calX_S^m,\calX_O^m,\calX_A^m,\pfrak^m,\bfr^m,\bfD^m)_{m\in[M]},\bfb\big)$ \STATE \textbf{Output}: The optimal value $z_{\rmLR}$ and the marginal probabilities $(\bftau^m)_{m\in[M]}$ \STATE \textbf{Initialize} $u_{\rmLB}\leftarrow - \infty$, $\calQ'^m \leftarrow \emptyset$ \FOR{$m=1,\ldots,M$} \STATE Define $\bfdelta^m$ such that $\bfdelta^{t,m}_{a|o}= \mathds{1}_{a_{\rme}^m}(a)$ for every $a \in \calX_A^m$, $o \in \calX_O^m$ and $t \in [T]$. \STATE Compute $\bftau^m$ such that $(\bftau^m,\bfdelta^m) \in \calQ\left(T,\calX_S^m,\calX_O^m,\calX_A^m,\pfrak^m \right)$. 
\STATE $z^m 1.5eftarrow \infty$ and $\pi^m1.5eftarrow \infty$ \ENDFOR \WHILE{$\sum_{m=1}^M z^m + \pi^m > 0$} \STATE Add column: $\calQ'^m 1.5eftarrow \calQ'^m \cup \{ (\bftau^m,\bfdelta^m) \}$ \STATE Solve Problem~\eqref{pb:app_ColGen:master} restricted to $(\calM'^m)_{m\in [M]}$ to obtain dual variables $(\bfbeta,\bfpi)$. \STATE $u_{\rmLB} 1.5eftarrow \text{Optimal value of the restricted master problem}$ \FOR{$m=1,1.5dots,M$} \STATE Set reward $\tilde{r}_t^m(s,a,s') :=r^m(s,a,s') - \beta_t^{\bfT}\bfD^m(a)$ for every $s,s' \in \calX_S^m$ and $a \in \calX_A^m$ \STATE Solve MILP~\eqref{pb:pomdp:MILP_pomdp} with valid inequalities~\eqref{eq:pomdp:Valid_cuts_pomdp} for POMDP $(\calX_S^m,\calX_O^m,\calX_A^m,\pfrak,\tilde{\bfr})$ to obtain $(\bftau^m,\bfdelta^m)$ and $z^m$ \ENDFOR \ENDWHILE \STATE Set $\bftau^m := \sum_{(\bftau,\bfdelta) \in \calQ'^m} 1.5ambda_{\bftau'}^m \bftau'^m$ for every $m \in [M]$ \STATE \textbf{return} $(\bftau^m)_{m\in [M]}$ and $u_{\rmLB}$ \end{algorithmic} \end{algorithm} Algorithm~\ref{alg:app_ColGen:ColGen} computes the value of the Lagrangian relaxation $z_{\rmLR}$. We omit the proof in this paper. \section{Numerical experiments} 1.5abel{app:nums} In this appendix, we provide further details on the instances of Section~\ref{sec:num} and additional numerical experiments. \subsection{Generic POMDP: Random instances} 1.5abel{sub:app_nums:random_instances} All the instances are generated by first choosing $\vert\calX_S\vert$, $\vert\calX_O\vert$ and $\vert\calX_A\vert.$ We then randomly generate the initial probability $\big(p(s)\big)_{s \in \calX_S}$, the transition probability $\big(p(s'|s,a)\big)_{\substack{s,s' \in \calX_S\\a \in \calX_A}}$, the emission probability $\big(p(o|s)\big)_{\substack{s \in \calX_S \\o \in \calX_O}}$ and the immediate reward function $\big(r(s,a,s')\big)_{\substack{s,s' \in \calX_S\\a \in \calX_A}}$. 
An instance is a tuple $(\calX_S,\calX_O,\calX_A,\pfrak,\bfr).$ A way to measure the difficulty of solving a MILP~\eqref{pb:pomdp:MILP_pomdp} for POMDP $(\calX_S,\calX_O,\calX_A,\pfrak,\bfr)$ with horizon $T$ can be characterized by the size of the set of deterministic policies $\vert \Delta_{\mathrm{ml}}^{\mathrm{d}} \vert = \vert\calX_A\vert^{T\vert\calX_O\vert}$, which only depends on the size of the observation space $\calX_O$ and the action space $\calX_A$. Since $\vert \Delta_{\mathrm{ml}}^{\mathrm{d}} \vert$ only depends on $\vert\calX_O\vert$ and $\vert\calX_A\vert$, we generate instances for different values of the pair $(\mathrm{k}_s,\mathrm{k}_a),$ where $\mathrm{k}_a = \vert\calX_O\vert = \vert\calX_A\vert$ and $\mathrm{k}_s=\vert\calX_S\vert.$ \subsection{Generic POMDP: Benchmark instances} 1.5abel{sub:app_nums:benchmark} All the instances can be found at the link~\url{http://pomdp.org/examples/} and further descriptions of each instance are available in the indicated literature on the same website. In particular, it contains two instances \texttt{bridge-repair} and \texttt{machine} that model maintenance problems. The first one, introduced by~\citet{Ellis1995}, consists of the maintenance of a bridge. The modeling is almost similar to the one described in the introduction except that there are more available actions and they consider only one machine. Instead of just choosing whether or not to maintain the bridge, the decision maker chooses whether or not to inspect the bridge and, if so, whether or not to maintain it. The second one, introduced by~\citet[Appendix H.3]{Cassandra1998}, consists of planning the maintenance of a machine with $4$ deteriorating components. Again, the decision maker can choose to inspect before performing a maintenance of the machine. 
In addition, the action ``maintenance'' is split into two different actions: repair, which consists in maintaining internal components, and replace, which consists in replacing the machine by a new one. It leads to the set of available actions $ \calX_A = \{\mathrm{operate}, \mathrm{inspect}, \mathrm{repair}, \mathrm{replace}\}.$ \paragraph{Metrics.} We give two metrics to evaluate MILP~\eqref{pb:pomdp:MILP_pomdp} against the SARSOP policy. We want to compare the optimal value $z^*$ of MILP~\eqref{pb:pomdp:MILP_pomdp} with the value $z_{\mathrm{SARSOP}}$ obtained by using the SARSOP policy. In addition, Theorem~\ref{theo:pomdp:MDP_approx_equivalence} says that $z^*$ and $z_{\mathrm{SARSOP}}$ are lower bounds of $v_{\mathrm{his}}^*.$ We also compare these values with $z_{\rmR^\mathrm{c}}^*$, the optimal value of the linear relaxation of MILP~\eqref{pb:pomdp:MILP_pomdp} with valid inequalities~\eqref{eq:pomdp:Valid_cuts_pomdp}. By Theorem~\ref{theo:pomdp:MDP_approx_equivalence}, the value of $z_{\rmR^\mathrm{c}}^*$ is an upper bound of $z^*$ and $v_{\mathrm{his}}^*$, and consequently an upper bound of $z_{\mathrm{SARSOP}}.$ It leads to the relative gap $g(z) = \frac{z_{\rmR^\mathrm{c}}^* - z}{z_{\rmR^\mathrm{c}}^*}$ for any $z$ belonging to $\{z^*, z_{\mathrm{SARSOP}}\}.$ \subsection{Weakly coupled POMDP: Multi-armed bandits with partial observations} \label{sub:app_nums:bandits} In this section, we provide numerical experiments on the partially observable multi-armed bandit problems that are introduced in Appendix~\ref{app:examples}. We show the quality of the approximation~\eqref{pb:wkpomdp:decPOMDP_MILP} by comparing the values of $z_{\rm{LB}},$ $z_{\rmIP},$ $z_{\rmUB},$ $z_{\rmLR},$ $z_{\rmRc}$ and $z_{\rmR}.$ \paragraph{Instances.} We consider instances where the state space and observation space of each bandit have the same cardinality $n$, i.e., $n:=|\calX_S^m| = |\calX_O^m|$ for any $m$ in $[M]$.
The resulting system's state space and system's observation space have the size $|\calX_S| = |\calX_O| = n^M$. In each bandit state space, the states and observations are numbered from $1$ to $n$, i.e., $\calX_S^m=\calX_O^m = \{1,\ldots,n\}.$ Like \citet{bertsimas2016decomposable}, we generate different sets of instances: regular (REG.SAR), restless (RSTLS.SAR and RSTLS.SBR) or deterministic (RSTLS.DET.SBR) multi-armed bandits. For each set of instances, the emission probability vector $(p^m(o|s))_{o \in \calX_O^m,s \in \calX_S^m}$ is uniformly drawn from $[0,1]$ and renormalized. These sets of instances differ from each other in the structures of transition probabilities and reward functions. We generate small-scale instances with $M \in \{2,3\}$ arms and $n=4$ states, and medium-scale instances with $M=5$ arms and $n=4$ states. Each instance is solved with finite horizon $T \in \{2,5,10\}$. The sets of instances are generated as follows. \begin{itemize} \item REG.SAR consists of regular partially observable multi-armed bandits. The reward function is defined by $r^m(s,1,s'):= (10/n) \cdot s$ and $r^m(s,0,s'):= 0$ for every state $s,s' \in \calX_S^m$ and every arm $m \in [M]$. Each active transition probability vector $(p^m(s'|s,1))_{s,s' \in \calX_S^m}$ is drawn uniformly from $[0,1]$ and renormalized, for every arm $m \in [M]$. Each passive arm $m$ stays in the same state, i.e., $p^m(s'|s,0) = \mathds{1}_{s}(s')$ for every $s,s' \in \calX_S^m$. \item RSTLS.SAR consists of restless partially observable multi-armed bandits. The reward function is the same as REG.SAR. Each active and passive transition probability vector $(p^m(s'|s,a))_{\substack{s,s' \in \calX_S^m \\ a \in \{0,1\}}}$ is drawn uniformly from $[0,1]$ and renormalized, for every arm $m \in [M]$. \item RSTLS.SBR consists of restless partially observable multi-armed bandits. The reward function is defined by $r^m(s,1,s'):= (10/n)\cdot s$ and $r^m(s,0,s'):= (1/M)\cdot(10/n)\cdot s$ for every state $s,s' \in \calX_S^m$ and every arm $m \in [M]$.
The transition probability is randomly drawn as in RSTLS.SAR. \item RSTLS.DET.SBR consists of restless partially observable multi-armed bandits. The reward function is the same as RSTLS.SBR. Each active and passive transition probability vector $(p^m(s'|s,a))_{\substack{s,s' \in \calX_S^m \\ a \in \{0,1\}}}$ is randomly drawn and deterministic, for every arm $m \in [M]$. \end{itemize} \paragraph{Metrics.} For each instance, we compute the value $z_{\rmIP},$ the lower bound $z_{\rmLB}$ and the upper bounds $z_{\rmUB},$ $z_{\rmLR},$ $z_{\rmRc}$ and $z_{\rmR}.$ Given an instance, we define the relative gaps with the largest upper bound $z_{\rmR}$: $\rmg_{\rmLB}=\frac{z_{\rmR} - z_{\rmLB}}{z_{\rmRc}},$ $\rmg_{\rmIP}=\frac{z_{\rmR} - z_{\rmIP}}{z_{\rmRc}},$ $\rmg_{\rmUB}=\frac{z_{\rmRc} - z_{\rmUB}}{z_{\rmRc}}$ and $\rmg_{\rmLR}=\frac{z_{\rmRc} - z_{\rmLR}}{z_{\rmRc}}$. Then, we define respectively the metrics $G_{\rm{mean}}(\rmg),$ $G_{\rm{95}}(\rmg)$ and $G_{\rm{max}}(\rmg)$ as the mean, the 95-th percentile and the maximum over a set of instances, for each gap $\rmg$ in $\left\{\rmg_{\rmLB}, \rmg_{\rmIP}, \rmg_{\rmUB}, \rmg_{\rmLR} \right\}.$ In general, the lower the values of the metrics, the closer the bound is to the upper bound $z_{\rmRc}$. In particular, thanks to Theorem~\ref{theo:wkpomdp:upper_bound_NLP} and Proposition~\ref{prop:wkpomdp:lagrangian_relax}, the metrics $\rmg_{\rmLB}$ and $\rmg_{\rmLR}$ indicate how close the values of $z_{\rmIP}$ and $v_{\rmml}^*$ are. Since the computation of $z_{\rmUB}$ becomes quickly difficult when the sizes of the instance increase, we only compute the values of $\rmg_{\rmUB}$ on small instances. Table~\ref{tab:app_nums:bandits_numerical_results} summarizes the results. For all the mathematical programs, we set the computation time limit to $3600$ seconds. If the resolution has not terminated before this time limit, then we keep the best upper bound obtained during the resolution.
We do not have any guarantee about this upper bound, but it is the best one found by the solver during the resolution. It explains why for some instances we obtain a smaller gap with the Lagrangian relaxation than with MILP~\eqref{pb:wkpomdp:decPOMDP_MILP}. \begin{table} \centering \resizebox{!}{7.6cm} {\begin{tabular}{|c|cc|cccc|cccc|cccc|} 1.5line Instance set & $T$ & $\rmg$ & \multicolumn{4}{c|}{$M=2$} & \multicolumn{4}{c|}{$M=3$} & \multicolumn{4}{c|}{$M=5$} \\ & & & $G_{\rm{mean}}(\rmg)$ & $G_{\rm{95}}(\rmg)$ & $G_{\rm{max}}(\rmg)$ & Time(s) &$G_{\rm{mean}}(\rmg)$ & $G_{\rm{95}}(\rmg)$ & $G_{\rm{max}}(\rmg)$ & Time(s) & $G_{\rm{mean}}(\rmg)$ & $G_{\rm{95}}(\rmg)$ & $G_{\rm{max}}(\rmg)$ & Time(s)\\ 1.5line REG.SAR & 2 & $\rmg_{\rmLB}$ & 9.91 & 15.13 & 15.62 & 0.03 & 15.72 & 21.51 & 22.87 & 0.08 & 17.42 & 25.08 & 26.53 & 0.21 \\ & & $\rmg_{\rmIP}$ & 9.91 & 15.13 & 15.62 & 0.14 & 15.72 & 21.51 & 22.87 & 0.64 & 17.42 & 25.08 & 26.53 & 2.57 \\ & & $\rmg_{\rmUB}$ & 7.54 & 10.54 & 10.70 & 0.16 & 11.18 & 17.89 & 19.40 & 0.36 & $-$ & $-$ & $-$ & $-$ \\ & & $\rmg_{\rmLR}$ & 7.02 & 10.21 & 10.33 & 9.55 & 10.86 & 16.97 & 18.73 & 14.08 & 13.00 & 18.41 & 18.64 & 15.52 \\ \cline{2-15} & 5 & $\rmg_{\rmLB}$ & 6.06 & 9.23 & 9.57 & 0.39 & 10.34 & 13.01 & 13.26 & 1.32 & 14.51 & 18.45 & 18.48 & 3.43\\ & & $\rmg_{\rmIP}$ & 6.06 & 9.23 & 9.57 & 17.14 & 10.34 & 13.01 & 13.26 & 1525.63 & 17.10 & 27.03 & 27.04 & 2907.11 \\ & & $\rmg_{\rmUB}$ & 4.59 & 6.07 & 6.19 & 1260.58 & 7.18 & 9.89 & 9.99 & 3247.98 & $-$ & $-$ & $-$ & $-$ \\ & & $\rmg_{\rmLR}$ & 4.04 & 5.89 & 5.95 & 43.00 & 6.79 & 9.36 & 9.42 & 54.66 & 10.46 & 13.15 & 13.61 & 51.17 \\ \cline{2-15} & 10 & $\rmg_{\rmLB}$ & 3.36 & 5.45 & 5.85 & 2.86 & 6.27 & 8.91 & 9.15 & 39.76 & 8.04 & 11.14 & 11.55 & 349.00 \\ & & $\rmg_{\rmIP}$ & 3.38 & 5.55 & 6.04 & 1283.79 & 10.08 & 18.86 & 19.93 & 3205.30 & 15.53 & 23.63 & 24.08 & $>3600$ \\ & & $\rmg_{\rmUB}$ & 5.08 & 9.41 & 9.51 & 3248.01 & $-$ & $-$ & $-$ & $-$ & $-$ & $-$ & $-$ & $-$ \\ & & 
$\rmg_{\rmLR}$ & 2.13 & 3.27 & 3.46 & 946.01 & 3.90 & 5.74 & 5.84 & 1536.79 & 5.98 & 7.52 & 7.67 & 661.04 \\ 1.5line RSTLS.SAR & 2 & $\rmg_{\rmLB}$ & 12.73 & 16.84 & 17.04 & 0.03 & 17.96 & 22.67 & 22.77 & 0.07 & 15.13 & 16.83 & 17.03 & 0.17 \\ & & $\rmg_{\rmIP}$ & 12.73 & 16.84 & 17.04 & 0.14 & 17.96 & 22.67 & 22.77 & 0.62 & 15.13 & 16.83 & 17.03 & 2.28 \\ & & $\rmg_{\rmUB}$ & 8.33 & 13.09 & 15.56 & 0.07 & 12.01 & 18.18 & 18.93 & 0.06 & $-$ & $-$ & $-$ & $-$ \\ & & $\rmg_{\rmLR} $ & 8.16 & 12.71 & 14.99 & 9.22 & 11.99 & 18.15 & 18.91 & 14.44 & 9.94 & 11.20 & 11.28 & 15.79 \\ \cline{2-15} & 5 & $\rmg_{\rmLB}$ & 10.77 & 13.61 & 14.22 & 0.44 & 14.54 & 18.64 & 18.86 & 1.40 & 13.85 & 16.46 & 17.00 & 3.91 \\ & & $\rmg_{\rmIP}$ & 10.77 & 13.61 & 14.22 & 12.50 & 14.54 & 18.64 & 18.86 & 358.36 & 18.31 & 21.51 & 21.66 & 3030.09 \\ & & $\rmg_{\rmUB} $ & 6.55 & 8.51 & 8.73 & 171.29 & 8.14 & 12.43 & 13.86 & 1716.52 & $-$ & $-$ & $-$ & $-$ \\ & & $\rmg_{\rmLR}$ & 6.32 & 8.16 & 8.35 & 41.99 & 7.92 & 12.01 & 13.67 & 36.89 & 7.92 & 9.86 & 10.19 & 59.57 \\ \cline{2-15} & 10 & $\rmg_{\rmLB}$ & 10.39 & 13.73 & 14.32 & 3.20 & 13.18 & 16.36 & 16.83 & 29.58 & 14.55 & 17.85 & 18.18 & 312.76 \\ & & $\rmg_{\rmIP}$ & 10.86 & 15.35 & 17.28 & 1896.65 & 14.42 & 17.97 & 18.07 & $>3600$ & 18.83 & 26.05 & 27.44 & $>3600$ \\ & & $\rmg_{\rmUB}$ & 5.99 & 8.44 & 9.28 & $>3600$ & $-$ & $-$ & $-$ & $-$& $-$ & $-$ & $-$ & $-$ \\ & & $\rmg_{\rmLR}$ & 5.44 & 7.29 & 7.83 & $>3600$ & 5.74 & 6.93 & 7.05 & 1669.80 & 7.34 & 9.42 & 9.68 & $>3600$ \\ 1.5line RSTLS.SBR & 2 & $\rmg_{\rmLB}$ & 6.14 & 8.70 & 9.06 & 0.02 & 8.50 & 11.51 & 11.67 & 0.03 & 9.07 & 10.98 & 11.54 & 0.07 \\ & & $\rmg_{\rmIP}$ & 6.14 & 8.70 & 9.06 & 0.04 & 8.50 & 11.51 & 11.67 & 0.14 & 9.07 & 10.98 & 11.54 & 0.72 \\ & & $\rmg_{\rmUB}$ & 3.54 & 5.63 & 6.00 & 0.02 & 5.39 & 7.60 & 7.75 & 0.02 & $-$ & $-$ & $-$ & $-$ \\ & & $\rmg_{\rmLR}$ & 3.47 & 5.49 & 5.75 & 6.72 & 5.32 & 7.56 & 7.71 & 7.74 & 6.39 & 7.63 & 7.99 & 7.90 \\ \cline{2-15} & 5 & 
$\rmg_{\rmLB}$ & 4.65 & 7.58 & 7.84 & 0.18 & 6.77 & 9.13 & 9.24 & 0.66 & 8.61 & 10.38 & 10.51 & 1.72 \\ & & $\rmg_{\rmIP}$ & 4.65 & 7.58 & 7.84 & 4.27 & 6.77 & 9.13 & 9.24 & 311.29 & 10.84 & 14.81 & 16.37 & 3291.89 \\ & & $\rmg_{\rmUB}$ & 2.61 & 4.80 & 5.78 & 18.55 & 3.61 & 5.26 & 5.43 & 500.43 & $-$ & $-$ & $-$ & $-$ \\ & & $\rmg_{\rmLR}$ & 2.33 & 4.16 & 4.85 & 14.43 & 3.43 & 5.14 & 5.27 & 21.78 & 5.10 & 5.89 & 5.91 & 19.59 \\ \cline{2-15} & 10 & $\rmg_{\rmLB}$ & 4.27 & 7.82 & 8.52 & 1.29 & 6.00 & 8.86 & 9.20 & 16.71 & 7.99 & 10.75 & 11.28 & 127.66 \\ & & $\rmg_{\rmIP}$ & 4.37 & 7.90 & 8.52 & 890.65 & 9.87 & 15.37 & 15.70 & 3265.16 & 15.84 & 17.97 & 18.01 & $>3600$ \\ & & $\rmg_{\rmUB}$ & 2.31 & 4.96 & 6.48 & 1836.06 & $-$ & $-$ & $-$ & $-$ & $-$ & $-$ & $-$ & $-$ \\ & & $\rmg_{\rmLR}$ & 1.86 & 3.42 & 3.86 & 3168.63 & 2.77 & 4.69 & 4.76 & 3494.95 & 4.73 & 5.65 & 5.80 & 1443.62 \\ 1.5line RSTLS.DET.SBR &2 & $\rmg_{\rmLB}$ & 6.08 & 11.58 & 13.78 & 0.02 & 8.52 & 13.94 & 14.43 & 0.03 & 9.74 & 13.54 & 13.57 & 0.11 \\ & & $\rmg_{\rmIP}$ & 6.08 & 11.58 & 13.78 & 0.05 & 8.52 & 13.94 & 14.43 & 0.26 & 9.74 & 13.54 & 13.57 & 1.76 \\ & & $\rmg_{\rmUB}$ & 4.70 & 8.33 & 9.45 & 0.02 & 6.54 & 9.24 & 9.59 & 0.03 & $-$ & $-$ & $-$ & $-$ \\ & & $\rmg_{\rmLR}$ & 4.67 & 8.31 & 9.42 & 12.44 & 6.35 & 9.22 & 9.56 & 14.00 & 7.43 & 10.17 & 10.60 & 13.76 \\ \cline{2-15} & 5 & $\rmg_{\rmLB}$ & 2.99 & 7.26 & 8.87 & 0.12 & 6.15 & 10.06 & 10.98 & 0.68 & 5.88 & 8.21 & 8.95 & 1.76 \\ & & $\rmg_{\rmIP}$ & 2.99 & 7.26 & 8.87 & 0.17 & 6.24 & 10.06 & 10.98 & 6.47 & 5.88 & 8.21 & 8.95 & 32.56 \\ & & $\rmg_{\rmUB}$ & 2.72 & 6.86 & 8.66 & 2.97 & 5.01 & 8.11 & 9.52 & 77.43 & $-$ & $-$ & $-$ & $-$ \\ & & $\rmg_{\rmLR}$ & 2.45 & 6.31 & 8.15 & 17.40 & 4.58 & 7.26 & 8.62 & 21.58 & 4.95 & 7.41 & 7.58 & 26.48 \\ \cline{2-15} & 10 & $\rmg_{\rmLB}$ & 2.39 & 6.94 & 7.50 & 0.59 & 4.29 & 8.36 & 10.01 & 3.90 & 4.70 & 7.54 & 8.25 & 43.95 \\ & & $\rmg_{\rmIP}$ & 2.39 & 6.94 & 7.50 & 4.79 & 4.29 & 8.36 & 10.01 & 514.32 
& 6.38 & 13.10 & 13.87 & 1148.11 \\ & & $\rmg_{\rmUB}$ & 2.23 & 7.04 & 7.60 & 816.32 & $-$ & $-$ & $-$ & $-$ & $-$ & $-$ & $-$ & $-$ \\ & & $\rmg_{\rmLR}$ & 1.89 & 6.12 & 7.22 & 76.55 & 3.10 & 5.92 & 6.70 & 197.00 & 3.98 & 6.68 & 7.02 & 116.91 \\ \hline \end{tabular}} \caption{The values of $G_{\rm{mean}}(\rmg),$ $G_{\rm{95}}(\rmg),$ and $G_{\rm{max}}(\rmg)$ obtained on the small- and medium-scale instances with $M \in \{2,3, 5\},$ $n=4$ and solved with different finite horizon $T \in \{2,5,10\}.$} \label{tab:app_nums:bandits_numerical_results} \end{table} One can observe in Table~\ref{tab:app_nums:bandits_numerical_results} that for a large part of the instances, the values of $z_{\rm{LB}},$ $z_{\rmIP},$ $z_{\rmUB},$ and $z_{\rmLR}$ are close in general. It shows that our formulations have optimal values that are close to the optimal value $v_{\mathrm{ml}}^*$ of \ref{pb:decPOMDP_wc}. In addition, the best bound obtained on the value of $z_{\rmIP}$ is very close to the value of the lower bound $z_{\rmLB}.$ Thanks to Theorem~\ref{theo:wkpomdp:lower_bound_MILP}, it means that most of the multi-armed bandit instances admit optimal policies that are ``decomposable'' (see Section~\ref{sub:wkpomdp:lb_ub}). \subsection{Weakly coupled POMDP: Simulations of the implicit policy} \label{sub:app_nums:implicit_policy} In this section, we detail how we build the instances of Section~\ref{sub:nums:implicit_policy}, and we provide the numerical results of matheuristic~\ref{alg:wkpomdp:matheuristic} involving the different policies in $\{\rmLB, \rmLR, \rmRc, \rmR \}$. \paragraph{Instances} Like \citet{Walraven2018}, we build our instances of weakly coupled POMDP from the \texttt{bridge-repair} instance of \citet{Ellis1995} in which the decision maker has to perform maintenance on a bridge. In our problem, there are only two actions available on each bridge: either structural repair or keep.
For each bridge $m$, the sizes of state space, observation space and action space are respectively $\vert \calX_S^m \vert=5$, $\vert \calX_O^m \vert=5$ and $\vert \calX_A^m \vert = 2$. Each bridge starts almost surely in its most healthy state. We add noise to the transition probabilities and emission probabilities of the bridges to ensure that they have slightly different parameters $\pfrak^m$ for all $m$ in $[M]$. For every bridge $m$ in $[M]$, we set $C_F^m = 1000$ and $C_R^m=100$. The bridges are inspected every month and evolve until the horizon of $H=24$ months. One instance consists of the value of the tuple $(M,K,(\pfrak^m)_{m\in [M]})$. We build an instance as follows: first we choose a value of $M$ in $\{3,4,5,10,15, 20\}$, second we build the probabilities $\pfrak^m$ by adding a random real in $[0,0.1]$ to each non-zero value of the probabilities of \citet{Ellis1995}, and finally we choose $K = \max(\floor*{\gamma \times M},1)$, where $\gamma$ is a scalar belonging to $\left\{ 0.2, 0.4, 0.6, 0.8\right\}$ (when $M=3$, then $K$ belongs to $\{1,2\}$). The range of values of $K$ is chosen in such a way that it goes from highly restrictive constraints (smallest values of $\gamma$) to more flexible constraints (largest values of $\gamma$), with respect to the value of $M$. When $K\geq M$, the decision maker can consider the subproblems separately, which is much easier. We enforce $K$ to be no smaller than $1$ because if $K=0$, the authority cannot maintain the bridges. Tables~\ref{tab:wkpomdp:maintenance_1},~\ref{tab:wkpomdp:maintenance_2} and~\ref{tab:wkpomdp:maintenance_3} summarize the results. In addition, Figure~\ref{fig:num:nb_failures} displays the statistics about the total number of failures counted during simulations of the policies. For all the mathematical programs, we set the computation time limit to $3600$ seconds and a final gap tolerance (\texttt{MIPGap} parameter in \texttt{Gurobi}) of $1 \%$, which is enough for the use of our matheuristic.
If the resolution has not terminated before this time limit, then we keep the best feasible solution at the end of the resolution. \begin{table} \centering \resizebox{!}{7.2cm}{ \begin{tabular}{|c|cc|ccccccc|ccccccc|} 1.5line \multirow{3}{*}{$\gamma$} & \multirow{3}{*}{$\rmp$} & \multirow{3}{*}{$T_\rmr$} & \multicolumn{7}{c|}{$M=3$} & \multicolumn{7}{c|}{$M=4$} \\ & & & \multicolumn{2}{c}{$\vert \nu_{\rmp} \vert$ ($\times 10^3$)} & \multicolumn{2}{c}{$\rmF_{\rmp}$} & $\rmG_{\rmp}^{\rmLR}$ & $\rmG_{\rmp}^{\rmRc}$ & Time & \multicolumn{2}{c}{$\vert \nu_{\rmp} \vert$ ($\times 10^3$)} & \multicolumn{2}{c}{$\rmF_{\rmp}$} & $\rmG_{\rmp}^{\rmLR}$ & $\rmG_{\rmp}^{\rmRc}$ & Time \\ & & & {Mean} & {Std. err.} & {Mean} & {Std. err.} & $(\%)$ & $(\%)$ & ($\si{s}$) & {Mean} & {Std. err.} & {Mean} & {Std. err.} & $(\%)$ & $(\%)$ & ($\si{s}$) \\ 1.5line 0.2 & $\rmIP$ & 2 & 5.71 & 2.32 & 4.6 & 2.2 & 11.60 & 14.09 & 0.004 & 8.81 & 2.87 & 6.5 & 2.9 & -1.77 & 13.79 & 0.007 \\ & & 5 & 5.45 & 2.16 & 4.0 & 2.1 & 6.51 & 8.88 & 0.055 & \textbf{8.64} & \textbf{2.92} & \textbf{6.3} & \textbf{2.9} & \textbf{-3.64} & \textbf{11.62} & \textbf{0.116} \\ \cline{2-17} & $\rmLB$ & 2 & 5.71 & 2.31 & 4.5 & 2.2 & 11.61 & 14.10 & 0.005 & 8.81 & 2.88 & 6.5 & 2.9 & -1.75 & 13.81 & 0.013 \\ & & 5 & 5.47 & 1.89 & 3.2 & 1.9 & 6.79 & 9.18 & 0.096 & 8.86 & 2.98 & 6.6 & 3.0 & -1.18 & 14.47 & 0.165 \\ \cline{2-17} & $\rmLR$ & 2 & 5.70 & 2.42 & 4.6 & 2.3 & 11.45 & 13.93 & 0.162 & 8.91 & 3.04 & 6.6 & 3.0 & -0.63 & 15.10 & 0.266 \\ & & 5 & \textbf{5.42} & \textbf{2.28} & \textbf{4.0} & \textbf{2.2} & \textbf{5.95} & \textbf{8.31} & \textbf{0.615} & 8.69 & 3.07 & 6.4 & 3.1 & -3.09 & 12.26 & 2.699 \\ \cline{2-17} & $\rmRc$ & 2 & 7.66 & 3.53 & 6.6 & 3.5 & 49.73 & 53.07 & 0.002 & 12.06 & 4.83 & 10.2 & 4.9 & 34.55 & 55.85 & 0.002 \\ & & 5 & 6.95 & 3.14 & 5.7 & 3.1 & 35.85 & 38.88 & 0.006 & 12.51 & 5.33 & 10.7 & 5.4 & 39.60 & 61.71 & 0.010 \\ \cline{2-17} & $\rmR$ & 2 & 7.67 & 3.52 & 6.6 & 3.5 & 49.85 & 53.19 & 0.004 & 
11.87 & 4.62 & 10.0 & 4.7 & 32.44 & 53.41 & 0.006 \\ & & 5 & 6.96 & 3.20 & 5.7 & 3.2 & 35.94 & 38.97 & 0.020 & 12.51 & 5.37 & 10.7 & 5.4 & 39.59 & 61.69 & 0.050 \\ 1.5line 0.4 & $\rmIP$ & 2 & 5.71 & 2.32 & 4.6 & 2.2 & 11.60 & 14.09 & 0.005 & 8.81 & 2.87 & 6.5 & 2.9 & -1.77 & 13.79 & 0.007 \\ & & 5 & 5.45 & 2.16 & 4.0 & 2.1 & 6.51 & 8.88 & 0.062 & \textbf{8.64} & \textbf{2.92} & \textbf{6.3} & \textbf{2.9} & \textbf{-3.64} & \textbf{11.62} & \textbf{0.118} \\ \cline{2-17} & $\rmLB$ & 2 & 5.71 & 2.31 & 4.5 & 2.2 & 11.61 & 14.10 & 0.006 & 8.81 & 2.88 & 6.5 & 2.9 & -1.75 & 13.81 & 0.013 \\ & & 5 & 5.47 & 1.89 & 3.2 & 1.9 & 6.79 & 9.18 & 0.106 & 8.86 & 2.98 & 6.6 & 3.0 & -1.18 & 14.47 & 0.167 \\ \cline{2-17} & $\rmLR$ & 2 & 5.70 & 2.42 & 4.6 & 2.3 & 11.45 & 13.93 & 0.163 & 8.91 & 3.04 & 6.6 & 3.0 & -0.63 & 15.10 & 0.267 \\ & & 5 & \textbf{5.42} & \textbf{2.28} & \textbf{4.0} & \textbf{2.2} & \textbf{5.95} & \textbf{8.31} & \textbf{0.618} & 8.69 & 3.07 & 6.4 & 3.1 & -3.09 & 12.26 & 2.701 \\ \cline{2-17} & $\rmRc$& 2 & 7.66 & 3.53 & 6.6 & 3.5 & 49.73 & 53.07 & 0.002 & 12.06 & 4.83 & 10.2 & 4.9 & 34.55 & 55.85 & 0.002 \\ & & 5 & 6.95 & 3.14 & 5.7 & 3.1 & 35.85 & 38.88 & 0.006 & 12.51 & 5.33 & 10.7 & 5.4 & 39.60 & 61.71 & 0.010 \\ \cline{2-17} & $\rmR$ & 2 & 7.67 & 3.52 & 6.6 & 3.5 & 49.85 & 53.19 & 0.004 & 11.87 & 4.62 & 10.0 & 4.7 & 32.44 & 53.41 & 0.006 \\ & & 5 & 6.96 & 3.20 & 5.7 & 3.2 & 35.94 & 38.97 & 0.020 & 12.51 & 5.37 & 10.7 & 5.4 & 39.59 & 61.69 & 0.051 \\ 1.5line 0.6 & $\rmIP$ & 2 & 5.71 & 2.32 & 4.6 & 2.2 & 11.60 & 14.09 & 0.006 & 7.70 & 1.96& 4.3 & 1.9 & -0.94 & 0.46 & 0.006 \\ & & 5 & 5.45 & 2.16 & 4.0 & 2.1 & 6.51 & 8.88 & 0.064 & \textbf{7.62} & \textbf{1.89} & \textbf{4.0} & \textbf{1.9} & \textbf{-2.05} & \textbf{-0.66} & \textbf{0.067} \\ \cline{2-17} & $\rmLB$ & 2 & 5.71 & 2.31 & 4.5 & 2.2 & 11.61 & 14.10 & 0.006 & 7.71 & 1.96 & 4.3 & 1.9 & -0.85 & 0.55 & 0.010 \\ & & 5 & 5.47 & 1.89 & 3.2 & 1.9 & 6.79 & 9.18 & 0.110 & 7.80 & 1.75 & 3.3 & 1.7 & 0.35 & 
1.77 & 0.103 \\ \cline{2-17} & $\rmLR$ & 2 & 5.70 & 2.42 & 4.6 & 2.3 & 11.45 & 13.93 & 0.164 & 7.88 & 2.11 & 4.5 & 2.0 & 1.37 & 2.80 & 0.161 \\ & & 5 & \textbf{5.42} & \textbf{2.28} & \textbf{4.0} & \textbf{2.2} & \textbf{5.95} & \textbf{8.31} & \textbf{0.620} & 7.70 & 1.95 & 4.0 & 1.9 & -0.97 & 0.43 & 0.723 \\ \cline{2-17} & $\rmRc$ & 2.0 & 7.66 & 3.53 & 6.6 & 3.5 & 49.73 & 53.07 & 0.002 & 8.48 & 2.35 & 5.1 & 2.3 & 9.01 & 10.55 & 0.002 \\ & & 5 & 6.95 & 3.14 & 5.7 & 3.1 & 35.85 & 38.88 & 0.007 & 7.83 & 1.93 & 3.9 & 1.9 & 0.71 & 2.12 & 0.008 \\ \cline{2-17} & $\rmR$ & 2 & 7.67 & 3.52 & 6.6 & 3.5& 49.85 & 53.19 & 0.005 & 8.29 & 2.24 & 4.5 & 2.2 & 6.63 & 8.13 & 0.002 \\ & & 5 & 6.96 & 3.20 & 5.7 & 3.2 & 35.94 & 38.97 & 0.021 & 8.28 & 2.27& 4.8 & 2.3 & 6.46 & 7.97 & 0.046 \\ 1.5line 0.8 & $\rmIP$ & 2 & 5.17 & 1.90 & 4.0 & 1.8 & 0.95 & 3.21 & 0.005 & 7.70 & 1.91 & 4.1 & 1.8 & -0.94 & 0.45 & 0.006 \\ & & 5 & 5.11 & 1.86 & 3.5 &1.8 & -0.12 & 2.12 & 0.056 & \textbf{7.61} & \textbf{1.81} & \textbf{3.6} & \textbf{1.7} & \textbf{-2.16} & \textbf{-0.79} & \textbf{0.060} \\ \cline{2-17} & $\rmLB$ & 2 & 5.17 & 1.89 & 3.9 & 1.8 & 1.01 & 3.27 & 0.005 & 7.70 & 1.90 & 4.1 & 1.8 & -0.97 & 0.42 & 0.009 \\ & & 5 & 5.32 & 1.62 & 3.0 & 1.6 & 3.91 & 6.23 & 0.092 & 7.65 & 1.79 & 3.6 & 1.7 & -1.66 & -0.28 & 0.098 \\ \cline{2-17} & $\rmLR$ & 2 & 5.18 & 2.03 & 4.0 & 1.9 & 1.23 & 3.50 & 0.124& 7.82 & 2.03 & 4.2 & 1.9 & 0.59 & 2.01 & 0.162 \\ & & 5 & \textbf{5.10} & \textbf{1.91} & \textbf{3.5} & \textbf{1.8} & \textbf{-0.26} & \textbf{1.98} & \textbf{0.364} & 7.69 & 1.86 & 3.7 & 1.8 & -1.14 & 0.24 & 0.466 \\ \cline{2-17} & $\rmRc$ & 2 & 6.34 & 2.58 & 5.1 & 2.5 & 23.79 & 26.56 & 0.002 & 8.29 & 2.24 & 4.5 & 2.2 & 6.63 & 8.13 & 0.002 \\ & & 5 & 5.26 & 1.96 & 3.7 & 1.9 & 2.75 & 5.05 & 0.006 & 7.83 & 1.93 & 3.9 & 1.9 & 0.71 & 2.12 & 0.008 \\ \cline{2-17} & $\rmR$ & 2 & 6.33 & 2.57 & 5.1 & 2.5 & 23.77 & 26.54 & 0.005 & 8.27 & 2.22& 4.5 & 2.2 & 6.35 & 7.84 & 0.006 \\ & & 5 & 5.26 & 1.96 & 3.7 & 1.9 
& 2.75 & 5.05 & 0.020 & 7.87 & 1.98 & 3.9 & 1.9 & 1.16 & 2.58 & 0.044 \\ \hline \end{tabular}} \caption{Numerical values of $\vert \nu_{\rmp} \vert$, $\rmF_{\rmp}$ (mean and standard errors), $\rmG_{\rmp}^{\rmLR}$ and $\rmG_{\rmp}^{\rmRc}$ obtained on an instance $(M,\gamma)$ with $M \in \{ 3, 4\}$ and $\gamma \in \{ 0.2, 0.4, 0.6, 0.8\}$. The values written in bold indicate the best performances regarding the objective values.} \label{tab:wkpomdp:maintenance_1} \end{table} \begin{table} \centering \resizebox{!}{7.0cm}{ \begin{tabular}{|c|cc|ccccccc|ccccccc|} \hline \multirow{3}{*}{$\gamma$} & \multirow{3}{*}{$\rmp$} & \multirow{3}{*}{$T_\rmr$} & \multicolumn{7}{c|}{$M=5$} & \multicolumn{7}{c|}{$M=10$} \\ & & & \multicolumn{2}{c}{$\vert \nu_{\rmp} \vert$ ($\times 10^3$)} & \multicolumn{2}{c}{$\rmF_{\rmp}$} & $\rmG_{\rmp}^{\rmLR}$ & $\rmG_{\rmp}^{\rmRc}$ & Time & \multicolumn{2}{c}{$\vert \nu_{\rmp} \vert$ ($\times 10^3$)} & \multicolumn{2}{c}{$\rmF_{\rmp}$} & $\rmG_{\rmp}^{\rmLR}$ & $\rmG_{\rmp}^{\rmRc}$ & Time \\ & & & {Mean} & {Std. err.} & {Mean} & {Std. err.} & $(\%)$ & $(\%)$ & ($\si{s}$) & {Mean} & {Std. err.} & {Mean} & {Std. 
err.} & $(\%)$ & $(\%)$ & ($\si{s}$) \\ 1.5line 0.2 & $\rmIP$ & 2 & 13.78 & 4.77 & 11.5 & 4.8 & -41.47 & 30.24 & 0.008 & 22.63 & (5.45) & 18.0 & (5.5) & -34.20 & 17.71 & 0.012 \\ & & 5 & 13.38 &4.58 & 11.1 & 4.6 & -43.18 &26.44 & 0.250 & 22.06 & (5.24) & 17.5 & (5.2) & -35.85 & 14.74 & 0.384 \\ \cline{2-17} & $\rmLB$ & 2 & 13.79 & 4.77 & 11.5 & 4.8 & -41.46 & 30.26 & 0.065 & $-$ & $-$ & $-$ & $-$ & $-$ & $-$ & $-$ \\ & & 5 & 13.64 & 4.52 & 11.3 & 4.5 & -42.07 & 28.89 & 0.378 & $-$ & $-$ & $-$ & $-$ & $-$ & $-$ & $-$ \\ \cline{2-17} & $\rmLR$ & 2 & 13.69 & 4.42 & 11.4 & 4.4 & -41.88 & 29.34 & 0.321 & 22.43 & (5.20) & 17.8 & (5.2) & -34.78 & 16.67 & 0.467 \\ & & 5 & \textbf{13.27} & \textbf{4.41} & \textbf{11.0} & \textbf{4.4} & \textbf{-43.63} & \textbf{25.43} & \textbf{2.678} & \textbf{21.77} & \textbf{4.88} & \textbf{17.2} & \textbf{4.9} & \textbf{-36.70} & \textbf{13.23} & \textbf{5.839} \\ \cline{2-17} & $\rmRc$ & 2 & 20.38 & 7.16 & 18.6 & 7.2 & -13.47 & 92.54 & 0.003 & 32.31 & 8.34 & 28.6 & 8.4 & -6.06 & 68.03 & 0.005 \\ & & 5 & 21.09 & 7.83 & 19.3 & 7.9 & -10.46 & 99.24 & 0.015 & 31.74 & 9.13 & 28.0 & 9.2 & -7.72 & 65.07 & 0.027 \\ \cline{2-17} & $\rmR$ & 2 & 20.05 & 6.85 & 18.2 & 6.9 & -14.88 & 89.41 & 0.007 & 31.92 & 8.03 & 28.2 & 8.1 & -7.18 & 66.04 & 0.015 \\ & & 5 & 21.03 & 7.77 & 19.3 & 7.8 & -10.70 & 98.71 & 0.070 & 31.64 & 9.02 & 27.9 & 9.1 & -8.00 & 64.56 & 0.136 \\ 1.5line 0.4 & $\rmIP$ & 2 & 10.43 & 2.50 & 6.4 & 2.5 & 0.76 & 2.13 & 0.007 & 19.19 & 3.38 & 11.5 & 3.3 & 1.32 & 3.07 & 0.012 \\ & & 5 & \textbf{10.27} & \textbf{2.39} & \textbf{5.7} & \textbf{2.4} & \textbf{-0.77} & \textbf{0.58} & \textbf{0.124} & 18.91 & 3.26 & 10.6 & 3.2 & -0.16 & 1.57 & 0.196 \\ \cline{2-17} & $\rmLB$ & 2 & 10.44 & 2.49 & 6.4 & 2.5 & 0.90 & 2.27 & 0.030 & $-$ & $-$ & $-$ & $-$ & $-$ & $-$ & $-$ \\ & & 5 & 10.51 & 2.38 & 6.0 & 2.4 & 1.56 & 2.94 & 0.776 & $-$ & $-$ & $-$& $-$ & $-$ & $-$ & $-$ \\ \cline{2-17} & $\rmLR$ & 2 & 10.38 & 2.38 & 6.3 & 2.4 & 0.32 & 1.68 & 0.220 
& 19.09 & 3.32 & 11.4 & 3.2 & 0.78 & 2.53 & 0.418 \\ & & 5 & 10.28 & 2.31 & 5.8 & 2.3 & -0.62 & 0.72 & 2.451 & \textbf{18.85} & \textbf{3.19} & \textbf{10.6} & \textbf{3.1} & \textbf{-0.51} & \textbf{1.21} & \textbf{4.920} \\ \cline{2-17} & $\rmRc$ & 2 & 13.17 & 3.59 & 9.5 & 3.6 & 27.26 & 28.98 & 0.003 & 22.13 & 4.32 & 14.6 & 4.3 & 16.83 & 18.85 & 0.005 \\ & & 5 & 11.35 & 3.01 & 7.6 & 3.0 & 9.65 & 11.14 & 0.011 & 20.53 & 3.86 & 13.0 & 3.9 & 8.37 & 10.24 & 0.020 \\ \cline{2-17} & $\rmR$ & 2 & 13.11 & 3.54 & 9.4 & 3.6 & 26.67 & 28.38 & 0.007 & 22.13 & 4.32 & 14.6 & 4.3 & 16.83 & 18.85 & 0.014 \\ & & 5 & 11.16 & 2.97 & 7.3 & 3.0 & 7.86 & 9.32 & 0.056 & 20.62 & 3.97 & 13.1 & 4.0& 8.86 & 10.74 & 0.110 \\ 1.5line 0.6 & $\rmIP$ & 2 & 10.26 & 2.31 & 5.7 & 2.2 & 0.00 & 0.80 & 0.006 & 19.10 & 3.28 & 10.8 & 3.1 & 0.92 & 2.60 & 0.011 \\ & & 5 &10.22 & 2.26 & 5.2 & 2.2 & -0.44 & 0.35 & 0.071 & 18.81 & 3.06 & 9.7 & 2.9 & -0.62 & 1.03 & 0.138 \\ \cline{2-17} & $\rmLB$ & 2 & 10.27 & 2.31 & 5.7 & 2.2 & 0.05 & 0.85 & 0.025 & $-$ & $-$ & $-$ & $-$ & $-$ & $-$ & $-$ \\ & & 5 & 10.26 & 1.88 & 3.6 & 1.9 & -0.04 & 0.76 & 0.209 & $-$ & $-$ & $-$ & $-$ & $-$ & $-$ & $-$ \\ \cline{2-17} & $\rmLR$ & 2 & 10.20 & 2.21 & 5.6 & 2.1 & -0.65 & 0.15 & 0.201 & 18.93 & 3.26 & 10.7 & 3.1 & 0.02 & 1.68 & 0.408 \\ & & 5 & \textbf{10.17} & \textbf{2.13} & \textbf{5.1} & \textbf{2.1} & \textbf{-0.93} & \textbf{-0.14} & \textbf{1.220} & \textbf{18.68} & \textbf{3.05} & \textbf{9.5} & \textbf{2.9} & \textbf{-1.31} & \textbf{0.33} & \textbf{1.170} \\ \cline{2-17} & $\rmRc$ & 2 & 11.48 & 2.75 & 7.1 & 2.7 & 11.86 & 12.76 & 0.002 & 20.67 & 3.63 & 12.1 & 3.5 & 9.20 & 11.01 & 0.004 \\ & & 5 & 10.47 & 2.25 & 5.9 & 2.2 & 2.08 & 2.89 & 0.010 & 19.49 & 3.26 & 10.6 & 3.1 & 2.96 & 4.67 & 0.019 \\ \cline{2-17} & $\rmR$ & 2 & 11.47 & 2.75 & 7.1 & 2.7 & 11.80 & 12.69 & 0.007 & 20.67 & 3.63 & 12.1 & 3.5 & 9.20 & 11.01 & 0.014 \\ & & 5 & 10.47 & 2.25 & 5.9 & 2.2 & 1.99 & 2.81 & 0.051 & 19.49 & 3.26 & 10.6 & 3.1 & 2.96 & 
4.67 & 0.019 \\ 1.5line 0.8 & $\rmIP$ & 2 & 10.24 & 2.28 & 5.6 & 2.2 & -0.27 & 0.58 & 0.006 & 19.09 & 3.27 & 10.8 & 3.1 & 0.86 & 2.53 & 0.011 \\ & & 5 & \textbf{10.20} & \textbf{2.22} & \textbf{5.1} & \textbf{2.2} & \textbf{-0.65} & \textbf{0.20} & \textbf{0.070} & 18.82 & 3.08 & 9.6 & 2.9 &-0.56 & 1.09 & 0.137 \\ \cline{2-17} & $\rmLB$ & 2 & 10.24 & 2.28 & 5.6 & 2.2 & -0.23 & 0.62 & 0.021 & $-$& $-$ &$-$& $-$ & $-$ & $-$ & $-$ \\ & & 5 & 10.17 & 2.17 & 4.8 & 2.1 & -0.97 & -0.12 & 0.164 & $-$ & $-$ & $-$ & $-$ & $-$ & $-$ & $-$ \\ \cline{2-17} & $\rmLR$ & 2 & 10.19 & 2.20 & 5.5 & 2.1 & -0.75 & 0.10 & 0.187 & 18.93 & 3.26 & 10.6 & 3.1& -0.01 & 1.65 & 0.416 \\ & & 5 & 10.20 & 2.11 & 5.0 & 2.1 & -0.67 & 0.18 & 0.561 & \textbf{18.65} & \textbf{3.02} & \textbf{9.4} & \textbf{2.9} & \textbf{-1.46} & \textbf{0.17} & \textbf{1.186} \\ \cline{2-17} & $\rmRc$ & 2 & 11.17 & 2.62 & 6.7 & 2.6 & 8.82 & 9.74 & 0.002 & 20.54 & 3.59 & 11.9 & 3.5 & 8.53 & 10.33 & 0.004 \\ & & 5 & 10.34 & 2.22 & 5.6 & 2.1 & 0.73 & 1.59 & 0.010 & 19.49 & 3.20 & 10.5 & 3.1 & 2.95 & 4.66 & 0.019 \\ \cline{2-17} & $\rmR$ & 2 & 11.17 & 2.62 & 6.7 & 2.6 & 8.81 & 9.74 & 0.007 & 20.54 & 3.59 & 11.9 & 3.5 & 8.53 & 10.33 & 0.014 \\ & & 5 & 10.34 & 2.22 & 5.6 & 2.1 & 0.74 & 1.60 & 0.051 & 19.49 & 3.20 & 10.5 & 3.1 & 2.95 & 4.66 & 0.101\\ 1.5line \end{tabular}} \caption{Numerical values of $\vert \nu_{\rmp} \vert$, $\rmF_{\rmp}$ (mean and standard errors), $\rmG_{\rmp}^{\rmLR}$ and $\rmG_{\rmp}^{\rmRc}$ obtained on an instance $(M,\gamma)$ with $M \in \{ 15, 20\}$ and $\gamma \in \{ 0.2, 0.4, 0.6, 0.8\}$. 
The values written in bold indicate the best performances regarding the objective values.} 1.5abel{tab:wkpomdp:maintenance_2} \end{table} \begin{table} \centering \resizebox{!}{6.0cm}{ \begin{tabular}{|c|cc|ccccccc|ccccccc|} 1.5line \multirow{3}{*}{$\gamma$} & \multirow{3}{*}{$\rmp$} & \multirow{3}{*}{$T_\rmr$} & \multicolumn{7}{c|}{$M=15$} & \multicolumn{7}{c|}{$M=20$} \\ & & & \multicolumn{2}{c}{$\vert \nu_{\rmp} \vert$ ($\times 10^3$)} & \multicolumn{2}{c}{$\rmF_{\rmp}$} & $\rmG_{\rmp}^{\rmLR}$ & $\rmG_{\rmp}^{\rmRc}$ & Time & \multicolumn{2}{c}{$\vert \nu_{\rmp} \vert$ ($\times 10^3$)} & \multicolumn{2}{c}{$\rmF_{\rmp}$} & $\rmG_{\rmp}^{\rmLR}$ & $\rmG_{\rmp}^{\rmRc}$ & Time \\ & & & {Mean} & {Std. err.} & {Mean} & {Std. err.} & $(\%)$ & $(\%)$ & ($\si{s}$) & {Mean} & {Std. err.} & {Mean} & {Std. err.} & $(\%)$ & $(\%)$ & ($\si{s}$) \\ 1.5line 0.2 & $\rmIP$ & 2 & 31.54 & 6.03 & 25.0 & 6.0 & -22.56 & 12.73 & 0.017 & 45.06 & 7.08 & 35.9 & 7.1 & -20.37 & 8.67 & 0.022 \\ & & 5 & 30.89 & 5.88 & 24.0 & 5.9 & -24.14 & 10.43 & 0.591 & 44.28 & 6.90 & 35.1 & 6.9 & -21.74 & 6.80 & 0.660 \\ \cline{2-17} & $\rmLR$ & 2 & 31.19 & 5.85 & 24.6 & 5.8 & -23.40 & 11.50 & 0.714 & 45.02 & 7.02 & 35.8 & 7.0 & -20.44 & 8.57 & 0.944 \\ & & 5 & \textbf{30.68} & \textbf{5.65} & \textbf{23.8} & \textbf{5.7} & \textbf{-24.65} & \textbf{9.68} & \textbf{10.189} & \textbf{44.16} & \textbf{6.77} & \textbf{35.0} & \textbf{6.8} & \textbf{-21.95} & \textbf{6.52} & \textbf{13.309} \\ \cline{2-17} & $\rmRc$ & 2 & 44.74 & 9.41 & 39.3 & 9.5 & 9.87 & 59.94 & 0.007 & 61.27 & 10.59 & 53.4 & 10.7 & 8.28 & 47.77 & 0.009 \\ & & 5 & 44.41 & 9.74 & 38.8 & 9.8 & 9.06 & 58.75 & 0.058 & 59.82 & 10.47 & 52.0 & 10.6 & 5.73 & 44.28 & 0.084 \\ \cline{2-17} & $\rmR$ & 2 & 44.71 & 9.53 & 39.3 & 9.6 & 9.78 & 59.81 & 0.041 & 61.64 & 10.75 & 53.8 & 10.8 & 8.94 & 48.66 & 0.061 \\ & & 5 & 44.18 & 9.46 & 38.6 & 9.5 & 8.50 & 57.94 & 0.212 & 59.58 & 10.57 & 51.8 & 10.7 & 5.31 & 43.71 & 0.294 \\ 1.5line 0.4 & $\rmIP$ & 2 & 
28.18 & 4.22 & 19.1 & 4.1 & 0.45 & 1.93 & 0.016 & 41.18 & 4.72 & 23.0 & 4.7 & 0.35 & 1.50 & 0.020 \\ & & 5 & \textbf{27.67} & \textbf{3.97} & \textbf{16.8} & \textbf{3.9} & \textbf{-1.39} & \textbf{0.06} & \textbf{0.232} & 40.83 & 4.66& 22.7 & 4.7 & -0.49 & 0.66 & 0.469 \\ \cline{2-17} & $\rmLR$ & 2 & 28.10 & 4.13 & 19.0 & 4.0 & 0.15 & 1.63 & 0.646 & 41.22 & 4.71 & 23.0 & 4.7 & 0.44 & 1.60 & 0.939 \\ & & 5 & 27.72 & 3.94 & 16.9 & 3.9 & -1.19 & 0.26 & 6.975 & \textbf{40.70} & \textbf{4.64} & \textbf{22.4} & \textbf{4.6} & \textbf{-0.82} & \textbf{0.32} & \textbf{26.353} \\ \cline{2-17} & $\rmRc$ & 2 & 30.48 & 4.64 & 21.0 & 4.5 & 8.62 & 10.22 & 0.006 & 46.28 & 5.59 & 29.4 & 5.6 & 12.78 & 14.07 & 0.009 \\ & & 5 & 29.69 & 4.58 & 20.1 & 4.5 & 5.82 & 7.37 & 0.046 &44.37 & 5.46 & 27.6 & 5.5 & 8.13 & 9.37 & 0.065 \\ \cline{2-17} & $\rmR$ & 2 & 30.48 & 4.64 & 21.0 & 4.5 & 8.62 & 10.22 & 0.039 & 46.27 & 5.58 & 29.4 & 5.6 & 12.76 & 14.06 & 0.056 \\ & & 5 & 29.68 & 4.57 & 20.1 & 4.5 & 5.76 & 7.32 & 0.167 & 44.40 & 5.43 & 27.5 & 5.4 & 8.21 & 9.45 & 0.281 \\ 1.5line 0.6 & $\rmIP$ & 2 & 28.12 & 4.16 & 18.8 & 4.0 & 0.10 & 1.70 & 0.016 & 40.96 & 4.25 & 18.4 & 4.1 & 0.32 & 1.22 & 0.020 \\ & & 5 & \textbf{27.67} & \textbf{3.84} & \textbf{16.3} & \textbf{3.7} & \textbf{-1.51} & \textbf{0.05} & \textbf{0.225} & 40.72 & 4.18 & 17.9 & 4.0 & -0.28 & 0.62 & 0.316 \\ \cline{2-17} & $\rmLR$ & 2 & 28.10 & 4.09 & 18.8 & 3.9 & 0.04 & 1.63 & 0.620 & 40.83 & 4.12 & 18.3 & 4.0 & -0.02 & 0.89 & 0.831 \\ & & 5 & 27.70 & 3.84 & 16.4 & 3.7 & -1.38 & 0.19 & 1.872 & \textbf{40.57} & \textbf{3.96} & \textbf{16.7} & \textbf{3.8} & \textbf{-0.64} & \textbf{0.25} & \textbf{11.835} \\ \cline{2-17} & $\rmRc$ & 2 & 30.16 & 4.60 & 20.3 & 4.5 & 7.36 & 9.06 & 0.006 & 43.95 & 4.81 & 21.6 & 4.7 & 7.63 & 8.60 & 0.008 \\ & & 5 & 29.55 & 4.38 & 19.5 & 4.2 & 5.19 & 6.87 & 0.045 & 42.58 & 4.63 & 21.3 & 4.5 & 4.26 & 5.20 & 0.059 \\ \cline{2-17} & $\rmR$ & 2 & 30.16 & 4.60 & 20.3 & 4.5 & 7.36 & 9.06 & 0.038 & 43.95 & 4.81 
& 21.6 & 4.7 & 7.62 & 8.60 & 0.052 \\ & & 5 & 29.55 & 4.38 & 19.5 & 4.2 & 5.19 & 6.87 & 0.163 & 42.23 & 4.35 & 19.3 & 4.2 & 3.42 & 4.36 & 0.225 \\ 1.5line 0.8 & $\rmIP$ & 2 & 28.12 & 4.16 & 18.8 & 4.0 & 0.11 & 1.71 & 0.015 & 40.96 & 4.24 & 18.3 & 4.0 & 0.29 & 1.21 & 0.019 \\ & & 5 & \textbf{27.65} & \textbf{3.86} & \textbf{16.2} & \textbf{3.7} & \textbf{-1.57} & \textbf{-0.01} & \textbf{0.226} & 40.76 & 4.09 & 17.8 & 3.9 & -0.19 & 0.73 & 0.313 \\ \cline{2-17} & $\rmLR$ & 2 & 28.10 & 4.09 & 18.8 & 3.9 & 0.03 & 1.62 & 0.637 & 40.83 & 4.09 & 18.1 & 3.9 & -0.04 & 0.88 & 0.829 \\ & & 5 & 27.71 & 3.84 & 16.3 & 3.7 & -1.37 & 0.20 & 1.893 & \textbf{40.56} & \textbf{3.86} & \textbf{16.3} & \textbf{3.7} & \textbf{-0.69} & \textbf{0.22} & \textbf{2.238} \\ \cline{2-17} & $\rmRc$ & 2 & 30.13 & 4.59 & 20.3 & 4.4 & 7.26 & 8.97 & 0.006 & 43.75 & 4.71 & 21.2 & 4.5 & 7.12 & 8.11 & 0.008 \\ & & 5 & 29.52 & 4.37 & 19.5 & 4.2 & 5.10 & 6.77 & 0.044 & 42.40 & 4.57 & 21.0 & 4.4 & 3.81 & 4.77 & 0.057 \\ \cline{2-17} & $\rmR$ & 2 & 30.13 & 4.59 & 20.3 & 4.4 & 7.26 & 8.97 & 0.038 & 43.75 & 4.71 & 21.2 & 4.5 & 7.12 & 8.11 & 0.050 \\ & & 5 & 29.52 & 4.37 & 19.5 & 4.2 & 5.10 & 6.77 & 0.163 & 42.04 & 4.25 & 18.9 & 4.1 & 2.93 & 3.88 & 0.219 \\ 1.5line \end{tabular}} \caption{Numerical values of $\vert \nu_{\rmp} \vert$, $\rmF_{\rmp}$ (mean and standard errors), $\rmG_{\rmp}^{\rmLR}$ and $\rmG_{\rmp}^{\rmRc}$ obtained on an instance $(M,\gamma)$ with $M \in \{ 15, 20\}$ and $\gamma \in \{ 0.2, 0.4, 0.6, 0.8\}$. The values written in bold indicate the best performances regarding the objective values.} 1.5abel{tab:wkpomdp:maintenance_3} \end{table} One may observe that for all instances, the matheuristic involving policy $\bfdelta^{\rmIP}$ significantly outperforms the matheuristic involving formulations $\{\rmLB,\rmR^\rmc,\rmR\}$ and delivers promising results even in the most challenging instances ($M=20$). 
As mentioned in the introduction, it gives better results than the policy $\bfdelta^{\rmR}$, which does not consider the partially observable aspect of the components. We also observe that the policy involving the Lagrangian relaxation gives competitive results with $\bfdelta^{\rmIP}$ in terms of rewards and failures. However, the standard errors of the Lagrangian relaxation policy are in general higher than the ones of $\rmIP$. This may come from the fact that the action taken in $\rmAct_{T_\rmr}^{\rmLR,t}(\bfh)$ is sampled. In Tables~\ref{tab:wkpomdp:maintenance_1},~\ref{tab:wkpomdp:maintenance_2} and~\ref{tab:wkpomdp:maintenance_3}, the negative values of $\rmG_{\rmp}^{\rmRc}$ result from approximation errors due to the Monte-Carlo simulations. From a solution time perspective, running $\rmAct_{T_\rmr}^{\rmIP,t}(\bfh)$ takes less time than $\rmAct_{T_\rmr}^{\rmLB,t}(\bfh)$, $\rmAct_{T_\rmr}^{\rmLR,t}(\bfh)$, $\rmAct_{T_\rmr}^{\rmR^\rmc,t}(\bfh)$ and $\rmAct_{T_\rmr}^{\rmR,t}(\bfh)$. Even for the largest instances ($M = 15$ or $M=20$) and for $T=5$, the average time per action of $\rmAct_{T_\rmr}^{\rmIP,t}(\bfh)$ is on the order of $1.0$ second; this amount of time is still feasible even if the $24$ decision times are close together. Figure~\ref{fig:num:nb_failures} shows that the decomposable policy $\rmLB$ may lead to a lower number of failures and a higher maintenance cost, which means that the policy $\rmLB$ is more conservative. One may observe that all the figures decrease when the maintenance capacity $K$ increases, as expected.
\begin{figure} \caption{Boxplots on the total number of failures counted during the simulations of the policies $\rmIP$ (blue boxplot), $\rmLB$ (green boxplot), $\rmLR$ (light blue boxplot), $\rmR^\rmc$ (red boxplot) and $\rmR$ (orange boxplot) for different values of $M$ and $K$ and a rolling horizon $T_{\rmr}$.} \label{fig:num:nb_failures} \end{figure} \section{Examples where $z_{\rmIP} < v_{\rmml}^*$ or $z_{\rmIP} > v_{\rmml}^*$} \label{app:counter_example} In this section, we describe two instances showing respectively that our MILP~\eqref{pb:wkpomdp:decPOMDP_MILP} is neither an upper bound nor a lower bound. \subsection{The inequality $z_{\rmIP} \geq v_{\rmml}^*$ does not hold in general.} Consider a weakly coupled POMDP with $M=2$, $K=1$, $\calX_S^1 = \calX_S^2 = \{1,2,3 \}$, and $\calX_O^1= \calX_O^2 = \{1,2\}$. We set the following initial probability data, \begin{align*} p^1(\cdot) = \begin{bmatrix} 0.0286 & 0.4429 & 0.5284 \end{bmatrix}, & \quad & p^2(\cdot) = \begin{bmatrix} 0.5328 & 0.2202 & 0.2469 \end{bmatrix}, \end{align*} the following transition probability data, \begin{align*} p^1(\cdot|\cdot,0) = \begin{bmatrix} 0.3149 & 0.2598 & 0.4253 \\ 0.2542 & 0.5195 & 0.2263 \\ 0.2016 & 0.7551 & 0.0433 \end{bmatrix}, & \quad & p^2(\cdot|\cdot,0) = \begin{bmatrix} 0.6833 & 0.1797 & 0.1371 \\ 0.0398 & 0.9207 & 0.0394 \\ 0.1422 & 0.2202 & 0.6376 \end{bmatrix}, \\ p^1(\cdot|\cdot,1) = \begin{bmatrix} 0.3849 & 0.2891 & 0.3260 \\ 0.4462 & 0.1346 & 0.4192 \\ 0.0418 & 0.5297 & 0.4285 \end{bmatrix}, & \quad & p^2(\cdot|\cdot,1) = \begin{bmatrix} 0.4665 & 0.0956 & 0.4379 \\ 0.4510 & 0.5168 & 0.0322 \\ 0.5864 & 0.2903 & 0.1234 \end{bmatrix}, \end{align*} the following emission probability data, \begin{align*} p^1(\cdot|\cdot) = \begin{bmatrix} 0.6823 & 0.3177 \\ 0.0806 & 0.9194 \\ 0.5018 & 0.4982 \end{bmatrix}, & \quad & p^2(\cdot|\cdot) = \begin{bmatrix} 0.4389 & 0.5611 \\ 0.6657 & 0.3343 \\ 0.1207 & 0.8793 \end{bmatrix}, \end{align*} and the following reward data \begin{align*} r^1(\cdot|\cdot,0) =
\begin{bmatrix} 3.3101 & 7.8198 & 6.9773 \\ 2.0722 & 2.6782 & 3.5715 \\ 8.4428 & 2.6010 & 3.2765 \end{bmatrix}, & \quad & r^2(\cdot|\cdot,0) = \begin{bmatrix} 2.9600 & 8.1503 & 4.5911 \\ 2.2638 & 6.0290 & 2.5511 \\ 8.0789 & 7.9927 & 5.0259 \end{bmatrix}, \\ r^1(\cdot|\cdot,1) = \begin{bmatrix} 1.9315 & 9.3614 & 2.8927 \\ 4.8769 & 5.3131 & 7.3626 \\ 3.7944 & 4.5557 & 8.6462 \end{bmatrix}, & \quad & r^2(\cdot|\cdot,1) = \begin{bmatrix} 6.2647 & 6.6832 & 1.1263 \\ 9.9182 & 9.0278 & 5.9492 \\ 9.8333 & 0.4466 & 4.3798 \end{bmatrix}. \end{align*} Solving~\ref{pb:decPOMDP_wc} with $T=4$ using MILP~\eqref{pb:pomdp:MILP_pomdp} on $\calX_S$, $\calX_O$ and $\calX_A$, we obtain an optimal value of $v_{\rmml}^* = 44.7122$, while the optimal value of our MILP~\eqref{pb:wkpomdp:decPOMDP_MILP} is $z_{\rmIP} = 44.2834$. Hence, we obtain $z_{\rmIP} < v_{\rmml}^*$. Therefore, $v_{\rmml}^* \leq z_{\rmIP}$ does not hold in general. \subsection{The inequality $z_{\rmIP} \leq v_{\rmml}^*$ does not hold in general.} Consider a weakly coupled POMDP with $M=2$, $K=1$, $\calX_S^1 = \calX_S^2 = \{1,2,3 \}$, and $\calX_O^1= \calX_O^2 = \{1,2\}$.
We set the following initial probability data, \begin{align*} p^1(\cdot) = \begin{bmatrix} 0.4311 & 0.5255 & 0.0434 \end{bmatrix}, & \quad & p^2(\cdot) = \begin{bmatrix} 0.4835 & 0.1745 & 0.3421 \end{bmatrix}, \end{align*} the following transition probability data, \begin{align*} p^1(\cdot|\cdot,0) = \begin{bmatrix} 0.1517 & 0.3481 & 0.5002 \\ 0.1639 & 0.0922 & 0.7439 \\ 0.3395 & 0.2385 & 0.4220 \end{bmatrix}, & \quad & p^2(\cdot|\cdot,0) = \begin{bmatrix} 0.3435 & 0.3291 & 0.3274 \\ 0.5964 & 0.1653 & 0.2383 \\ 0.3968 & 0.2626 & 0.3406 \end{bmatrix}, \\ p^1(\cdot|\cdot,1) = \begin{bmatrix} 0.3467 & 0.2733 & 0.3800 \\ 0.5027 & 0.3548 & 0.1425 \\ 0.2530 & 0.5466 & 0.2003 \end{bmatrix}, & \quad & p^2(\cdot|\cdot,1) = \begin{bmatrix} 0.3160 & 0.4210 & 0.2630 \\ 0.3583 & 0.3882 & 0.2535 \\ 0.3611 & 0.4308 & 0.2081 \end{bmatrix}, \end{align*} the following emission probability data, \begin{align*} p^1(\cdot|\cdot) = \begin{bmatrix} 0.2052 & 0.7948 \\ 0.8296 & 0.1704 \\ 0.5330 & 0.4670 \end{bmatrix}, & \quad & p^2(\cdot|\cdot) = \begin{bmatrix} 0.6273 & 0.3727 \\ 0.0392 & 0.9608 \\ 0.4024 & 0.5976 \end{bmatrix}, \end{align*} and the following reward data \begin{align*} r^1(\cdot|\cdot,0) = \begin{bmatrix} 7.0075 & 6.2135 & 8.4122 \\ 9.7198 & 9.5152 & 2.6182 \\ 1.8522 & 7.4390 & 4.9132 \end{bmatrix}, & \quad & r^2(\cdot|\cdot,0) = \begin{bmatrix} 8.7418 & 2.6682 & 2.5227 \\ 8.7673 & 6.1198 & 6.4814 \\ 6.4971 & 3.8810 & 0.3476 \end{bmatrix}, \\ r^1(\cdot|\cdot,1) = \begin{bmatrix} 2.8154 & 7.0215 & 1.6752 \\ 7.8149 & 0.7849 & 4.3722 \\ 5.9378 & 9.1273 & 1.1657 \end{bmatrix}, & \quad & r^2(\cdot|\cdot,1) = \begin{bmatrix} 7.4528 & 8.5013 & 9.1925 \\ 4.3003 & 2.0946 & 4.2973 \\ 4.2865 & 0.8470 & 9.5848 \end{bmatrix}. 
\end{align*} Solving~\ref{pb:decPOMDP_wc} with $T=4$ using MILP~\eqref{pb:pomdp:MILP_pomdp} on $\calX_S$, $\calX_O$ and $\calX_A$, we obtain an optimal value of $v_{\rmml}^* = 47.3693$, while the optimal value of our MILP~\eqref{pb:wkpomdp:decPOMDP_MILP} is $z_{\rmIP} = 47.7356$. Hence, we obtain $v_{\rmml}^* < z_{\rmIP}$. Therefore, $v_{\rmml}^* \geq z_{\rmIP}$ does not hold in general. \section{Links between the linear relaxation of MILP~\eqref{pb:wkpomdp:decPOMDP_MILP} and the fluid formulation of~\citet{bertsimas2016decomposable}} \label{app:linksBertsimas} In this appendix, we show that when we consider a decomposable POMDP (i.e., when the action space does not necessarily decompose along the components), the linear relaxation of our MILP~\eqref{pb:wkpomdp:decPOMDP_MILP} is equivalent to the fluid formulation of \citet{bertsimas2016decomposable} over a finite horizon and without discounting applied on the MDP relaxation of the problem. Consider a decomposable MDP $\left((\calX_S^m,\pfrak^m)_{m \in [M]},\calX_A\right)$, where $\pfrak^m = \left( (p^m(s))_{s \in \calX_S^m}, (p^m(s'|s,a))_{\substack{s,s' \in \calX_S^m \\ a \in \calX_A}} \right)$. Using Remark~\ref{rem:problem:decomposablePOMDP}, we transform the decomposable MDP into a weakly coupled MDP, where the action space is $\tilde{\calX_A} = \left\{ \bfa \in \calX_A^1\times \cdots \times \calX_A^M \colon \mathds{1}_{a}(a^m) - \mathds{1}_{a}(a^{m+1}) = 0, \enskip \forall a \in \calX_A, m \in [M-1] \right\}$.
Now we write the fluid formulation of \citet[Problem (3)]{bertsimas2016decomposable} with our notation for a finite horizon, i.e., without considering constraints at time $t' \geq T+1$ and by setting a discount factor $\beta=1$: \begin{subequations}\label{pb:app_linksBertsimas:fluid} \begin{alignat}{2} \max_{\bfx,\bfA} \enskip & \sum_{t=1}^T \sum_{m=1}^M \sum_{\substack{s,s' \in \calX_S^m \\ a \in \calX_A}} r^m(s,a,s')x_{sas'}^{t,m} & \quad &\\ \mathrm{s.t.} \enskip & x_{s}^{1,m} = \sum_{a' \in \calX_A, s' \in \calX_S^m} x_{sa's'}^{1,m} & \forall m \in [M], s \in \calX_S^m \label{eq:app_linksBertsimas:LP_MDP_consistent_action_initial}\\ & \sum_{s'\in \calX_S^m,a' \in \calX_A} x_{s'a's}^{t,m} = \sum_{a' \in \calX_A, s' \in \calX_S^m} x_{sa's'}^{t+1,m} & \forall s \in \calX_S^m, m \in [M], t \in [T] \label{eq:app_linksBertsimas:LP_MDP_consistent_action}\\ & x_{s}^{1,m} = p^m(s) & \forall s \in \calX_S^m, m \in [M] \label{eq:app_linksBertsimas:LP_MDP_initial} \\ & x_{sas'}^{t,m} = p^m(s'|s,a) \sum_{s'' \in \calX_S^m}x_{sas''}^{t,m} & \forall s \in \calX_S^m, a \in \calX_A, m \in [M], t \in [T] \label{eq:app_linksBertsimas:LP_MDP_consistent_state} \\ & \sum_{s,s' \in\calX_S^m} x_{sas'}^{t,m} = A_{a}^{t} & \forall a \in \calX_A, m \in [M], t \in [T] \label{eq:app_linksBertsimas:same_action} \end{alignat} \end{subequations} Now, we relate this fluid formulation to the linear relaxation of our integer formulation~\eqref{pb:wkpomdp:decPOMDP_MILP}.
Our MILP formulation written on the weakly coupled POMDP reads as follows: \begin{subequations}\label{pb:app_linksBertsimas:decPOMDP_MILP} \begin{alignat}{2} \max_{\bftau,\bfdelta} \enskip & \sum_{t=1}^T \sum_{m=1}^M \sum_{\substack{s,s' \in \calX_S^m \\ a \in \calX_A^m}} r^m(s,a,s')\tau_{sas'}^{t,m} & \quad &\\ \mathrm{s.t.} \enskip & \left(\bftau^m,\bfdelta^m\right) \in \calQ^{\mathrm{d}}\left(\calX_S^m, \calX_O^m, \calX_A^m,\pfrak^m\right) & \forall m \in [M] \label{eq:app_linksBertsimas:POMDP}\\ &\sum_{s \in \calX_S^m, o \in \calX_O^m}\tau_{soa}^{t,m} = \tau_a^{t,m} & \forall a \in \calX_A^m, m \in [M], t \in [T] \\ &\tau_a^{t,m} = \tau_{a}^{t,m+1} & \forall a \in \calX_A, m \in [M-1], t \in [T] \end{alignat} \end{subequations} The following proposition states that the linear relaxation of our MILP~\eqref{pb:app_linksBertsimas:decPOMDP_MILP} is equivalent to the fluid formulation written on the decomposable MDP $((\calX_S^m,\pfrak^m,\bfr^m)_{m\in [M]}, \calX_A)$. We denote by $z_{\rmF}$ the optimal value of fluid formulation~\eqref{pb:app_linksBertsimas:fluid}. \begin{theo}\label{theo:app_linksBertsimas:linear_relaxation} The linear relaxation of MILP~\eqref{pb:app_linksBertsimas:decPOMDP_MILP} is equivalent to the fluid formulation~\eqref{pb:app_linksBertsimas:fluid}. In particular, $z_{\rmF} = z_{\rmR}$. \end{theo} The equivalence in Theorem~\ref{theo:app_linksBertsimas:linear_relaxation} should be understood in the sense that there exists a feasible solution of MILP~\eqref{pb:app_linksBertsimas:decPOMDP_MILP} if, and only if, there exists a feasible solution of the fluid formulation~\eqref{pb:app_linksBertsimas:fluid} with the same objective value. \proof[Proof of Theorem~\ref{theo:app_linksBertsimas:linear_relaxation}] Let $(\bfx,\bfA)$ be a feasible solution of linear program~\eqref{pb:app_linksBertsimas:fluid}.
We set $\tau_s^{1,m} = x_s^{1,m}$ and $\tau_{sas'}^{t,m} = x_{sas'}^{t,m}$ for every $s,s' \in \calX_S^m$, $a \in \calX_A$, $m \in [M]$ and $t \in [T].$ We also define variables $(\tau_{soa}^{t,m})_{s,o,a,t,m}$ and $(\delta_{a|o}^{t,m})_{o,a,t,m}$ using the definitions~\eqref{eq:pomdp:proof_mu_soa} and~\eqref{eq:pomdp:proof_delta} for each component $m$. Then, $(\bftau^m,\bfdelta^m)$ satisfies all the constraints of $Q^{\rm{d}}(\calX_S^m,\calX_O^m,\calX_A,\pfrak^m)$ when the variable $\bfdelta^m$ are continuous. It remains to show that constraints~\eqref{eq:app_linksBertsimas:same_action} are satisfied. To do so, we set $\tau_a^{t,m} = A_a^{t}$ for every $a \in \calX_A$, $m \in [M]$ and $t \in [T]$. We obtain that: \begin{align*} &\tau_a^{t,m}= A_a^t = \tau_a^{t,m+1}, \quad \text{and}, \quad &\tau_a^{t,m}= A_a^t = \sum_{s,s' \in \calX_S^m} \tau_{sas'}^{t,m}= \sum_{\substack{s,s' \in \calX_S^m \\ o \in \calX_S^m}} \tau_{soa}^{t,m}p^m(s'|s,a) = \sum_{s \in \calX_S^m, o \in \calX_O^m} \tau_{soa}^{t,m}, \end{align*} which ensure that $(\bftau^m,\bfdelta^m)_{m \in [M]}$ is a feasible solution of the linear relaxation of Problem~\eqref{pb:app_linksBertsimas:decPOMDP_MILP}. In addition, the objective values are equal. Let $(\bftau^m,\bfdelta^m)_{m\in [M]}$ be a feasible solution of the linear relaxation of MILP~\eqref{pb:app_linksBertsimas:decPOMDP_MILP}. We set $\bfx^m = ((\tau_s^{1,m})_{s \in \calX_S^m},(\tau_{sas'}^{t,m})_{\substack{s,s' \in \calX_S^m \\ a \in \calX_A, t \in [T]}})$ and $\bfA = (\tau_a^{t,1})_{a \in \calX_A,t \in [T]}$. By definition of $\calQ^{\mathrm{d}}(\calX_S^m,\calX_O^m,\calX_A,\pfrak^m)$, constraints~\eqref{eq:app_linksBertsimas:LP_MDP_consistent_action_initial}-\eqref{eq:app_linksBertsimas:LP_MDP_consistent_action} are satisfied. It remains to show that constraints~\eqref{eq:app_linksBertsimas:same_action} are satisfied. First, since $\tau_a^{t,m} = \tau_{a}^{t,m+1}$ for every $ m \in [M-1]$, then $A_a^t = \tau_a^{t,m}$ for every $m$ in $[M]$. 
This comes from the following computation: \begin{align*} A_a^{t} = \sum_{s,o} \tau_{soa}^{t,m} = \sum_{s} \underbrace{\sum_{o} \tau_{soa}^{t,m}}_{= \sum_{s''} \tau_{sas''}^{t,m}} = \sum_{s,s''} \tau_{sas''}^{t,m}. \end{align*} In addition, the objective values are equal. This completes the proof. \qed \section{Definition of an implicit policy based on the linear relaxation of MILP~\eqref{pb:wkpomdp:decPOMDP_MILP}} \label{app:LPpolicy} In this appendix, we introduce three algorithms $\rm{Act}_{T}^{\rmR,t}(\bfh)$, $\rm{Act}_{T}^{\rmR^\rmc,t}(\bfh)$ and $\rm{Act}_{T}^{\rmLR,t}(\bfh)$ that are slightly different from $\rm{Act}_{T}^{\rmIP,t}(\bfh)$ and that respectively involve the linear relaxation of MILP~\eqref{pb:wkpomdp:decPOMDP_MILP} without and with valid inequalities~\eqref{eq:wkpomdp:dec_Valid_cuts}, and the Lagrangian relaxation. Algorithm $\rm{Act}_{T}^{\rmIP,t}(\bfh)$ needs to be modified because the linear relaxation of MILP~\eqref{pb:wkpomdp:decPOMDP_MILP} and the Lagrangian relaxation do not provide policies $\bfdelta^m$ such that the action taken at step~\ref{alg:wkpomdp:take_action} belongs to $\calX_A$. For this reason we slightly modify this step in the following algorithm: \begin{algorithm}[H] \caption{History-dependent policy $\rm{Act}_{T}^{\rmR,t}(\bfh)$} \label{alg:app_LPpolicy:heuristic_individual} \begin{algorithmic}[1] \STATE \textbf{Input} A history of observations and actions $\bfh \in (\calX_O \times \calX_A)^{t-1}\times \calX_O$. \STATE \textbf{Output} An action $\bfa \in \calX_A.$ \STATE Compute the belief state $p^m(s|h^m)$ according to the belief state update for every state $s$ in $\calX_S^m$ and every component $m$.
\STATE Remove constraints and variables indexed by $t'<t$ in MILP~\eqref{pb:wkpomdp:decPOMDP_MILP} and solve the \textbf{linear relaxation} of the resulting problem with horizon $T - t$, initial probability distributions $\left(p^m(s|h^m)\right)_{s \in \calX_S^m}$ for every component $m$ in $[M]$ and initial observation $\bfo$ (see Remark~\ref{rem:pomdp:with_observation}) to obtain an optimal solution $(\bftau^m,\bfdelta^m)_{m\in [M]}.$ \label{alg:app_LPpolicy:modify_constraints} \STATE Choose $\bfa \notin \calX_A$ \WHILE{$\bfa \notin \calX_A$}\label{alg:app_LPpolicy:heuristic_individual:loop_while} \STATE Sample $a^m$ according to the probability distribution $(\tau_a^{t,m})_{a \in \calX_A^m}$ for all $m$ in $[M].$ \label{alg:app_LPpolicy:take_action} \ENDWHILE \STATE Return $\bfa$ \end{algorithmic} \end{algorithm} Then we define the implicit policy $\bfdelta$ as follows: \begin{align}\label{eq:app_LPpolicy:implicit_policy} \delta_{\bfa|\bfh}^{\rmR,t} = \begin{cases} 1, & \text{if}\ \bfa=\mathrm{Act}_{T}^{\rmR,t}(\bfh) \\ 0, & \text{otherwise} \end{cases}, & \quad \forall \bfh \in \calX_H^t, \enskip \bfa \in \calX_A, \enskip t \in [T]. \end{align} Similarly, we define algorithm $\rm{Act}_{T}^{\rmR^\rmc,t}(\bfh)$ by adding valid inequalities~\eqref{eq:wkpomdp:dec_Valid_cuts} in the resolution of the linear formulation at step~\ref{alg:app_LPpolicy:modify_constraints}. We also define $\rm{Act}_{T}^{\rmLR,t}(\bfh)$ by solving the master problem~\eqref{pb:app_ColGen:master} using the column generation approach (see Appendix~\ref{app:ColGen}) at step~\ref{alg:app_LPpolicy:modify_constraints}. We assume that $\calX_A \subsetneq \calX_A^1 \times \cdots \times \calX_A^M$. Otherwise, Problem~\ref{pb:POMDP_perfectRecall} can be solved by solving the subproblems independently. At first sight, there is no reason to believe that the loop starting at step~\ref{alg:app_LPpolicy:heuristic_individual:loop_while} ends in a finite number of iterations.
It turns out that the theorem below ensures that algorithm $\rm{Act}_{T}^{\rmR,t}(\bfh)$ ends in a finite number of iterations. \begin{theo}\label{theo:app_LPpolicy:alg_finite} Algorithm $\rm{Act}_{T}^{\rmR,t}(\bfh)$ (resp. $\rm{Act}_{T}^{\rmR^\rmc,t}(\bfh)$ and $\rm{Act}_{T}^{\rmLR,t}(\bfh)$) ends in a finite number of iterations. \end{theo} In fact, unlike \citet{bertsimas2016decomposable}, we cannot choose the action $a^m \in \argmax_{a} \tau_a^{t,m}$ for every $m$ in $[M]$ because there is no guarantee that the induced action $\bfa$ will belong to $\calX_A$. For this reason, we choose to sample according to $\tau_a^{t,m}$. \proof[Proof of Theorem~\ref{theo:app_LPpolicy:alg_finite}] To prove this result, we denote by $Z$ the stopping time that belongs to $\bbN$ and that represents the number of iterations of the loop starting at step~\ref{alg:app_LPpolicy:heuristic_individual:loop_while}. We want to prove that $\bbP( Z < \infty ) = 1$. To do so we introduce the random variable $\bfA_i$ that represents the sample drawn at iteration $i$. Since $\bftau$ is a feasible solution of the linear relaxation of MILP~\eqref{pb:wkpomdp:decPOMDP_MILP}, constraint~\eqref{eq:wkpomdp:decPOMDP_MILP_linking_cons} at time $t$ ensures that $\bbE_{\tilde{\bbP}} \left[\sum_{m=1}^M \bfD^m(A_{i}^m) \right] \leq \bfb$ where $\tilde{\bbP}(\bfA_i = \bfa) = \prod_{m=1}^M \tau_{a^m}^{t,m}$ for every $\bfa \in \calX_A^1 \times \cdots \times \calX_A^M$. The random variable $Z$ follows a geometric law with a probability of success $\tilde{\bbP}\left(\bfA_i \in \calX_A \right)$. To prove that $\bbP( Z < \infty ) = 1$, it suffices to prove that $\tilde{\bbP}\left(\bfA_i \in \calX_A \right) > 0$.
We introduce the quantity $E = \bbE_{\tilde{\bbP}} \left[\sum_{m=1}^M \bfD^m(A_i^m) \right]$ and we have the following computation: \begin{align*} \tilde{\bbP}(\bfA_i \in \calX_A) = \tilde{\bbP}(\sum_{m=1}^M \bfD^m(A_i^m) \leq \bfb) &= 1 - \tilde{\bbP}(\sum_{m=1}^M \bfD^m(A_i^m) - E > \bfb - E) \end{align*} By contradiction, suppose that $\tilde{\bbP}(\bfA_i \in \calX_A)=0$. It follows that $\tilde{\bbP}(\sum_{m=1}^M \bfD^m(A_i^m) - E > \bfb - E) = 1$. Since $\bfb \geq E$, we deduce that $\sum_{m=1}^M \bfD^m(A_i^m) - E$ is strictly positive almost surely according to the probability distribution $\tilde{\bbP}$. On the other hand, we have $\bbE_{\tilde{\bbP}} \left[ \sum_{m=1}^M \bfD^m(A_i^m) - E \right] =0$. Therefore, the random variable $\sum_{m=1}^M \bfD^m(A_i^m) - E$ is a non-negative random variable with an expected value equal to $0$. We deduce that $ \sum_{m=1}^M \bfD^m(A_i^m) - E$ is equal to $0$ almost surely according to $\tilde{\bbP}$, which contradicts the fact that $\tilde{\bbP}(\sum_{m=1}^M \bfD^m(A_i^m) - E > 0)=1$. This completes the proof. The proof also holds for the Lagrangian relaxation because the feasible solution also satisfies the linking constraint in expectation. \qed \end{document}
\begin{document} \title{The resource theory of coherence for quantum channels } \subtitle{} \author{F. H. Kamin$^{1}$\and F. T. Tabesh$^{1}$\and S. Salimi$^{1}$\and F. Kheirandish$^{1}$ } \institute{F. H. Kamin \at \email{[email protected]} \and S. Salimi \at \email{[email protected]} \and $^1$ Department of Physics, University of Kurdistan, P.O.Box 66177-15175, Sanandaj, Iran } \date{Received: date / Accepted: date} \maketitle \begin{abstract} We define the quantum-incoherent relative entropy of coherence ($\mathcal{QI}$ REC) of quantum channels in the framework of the resource theory by using the Choi-Jamiolkowsky isomorphism. Coherence-breaking channels are introduced as free operations and their corresponding Choi states as free states. We also show the relationship between the coherence of channel and the quantum discord and find that basis-dependent quantum asymmetric discord can never be more than the $\mathcal{QI}$ REC for any quantum channels. \textbf{Also}, we prove the $\mathcal{QI}$ REC is decreasing for any divisible quantum incoherent channel and we also claim it can be considered as the quantumness of quantum channels. Moreover, we demonstrate that for qubit channels, the relative entropy of coherence (REC) can be equivalent to the REC of their corresponding Choi states and the basis-dependent quantum symmetric discord can never exceed the coherence. \keywords{Quantum-incoherent relative entropy. Choi states. Coherence-breaking channels. Quantum discord} \end{abstract} \section{Introduction} Quantum resource theories (QRTs) approach play a significant role in quantum information theory, in particular, to determine the measures for evaluating physical resources \cite{r.t1,r.t2,r.t3,r.t4}. QRTs are defined by constraints that characterize a set of free operations that do not generate a resource and the corresponding set of free states that are devoid of the resource. 
These restrictions may arise from either fundamental conservation laws or practical restrictions resulting from \textbf{the difficulty} of performing quantum operations. In other words, a strong framework for studying various quantum phenomena can be provided by QRTs. Using this perspective to study quantum systems is natural since their characteristic quantum features can be destroyed due to processes such as decoherence. Taking into account that the structure of QRTs is very general, i.e., the great freedom in \textbf{the definition} of free states and free operations, QRTs can be applied to study many different branches of quantum physics such as entanglement \cite{r.e2,r.e3}, quantum reference frames and asymmetry \cite{as}, quantum thermodynamics \cite{qt}, nonlocality \cite{no}, non-Markovianity \cite{nom}, quantum coherence and superposition \cite{c.a1,c.a2,c.a3,c.a4,c.a5,c.a6,c.a7}, \textbf{etc.} According to \textbf{a common} structure in all QRTs, one can see similarities and \textbf{connections} between different QRTs in terms of available resource measures and resource reversibility \cite{r.t1,co,co1,co2,co3,inco1}. On the other hand, there are cases where multi-resources are required to accomplish a specific task, so attempts have been made to combine the different QRTs as resource theory of thermodynamics that it is a mixture of the purity theory and the asymmetry theory \cite{co3}. Quantum coherence is one of the most prominent features of quantum systems and it is an important physical resource in many quantum information processes as it can be provided at a specific cost and consumed to accomplish useful tasks \cite{c.a1,c.a2,c.a3,c.a4,c.a5,c.a6,c.a7}. In the resource theory of coherence, \textbf{the diagonal states} in a reference basis are chosen as the free states, which are known as incoherent states, and the incoherent operations that can not generate the coherence are known as free operations \cite{r.t4}. 
So far, many efforts have been accomplished to understand this phenomenon and its relation with other quantum resources, such as quantum entanglement, quantum magic, quantum \textbf{discord, etc.} \cite{c.r1,c.r2,c.r3,c.r4,c.r5,c.r6,dis,qqc}. The quantumness of open systems is extremely fragile due to the inevitable interactions with their \textbf{surrounding} environment, which leads to different noisy quantum channels \cite{nielsenbook}. It is very important to know the ability of these channels to change physical resources such as quantum coherence \cite{sk}, hence, the quantum resource theory can help us to understand how the quantum properties of a system change under such evolutions. For this reason, in this paper, we investigate the coherence of quantum channels within the framework of the resource theory. To achieve this aim, we use Choi-Jamiolkowski isomorphism \cite{nielsenbook} and introduce \textbf{coherence-breaking} channels (CBCs) as free operations and their corresponding Choi states as free states \cite{c.breaking.c}. In our framework, the Quantum-incoherent relative entropy of coherence ($\mathcal{QI}$ REC) of quantum channels is equivalent to the $\mathcal{QI}$ REC of their corresponding Choi states \cite{c.r3,QI1}. We show that it consists of two parts: the relative entropy of coherence (REC) of open system and the basis-dependent quantum asymmetric discord \cite{qdiscord1,qdiscord2,qdiscord3}, where the former is zero for both quantum unital channels and quantum incoherent channels. On the other hand, one can prove the $\mathcal{QI}$ REC of channel is decreasing for divisible quantum incoherent channels and it can be a witness of non-Markovianity for quantum incoherent channels. \textbf{Also}, we demonstrate for qubit channels, the REC can be equivalent to the REC of their corresponding Choi states and the basis-dependent quantum symmetric discord can never be more than it. 
\textbf{Our results} will provide new light for a better understanding of the relationship between the coherence of channels and quantum correlations, and the coherence of a quantum channel can be considered as its quantumness. We emphasize our formalism can be extended to the resource theory of entanglement for quantum channels. So, observing the analogical similarity between CBC and entanglement breaking channels (EBC) \cite{ee1}, one can define these operations as free channels and their corresponding Choi states as free states. In light of this, \textbf{one is} able to define the relative entropy of entanglement (REE) for quantum channels in the context of QRT \cite{ee2}. The paper is organized as follows. In Sec. II, a review of the quantum channels and the Choi representation is provided. In Sec. III, we briefly study the resource theory of coherence. We define the $\mathcal{QI}$ REC of quantum channels within the framework of the quantum resource theory in Sec. IV. We focus on the coherence of qubit channels in Sec. V. \textbf{To illustrate} the coherence of channel, two examples are investigated in Sec. VI. The paper concludes in Sec. VII. \section{Quantum channels} A quantum channel is a linear map that satisfies the completely positive and \textbf{trace-preserving} (CPTP) conditions \cite{nielsenbook}. It can be shown that such a mapping admits a dilation of the following form \begin{equation} \Lambda(\rho) =Tr_{E}[U_{SE}(\rho_{S}\otimes\rho_{E})U^{\dagger}_{SE}], \end{equation} where $\rho_{S}$ and $\rho_{E}$ are the states of an open system and its environment, respectively, and $U_{SE}$ denotes the unitary time evolution operator of the total system. On the other hand, the CPTP map $\Lambda$ can be represented in the Kraus form \cite{nielsenbook}, written as \begin{equation} \Lambda(\rho) =\sum_{i} K_{i}\rho K_{i}^{\dagger}, \end{equation} where $K_{i}$ are Kraus operators satisfying $\sum_{i} K^{\dagger}_{i} K_{i}=I$.
Note that the quantum channels can have several Kraus representations. In this work, we focus on another representation of the quantum channel, which is known as \textbf{the Choi matrix.} According to Choi-Jamiolkowski isomorphism, any CPTP map can be related to a density matrix of \textbf{the composite} system $AS$ in which $A$ is an auxiliary system with the same dimension $d$ as $S$. The Choi state of the channel $\Lambda$ is defined as \begin{equation} \Omega_{\Lambda}=(I_{A}\otimes\Lambda_{S})(\vert\Psi\rangle\langle\Psi\vert), \end{equation} where $\vert\Psi\rangle =\frac{1}{\sqrt{d}}\sum_{i} \vert ii\rangle$ is a maximally entangled state of $SA$. It is important to emphasize that there is a unique Choi state for every quantum channel, which includes all the channel's information \cite{nielsenbook}. Hence, we claim the coherence of any channel can be determined by the coherence of \textbf{its} corresponding Choi state. \textbf{To define} the coherence of a channel, we review the resource theory of coherence in the next section. \section{The resource theory of coherence} As stated in the Introduction, quantum coherence is a physical property that is used as a resource for quantum systems. Hence, \textbf{the quantitative} determination of coherence for quantum systems has been extensively studied \cite{c.m1,c.m2,c.m3,c.m4,c.m5}. Let us consider the Hilbert space $\mathcal{H}$ with fixed basis ${\vert i \rangle}_{i=0,...,d-1}$, then an incoherent state is defined as $\delta=\sum^{d-1}_{i=0}\delta_{i}\vert\ i\rangle\langle\ i\vert$. The set of incoherent states is denoted by $\mathcal{I}$ and $\vert\psi\rangle=\frac{1}{\sqrt{d}}\sum^{d-1}_{i=0} e^{i \theta_{i}} \vert i\rangle$ is a maximally coherent state where $\theta_{i}$ is an arbitrary phase. A completely positive and trace preserving map $\Lambda$ is maximally incoherent (MIO) if $\Lambda(\delta) \in \mathcal{I}$ for any state $\delta \in \mathcal{I}$ \cite{mio}.
Meanwhile, an incoherent operation (IO) has a Kraus representation such that $\frac{K_{i}\delta K^{\dagger}_{i}}{Tr(K_{i}\delta K^{\dagger}_{i})} \in \mathcal{I}$ for all $i$ and $\delta \in\mathcal{I}$ \cite{r.t4}. By this restriction, the Kraus operators can be in the form $K_{i}=\sum^{d-1}_{j=0}c_{ij}\vert d_{i}(j)\rangle\langle j\vert$ for any incoherent operation, which are incoherent and $d_{i}(j)$ is a function of the index $j$ and $c_{ij}$ are coefficients \cite{inco}. If $d_{i}(j)$ is a permutation or one-to-one, then $K^{\dagger}_{i}$ is also incoherent as well as $K_{i}$. It is explicit from the definition that $IO\subseteq MIO$. In the resource theory of coherence, the incoherent states and the incoherent operations are known as free states and free operations, respectively. A measure for quantum coherence of state $\rho$ is characterized by a function $C(\rho)$ which satisfies the following properties \cite{r.t4}: \begin{itemize} \item[i]. $C(\rho)\geq 0$, for any $\rho$ and $C(\delta)=0$ if and only if $\delta \in \mathcal{I}$; \item[ii]. The coherence cannot increase under $MIO$ map $\Lambda$, i.e., $C(\Lambda(\rho))\leq C(\rho)$; \item[iii]. For every $\Lambda\in IO$ with Kraus representation $\lbrace K_{i}\rbrace$, the coherence is non-increasing on average under selective measurement, i.e., $\sum_{i}p_{i}C(\rho_{i})\leq C(\rho)$, where $\rho_{i}=\frac{K_{i}\rho K^{\dagger}_{i}}{Tr(K_{i}\rho K^{\dagger}_{i})}$; \item[iv]. The coherence cannot increase by mixing quantum states, i.e., $C(\sum_{i}p_{i}\rho_{i})\leq\sum_{i}p_{i}C(\rho_{i})$.
\end{itemize} One of the measures that satisfy all the above requirements is the REC defined by \cite{r.t4} \begin{equation}\label{e4} C_{r}(\rho)=\min_{\delta\in \mathcal{I}}S(\rho\|\delta)=S(\rho^{d})-S(\rho), \end{equation} where $S(\rho\|\delta)=tr[\rho(\log{\rho}-\log{\delta})]$ is the relative entropy \cite{nielsenbook}, and the diagonal part of $\rho$ in the reference basis $\lbrace\vert i\rangle\rbrace$ is $\rho^{d}=\sum_{i}\vert\ i\rangle\langle\ i\vert\rho\vert\ i\rangle\langle\ i\vert$. In the next section we use the resource theory and introduce the $\mathcal{QI}$ REC of channels. \section{The Resource theory of coherence for quantum channels} In the previous section we introduced the resource theory of coherence for states. Here, we intend to define, in the context of the resource theory, a measure to determine the coherence of quantum channels. To achieve this purpose, we have to define the set of free states and free operations. $\textbf{Free operations:}$ The free operations cannot generate \textbf{resources,} e.g., in resource theory of entanglement the LOCC are considered as free operations \cite{r.e1,r.e2,r.e3}. In this paper, we consider coherence-breaking channels $CBCs$ as free operations \cite{c.breaking.c}. A quantum incoherent channel $\Lambda$ is called coherence-breaking if $\Lambda(\rho)$ is an incoherent state for any state $\rho$. The set of all $CBCs$ is denoted by $S_{cbc}$. \textbf{A coherence-breaking channel} kills any coherence present in the state and this is our motivation in this work. $\textbf{Free states:}$ The Choi states corresponding to the free operations are regarded as the free states. The set of free states can be introduced by \textbf{the following form} \begin{equation} \mathcal{F}=\lbrace\Omega_{\Phi}\mid~\Phi\in S_{cbc}\rbrace.
\end{equation} The corresponding Choi states of all coherence-breaking channels have the following form \cite{c.breaking.c} \begin{equation} \Omega_{\Phi}=\sum_{i}\lambda_{i}\rho_{i}\otimes\vert i\rangle\langle i\vert. \end{equation} It is clear that $\mathcal{F}$ is a set of quantum-incoherent states and $(\Lambda\otimes I) \Omega_{\Phi}\in\mathcal{F}$ for every state $\Omega_{\Phi}\in\mathcal{F}$ and $\Lambda\in{S_{cbc}}$. By using the Choi-Jamiolkowski isomorphism, we define the $\mathcal{QI}$ REC of channels as \cite{c.r3,QI1} \begin{equation}\label{e7} C_{QI}(\Lambda)=C_{r}(\Omega^{A\vert S}_{\Lambda})=\min_{\Omega_{\Phi}\in\mathcal{F}}S(\Omega_{\Lambda}\|\Omega_{\Phi}), \end{equation} with the minimization taken over the set of $\mathcal{F}$. Applying the Theorem $2$ in \cite{ref29}, $C_{QI}(\Lambda)$ can also be written as \begin{equation} C_{QI}(\Lambda)=S(\Delta^{S}(\Omega_{\Lambda}))-S(\Omega_{\Lambda}), \end{equation} in which $\Delta^{S}(\Omega_{\Lambda})=\sum_{i}(I\otimes \vert i\rangle\langle i\vert)\Omega_{\Lambda}(I\otimes \vert i\rangle\langle i\vert)$. Notice that the relative entropy of Choi states is monotonically decreasing under the local quantum incoherent operations. In the following, we are going to obtain the relationship between the $\mathcal{QI}$ REC of channel and quantum correlations. For this purpose, we consider the basis-dependent quantum asymmetric discord \cite{qdiscord1,qdiscord2,qdiscord3} \begin{equation}\label{e9} D^{A\vert S}(\Omega_{\Lambda})=I(\Omega_{\Lambda})-I[\Delta^{S}(\Omega_{\Lambda})], \end{equation} where $I(\rho_{AS})=S(\rho_{S})+S(\rho_{A})-S(\rho_{AS})$ is mutual information \cite{nielsenbook}. With the help of Eq.(\ref{e9}), it is straightforward to obtain the following equality \begin{equation}\label{e10} C_{QI}(\Lambda)=C_{r}(\rho_{S})+D^{A\vert S}(\Omega_{\Lambda}), \end{equation} where $C_{QI}(\Lambda)$ is composed of the REC of the open system and the quantum asymmetric discord. \textbf{The Eq.
(\ref{e10})} tells us that $C_{QI}(\Lambda)\geq D^{A\vert S}(\Omega_{\Lambda})$, or \textbf{in other words,} the quantum asymmetric \textbf{discord} can never exceed the $\mathcal{QI}$ REC for any quantum channels. By taking partial trace over the ancilla $A$ one can obtain the state of the system $S$ as \begin{equation}\label{e11} \rho_{S}=Tr_{A}(\Omega_{\Lambda})=\frac{1}{d}\sum_{i}K_{i}K^{\dagger}_{i}. \end{equation} Suppose $\Lambda$ is a quantum unital channel, i.e., $\sum_{i}K_{i}K^{\dagger}_{i}=\sum_{i}K^{\dagger}_{i}K_{i}=I$, so we conclude $C_r(\rho_{S})$ is zero for any quantum unital channel. The state of the system, Eq. (\ref{e11}) for an incoherent quantum channel has the following form \begin{equation} \rho_{S}=\frac{1}{d}\sum^{d-1}_{l=0}\sum_{i}c_{il}c^{\ast}_{il}\vert d_{i}(l)\rangle\langle d_{i}(l)\vert, \end{equation} where the Kraus operator is $K_{i}=\sum^{d-1}_{j=0}c_{ij}\vert d_{i}(j)\rangle\langle j\vert$. Since $\rho_{S}$ in the above equation is an incoherent state then $C_{r}(\rho_{S})$ is also zero. So, the $\mathcal{QI}$ REC of the unital and the incoherent channels is equal to $D^{A\vert S}(\Omega_{\Lambda})$. Besides, we prove the $\mathcal{QI}$ REC is decreasing for divisible incoherent channels in the following proposition. $\emph{Proposition.}$ If $\Lambda$ is a divisible incoherent channel then $C_{QI}(\Lambda)$ is decreasing. $\emph{Proof.}$ Assume $\Lambda$ is a divisible incoherent channel, i.e., $\Lambda_{t+\epsilon,0}=\Lambda_{t+\epsilon,t}\Lambda_{t,0}$. Using the fact that the $\mathcal{QI}$ REC in Eq.(\ref{e7}) is reduced under local incoherent channels, we have $C^{A\vert S}_{r}(( I\otimes\Lambda)(\rho_{AS}))\leq C^{A\vert S}_{r}(\rho_{AS})$.
So, one can write \begin{eqnarray}\label{e13} C^{A\vert S}_{r}(\rho_{AS}(t+\epsilon))&=&C^{A\vert S}_{r}((I\otimes\Lambda_{t+\epsilon,0})(\rho_{AS}(0)))\nonumber \\ &=&C^{A\vert S}_{r}((I\otimes\Lambda_{t+\epsilon,t}\Lambda_{t,0})(\rho_{AS}(0)))\nonumber \\ &=&C^{A\vert S}_{r}((I\otimes\Lambda_{t+\epsilon,t})(\rho_{AS}(t)))\nonumber \\ &\leq & C^{A\vert S}_{r}(\rho_{AS}(t)). \end{eqnarray} \textbf{The inequality (\ref{e13}) shows that} the coherence of the channel is decreasing and the proof is complete.\\ According to \textbf{the above} proposition the $\mathcal{QI}$ REC can be regarded as a witness for the non-Markovianity of quantum incoherent channels. Now one can ask a question: what is the relation between the $\mathcal{QI}$ REC and the quantumness of a channel? Taking into account Eq.(\ref{e7}), it is clear that as long as the $\mathcal{QI}$ REC of a channel is reduced, the channel will be closer to a $CBC$. This means that the coherence of \textbf{the output state of the channel} is also decreased, thus the output state becomes more classical. Therefore, the $\mathcal{QI}$ REC of channel can be considered as a measure of \textbf{the quantumness of the channel.} It is important to say that we interpret the quantumness as the amount of quantumness that remains in \textbf{the state of the system} during the evolution, and this is different from the channel's ability to create quantum correlations. \section{Coherence of qubit channels} In this section, we consider the evolution of a single qubit under a quantum channel. An arbitrary qubit is expressed as $\rho=\frac{1}{2}(I+\vec{r}.\vec{\sigma})$ , where $\vec{r}$ is the $3$-dimensional Bloch vector with $\vec{r} \in R^{3}(\vert \vec{r} \vert\leq 1)$ and $\sigma_{i}$ are Pauli matrices.
A qubit channel, $\Lambda$, can be represented by a $4\times4$ matrix in the following form \cite{KI,RI} \begin{equation} \bf{F}= \left( \begin{array}{cc} 1&\vec{0} \\ \vec{\tau} & T \\ \end{array} \right), \end{equation} where, $T$ is a $3\times3$ real matrix and $\vec{\tau}$ and $\vec{0}$ are \textbf{3-dimensional} column and row vectors, respectively. Then we have \begin{equation} \Lambda(\rho)=\frac{1}{2}[I+(T\vec{r}+\vec{\tau}).\vec{\sigma}]. \end{equation} It is worthwhile to note that any qubit channel can be written as \cite{KI,RI} \begin{equation} \bf{F}= \left( \begin{array}{cccc} 1&0&0&0 \\ \tau^{\prime}_{1}& \lambda_{1}&0&0 \\ \tau^{\prime}_{2}&0&\lambda_{2}&0 \\ \tau^{\prime}_{3}&0&0&\lambda_{3}\\ \end{array} \right), \end{equation} where, $\lambda$'s are singular values of the matrix $T$. For all coherence-breaking qubit channels the matrix $\bf{F}$ is in the following form \cite{c.breaking.c} \begin{equation}\label{e17} \bf{F_{\Phi}}= \left( \begin{array}{cccc} 1&0&0&0 \\ 0&0&0&0 \\ 0&0&0&0 \\ \tau_{3}&0&0&\lambda_{3}\\ \end{array} \right), \end{equation} and the corresponding Choi matrix is \begin{equation} \Omega_{\Phi}= \left( \begin{array}{cccc} 1+\tau_{3}-\lambda_{3}&0&0&0 \\ 0&1+\tau_{3}+\lambda_{3}&0&0 \\ 0&0&1-\tau_{3}+\lambda_{3}&0 \\ 0&0&0&1-\tau_{3}-\lambda_{3}\\ \end{array} \right). \end{equation} \textbf{Here,} $\Omega_{\Phi}$ is an incoherent matrix. Therefore, for qubit channels in the form of Eq. (\ref{e17}), the coherence of the channel coincides with the REC of its corresponding Choi state \begin{equation} C_{r}(\Lambda)=C_{r}(\Omega_{\Lambda})=\min_{\Omega_{\Phi}\in\mathcal{F}}S(\Omega_{\Lambda}\|\Omega_{\Phi}), \end{equation} and regarding the Eq.(\ref{e4}), we have \begin{equation}\label{e20} C_{r}(\Lambda)=S(\Omega^{d}_{\Lambda})-S(\Omega_{\Lambda}). 
\end{equation} Also, by using the basis-dependent quantum symmetric discord \cite{qdiscord1,qdiscord2,qdiscord3} \begin{equation}\label{e21} D(\Omega_{\Lambda})=I(\Omega_{\Lambda})-I[(\Omega^{d}_{\Lambda})], \end{equation} the coherence of the qubit channel takes the following form \begin{equation} C_{r}(\Lambda)=C_{r}(\rho_{S})+C_{r}(\rho_{A})+D(\Omega_{\Lambda}). \end{equation} \textbf{The above} equation tells us that the quantum symmetric discord $D(\Omega_{\Lambda})$ \textbf{can never} exceed the coherence $C_{r}(\Lambda)$ for qubit channels. \section{Examples} In this section the coherence of \textbf{the channel} will be illustrated by means of two examples. \subsection{Amplitude damping channel} Here, we calculate the coherence for an amplitude damping channel with Kraus operators $K_{0}=\vert 0\rangle\langle 0\vert+\sqrt{1-p}\vert 1\rangle\langle 1\vert$ and $K_{1}=\sqrt{p}\vert 0\rangle\langle 1\vert$ \cite{nielsenbook}. The corresponding Choi matrix for this channel is given by \begin{equation} \Omega_{\Lambda^{AD}}= \left( \begin{array}{cccc} \frac{1}{2}&0&0&\frac{\sqrt{1-p}}{2} \\ 0& 0&0&0 \\ 0&0&\frac{p}{2}&0 \\ \frac{\sqrt{1-p}}{2}&0&0&\frac{1-p}{2}\\ \end{array} \right), \end{equation} and the coherence of the channel will be \begin{equation} C_{r}(\Lambda^{AD})=\frac{p-1}{2}\log{(\frac{1-p}{2})}+\frac{2-p}{2}\log{(\frac{2-p}{2})}+\frac{1}{2}. \end{equation} The behavior of the coherence of the channel (dashed red line) of the corresponding Choi state in terms of $p$ \textbf{is plotted} in Fig. \ref{case1}. From the figure one can see that the amount of coherence is reduced by increasing the parameter $p$ from 0 to 1 and it is very close to the quantumness $Q_{C}(\Lambda^{AD})$ which is defined in \cite{qqc}. \begin{figure} \caption{(Color online). Plot of $C_{QI}$.}\label{case1} \end{figure} \subsection{Phase covariant channel} Here, we consider a general model of qubit dynamics which includes dephasing, dissipation and heating effects \cite{ex1,ex2,ex}.
The time-local master equation for this \textbf{evolution} is given by \begin{eqnarray}\label{e25} \frac{d\rho}{dt}&=&-i (\omega+h(t))[\sigma_{z},\rho]+\frac{\gamma_{z}(t)}{2}(\sigma_{z}\rho\sigma_{z}-\rho)\nonumber \\ &+&\frac{\gamma_{1}(t)}{2}(\sigma_{+}\rho\sigma_{-}-\frac{1}{2}\lbrace\sigma_{-}\sigma_{+},\rho\rbrace)\nonumber \\ &+&\frac{\gamma_{2}(t)}{2}(\sigma_{-}\rho\sigma_{+}-\frac{1}{2}\lbrace\sigma_{+}\sigma_{-},\rho\rbrace),\nonumber \\ \end{eqnarray} where $\gamma_{i}(t) (i= 1,2,z)$ are \textbf{time-dependent} decay rates, $\sigma_{\pm}$ are the raising and lowering operators of the qubit, $\sigma_{z}$ is the Pauli spin operator in the $z$-direction, $h(t)$ is a time-dependent frequency shift and $\omega$ is the transition frequency of the qubit. It should be noted that the decay rates are time-dependent functions which can be \textbf{negative} at some times. The Eq. (\ref{e25}) is the most general time-local master equation for a qubit which \textbf{indicates} a phase covariant transformation.
The Choi matrix for such a transformation will be in the following form \begin{equation} \small{\Omega_{\Lambda_{\omega}}= \left( \begin{array}{cccc} \frac{1+\kappa(t)+\eta _{\parallel}(t)}{4}&0&0& \frac{\eta_{\perp}(t)}{2} \\ 0& \frac{1-\kappa(t)-\eta _{\parallel}(t)}{4}&0&0 \\ 0&0& \frac{1+\kappa(t)-\eta _{\parallel}(t)}{4}&0 \\ \frac{\eta _{\perp}(t)}{2}&0&0&\frac{1-\kappa(t)+\eta _{\parallel}(t)}{4}\\ \end{array} \right)}, \end{equation} with \begin{eqnarray} \kappa(t)&=&- e^{-\Gamma(t)}(1+2G(t))+1,\nonumber \\ \eta_{\parallel}(t)&=&e^{-\Gamma(t)},\nonumber \\ \eta_{\perp}(t)&=&e^{-\Gamma(t)/2-\Gamma_{z}(t)},\nonumber \\ \end{eqnarray} where the terms in the above equations are defined as \begin{eqnarray} \Gamma(t)&=&\int^{t}_{0}dt^{\prime}(\frac{\gamma_{1}(t^{\prime})}{2}+\frac{\gamma_{2}(t^{\prime})}{2}),\nonumber \\ \Gamma_{z}(t)&=&\int^{t}_{0}dt^{\prime}\gamma_{z}(t^{\prime}),\nonumber \\ G(t)&=&\int^{t}_{0}dt^{\prime}e^{\Gamma(t^{\prime})}\frac{\gamma_{2}(t^{\prime})}{2}.\nonumber \\ \end{eqnarray} It is clear that $C_{r}(\rho_{S})=0$. The coherence of channel $\Lambda_{\omega}$ can be evaluated as \begin{eqnarray}\label{e29} C_{r}(\Lambda_{\omega})&=&-\frac{1\pm\kappa(t)+\eta_{\parallel}(t)}{4}\log{\frac{1\pm\kappa(t)+\eta_{\parallel}(t)}{4}}\nonumber \\ &+&\frac{1+\eta_{\parallel}(t)\pm\sqrt{\kappa^{2}(t)+4\eta_{\perp}^{2}(t)}}{4} \times\log{\frac{1+\eta_{\parallel}(t)\pm\sqrt{\kappa^{2}(t)+4\eta_{\perp}^{2}(t)}}{4}}.\nonumber \\ \end{eqnarray} Now let us assume both the thermal and dephasing environments are at the same temperature $T$. Also, we ignore the effect of the Lamb shift corrections of the first term. The decay rates of the heating and dissipation reservoir are $\gamma_{1}(t)/2=Nf(t)$ and $\gamma_{2}(t)/2=(N+1)f(t)$, respectively, where $N$ is the mean number of thermal photons.
The function $f(t)$ depends on the form of the reservoir spectral density and for a Lorentzian spectrum it is expressed as \cite{ex3} \begin{equation} f(t)=Re\lbrace\frac{\dot{c}(t)}{c(t)}\rbrace, \end{equation} with \begin{equation} c(t)=e^{-\frac{t}{2}}[\cosh{(\frac{dt}{2})}+\frac{1}{d}\sinh{(\frac{dt}{2})}]c(0), \end{equation} where $d=\sqrt{1-2R}$, and $R=\gamma_{0}/\lambda$ is a dimensionless positive number, in which $\gamma_{0}$ is an effective coupling constant and $\lambda$ is the width of the spectral density of the environment. For $R<1/2$ (weak coupling) the dynamics is divisible (Markovian) while for $R>1/2$ (strong coupling), it becomes non-divisible (non-Markovian). We consider the spectral density $J(\omega)=\alpha(\omega^{s}/\omega^{s-1}_{c})e^{-\omega/\omega_{c}}$ for pure dephasing dynamics, where $\omega_{c}$ is the cutoff frequency, $s$ the Ohmicity parameter and $\alpha$ the coupling constant. In this case, the decay rate for the dephasing channel is determined by \cite{ex4,ex5,ex6} \begin{equation} \gamma_{z}(t)=\int d\omega J(\omega)\coth{(\frac{\hbar\omega}{2k_{B}T})}\frac{\sin{(\omega t)}}{\omega}. \end{equation} The memory time of the dephasing environment can be defined by $1/\omega_{c}$. \textbf{To characterize} the relation between the cutoff frequency of the dephasing environment and the width of the spectral density of the thermal reservoir one can introduce a new parameter $\beta=\omega_{c}/\lambda$. In dephasing dynamics the non-Markovianity, $\gamma_{z}(t)<0$, occurs whenever $s>s_{crit}(T=0) = 2$ \cite{ex7}. Hence, the dynamics of the whole system can be determined by two parameters $R$ and $s$. \begin{figure} \caption{(Color online). Dynamics of the coherence $C_{r}$.}\label{case2} \end{figure} In this paper, we take $T=0$, thus the dephasing rate $\gamma_{z}(t)$ is independent of temperature and the heating and dissipation rates are zero and $f(t)$, respectively.
Then one can obtain the following expressions \begin{eqnarray} &&\Gamma(t)=-\Re(\ln{[u(t)]}),\nonumber \\ &&\Gamma_{z}(t)=\frac{\alpha}{s-1}\widetilde{\Gamma}(s)(1-(1+\omega^{2}_{c}t^{2})\nonumber\\ &&\times [\cos{(s\arctan{(\omega_{c}t)})}+\omega_{c}t\sin{(s\arctan{(\omega_{c}t}))}]),\nonumber \\ &&\kappa(t)=-(1-\exp{[\Re(\ln{[u(t)]})]}),\nonumber \\ \end{eqnarray} where $u(t)=\lbrace c(t)/c(0)\rbrace^{2}$ and $\widetilde{\Gamma}(s)$ is the Euler gamma function. At this point, we now return to the coherence problem of the phase covariant channel in Eq. (\ref{e29}). The dynamical behavior of coherence for quantum channel $\Lambda_{\omega}$ as a function of $\lambda t$ is shown in Fig. \ref{case2}. We assume that the memory time of the dephasing and thermal environments is the same, i.e., we choose the parameter $\beta=1$. In Fig. \ref{case2}a, the evolution is Markovian since we have $s=0.5$ and $R=0.01$. The coherence of the channel is plotted for $s=3.5$ and $R=10$ in Fig. \ref{case2}b, where the dynamics is non-Markovian. As can be seen, the coherence of the channel is decreasing in \textbf{the Markovian regime} over time, while in the non-Markovian regime the coherence of the channel oscillates with damping and increases in some time intervals; this behavior is due to the non-divisibility of the dynamics.\\ \section{Conclusion} In this paper, we \textbf{have shown} that the quantum-incoherent relative entropy of coherence ($\mathcal{QI}$ REC) of a quantum channel is equivalent to the $\mathcal{QI}$ REC of its Choi state. We have used the Choi-Jamiolkowski isomorphism, within the framework of QRT, to define it. Here, the coherence-breaking channels (CBCs) are considered as free operations and their corresponding Choi states as free states. It is demonstrated that the $\mathcal{QI}$ REC consists of two parts: the REC of the open system and the basis-dependent quantum asymmetric discord; the former is zero for both the quantum unital and quantum incoherent channels.
Also, it is shown that the $\mathcal{QI}$ REC is decreasing for any divisible quantum incoherent channel and it can be considered as a witness of \textbf{the non-Markovianity} for incoherent channels. \textbf{Also, we have proposed} that the coherence of \textbf{the channel} can be regarded as a measure of the quantumness of the channel. Finally, we found that the coherence of qubit channels \textbf{can coincide} with the REC of their corresponding Choi states and the basis-dependent quantum symmetric discord can never exceed the coherence. Ultimately, it is worthwhile to note that our results could open a new way to better understand the relationship between quantum coherence as a physical resource and other quantum resources, such as quantum correlations. \end{document}
\begin{document} \title{\bf {\LARGE How Time Works in Quantum Systems:} \\ {\Large Overview of time ordering and time correlation in weakly perturbed atomic collisions and in strongly perturbed qubits} \vskip 0.17in} \author{J. H. McGuire$^1$, L. Kaplan$^1$, \\ Kh. Kh. Shakov$^1$, A. Chalastaras$^1$, A. M. Smith$^1$, \\ A. Godunov$^2$, H. Schmidt-B\"{o}cking$^3$, \\ and D. Uskov$^4$ } \affiliation{ \vskip 0.1in $^1$ Department of Physics, Tulane University, New Orleans, LA 70118, USA \\ $^2$ Department of Physics, Old Dominion University, Norfolk, VA 23529, USA \\ $^3$ Institut f\"ur Kernphysik, Universit\"at Frankfurt, 60486 Frankfurt, Germany \\ $^4$ Department of Physics, Louisiana State University, Baton Rouge, LA 70803, USA \vskip 0.17in} \begin{abstract} Time ordering may be defined by first defining the limit of no time ordering (NTO) in terms of a time average of an external interaction, $V(t)$. Previously, time correlation was defined in terms of a similar limit called the independent time approximation (ITA). Experimental evidence for time correlation has not yet been distinguished from experimental evidence for time ordering. \end{abstract} \maketitle \vskip 3em \section{Introduction} \subsection{Space and time} Both space and time are parameters used to mathematically describe observable properties of physical objects or systems of objects. In some ways space and time are similar, both intuitively and mathematically, since they are the most basic coordinates used to describe the dynamics of physical systems. It is often convenient and conventional to use $t$ and $\vec r$ to specify when and where an object is, even in quantum systems where the precision (or locality) of these coordinates is limited by the uncertainty principle or obscured by entanglement. Nevertheless, space and time differ fundamentally (even neglecting the second law of thermodynamics, which defines a direction in time but no preferred direction or arrow in space). 
Because there is no counterpart of temporal causality in a space-like context, an object can repeatedly return to any spatial location, but it cannot return or jump ahead in time. Much has been written about spatial properties of multi-particle systems, but less about time, even though three-dimensional space would seem much harder to deal with than one-dimensional time. Time, unlike space, seems somehow enigmatic. Newton refers to time as ``like a river flowing.'' Einstein calls time ``that which a clock measures.'' And Feynman refers to time as the space between events, i.e. that which keeps {\LARGE EVERYthing}{\Large FROMhappen}{ing}{\small ALLat}{\tiny ONCE}. One purpose of this paper is to see what happens when we try to carry an idea formulated in a spatial context into a time context. That idea is correlation (associated with non-randomness, or quantum entanglement). Time correlation~\cite{mc01,ita} has been directly related to time ordering, a causality condition implied by the time-dependent Schr\"{o}dinger equation that constrains sequential interactions to occur in order of increasing time. \subsection{N-body problem} It is well known that the N-body problem, namely determining the quantum evolution of $N$ correlated or interacting bodies, is exponentially difficult. Kohn~\cite{kohn} specifically estimates that the amount of computer storage capacity needed to solve an N-body problem scales as $e^{3N}$, where 3 is simply a coefficient based on experience. Combining Kohn's estimate with Moore's law, which estimates, again based on experience, a doubling of computer capacity every 1.5 years, one can easily show that 6.5 years are required for computer capacity to grow enough to accommodate one additional body.
Thus, increasing capacity from, say helium (N = 3 ignoring nuclear and subnuclear structure) to carbon (N = 7) is estimated to require about 25 years, progressing to water (N = 11 ignoring the inner shell) requires about 50 years, and DNA thousands of times the age of the universe. The N-body problem is difficult. On the other hand, since much of physics, chemistry, biology, materials science, nanotechnology, and quantum computing involves multiple particles, the problem is significant. In quantum computing, for example, much has been understood about individual qubits (regarded here as single ``bodies"), but much less is understood about the coupled networks of qubits needed to build a quantum computer. Evidently, there is a need for sensible, well-defined approximations to the N-body problem. Perhaps the most widely used of these is the independent particle approximation (IPA), where the N-body problem is dramatically reduced in complexity to a problem of N independent bodies (or particles, quasi-particles, qubits, \ldots). \subsection{Correlation} It is sometimes argued that a theory is not well defined until a reliable method is developed to calculate corrections to the theory. In the case of the IPA, the corrections are called correlations and represent interconnections among the positions of the various particles. Correlation implies complexity: a system of independent (i.e. uncorrelated) particles is less complex than a system in which each particle's behavior depends on the behavior of every other particle. From one viewpoint, correlation is a key to understanding how to make complex systems from simple ones. From the opposite point of view, in a very complicated system correlation can be a pathway to see order through a landscape of chaos. Our approach is to focus on the first viewpoint -- building complex systems from simple ones. If the correlations are sufficiently small, then the IPA is reasonably accurate. 
Correlation, one method to approach the N-body problem, is defined as the difference between a full solution and the IPA. Thus, the IPA limit of no correlation is used to define correlation itself. While this approach appears a bit awkward at first, it is conventional in the study of non-random processes and quantum entanglement as well. For example, in the limit of no entanglement the N-body wave function may be written as a simple product of independent particle solutions, and entanglement may be defined as the deviation from this limit. In most applications, correlations arise from spatial interactions between particles. In N-electron atoms, for example, electron correlation is generated by the $1/r_{ij}$ Coulomb interactions between electrons. In this context, the IPA is also known as the independent electron approximation (IEA) and is defined by replacing these complicating correlation interactions by a sum of single-electron mean-field interactions. The resulting Hamiltonian is a sum of single-electron terms, even in the case of dynamic atomic systems~\cite{mcbook}, and the resulting wave function is a product of single-electron wave functions. One common method for analyzing deviations from the IEA is many-body perturbation theory (MBPT), used here and in other fields as well to describe correlation at various orders in a given perturbation. One of the primary questions motivating the work described here is, ``what happens if one tries to define an independent time approximation (ITA), where one eliminates correlations in time rather than the conventional correlations in space?'' In such an approximation, multiple times may be used for multiple particles~\cite{mg03}. This is simpler than using a single time for all particles, just as the use of N independent particle positions is simpler than trying to solve a single, but complicated, N-body problem.
Time correlation may then be defined as a deviation from the ITA in analogy to the definition of spatial correlation by reference to the IPA. \subsection{Correlation in time and time ordering} In this paper we first review early efforts to define the ITA, in which a key step is to remove time ordering between fields acting on different particles. This is closely related to the approximation of no time ordering (NTO), where all time ordering is removed for fields acting either on different particles or on the same particle. These early efforts to define and study the ITA were based on a perturbation expansion of the external interaction $V(t)$. Since time correlation or time ordering first appear at second order in $V(t)$, the observable effects are small and the calculations difficult. Next we review more recent investigations of time ordering and the NTO approximation for qubits in non-perturbative external fields. Therefore, the organization of this paper is unusual. The harder problem of time correlation is addressed first, and the related but more tractable problem of time ordering is discussed second. We follow this unusual order because it reflects what has happened in recent years and also because the newer work on time ordering raises some intriguing questions and challenges. In particular we emphasize that experimental evidence for time correlation has not yet been distinguished from evidence for time ordering. That is, no difference between the ITA and the NTO approximations has yet been observed in any experiment, although recent experiments have been progressing in this direction, as we illustrate below. 
\section{The N-body problem} In this section we consider solutions to the N-body problem described by a Hamiltonian \begin{eqnarray} \label{H} \hat H = \hat H_0 + \hat V_S(t) \,, \end{eqnarray} where there may be many terms in both the unperturbed $\hat H_0$, which we assume to be solvable, and in $\hat V_S(t)$, which we regard as an external time-dependent interaction (written in the Schr\"odinger representation). Where appropriate, $\hat V_S(t)$ may be treated as a classical external field. In the case of atomic collisions, this corresponds to the widely used semiclassical approximation (SCA) where the projectile is regarded as a classical particle if it is a proton or electron and a classical wave if it is a photon. The N-body problem may be solved in various representations, depending on how $\hat H$ is separated into $\hat H_0$ and $\hat V_S(t)$, ranging from the Schr\"{o}dinger representation where effectively $\hat V_S(t) = \hat H$ and $\hat H_0=0$ to the Heisenberg representation where $\hat H_0=\hat H$ and $\hat V_S(t)=0$. In the SCA, singularities can occur~\cite{mcbook} in the first order amplitudes when the Schr\"{o}dinger representation is used. Except where otherwise specified we shall use an appropriate intermediate representation that takes maximum advantage of known solutions for $\hat H_0$, and in which $\hat V(t)=e^{i\hat H_0 t}\hat V_S(t)e^{-i\hat H_0 t}$ causes transitions between eigenstates of $\hat H_0$. We work in atomic units, in which $e^2 = m_e = \hbar = 1$. \subsection{Formulation of the N-body problem in the time domain} We seek solutions $\Psi(r_1, \ldots, r_N;t)$ to the N-body problem described by Eq.~(\ref{H}). It is both conventional and sensible to separate the influence of the external interaction $\hat V(t)$ from the initial state at some time $t_0$ before the external fields are applied to the atomic system, namely, \begin{eqnarray} \label{Nbody} \Psi(r_1, \ldots, r_N;t) = \hat U(t,t_0) \Psi(r_1, \ldots, r_N;t_0) \,. 
\end{eqnarray} Here all dynamics are contained in the time evolution operator (or Green's function) $\hat U(t,t_0)$. It is easily shown that in the intermediate representation $\hat U(t,t_0)$ satisfies \begin{eqnarray} \label{deU} i \frac{d}{dt} \hat U(t,t_0) = \hat V(t) \, \hat U(t,t_0) \,. \end{eqnarray} The solution for $\hat U(t,t_0)$, which may be verified by insertion into Eq.~(\ref{deU}), is \begin{eqnarray} \label{U} \hat U(t,t_0) &=& 1 -i \int_{t_0}^{t} \hat V(t_1) \, dt_1 + (-i)^2 \int_{t_0}^{t} \hat V(t_1) \, dt_1 \int_{t_0}^{t_1} \hat V(t_2) \, dt_2 \nonumber \\ &+& \cdots \,+ (-i)^n \int_{t_0}^{t} \hat V(t_1) \, dt_1 \int_{t_0}^{t_1} \hat V(t_2) \, dt_2 \, \cdots \int_{t_0}^{t_{n-1}} \hat V(t_n) \, dt_n + \,\cdots \nonumber \\ &\equiv& T \sum_n \frac{(-i)^n}{n!} \left[ \int_{t_0}^{t} \hat V(t') \, dt' \right]^n = T e^{-i \int_{t_0}^t \hat V(t') \, dt'} \,. \end{eqnarray} Here $T$ is the Dyson time-ordering operator, which arranges the interactions $\hat V(t')$ in order of increasing time, similar to the requirements of causality. The time ordering operator is central to the discussion in this paper, as it relates both to observable time ordering effects and to time correlation. The key idea is that $T \, \hat V(t_1) \hat V(t_2) = \hat V(t_2) \hat V(t_1)$ if $t_2 > t_1$ and $\hat V(t_1) \hat V(t_2)$ otherwise. {\it The no time ordering (NTO) approximation is the approximation in which $T \to 1$, and the constraint of time ordering is not enforced.} In this limit all time sequences of the $\hat V(t')$ are equally weighted, so that \begin{equation} \hat U(t,t_0) \to \hat U_{\rm NTO}(t,t_0)= e^{-i \int_{t_0}^t \hat V(t') \, dt'} \end{equation} in Eq.~(\ref{U}). \subsection{Independent particle approximation (IPA)} As noted above, the N-body problem is notoriously difficult to solve. A particularly useful approximation is the independent particle approximation (IPA). 
For an atom with N electrons, $\hat H_0 = \sum_j ( -\nabla_j^2/2 - Z/r_j + \sum_{i< j} 1/r_{ij})$, where $Z$ is the nuclear charge and $\vec r_j$ are the electron positions in a coordinate system centered on the nucleus, and $\hat V_S(t) = -\sum_j Z_p/|\vec{R}(t) - \vec{r}_j|$, where $Z_p$ and $\vec R(t)$ are the charge and position of the projectile. If $\sum_{i<j} 1/r_{ij}$ is approximated by a mean field $\sum_j \hat v(\vec r_j)$, then $\hat H = \hat H_0 + \hat V_S(t)$ is reduced to a sum of single-particle terms that may be solved using separation of variables. One obtains the independent particle approximation (IPA) to the exact N-particle wave function, namely, \begin{eqnarray} \label{IPA} \Psi(r_1, \ldots,r_N;t) \simeq \prod_j\psi_j(r_j,t) = \prod_j \hat U_j(t,t_0) \, \psi_j(r_j,t_0) \ \ \ \ ({\rm IPA}) \,, \end{eqnarray} where each $\psi_j(r_j,t)$ is a single-particle wave function. We note here that in the IPA approximation, different times may be used for different particles, if so desired, since the particles are independent in both space and time. Whatever happens to one of the particles does not influence what happens to any of the others, although time ordering is retained within the evolution of each independent particle. \subsection{Independent time approximation (ITA)} In order to address the question, ``what happens if one tries to define an independent time approximation (ITA), where one explores correlations in time rather than the conventional correlations in space?", a list of comparisons between the ITA and IPA was developed~\cite{ita}. An updated comparison is summarized in Table~\ref{table1}. 
\begin{table} \caption{Comparison of correlation in space and time.} \begin{tabular}{||@{}lll||} \hline\hline & Spatial correlation & Temporal correlation \\ \hline\hline \hspace{0.3em} Cause: & $\hat v_{ij} = 1/r_{ij}$ & $T$ and $\hat V(t)$ \\ & spatially varying internal & time ordering of \\ & Coulomb interactions & external interactions \\ \hline & & \\[-10pt] \hspace{0.3em} Origin: & $\hat H_0 = \sum_j \hat H_{0j} + \sum_{i<j}\hat v_{ij}\;\;\;\;\;\;$ & $ i {d\over dt}{\hat U} = \hat V(t)\, \hat U $ \\[2pt] \hline \hspace{0.3em} Uncorrelated limit: $\;\;$& IPA & ITA \\ \hline & &\\[-10pt] \hspace{0.3em} Product form: & $\Psi(r_1,\ldots,r_N) \to \prod_j \psi_j(r_j)$ & $\Psi(t_1,\ldots,t_N) \to \sum_k c_k \prod_j \psi^{(k)}_j(t_j)$ \\[2pt] \hline & & \\[-10pt] \hspace{0.3em} No fluctuations: & $\hat v_{\rm cor} = \hat v_{ij} - \hat v_{\rm av} \to 0 $ & $T_{\rm cor} = T - T_{\rm av} \to 0 \;\;$ or $\;\;\delta \hat V(t)=\hat V(t)-\overline{\hat V} \to 0$ \\[2pt] \hline & & \\[-10pt] \hspace{0.3em} Average value: & $\hat v \to \hat v_{\rm av} = \hat v_{\rm mean \ field} $ & $T \to T_{\rm av} = 1\;\;$ or $\;\;\hat V(t) \to \overline{\hat V}$ \\[2pt] \hline\hline \end{tabular} \label{table1} \end{table} There are similarities between the temporal independent time approximation and the spatial independent particle approximation, as seen in Table~\ref{table1}. Time and space correlation can each be defined as a deviation from an uncorrelated limit, where the uncorrelated limit is given by a product form. Electron identity, which has been ignored here for simplicity of presentation, may be restored by antisymmetrizing the uncorrelated single-electron wave functions. The uncorrelated limit may also be described by an average of the appropriate correlation operator, as indicated in Table~\ref{table1}. Correlation may then be defined in terms of fluctuations away from the average, as is done in statistical mechanics~\cite{Balescu}. 
There are also notable differences~\cite{time} between temporal and spatial correlation, as detailed in Table~\ref{table1}.
From Eq.~(\ref{U}) we see that the leading effect due to time ordering arises at second order in the $\hat V(t)$ expansion. Thus the system has an infinite number of states but the external interaction $\hat V(t)$ only acts on the system twice. In Sec.~\ref{secqubits}, we consider a strongly perturbed qubit, a two-state system that interacts with the external field $\hat V(t)$ an infinite number of times. Through second order in $\hat V(t)$, the time evolution operator $\hat U(t,t_0)$ is given by \begin{eqnarray} \label{U2} \hat U(t,t_0) &=& T e^{-i \int_{t_0}^{t} \hat V(t') \, dt'} \simeq 1 -i \int_{t_0}^{t} \hat V(t_1) \, dt_1 - \int_{t_0}^{t} \hat V(t_1) \, dt_1 \int_{t_0}^{t_1} \hat V(t_2) \, dt_2 \nonumber \\ &\equiv& 1 -i \int_{t_0}^{t} \hat V(t_1) dt_1 - T \frac{1}{2!} \int_{t_0}^{t} \hat V(t_1) \, dt_1 \int_{t_0}^{t} \hat V(t_2) \, dt_2 \,. \end{eqnarray} This may also be obtained by integrating Eq.~(\ref{deU}) through second order. Note that as $T \to 1$ the order in which the interactions act may be interchanged. It is the difference between the limit as $T \to 1$ and the full result that defines the effects of time ordering. In the case of weak correlation considered here, the NTO limit of $T \to 1$ will coincidentally yield the ITA as well. The trick now is to separate $T$ from $T_{\rm av} = 1$. \subsection{Primary results} \subsubsection{Various pathways to ITA} \label{secpathways} To separate the time ordering effects from the non-time ordering (NTO) effects, it is useful to write, \begin{eqnarray} \label{T-Tav} T = T_{\rm av} + (T - T_{\rm av}) \end{eqnarray} where $T_{\rm av}$ yields the NTO approximation and $T - T_{\rm av}$ yields the effects of time ordering. This decomposition of $T$ into an average part plus fluctuations is central for this paper. Note that $T_{\rm av}=1$ is required to satisfy the initial condition $\hat U(t_0,t_0) = 1$. One conceptual pathway to the ITA proceeds by analogy with the NTO limit. 
In the ITA, time ordering is enforced among all potentials acting on an individual particle (e.g. for $\hat V_i(t_1)\hat V_i(t_2)$ at second order), but ignored for different particles in cross terms such as $\hat V_i(t_1)\hat V_j(t_2)$ when $i \ne j$. This distinguishes the ITA from the NTO approximation where time ordering is removed for all terms. Another clever conceptual pathway to time ordering and the ITA, noticed by Godunov~\cite{ita}, is via use of commutator relations. Consider the identity, which we call the Godunov identity~\cite{gc}, \begin{equation} \label{Crel} \hat V(t_1) \hat V(t_2) = \frac{1}{2}\left (\hat V(t_1) \hat V(t_2) + \hat V(t_2) \hat V(t_1)\right ) + \frac{1}{2} [\hat V(t_1),\hat V(t_2)] \,. \end{equation} In the case of atomic collisions with helium, the projectile interacts with both electrons so that $\hat V(t') = \hat V_1(t') + \hat V_2(t')$. Now $[\hat V(t_1),\hat V(t_2)]$ contains both $[\hat V_i(t_1),\hat V_i(t_2)]$ and $[\hat V_i(t_1),\hat V_j(t_2)]$ ($i \neq j$) terms. In the NTO approximation, all commutator terms are eliminated, i.e. $[\hat V(t_1),\hat V(t_2)] \to 0$ and $\hat V(t_1) \hat V(t_2) \to \frac{1}{2}\left(\hat V(t_1) \hat V(t_2) + \hat V(t_2) \hat V(t_1)\right)$ so that the time evolution operator at second order is given by the average value of both possible time sequences. In the ITA, only cross commutator terms between fields acting on different particles are neglected, i.e. $[\hat V_i(t_1),\hat V_j(t_2)] \to 0$ and $\hat V_i(t_1) \hat V_j(t_2) \to \frac{1}{2}\left(\hat V_i(t_1) \hat V_j(t_2) + \hat V_j(t_2) \hat V_i(t_1)\right)$ only for $i \ne j$. A third pathway to the ITA is through elimination of off-energy-shell effects. 
The Fourier transform of the Heaviside theta function is well known, namely
Thus, for a given decomposition $\hat H=\hat H_0+\hat V_S(t)$, it is the interaction-representation potential $\hat V(t)=e^{i \hat H_0 t}\hat V_S(t)e^{-i\hat H_0 t}$ that must be constant for the NTO or ITA approximation to be exact.
Unfortunately, at this point no simple, physically transparent criterion is known to us that determines when the NTO or ITA approximation is valid.
\begin{figure}
% NOTE: original caption text was garbled in the source; reconstructed from the surrounding discussion.
\caption{\footnotesize{Polarization of light emitted after excitation-ionization of helium by proton impact: comparison of experiment~\cite{polarize} with calculations performed with and without time ordering.}}
\label{f1}
\end{figure}
The interactions of qubits with each other and with their environment still have to be dealt with before a quantum computer becomes a reality.
\left[ \begin{array}{cc} 0 & V(t) \\ V(t) & 0 \end{array} \right]
\begin{figure}
% NOTE: original caption text was garbled in the source; reconstructed from the surrounding discussion.
\caption{\footnotesize{Qubit map: regimes of qubit behavior as a function of the dimensionless level splitting $\Delta E\,\tau/2$ and the dimensionless pulse strength $\int_0^\infty V(t')\,dt'$.}}
\label{f2}
\label{figmap}
\end{figure}
strongly time-localized) potentials because: i) the physics is then naturally analyzed in the time domain, and ii) kicks have been less widely explored than the RWA approach, which works well for sharp pulses in the reciprocal frequency space. While the RWA method is useful for two-state systems perturbed by an external interaction of narrow bandwidth, it fails to describe simply kicked two-state systems where the pulse bandwidth may be very broad. \subsubsection{Kicked qubits} A useful approximation for simply pulsed qubits is the fast, narrow pulse or ``kick" limit in which the width $\tau$ of the pulse goes to zero, while the integrated strength or area under the external potential curve \begin{equation} \alpha=\int_0^t V(t') \, dt' \end{equation} remains fixed. Formally, the shape of a very narrow pulse of finite total strength $\alpha$ may be expressed by a delta function: $V(t')=\alpha \,\delta(t'-t_k)$, where $t_k$ is the time at which the pulse is centered. The kicked region corresponds to the lower half of the qubit map in Fig.~\ref{figmap}. Here the duration of the pulse is so short that $\Delta E\,\tau/2 \ll 2\pi$, i.e. there is not enough time for the splitting $\Delta E$ to have a significant effect while the pulse is active. The integrated strength of the pulse, $\alpha$, may be either large or small in this region. If $\alpha$ is large, we are in the lower right quadrant of the map, where the kicked region overlaps with the adiabatic region. If $\alpha$ is small, we are in the lower left quadrant, where the kicked region overlaps with the perturbative region. 
\vskip 1em {\bf Single kick} \vskip 1em For a two-state system subjected to a single kick at time $t_k$, corresponding to $V(t') = \alpha \, \delta(t'-t_k)$, the integration over time is trivial and the time evolution operator in Eq.~(\ref{uoperator}) becomes \begin{eqnarray} \label{Usk} \hat U^k(t) &=& T \exp \left[ -i\int_0^t e^{- i \Delta E\sigma_z t'/2} \alpha \, \delta(t'-t_k) \sigma_x e^{i \Delta E\, \sigma_z t'/2} dt' \right] \nonumber \\ &=& \exp \left[ -i \alpha e^{-i \Delta E \,\sigma_z t_k/2} \sigma_x e^{i \Delta E \,\sigma_z t_k/2} \right] \nonumber \\ &=& \exp \left[ -i \alpha \pmatrix{ e^{-i \Delta E \,t_k/2} & 0 \cr 0 & e^{i \Delta E \,t_k/2}} \pmatrix{0 & 1 \cr 1 & 0} \pmatrix{ e^{i \Delta E \,t_k/2} & 0 \cr 0 & e^{-i \Delta E \, t_k/2}} \right] \nonumber \\ &=& \exp \left[ -i \alpha \pmatrix{ 0 & e^{- i \Delta E \, t_k} \cr e^{i \Delta E \, t_k} & 0} \right] \nonumber \\ &=& \pmatrix{ \cos\alpha & -ie^{-i \Delta E \, t_k} \sin\alpha \cr -ie^{i \Delta E \, t_k} \sin\alpha & \cos\alpha} \end{eqnarray} for $t > t_k$. The last line of Eq.~(\ref{Usk}) was obtained by expanding the fourth line in powers of $\alpha$ using the identity $\pmatrix{ 0 & e^{- i \Delta E \, t_k} \cr e^{i \Delta E \, t_k} & 0 }^{2n} = I$. Equivalently, one can take advantage of the useful identity (which we also used in the third line) $e^{i\phi\, \vec{\sigma} \cdot \hat{u} } = \cos \phi \ \ \hat{I} + i \sin \phi \ \ \vec{\sigma} \cdot \hat{u}$, where $\hat{u}$ is an arbitrary unit vector. Note that $\hat U^k(t)$ is independent of the final time $t$ in the intermediate representation. The occupation probabilities for a kicked qubit initially in state 1 are \begin{eqnarray} \label{Pk1} P_1(t) &=& |a_1(t)|^2 = |U_{11}^k(t)|^2 = \cos^2 \alpha \nonumber \\ P_2(t) &=& |a_2(t)|^2 = |U_{21}^k(t)|^2 = \sin^2 \alpha \ . \end{eqnarray} This simple example may be extended to a series of kicks~\cite{kaplantime}. 
It is one of the few cases in which analytic solutions may be obtained for qubits controlled by external potentials. \vskip 1em {\bf Double kicks} \vskip 1em The simplest example of a series of arbitrary kicks is a sequence of two kicks of strengths $\alpha_1$ and $\alpha_2$, applied at times $t_1$ and $t_2$ respectively, i.e. $\hat V_S(t) = (\alpha_1 \delta(t - t_1) + \alpha_2 \delta(t - t_2)) \sigma_x$. Eq.~(\ref{uoperator}) is then easily solved in the interaction representation, namely, \begin{eqnarray} \label{U2a} \hat U^{k_2,k_1} &=& \hat U^{k_2} \times \hat U^{k_1} \nonumber \\ &=& \pmatrix{ \cos\alpha_2 & -ie^{-i \Delta E \, t_2} \sin\alpha_2 \cr -ie^{i \Delta E \, t_2} \sin\alpha_2 & \cos\alpha_2 } \times \pmatrix{ \cos\alpha_1 & -ie^{-i \Delta E \, t_1} \sin\alpha_1 \cr -ie^{i \Delta E \, t_1} \sin\alpha_1 & \cos\alpha_1 } \nonumber \\ &=& \pmatrix{ U_{11} & U_{12} \cr U_{21} & U_{22} } \ \ , \end{eqnarray} where \begin{eqnarray} \label{Uij} U_{11} &=& \cos\alpha_1 \cos\alpha_2 - \sin\alpha_1 \sin\alpha_2\,e^{-i \Delta E \, t_-} \ , \\ \nonumber U_{21} &=& -i e^{ i {\Delta E}\, t_+/2} (\cos\alpha_1 \sin\alpha_2 \,e^{i {\Delta E} \,t_-/2} + \sin\alpha_1 \cos\alpha_2 \,e^{-i {\Delta E} \,t_-/2}) \ . \end{eqnarray} Here $t_- = t_2 - t_1$, and $t_+ = t_1 + t_2$. In the limit $t_2 \to t_1$, Eq.~(\ref{U2a}) reduces to Eq.~(\ref{Usk}) with $\alpha \to \alpha_1+\alpha_2$. Note that $[\hat U^{k_2} , \hat U^{k_1}] \neq 0$ so that the time ordering of the interactions is important. The algebra for a combination of two arbitrary kicks~\cite{shakov05}, one proportional to $\sigma_y$ and the other proportional to $\sigma_x$, is very similar to the above. Triple kicks are also straightforward to solve analytically. \subsubsection{Time ordering in a doubly kicked qubit} In this subsection we use our analytic expressions to examine the effect of the Dyson time ordering operator $T$ in a kicked two-state system. 
Time ordering has been considered previously in the context of atomic collisions with charged particles~\cite{mcbook,gm01,zhao,aarhus,bruch} and differs somewhat from the order in which external pulses are applied, as illustrated below. As is intuitively evident, there is no time ordering in a singly kicked qubit~\cite{kick} since there is only one kick. The simplest kicked two-state system that shows an effect due to time ordering is the qubit kicked by two equal and opposite pulses labeled $k$ and $-k$ separated by a time interval $t_- = t_2 - t_1$. The evolution matrix for this system is~\cite{shakov05}, \begin{eqnarray} \label{U-kk1} \hat U^{-k,k}= \pmatrix{ e^{-i\Delta E\,t_-/2}(\cos\frac{\Delta E}{2}t_- +i \cos 2 \alpha \sin \frac{\Delta E}{2}t_-) & e^{-i \Delta E \, t_+} \sin 2 \alpha \sin \frac{\Delta E}{2} t_- \cr -e^{ i \Delta E \, t_+} \sin 2 \alpha \sin \frac{\Delta E}{2} t_- & e^{i\Delta E \,t_-/2}(\cos\frac{\Delta E}{2}t_- -i \cos 2 \alpha \sin \frac{\Delta E}{2}t_-) } \ . \end{eqnarray} The time evolution operator $\hat U^{(0)}$ in the limit of no time ordering, i.e. in the approximation $T \to 1$, may in principle be generally obtained~\cite{kick} by replacing $\int_0^t \hat V(t') \,dt' $ with $ \overline{\hat V} t $, where $ \overline{\hat V}$ is an average (constant) value of the interaction. In our case, it is then straightforward to show \begin{eqnarray} \label{U-kk20} \hat U^{(0) -k, k }= e^{-i\bar{\hat V} t} = \pmatrix{ \cos( 2 \alpha \sin \frac{\Delta E}{2} t_-) & e^{-i \Delta E \, t_+} \sin(2 \alpha \sin \frac{\Delta E}{2} t_-) \cr -e^{ i \Delta E \, t_+} \sin(2 \alpha \sin \frac{\Delta E}{2} t_-) & \cos( 2 \alpha \sin \frac{\Delta E}{2} t_-) } \ . \end{eqnarray} In this example we now have analytic expressions for the matrix elements of both the evolution operator $\hat U^{-k ,k}$ that contains time ordering and the evolution operator $\hat U^{(0) -k, k }$ without time ordering. 
\begin{figure}
% NOTE: original caption text was garbled in the source; reconstructed from the surrounding discussion.
\caption{\footnotesize{Effect of time ordering, $P_2 - P_2^{(0)}$, for a doubly kicked qubit as a function of the kick strength $\phi = 2\alpha$ and the separation parameter $\epsilon = \sin\frac{\Delta E}{2} t_-$.}}
\label{f3}
\label{figdifference}
\end{figure}
As either of these two parameters gets sufficiently large, the effect of time ordering oscillates with increasing values of the interaction strength or the inter-pulse separation time. Time ordering effects are present even though $\hat U^{-k,k} = \hat U^{k,-k}$. \subsubsection{Time ordering vs. time reversal} Let us now pause to examine the difference between time ordering and time reversal in this simple, illustrative example. Reversal of time ordering means that, since kick strengths $\alpha_k$ and kick times $t_k$ are both interchanged, $t_- \to -t_-$ and $\alpha \to -\alpha$. In this case one sees from Eqs.~(\ref{U-kk1}) and (\ref{U-kk20}) that $\hat U^{-k ,k}$ experiences a change of phase, while $\hat U^{(0) -k ,k}$ is invariant under change of time ordering. For time reversal~\cite{gw}, $t_\pm \to -t_\pm$ and, since the initial and final states are also interchanged, $\hat{U} \to \hat{U}^{\dag}$. Inspection of the same equations as above shows that $\hat U^{-k, k}$ and $\hat U^{(0) -k, k}$ are both invariant under time reversal, as expected. When the symmetry between the kicks $k_1$ and $k_2$ is broken, i.e. $\alpha_2 \ne \pm \alpha_1$, the difference between $\hat U^{k_2,k_1}=\hat U^{k_2} \hat U^{k_1}$ and $\hat U^{k_1,k_2}=\hat U^{k_1} \hat U^{k_2}$ can be observed~\cite{shakov05}. \subsubsection{Numerical calculations of time ordering} As an illustrative specific example, we present the results of numerical calculations for $2s \to 2p$ transitions in atomic hydrogen caused by a Gaussian pulse of finite width $\tau$. The occupation probabilities of the $2s$ and $2p$ states are evaluated by integrating the two-state equations using a standard fourth order Runge-Kutta method. This enables us to verify the validity of our analytic solutions for kicked qubits in the limit $\tau \to 0$ and also to consider the effects of finite pulse width. 
In this system, the unperturbed level splitting is the $2s-2p$ shift, $\Delta E= E_{2p} - E_{2s} = 4.37 \times 10^{-6}\,{\rm eV} $. The corresponding time scale is the Rabi time, $T_{\Delta E}=2\pi /\Delta E= 972$~ps, which gives the period of oscillation between the states. In our numerical calculations we use for convenience a Gaussian pulse of the form $V(t) = (\alpha /\sqrt{\pi} \tau) e^{-(t-t_k)^2/\tau^2}$. The two-state coupled equations for the amplitudes $a_1$ and $a_2$ of Eq.~(\ref{psi}) take the form implied by Eq.~(\ref{genh0}), \begin{eqnarray} \label{2s2p-1} i \dot{a}_1 &=& - \frac{1}{2} \Delta E \, a_1 + {\alpha \over \sqrt{\pi}\tau} e^{-(t-t_k)^2/\tau^2} a_2 \nonumber \\ i \dot{a}_2 &=& \ \frac{1}{2} \Delta E \, a_2 + {\alpha \over \sqrt{\pi}\tau} e^{-(t-t_k)^2/\tau^2} a_1 \ \ . \end{eqnarray} Here the pulse is applied at $t_k=150$ ps and we have chosen $\alpha = \pi/2$ so that in the limit of a perfect kick all of the population will be transferred from the $2s$ to the $2p$ state after $t = t_k$. In Fig.~\ref{fignumeric}, the resulting transition probability is shown as a function of pulse width $\tau$ and as a function of observation time $T_f$ for $T_f>t_k$. \begin{figure} \caption{\footnotesize{\label{f4} \label{f4} \label{fignumeric} \end{figure} In the Schr\"{o}dinger picture there are very large differences between the transition probabilities with and without time ordering, $P_2(T_f)$ and $P^{(0)}_2(T_f)$, even for an ideal kick. This occurs because the energy splitting $\Delta E$ is non-zero, and for $T_f > \alpha /\Delta E = \alpha \,T_{\Delta E}/ 2\pi $, the average potential $\bar V=\alpha/T_f$ becomes smaller than the energy splitting $\Delta E$. Thus, for a given pulse, the influence of the potential necessarily decreases at large $T_f$, and any transfer probability becomes exponentially small. 
In effect, the free propagation before and after the pulse diminishes the effect of the pulse itself in the Schr\"{o}dinger picture, when time ordering is removed. This behavior contrasts with the intermediate picture result, where $P_{I \, 2}^{(0)}(T_f)$ depends on $\Delta E \,\tau/2$ but not on $T_f$, as seen on the left side of Fig.~\ref{fignumeric}. The contrast is also evident on the right hand side of Fig.~\ref{fignumeric}, where, after the pulse has died off, the value of $P_{2}^{(0)}(T_f)$ decays with increasing $T_f$, while $P_{I \, 2}^{(0)}(T_f)$ approaches a constant. \subsection{Networks of qubits} To our knowledge relatively little has been understood analytically for systems of coupled qubits. Understanding coupling between qubits is a well recognized challenge in the field of quantum computing~\cite{roadmap}. In our view, developing a realistic analytic model for two coupled qubits could provide a useful and instructive example in the fields of quantum computing, quantum information and coherent control. Unitary evolution operators acting on a system of non-interacting qubits formally belong to the $SU(2) \times SU(2) \times \cdots\times SU(2)$ unitary group, which is simply the set of all local qubit operations. This set is a subgroup of the full $SU(2^N)$ dynamic group of N coupled qubits. The $SU(4)$ dynamic group of two interacting qubits plays a fundamental role in the analysis of multi-qubit dynamics since any operator (i.e. quantum gate) from the full $SU(2^N)$ group can be factorized as a product of $SU(4)$ two-qubit gates. In connection with this property, it is useful to note that any $4$-level quantum system can be used to encode a tensor-product four-dimensional Hilbert space of two qubits. The specific form of such encoding is completely determined~\cite{rau05} by fixing one point on the orbit of the maximal $SU(2) \times SU(2)$ subgroup of $SU(4)$. Next, the $3$-qubit system (e.g. 
a carrier space for GHZ multiparticle entangled states) can be similarly encoded in an arbitrary $8$-level quantum system. This requires two steps: identifying the maximal $SU(4) \times SU(2)$ subgroup of $SU(8)$, and then adjusting the $SU(2)\times SU(2)$ subgroup of the resulting $SU(4)$ group. It would be useful to have a well developed theory for time correlations between interacting qubits -- one that clarifies how the time dependence of a field acting on one qubit impacts the time evolution of another qubit, for example in switching. However, defining the independent time approximation for N-qubit systems in a useful way is a challenge. Although working with $n=2^N$ degenerate states is doable in principle~\cite{rakhimov04} (setting aside the problem of solving an $n$-th order equation for $n>4$), this does not always give the NTO approximation, seemingly a prerequisite for the ITA approximation needed to define time correlations. Furthermore, in many cases sequencing of external interactions can be problematic. For example, if $\hat V(t') = \hat V_A(t') + \hat V_B(t')$ for two particles or qubits $A$ and $B$, and $\hat U = T e^{-i\int (\hat V_A(t') + \hat V_B(t')) dt'}$, then the ITA is {\it not} $\hat U_A \cdot \hat U_B$ in general. The ITA {\it is} given by $\hat U_A \cdot \hat U_B =\hat U_B \cdot \hat U_A$ if $[\hat V_A(t),\hat V_B(t')] = 0$, but in that case all time correlations vanish and the ITA is exact. Also, it difficult for us to envision how one may satisfy $[\hat U_A(t),\hat U_B(t')]=0$ with $[\hat U_J(t),\hat U_J(t')] \neq 0$ for $J = A,$ $B$. That is, in what situations can one eliminate inter-particle time correlations while retaining time ordering for individual particles? If {\it all} commutators terms vanish, then time correlation effects and time ordering for individual particles are both absent. 
Similarly, time correlation effects disappear if all parts of a composite $\hat V(t)$ are replaced by the time averaged value $\overline{\hat V}$, but this again is equivalent to eliminating all time ordering, even within single-particle evolution. \section{Summary} In solutions of the time dependent Schr\"{o}dinger equation there are only two sources of time dependence, namely the Dyson time ordering operator $T$ and the explicit time dependence of the interaction $V(t)$. This simplifies the study of how time dependence may influence the evolution of $N$-body quantum systems. The causal-like constraint of time ordering between fields acting on different particles can cause time correlation between the particles. That is, the time dependence of a field acting on one particle can influence the evolution of other particles. Correlation is traditionally studied by defining an uncorrelated limit. In the case of time correlation, we have called this the independent time approximation (ITA), and have pointed out similarities (e.g. in Table~\ref{table1}) to the widely used and practical independent particle approximation (IPA) that eliminates spatial correlations between particles. Similarly, the limit of no time ordering (NTO) can be defined by eliminating {\it all} time ordering constraints, for fields acting on the same particle or on different particles. Thus, the ITA may be viewed as the NTO applied to cross terms only. The ITA or NTO limit may be reached in several ways, but the most general seems to be to define a mean time-averaged coupling interaction (as is done in the IPA). Time ordering effects have been observed in weakly perturbed atomic collisions. One may also consider strongly perturbed systems of coupled qubits. However, relatively little has been done on this problem, which is a key problem in quantum computing. In this paper we considered the effect of time ordering on strongly perturbed single qubits. 
To do this, we focused on qubits subjected to fast strong external pulses called kicks, where useful analytic expressions for observable transition probabilities may be obtained. What we have found (but not discussed here) is that the NTO may be easier to implement than the ITA. This means that the ITA might be computationally awkward in general. We have also demonstrated that the NTO (and consequently the ITA) are dependent on the representation used. There is evidence that the intermediate representation is preferred. This suggests that one could find gauge dependence in specific NTO and ITA terms if MBPT is used, and raises a question~\cite{mcbook,uskov04} about the physical meaning of time ordering and time correlation. In summary, the methods we have developed in this paper are intended to probe the nature of how time works in quantum N-body systems. The focal point in our approach is the influence of the constraint imposed by time ordering, which can lead to time correlation between different parts of the system. \end{document}
\begin{document} \title{Proof of a conjecture of Bergeron, Ceballos and Labb\'e} \author{Alexander Postnikov and Darij Grinberg} \date{\today} \maketitle \begin{abstract} The reduced expressions for a given element $w$ of a Coxeter group $\left( W,S\right) $ can be regarded as the vertices of a directed graph $\mathcal{R}\left( w\right) $; its arcs correspond to the braid moves. Specifically, an arc goes from a reduced expression $\overrightarrow{a}$ to a reduced expression $\overrightarrow{b}$ when $\overrightarrow{b}$ is obtained from $\overrightarrow{a}$ by replacing a contiguous subword of the form $stst\cdots$ (for some distinct $s,t\in S$) by $tsts\cdots$ (where both subwords have length $m_{s,t}$, the order of $st\in W$). We prove a strong bipartiteness-type result for this graph $\mathcal{R}\left( w\right) $: Not only does every cycle of $\mathcal{R}\left( w\right) $ have even length; actually, the arcs of $\mathcal{R}\left( w\right) $ can be colored (with colors corresponding to the type of braid moves used), and to every color $c$ corresponds an \textquotedblleft opposite\textquotedblright\ color $c^{\operatorname*{op}}$ (corresponding to the reverses of the braid moves with color $c$), and for any color $c$, the number of arcs in any given cycle of $\mathcal{R}\left( w\right) $ having color in $\left\{ c,c^{\operatorname*{op}}\right\} $ is even. This is a generalization and strengthening of a 2014 result by Bergeron, Ceballos and Labb\'{e}. \end{abstract} \section*{Introduction} Let $\left( W,S\right) $ be a Coxeter group\footnote{All terminology and notation that appears in this introduction will later be defined in more detail.} with Coxeter matrix $\left( m_{s,s^{\prime}}\right) _{\left( s,s^{\prime}\right) \in S\times S}$, and let $w\in W$. 
Consider a directed graph $\mathcal{R}\left( w\right) $ whose vertices are the reduced expressions for $w$, and whose arcs are defined as follows: The graph $\mathcal{R}\left( w\right) $ has an arc from a reduced expression $\overrightarrow{a}$ to a reduced expression $\overrightarrow{b}$ whenever $\overrightarrow{b}$ can be obtained from $\overrightarrow{a}$ by replacing some contiguous subword of the form $\underbrace{\left( s,t,s,t,\ldots \right) }_{m_{s,t}\text{ letters}}$ by $\underbrace{\left( t,s,t,s,\ldots \right) }_{m_{s,t}\text{ letters}}$, where $s$ and $t$ are two distinct elements of $S$. (This replacement is called an $\left( s,t\right) $\textit{-braid move}.) The directed graph $\mathcal{R}\left( w\right) $ (or, rather, its undirected version) has been studied many times; see, for example, \cite{ReiRoi11} and the references therein. In this note, we shall prove a bipartiteness-type result for $\mathcal{R}\left( w\right) $. Its simplest aspect (actually, a corollary) is the fact that $\mathcal{R}\left( w\right) $ is bipartite (i.e., every cycle of $\mathcal{R}\left( w\right) $ has even length); but we shall concern ourselves with stronger statements. We can regard $\mathcal{R} \left( w\right) $ as an edge-colored directed graph: Namely, whenever a reduced expression $\overrightarrow{b}$ is obtained from a reduced expression $\overrightarrow{a}$ by an $\left( s,t\right) $-braid move, we color the arc from $\overrightarrow{a}$ to $\overrightarrow{b}$ with the conjugacy class\footnote{A \textit{conjugacy class}\ here means an equivalence class under the relation $\sim$ on the set $S\times S$, which is given by \[ \left( \left( s,t\right) \sim\left( s^{\prime},t^{\prime}\right) \ \Longleftrightarrow\ \text{there exists a }q\in W\text{ such that } qsq^{-1}=s^{\prime}\text{ and }qtq^{-1}=t^{\prime}\right) . 
\] The conjugacy class of an $\left( s,t\right) \in S\times S$ is denoted by $\left[ \left( s,t\right) \right] $.} $\left[ \left( s,t\right) \right] $ of the pair $\left( s,t\right) \in S\times S$. Our result (Theorem \ref{thm.BCL}) then states that, for every such color $\left[ \left( s,t\right) \right] $, every cycle of $\mathcal{R}\left( w\right) $ has as many arcs colored $\left[ \left( s,t\right) \right] $ as it has arcs colored $\left[ \left( t,s\right) \right] $, and that the total number of arcs colored $\left[ \left( s,t\right) \right] $ and $\left[ \left( t,s\right) \right] $ in any given cycle is even. This generalizes and strengthens a result of Bergeron, Ceballos and Labb\'{e} \cite[Theorem 3.1]{BCL}. \subsection*{Acknowledgments} We thank Nantel Bergeron and Cesar Ceballos for introducing us to the problem at hand, and the referee for useful remarks. \section{\label{sect.motivate-ex}A motivating example} Before we introduce the general setting, let us demonstrate it on a simple example. This example is not necessary for the rest of this note (and can be skipped by the reader\footnote{All notations introduced in Section \ref{sect.motivate-ex} should be understood as local to this section; they will not be used beyond it (and often will be replaced by eponymic notations for more general objects).}); it merely provides some intuition and motivation for the definitions to come. For this example, we fix an integer $n\geq1$, and we let $W$ be the symmetric group $S_{n}$ of the set $\left\{ 1,2,\ldots,n\right\} $. For each $i\in\left\{ 1,2,\ldots,n-1\right\} $, let $s_{i}\in W$ be the transposition which switches $i$ with $i+1$ (while leaving the remaining elements of $\left\{ 1,2,\ldots,n\right\} $ unchanged). Let $S=\left\{ s_{1} ,s_{2},\ldots,s_{n-1}\right\} \subseteq W$. 
The pair $\left( W,S\right) $ is an example of what is called a \textit{Coxeter group} (see, e.g., \cite[Chapter 4]{Bourbaki4-6} and \cite[\S 1]{Lusztig-Hecke}); more precisely, it is known as the Coxeter group $A_{n-1}$. In particular, $S$ is a generating set for $W$, and the group $W$ can be described by the generators $s_{1} ,s_{2},\ldots,s_{n-1}$ and the relations \begin{align} s_{i}^{2} & =\operatorname*{id}\ \ \ \ \ \ \ \ \ \ \text{for every } i\in\left\{ 1,2,\ldots,n-1\right\} ;\label{eq.exam.A3.quad}\\ s_{i}s_{j} & =s_{j}s_{i}\ \ \ \ \ \ \ \ \ \ \text{for every }i,j\in\left\{ 1,2,\ldots,n-1\right\} \text{ such that }\left\vert i-j\right\vert >1;\label{eq.exam.A3.braid1}\\ s_{i}s_{j}s_{i} & =s_{j}s_{i}s_{j}\ \ \ \ \ \ \ \ \ \ \text{for every }i,j\in\left\{ 1,2,\ldots,n-1\right\} \text{ such that }\left\vert i-j\right\vert =1. \label{eq.exam.A3.braid2} \end{align} This is known as the \textit{Coxeter presentation} of $S_{n}$, and is due to Moore (see, e.g., \cite[(6.23)--(6.25)]{CoxMos80} or \cite[Theorem 1.2.4]{Williamson}). Given any $w\in W$, there exists a tuple $\left( a_{1},a_{2},\ldots ,a_{k}\right) $ of elements of $S$ such that $w=a_{1}a_{2}\cdots a_{k}$ (since $S$ generates $W$). Such a tuple is called a \textit{reduced expression} for $w$ if its length $k$ is minimal among all such tuples (for the given $w$). For instance, when $n=4$, the permutation $\pi\in S_{4}=W$ that is written as $\left( 3,1,4,2\right) $ in one-line notation has reduced expressions $\left( s_{2},s_{1},s_{3}\right) $ and $\left( s_{2} ,s_{3},s_{1}\right) $; in fact, $\pi=s_{2}s_{1}s_{3}=s_{2}s_{3}s_{1}$. (We are following the convention by which the product $u\circ v=uv$ of two permutations $u,v\in S_{n}$ is defined to be the permutation sending each $i$ to $u\left( v\left( i\right) \right) $.) Given a $w\in W$, the set of reduced expressions for $w$ has an additional structure of a directed graph. 
Namely, the equalities (\ref{eq.exam.A3.braid1} ) and (\ref{eq.exam.A3.braid2}) show that, given a reduced expression $\overrightarrow{a}=\left( a_{1},a_{2},\ldots,a_{k}\right) $ for $w\in W$, we can obtain another reduced expression in any of the following two ways: \begin{itemize} \item Pick some $i,j\in\left\{ 1,2,\ldots,n-1\right\} $ such that $\left\vert i-j\right\vert >1$, and pick any factor of the form $\left( s_{i},s_{j}\right) $ in $\overrightarrow{a}$ (that is, a pair of adjacent entries of $\overrightarrow{a}$, the first of which is $s_{i}$ and the second of which is $s_{j}$), provided that such a factor exists, and replace this factor by $\left( s_{j},s_{i}\right) $. \item Alternatively, pick some $i,j\in\left\{ 1,2,\ldots,n-1\right\} $ such that $\left\vert i-j\right\vert =1$, and pick any factor of the form $\left( s_{i},s_{j},s_{i}\right) $ in $\overrightarrow{a}$, provided that such a factor exists, and replace this factor by $\left( s_{j},s_{i},s_{j}\right) $. \end{itemize} In both cases, we obtain a new reduced expression for $w$ (provided that the respective factors exist). We say that this new expression is obtained from $\overrightarrow{a}$ by an $\left( s_{i},s_{j}\right) $\textit{-braid move}, or (when we do not want to mention $s_{i}$ and $s_{j}$) by a \textit{braid move}. For instance, the reduced expression $\left( s_{2},s_{1},s_{3}\right) $ for $\pi=\left( 3,1,4,2\right) \in S_{4}$ is obtained from the reduced expression $\left( s_{2},s_{3},s_{1}\right) $ by an $\left( s_{3} ,s_{1}\right) $-braid move, and conversely $\left( s_{2},s_{3},s_{1}\right) $ is obtained from $\left( s_{2},s_{1},s_{3}\right) $ by an $\left( s_{1},s_{3}\right) $-braid move. 
Now, we can define a directed graph $\mathcal{R}_{0}\left( w\right) $ whose vertices are the reduced expressions for $w$, and which has an edge from $\overrightarrow{a}$ to $\overrightarrow{b}$ whenever $\overrightarrow{b}$ is obtained from $\overrightarrow{a}$ by a braid move (of either sort). For instance, let $n=5$, and let $w$ be the permutation written in one-line notation as $\left( 3,2,1,5,4\right) $. Then, $\mathcal{R}_{0}\left( w\right) $ looks as follows: \[ \xymatrix{ & \left(s_2,s_4,s_1,s_2\right) \arcstr[r]^{\left(s_4,s_1\right)} \arcstr[dl]^{\left(s_2,s_4\right)} & \left(s_2,s_1,s_4,s_2\right ) \arcstr[l]^{\left(s_1,s_4\right)} \arcstr[rd]^{\left(s_4,s_2\right)} \\ \left(s_4,s_2,s_1,s_2\right) \arcstr[ur]^{\left(s_4,s_2\right)} \arcstr [d]^{\left(s_2,s_1\right)} & & & \left(s_2,s_1,s_2,s_4\right) \arcstr [lu]^{\left(s_2,s_4\right)} \arcstr[d]^{\left(s_2,s_1\right)} \\ \left(s_4,s_1,s_2,s_1\right) \arcstr[u]^{\left(s_1,s_2\right)} \arcstr [dr]^{\left(s_4,s_1\right)} & & & \left(s_1,s_2,s_1,s_4\right) \arcstr [u]^{\left(s_1,s_2\right)} \arcstr[dl]^{\left(s_1,s_4\right)} \\ & \left(s_1,s_4,s_2,s_1\right) \arcstr[ul]^{\left(s_1,s_4\right)} \arcstr[r]^{\left(s_4,s_2\right)} & \left(s_1,s_2,s_4,s_1\right) \arcstr [l]^{\left(s_2,s_4\right)} \arcstr[ur]^{\left(s_4,s_1\right)} } . \] Here, we have \textquotedblleft colored\textquotedblright\ (i.e., labelled) every arc $\left( \overrightarrow{a},\overrightarrow{b}\right) $ with the pair $\left( s_{i},s_{j}\right) $ such that $\overrightarrow{b}$ is obtained from $\overrightarrow{a}$ by an $\left( s_{i},s_{j}\right) $-braid move. In our particular case, the graph $\mathcal{R}_{0}\left( w\right) $ consists of a single bidirected cycle. This is not true in general, but certain things hold in general. 
First, it is clear that whenever an arc from some vertex $\overrightarrow{a}$ to some vertex $\overrightarrow{b}$ has color $\left( s_{i},s_{j}\right) $, then there is an arc with color $\left( s_{j} ,s_{i}\right) $ from $\overrightarrow{b}$ to $\overrightarrow{a}$. Thus, $\mathcal{R}_{0}\left( w\right) $ can be regarded as an undirected graph (at the expense of murkying up the colors of the arcs). Furthermore, every reduced expression for $w$ can be obtained from any other by a sequence of braid moves (this is the Matsumoto-Tits theorem; it appears, e.g., in \cite[Theorem 1.9]{Lusztig-Hecke}). Thus, the graph $\mathcal{R}_{0}\left( w\right) $ is strongly connected. What do the cycles of $\mathcal{R}_{0}\left( w\right) $ have in common? Walking down the long cycle in the graph $\mathcal{R}_{0}\left( w\right) $ for $w=\left( 3,2,1,5,4\right) \in S_{5}$ counterclockwise, we observe that the $\left( s_{1},s_{2}\right) $-braid move is used once (i.e., we traverse precisely one arc with color $\left( s_{1},s_{2}\right) $), the $\left( s_{2},s_{1}\right) $-braid move once, the $\left( s_{1},s_{4}\right) $-braid move twice, the $\left( s_{4},s_{1}\right) $-braid move once, the $\left( s_{2},s_{4}\right) $-braid move once, and the $\left( s_{4} ,s_{2}\right) $-braid move twice. In particular: \begin{itemize} \item The total number of $\left( s_{i},s_{j}\right) $-braid moves with $\left\vert i-j\right\vert =1$ used is even (namely, $2$). \item The total number of $\left( s_{i},s_{j}\right) $-braid moves with $\left\vert i-j\right\vert >1$ used is even (namely, $6$). \end{itemize} This example alone is scant evidence of any general result, but both evenness patterns persist for general $n$, for any $w\in S_{n}$ and any directed cycle in $\mathcal{R}_{0}\left( w\right) $. We can simplify the statement if we change our coloring to a coarser one. 
Namely, let $\mathfrak{M}$ denote the subset $\left\{ \left( s,t\right) \in S\times S\ \mid\ s\neq t\right\} =\left\{ \left( s_{i},s_{j}\right) \ \mid\ i\neq j\right\} $ of $S\times S$. We define a binary relation $\sim$ on $\mathfrak{M}$ by \[ \left( \left( s,t\right) \sim\left( s^{\prime},t^{\prime}\right) \ \Longleftrightarrow\ \text{there exists a }q\in W\text{ such that } qsq^{-1}=s^{\prime}\text{ and }qtq^{-1}=t^{\prime}\right) . \] This relation $\sim$ is an equivalence relation; it thus gives rise to a quotient set $\mathfrak{M}/\sim$. It is easy to see that the quotient set $\mathfrak{M}/\sim$ has exactly two elements (for $n\geq4$): the equivalence class of all $\left( s_{i},s_{j}\right) $ with $\left\vert i-j\right\vert =1$, and the equivalence class of all $\left( s_{i},s_{j}\right) $ with $\left\vert i-j\right\vert >1$. Let us now define an edge-colored directed graph $\mathcal{R}\left( w\right) $ by starting with $\mathcal{R}_{0}\left( w\right) $, and replacing each color $\left( s_{i},s_{j}\right) $ by its equivalence class $\left[ \left( s_{i},s_{j}\right) \right] $. Thus, in $\mathcal{R}\left( w\right) $, the arcs are colored with the (at most two) elements of $\mathfrak{M}/\sim$. Now, our evenness patterns can be restated as follows: For any $n\in\mathbb{N}$, any $w\in S_{n}$ and any color $c\in\mathfrak{M}/\sim$, any directed cycle of $\mathcal{R}\left( w\right) $ has an even number of arcs with color $c$. This can be generalized further to every Coxeter group, with a minor caveat. Namely, let $\left( W,S\right) $ be a Coxeter group with Coxeter matrix $\left( m_{s,s^{\prime}}\right) _{\left( s,s^{\prime}\right) \in S\times S}$. Notions such as reduced expressions and braid moves still make sense (see below for references and definitions). 
We redefine $\mathfrak{M}$ as $\left\{ \left( s,t\right) \in S\times S\ \mid\ s\neq t\text{ and }m_{s,t} <\infty\right\} $ (since pairs $\left( s,t\right) $ with $m_{s,t}=\infty$ do not give rise to braid moves). Unlike in the case of $W=S_{n}$, it is not necessarily true that $\left( s,t\right) \sim\left( t,s\right) $ for every $\left( s,t\right) \in\mathfrak{M}$. We define $\left[ \left( s,t\right) \right] ^{\operatorname*{op}}=\left[ \left( t,s\right) \right] $. The evenness pattern now has to be weakened as follows: For every $w\in W$ and any color $c\in\mathfrak{M}/\sim$, any directed cycle of $\mathcal{R}\left( w\right) $ has an even number of arcs whose color belongs to $\left\{ c,c^{\operatorname*{op}}\right\} $. (For $W=S_{n}$, we have $c=c^{\operatorname*{op}}$, and thus this recovers our old evenness patterns.) This is part of the main theorem we will prove in this note -- namely, Theorem \ref{thm.BCL} \textbf{(b)}; it extends a result \cite[Theorem 3.1]{BCL} obtained by Bergeron, Ceballos and Labb\'{e} by geometric means. The other part of the main theorem (Theorem \ref{thm.BCL} \textbf{(a)}) states that any directed cycle of $\mathcal{R}\left( w\right) $ has as many arcs with color $c$ as it has arcs with color $c^{\operatorname*{op}}$. \section{The theorem} In the following, we shall use the notations of \cite[\S 1]{Lusztig-Hecke} concerning Coxeter groups. (These notations are compatible with those of \cite[Chapter 4]{Bourbaki4-6}, except that Bourbaki writes $m\left( s,s^{\prime}\right) $ instead of $m_{s,s^{\prime}}$, and speaks of \textquotedblleft Coxeter systems\textquotedblright\ instead of \textquotedblleft Coxeter groups\textquotedblright.) 
Let us recall a brief definition of Coxeter groups and Coxeter matrices: A \textit{Coxeter group} is a pair $\left( W,S\right) $, where $W$ is a group, and where $S$ is a finite subset of $W$ having the following property: There exists a matrix $\left( m_{s,s^{\prime}}\right) _{\left( s,s^{\prime }\right) \in S\times S}\in\left\{ 1,2,3,\ldots,\infty\right\} ^{S\times S}$ such that \begin{itemize} \item every $s\in S$ satisfies $m_{s,s}=1$; \item every two distinct elements $s$ and $t$ of $S$ satisfy $m_{s,t} =m_{t,s}\geq2$; \item the group $W$ can be presented by the generators $S$ and the relations \[ \left( st\right) ^{m_{s,t}}=1\ \ \ \ \ \ \ \ \ \ \text{for all }\left( s,t\right) \in S\times S\text{ satisfying }m_{s,t}\neq\infty. \] \end{itemize} In this case, the matrix $\left( m_{s,s^{\prime}}\right) _{\left( s,s^{\prime}\right) \in S\times S}$ is called the \textit{Coxeter matrix} of $\left( W,S\right) $. It is well-known (see, e.g., \cite[\S 1] {Lusztig-Hecke}\footnote{See also \cite[Chapter V, n$^{\circ}$ 4.3, Corollaire]{Bourbaki4-6} for a proof of the existence of a Coxeter group corresponding to a given Coxeter matrix. Note that Bourbaki's definition of a \textquotedblleft Coxeter system\textquotedblright\ differs from our definition of a \textquotedblleft Coxeter group\textquotedblright\ in the extra requirement that $m_{s,t}$ be the order of $st\in W$; but this turns out to be a consequence of the other requirements.}) that any Coxeter group has a unique Coxeter matrix, and conversely, for every finite set $S$ and any matrix $\left( m_{s,s^{\prime}}\right) _{\left( s,s^{\prime}\right) \in S\times S}\in\left\{ 1,2,3,\ldots,\infty\right\} ^{S\times S}$ satisfying the first two of the three requirements above, there exists a unique (up to isomorphism preserving $S$) Coxeter group $\left( W,S\right) $. We fix a Coxeter group $\left( W,S\right) $ with Coxeter matrix $\left( m_{s,s^{\prime}}\right) _{\left( s,s^{\prime}\right) \in S\times S}$. 
Thus, $W$ is a group, and $S$ is a set of elements of order $2$ in $W$ such that for every $\left( s,s^{\prime}\right) \in S\times S$, the element $ss^{\prime }\in W$ has order $m_{s,s^{\prime}}$. (See, e.g., \cite[Proposition 1.3(b)]{Lusztig-Hecke} for this well-known fact.) We let $\mathfrak{M}$ denote the subset \[ \left\{ \left( s,t\right) \in S\times S\ \mid\ s\neq t\text{ and } m_{s,t}<\infty\right\} \] of $S\times S$. (This is denoted by $I$ in \cite[Chapter 4, n$^{\circ}$ 1.3]{Bourbaki4-6}.) We define a binary relation $\sim$ on $\mathfrak{M}$ by \[ \left( \left( s,t\right) \sim\left( s^{\prime},t^{\prime}\right) \ \Longleftrightarrow\ \text{there exists a }q\in W\text{ such that } qsq^{-1}=s^{\prime}\text{ and }qtq^{-1}=t^{\prime}\right) . \] It is clear that this relation $\sim$ is an equivalence relation; it thus gives rise to a quotient set $\mathfrak{M}/\sim$. For every pair $P\in\mathfrak{M}$, we denote by $\left[ P\right] $ the equivalence class of $P$ with respect to this relation $\sim$. We set $\mathbb{N}=\left\{ 0,1,2,\ldots\right\} $. A \textit{word} will mean a $k$-tuple for some $k\in\mathbb{N}$. A \textit{subword} of a word $\left( s_{1},s_{2},\ldots,s_{k}\right) $ will mean a word of the form $\left( s_{i_{1}},s_{i_{2}},\ldots,s_{i_{p}}\right) $, where $i_{1},i_{2},\ldots,i_{p}$ are elements of $\left\{ 1,2,\ldots ,k\right\} $ satisfying $i_{1}<i_{2}<\cdots<i_{p}$. For instance, $\left( 1\right) $, $\left( 3,5\right) $, $\left( 1,3,5\right) $, $\left( {}\right) $ and $\left( 1,5\right) $ are subwords of the word $\left( 1,3,5\right) $. A \textit{factor} of a word $\left( s_{1},s_{2},\ldots ,s_{k}\right) $ will mean a word of the form $\left( s_{i+1},s_{i+2} ,\ldots,s_{i+m}\right) $ for some $i\in\left\{ 0,1,\ldots,k\right\} $ and some $m\in\left\{ 0,1,\ldots,k-i\right\} $. 
For instance, $\left( 1\right) $, $\left( 3,5\right) $, $\left( 1,3,5\right) $ and $\left( {}\right) $ are factors of the word $\left( 1,3,5\right) $, but $\left( 1,5\right) $ is not. We recall that a \textit{reduced expression} for an element $w\in W$ is a $k$-tuple $\left( s_{1},s_{2},\ldots,s_{k}\right) $ of elements of $S$ such that $w=s_{1}s_{2}\cdots s_{k}$, and such that $k$ is minimum (among all such tuples). The length of a reduced expression for $w$ is called the \textit{length} of $w$, and is denoted by $l\left( w\right) $. Thus, a reduced expression for an element $w\in W$ is a $k$-tuple $\left( s_{1} ,s_{2},\ldots,s_{k}\right) $ of elements of $S$ such that $w=s_{1}s_{2}\cdots s_{k}$ and $k=l\left( w\right) $. \begin{definition} \label{def.braid}Let $w\in W$. Let $\overrightarrow{a}=\left( a_{1} ,a_{2},\ldots,a_{k}\right) $ and $\overrightarrow{b}=\left( b_{1} ,b_{2},\ldots,b_{k}\right) $ be two reduced expressions for $w$. Let $\left( s,t\right) \in\mathfrak{M}$. We say that $\overrightarrow{b}$ is obtained from $\overrightarrow{a}$ by an $\left( s,t\right) $\textit{-braid move} if $\overrightarrow{b}$ can be obtained from $\overrightarrow{a}$ by finding a factor of $\overrightarrow{a}$ of the form $\underbrace{\left( s,t,s,t,s,\ldots\right) }_{m_{s,t}\text{ elements}}$ and replacing it by $\underbrace{\left( t,s,t,s,t,\ldots\right) }_{m_{s,t}\text{ elements}}$. We notice that if $\overrightarrow{b}$ is obtained from $\overrightarrow{a}$ by an $\left( s,t\right) $-braid move, then $\overrightarrow{a}$ is obtained from $\overrightarrow{b}$ by an $\left( t,s\right) $-braid move. \end{definition} \begin{definition} \label{def.R}Let $w\in W$. We define an edge-colored directed graph $\mathcal{R}\left( w\right) $, whose arcs are colored with elements of $\mathfrak{M}/\sim$, as follows: \begin{itemize} \item The vertex set of $\mathcal{R}\left( w\right) $ shall be the set of all reduced expressions for $w$. 
\item The arcs of $\mathcal{R}\left( w\right) $ are defined as follows: Whenever $\left( s,t\right) \in\mathfrak{M}$, and whenever $\overrightarrow{a}$ and $\overrightarrow{b}$ are two reduced expressions for $w$ such that $\overrightarrow{b}$ is obtained from $\overrightarrow{a}$ by an $\left( s,t\right) $-braid move, we draw an arc from $\overrightarrow{a}$ to $\overrightarrow{b}$ with color $\left[ \left( s,t\right) \right] $.
Our strategy of proof is inspired by that used in \cite[\S 3.4]{BCL} and thus (indirectly) also by that in \cite[\S 3, and proof of Corollary 5.2]{ReiRoi11}; however, we shall avoid any use of geometry (such as roots and hyperplane arrangements), and work entirely with the Coxeter group itself. We denote the subset $\bigcup\limits_{x\in W}xSx^{-1}$ of $W$ by $T$. The elements of $T$ are called the \textit{reflections} (of $W$). They all have order $2$. (The notation $T$ is used here in the same meaning as in \cite[\S 1]{Lusztig-Hecke} and in \cite[Chapter 4, n$^{\circ}$ 1.4] {Bourbaki4-6}.) \begin{definition} \label{def.biset}For every $k\in\mathbb{N}$, we consider the set $W^{k}$ as a left $W$-set by the rule \[ w\left( w_{1},w_{2},\ldots,w_{k}\right) =\left( ww_{1},ww_{2},\ldots ,ww_{k}\right) , \] and as a right $W$-set by the rule \[ \left( w_{1},w_{2},\ldots,w_{k}\right) w=\left( w_{1}w,w_{2}w,\ldots ,w_{k}w\right) . \] \end{definition} \begin{definition} Let $s$ and $t$ be two distinct elements of $T$. Let $m_{s,t}$ denote the order of the element $st\in W$. (This extends the definition of $m_{s,t}$ for $s,t\in S$.) Assume that $m_{s,t}<\infty$. We let $D_{s,t}$ denote the subgroup of $W$ generated by $s$ and $t$. Then, $D_{s,t}$ is a dihedral group (since $s$ and $t$ are two distinct nontrivial involutions, and since any group generated by two distinct nontrivial involutions is dihedral). We denote by $\rho_{s,t}$ the word \[ \left( \left( st\right) ^{0}s,\left( st\right) ^{1}s,\ldots,\left( st\right) ^{m_{s,t}-1}s\right) =\left( s,sts,ststs,\ldots ,\underbrace{ststs\cdots s}_{2m_{s,t}-1\text{ letters}}\right) \in\left( D_{s,t}\right) ^{m_{s,t}}. \] \end{definition} The \textit{reversal} of a word $\left( a_{1},a_{2},\ldots,a_{k}\right) $ is defined to be the word $\left( a_{k},a_{k-1},\ldots,a_{1}\right) $. The following proposition collects some simple properties of the words $\rho_{s,t}$. 
\begin{proposition} \label{prop.rhost}Let $s$ and $t$ be two distinct elements of $T$ such that $m_{s,t}<\infty$. Then: \textbf{(a)} The word $\rho_{s,t}$ consists of reflections in $D_{s,t}$, and contains every reflection in $D_{s,t}$ exactly once. \textbf{(b)} The word $\rho_{t,s}$ is the reversal of the word $\rho_{s,t}$. \textbf{(c)} Let $q\in W$. Then, the word $q\rho_{t,s}q^{-1}$ is the reversal of the word $q\rho_{s,t}q^{-1}$. \end{proposition} \begin{proof} [Proof of Proposition \ref{prop.rhost}.]\textbf{(a)} We need to prove three claims: \textit{Claim 1:} Every entry of the word $\rho_{s,t}$ is a reflection in $D_{s,t}$. \textit{Claim 2:} The entries of the word $\rho_{s,t}$ are distinct. \textit{Claim 3:} Every reflection in $D_{s,t}$ is an entry of the word $\rho_{s,t}$. \textit{Proof of Claim 1:} We must show that $\left( st\right) ^{k}s$ is a reflection in $D_{s,t}$ for every $k\in\left\{ 0,1,\ldots,m_{s,t}-1\right\} $. Thus, fix $k\in\left\{ 0,1,\ldots,m_{s,t}-1\right\} $. 
Then, \begin{align*} \left( st\right) ^{k}s & =\underbrace{stst\cdots s}_{2k+1\text{ letters}}= \begin{cases} \underbrace{stst\cdots t}_{k\text{ letters}}s\underbrace{tsts\cdots s}_{k\text{ letters}}, & \text{if }k\text{ is even};\\ \underbrace{stst\cdots s}_{k\text{ letters}}t\underbrace{stst\cdots s}_{k\text{ letters}}, & \text{if }k\text{ is odd} \end{cases} \\ & = \begin{cases} \underbrace{stst\cdots t}_{k\text{ letters}}s\left( \underbrace{stst\cdots t}_{k\text{ letters}}\right) ^{-1}, & \text{if }k\text{ is even};\\ \underbrace{stst\cdots s}_{k\text{ letters}}t\left( \underbrace{stst\cdots s}_{k\text{ letters}}\right) ^{-1}, & \text{if }k\text{ is odd} \end{cases} \\ & \ \ \ \ \ \ \ \ \ \ \left( \begin{array} [c]{c} \text{since }\underbrace{tsts\cdots s}_{k\text{ letters}}=\left( \underbrace{stst\cdots t}_{k\text{ letters}}\right) ^{-1}\text{ if }k\text{ is even,}\\ \text{and }\underbrace{stst\cdots s}_{k\text{ letters}}=\left( \underbrace{stst\cdots s}_{k\text{ letters}}\right) ^{-1}\text{ if }k\text{ is odd} \end{array} \right) . \end{align*} Hence, $\left( st\right) ^{k}s$ is conjugate to either $s$ or $t$ (depending on whether $k$ is even or odd). Thus, $\left( st\right) ^{k}s$ is a reflection. Also, it clearly lies in $D_{s,t}$. This proves Claim 1. \textit{Proof of Claim 2:} The element $st$ of $W$ has order $m_{s,t}$. Thus, the elements $\left( st\right) ^{0},\left( st\right) ^{1},\ldots,\left( st\right) ^{m_{s,t}-1}$ are all distinct. Hence, the elements $\left( st\right) ^{0}s,\left( st\right) ^{1}s,\ldots,\left( st\right) ^{m_{s,t}-1}s$ are all distinct. In other words, the entries of the word $\rho_{s,t}$ are all distinct. Claim 2 is proven. 
\textit{Proof of Claim 3:} The dihedral group $D_{s,t}$ has $2m_{s,t}$ elements\footnote{since it is generated by two distinct involutions $s\neq1$ and $t\neq1$ whose product $st$ has order $m_{s,t}$}, of which at most $m_{s,t}$ are reflections\footnote{\textit{Proof.} Consider the group homomorphism $\operatorname*{sgn}:W\rightarrow\left\{ 1,-1\right\} $ defined in \cite[\S 1.1]{Lusztig-Hecke}. The group homomorphism $\operatorname*{sgn} \mid_{D_{s,t}}:D_{s,t}\rightarrow\left\{ 1,-1\right\} $ sends either none or $m_{s,t}$ elements of $D_{s,t}$ to $-1$. Thus, this homomorphism $\operatorname*{sgn}\mid_{D_{s,t}}$ sends at most $m_{s,t}$ elements of $D_{s,t}$ to $-1$. Since it must send every reflection to $-1$, this shows that at most $m_{s,t}$ elements of $D_{s,t}$ are reflections. \par (Actually, we can replace \textquotedblleft at most\textquotedblright\ by \textquotedblleft exactly\textquotedblright\ here, but we won't need this.)}. But the word $\rho_{s,t}$ has $m_{s,t}$ entries, and all its entries are reflections in $D_{s,t}$ (by Claim 1); hence, it contains $m_{s,t}$ reflections in $D_{s,t}$ (by Claim 2). Since $D_{s,t}$ has only at most $m_{s,t}$ reflections, this shows that every reflection in $D_{s,t}$ is an entry of the word $\rho_{s,t}$. Claim 3 is proven. This finishes the proof of Proposition \ref{prop.rhost} \textbf{(a)}. \textbf{(b)} We have $\rho_{s,t}=\left( \left( st\right) ^{0}s,\left( st\right) ^{1}s,\ldots,\left( st\right) ^{m_{s,t}-1}s\right) $ and \newline$\rho_{t,s}=\left( \left( ts\right) ^{0}t,\left( ts\right) ^{1}t,\ldots,\left( ts\right) ^{m_{s,t}-1}t\right) $ (since $m_{t,s} =m_{s,t}$). Thus, in order to prove Proposition \ref{prop.rhost} \textbf{(b)}, we must merely show that $\left( st\right) ^{k}s=\left( ts\right) ^{m_{s,t}-1-k}t$ for every $k\in\left\{ 0,1,\ldots,m_{s,t}-1\right\} $. So fix $k\in\left\{ 0,1,\ldots,m_{s,t}-1\right\} $. 
Then, \begin{align*} \left( st\right) ^{k}s\cdot\left( \left( ts\right) ^{m_{s,t} -1-k}t\right) ^{-1} & =\left( st\right) ^{k}s\underbrace{t^{-1}} _{=t}\underbrace{\left( \left( ts\right) ^{m_{s,t}-1-k}\right) ^{-1} }_{=\left( s^{-1}t^{-1}\right) ^{m_{s,t}-1-k}}=\underbrace{\left( st\right) ^{k}st}_{=\left( st\right) ^{k+1}}\left( \underbrace{s^{-1} }_{=s}\underbrace{t^{-1}}_{=t}\right) ^{m_{s,t}-1-k}\\ & =\left( st\right) ^{k+1}\left( st\right) ^{m_{s,t}-1-k}=\left( st\right) ^{m_{s,t}}=1, \end{align*} so that $\left( st\right) ^{k}s=\left( ts\right) ^{m_{s,t}-1-k}t$. This proves Proposition \ref{prop.rhost} \textbf{(b)}. \textbf{(c)} Let $q\in W$. Proposition \ref{prop.rhost} \textbf{(b)} shows that the word $\rho_{t,s}$ is the reversal of the word $\rho_{s,t}$. Hence, the word $q\rho_{t,s}q^{-1}$ is the reversal of the word $q\rho_{s,t}q^{-1}$ (since the word $q\rho_{t,s}q^{-1}$ is obtained from $\rho_{t,s}$ by conjugating each letter by $q$, and the word $q\rho_{s,t}q^{-1}$ is obtained from $\rho_{s,t}$ in the same way). This proves Proposition \ref{prop.rhost} \textbf{(c)}. \end{proof} \begin{definition} \label{def.Invsle}Let $\overrightarrow{a}=\left( a_{1},a_{2},\ldots ,a_{k}\right) \in S^{k}$. Then, $\operatorname*{Invs}\overrightarrow{a}$ is defined to be the $k$-tuple $\left( t_{1},t_{2},\ldots,t_{k}\right) \in T^{k}$, where we set \[ t_{i}=\left( a_{1}a_{2}\cdots a_{i-1}\right) a_{i}\left( a_{1}a_{2}\cdots a_{i-1}\right) ^{-1}\ \ \ \ \ \ \ \ \ \ \text{for every }i\in\left\{ 1,2,\ldots,k\right\} . \] \end{definition} \begin{remark} Let $w\in W$. Let $\overrightarrow{a}=\left( a_{1},a_{2},\ldots,a_{k}\right) $ be a reduced expression for $w$. The $k$-tuple $\operatorname*{Invs} \overrightarrow{a}$ is denoted by $\Phi\left( \overrightarrow{a}\right) $ in \cite[Chapter 4, n$^{\circ}$ 1.4]{Bourbaki4-6}, and is closely connected to various standard constructions in Coxeter group theory. 
A well-known fact states that the set of all entries of $\operatorname*{Invs}\overrightarrow{a}$ depends only on $w$ (but not on $\overrightarrow{a}$); this set is called the \textit{(left) inversion set} of $w$. The $k$-tuple $\operatorname*{Invs} \overrightarrow{a}$ contains each element of this set exactly once (see Proposition \ref{prop.Invsles} below); it thus induces a total order on this set. \end{remark} \begin{proposition} \label{prop.Invsles}Let $w\in W$. \textbf{(a)} If $\overrightarrow{a}$ is a reduced expression for $w$, then all entries of the tuple $\operatorname*{Invs}\overrightarrow{a}$ are distinct. \textbf{(b)} Let $\left( s,t\right) \in\mathfrak{M}$. Let $\overrightarrow{a}$ and $\overrightarrow{b}$ be two reduced expressions for $w$ such that $\overrightarrow{b}$ is obtained from $\overrightarrow{a}$ by an $\left( s,t\right) $-braid move. Then, there exists a $q\in W$ such that $\operatorname*{Invs}\overrightarrow{b}$ is obtained from $\operatorname*{Invs}\overrightarrow{a}$ by replacing a particular factor of the form $q\rho_{s,t}q^{-1}$ by its reversal\footnotemark. \end{proposition} \footnotetext{See Definition \ref{def.biset} for the meaning of $q\rho _{s,t}q^{-1}$.} \begin{proof} [Proof of Proposition \ref{prop.Invsles}.]Let $\overrightarrow{a}$ be a reduced expression for $w$. Write $\overrightarrow{a}$ as $\left( a_{1} ,a_{2},\ldots,a_{k}\right) $. Then, the definition of $\operatorname*{Invs} \overrightarrow{a}$ shows that $\operatorname*{Invs}\overrightarrow{a}=\left( t_{1},t_{2},\ldots,t_{k}\right) $, where the $t_{i}$ are defined by \[ t_{i}=\left( a_{1}a_{2}\cdots a_{i-1}\right) a_{i}\left( a_{1}a_{2}\cdots a_{i-1}\right) ^{-1}\ \ \ \ \ \ \ \ \ \ \text{for every }i\in\left\{ 1,2,\ldots,k\right\} . 
\] Now, every $i\in\left\{ 1,2,\ldots,k\right\} $ satisfies \begin{align*} t_{i} & =\left( a_{1}a_{2}\cdots a_{i-1}\right) a_{i}\underbrace{\left( a_{1}a_{2}\cdots a_{i-1}\right) ^{-1}}_{\substack{=a_{i-1}^{-1}a_{i-2} ^{-1}\cdots a_{1}^{-1}=a_{i-1}a_{i-2}\cdots a_{1}\\\text{(since each } a_{j}\text{ belongs to }S\text{)}}}=\left( a_{1}a_{2}\cdots a_{i-1}\right) a_{i}\left( a_{i-1}a_{i-2}\cdots a_{1}\right) \\ & =a_{1}a_{2}\cdots a_{i-1}a_{i}a_{i-1}\cdots a_{2}a_{1}. \end{align*} But \cite[Proposition 1.6 (a)]{Lusztig-Hecke} (applied to $q=k$ and $s_{i}=a_{i}$) shows that the elements $a_{1},a_{1}a_{2}a_{1},a_{1}a_{2} a_{3}a_{2}a_{1},\ldots,a_{1}a_{2}\cdots a_{k-1}a_{k}a_{k-1}\cdots a_{2}a_{1}$ are distinct\footnote{This also follows from \cite[Chapter 4, n$^{\circ}$ 1.4, Lemme 2]{Bourbaki4-6}.}. In other words, the elements $t_{1},t_{2} ,\ldots,t_{k}$ are distinct (since \newline$t_{i}=a_{1}a_{2}\cdots a_{i-1}a_{i}a_{i-1}\cdots a_{2}a_{1}$ for every $i\in\left\{ 1,2,\ldots ,k\right\} $). In other words, all entries of the tuple $\operatorname*{Invs} \overrightarrow{a}$ are distinct. Proposition \ref{prop.Invsles} \textbf{(a)} is proven. \textbf{(b)} We need to prove that there exists a $q\in W$ such that $\operatorname*{Invs}\overrightarrow{b}$ is obtained from $\operatorname*{Invs}\overrightarrow{a}$ by replacing a particular factor of the form $q\rho_{s,t}q^{-1}$ by its reversal. We set $m=m_{s,t}$ (for the sake of brevity). Write $\overrightarrow{a}$ as $\left( a_{1},a_{2},\ldots,a_{k}\right) $. The word $\overrightarrow{b}$ can be obtained from $\overrightarrow{a}$ by an $\left( s,t\right) $-braid move. 
In other words, the word $\overrightarrow{b}$ can be obtained from $\overrightarrow{a}$ by finding a factor of $\overrightarrow{a}$ of the form $\underbrace{\left( s,t,s,t,s,\ldots\right) }_{m\text{ elements}}$ and replacing it by $\underbrace{\left( t,s,t,s,t,\ldots\right) }_{m\text{ elements}}$ (by the definition of an \textquotedblleft$\left( s,t\right) $-braid move\textquotedblright, since $m_{s,t}=m$). In other words, there exists an $p\in\left\{ 0,1,\ldots,k-m\right\} $ such that $\left( a_{p+1} ,a_{p+2},\ldots,a_{p+m}\right) =\underbrace{\left( s,t,s,t,s,\ldots\right) }_{m\text{ elements}}$, and the word $\overrightarrow{b}$ can be obtained by replacing the $\left( p+1\right) $-st through $\left( p+m\right) $-th entries of $\overrightarrow{a}$ by $\underbrace{\left( t,s,t,s,t,\ldots \right) }_{m\text{ elements}}$. Consider this $p$. Write $\overrightarrow{b}$ as $\left( b_{1},b_{2},\ldots,b_{k}\right) $ (this is possible since the tuple $\overrightarrow{b}$ has the same length as $\overrightarrow{a}$). Thus, \begin{align} \left( a_{1},a_{2},\ldots,a_{p}\right) & =\left( b_{1},b_{2},\ldots ,b_{p}\right) ,\label{pf.prop.Invsles.b.1}\\ \left( a_{p+1},a_{p+2},\ldots,a_{p+m}\right) & =\underbrace{\left( s,t,s,t,s,\ldots\right) }_{m\text{ elements}},\label{pf.prop.Invsles.b.2}\\ \left( b_{p+1},b_{p+2},\ldots,b_{p+m}\right) & =\underbrace{\left( t,s,t,s,t,\ldots\right) }_{m\text{ elements}},\label{pf.prop.Invsles.b.3}\\ \left( a_{p+m+1},a_{p+m+2},\ldots,a_{k}\right) & =\left( b_{p+m+1} ,b_{p+m+2},\ldots,b_{k}\right) . \label{pf.prop.Invsles.b.4} \end{align} Write the $k$-tuples $\operatorname*{Invs}\overrightarrow{a}$ and $\operatorname*{Invs}\overrightarrow{b}$ as $\left( \alpha_{1},\alpha _{2},\ldots,\alpha_{k}\right) $ and $\left( \beta_{1},\beta_{2},\ldots ,\beta_{k}\right) $, respectively. 
Their definitions show that \begin{equation} \alpha_{i}=\left( a_{1}a_{2}\cdots a_{i-1}\right) a_{i}\left( a_{1} a_{2}\cdots a_{i-1}\right) ^{-1} \label{pf.prop.Invsles.b.alpha} \end{equation} and \begin{equation} \beta_{i}=\left( b_{1}b_{2}\cdots b_{i-1}\right) b_{i}\left( b_{1} b_{2}\cdots b_{i-1}\right) ^{-1} \label{pf.prop.Invsles.b.beta} \end{equation} for every $i\in\left\{ 1,2,\ldots,k\right\} $. Now, set $q=a_{1}a_{2}\cdots a_{p}$. From (\ref{pf.prop.Invsles.b.1}), we see that $q=b_{1}b_{2}\cdots b_{p}$ as well. In order to prove Proposition \ref{prop.Invsles} \textbf{(b)}, it clearly suffices to show that $\operatorname*{Invs}\overrightarrow{b}$ is obtained from $\operatorname*{Invs}\overrightarrow{a}$ by replacing a particular factor of the form $q\rho_{s,t}q^{-1}$ -- namely, the factor $\left( \alpha _{p+1},\alpha_{p+2},\ldots,\alpha_{p+m}\right) $ -- by its reversal. So let us show this. In view of $\operatorname*{Invs}\overrightarrow{a} =\left( \alpha_{1},\alpha_{2},\ldots,\alpha_{k}\right) $ and $\operatorname*{Invs}\overrightarrow{b}=\left( \beta_{1},\beta_{2} ,\ldots,\beta_{k}\right) $, it clearly suffices to prove the following claims: \textit{Claim 1:} We have $\beta_{i}=\alpha_{i}$ for every $i\in\left\{ 1,2,\ldots,p\right\} $. \textit{Claim 2:} We have $\left( \alpha_{p+1},\alpha_{p+2},\ldots ,\alpha_{p+m}\right) =q\rho_{s,t}q^{-1}$. \textit{Claim 3:} The $m$-tuple $\left( \beta_{p+1},\beta_{p+2},\ldots ,\beta_{p+m}\right) $ is the reversal of $\left( \alpha_{p+1},\alpha _{p+2},\ldots,\alpha_{p+m}\right) $. \textit{Claim 4:} We have $\beta_{i}=\alpha_{i}$ for every $i\in\left\{ p+m+1,p+m+2,\ldots,k\right\} $. \textit{Proof of Claim 1:} Let $i\in\left\{ 1,2,\ldots,p\right\} $. Then, (\ref{pf.prop.Invsles.b.1}) shows that $a_{g}=b_{g}$ for every $g\in\left\{ 1,2,\ldots,i\right\} $. 
Now, (\ref{pf.prop.Invsles.b.alpha}) becomes \begin{align*} \alpha_{i} & =\left( a_{1}a_{2}\cdots a_{i-1}\right) a_{i}\left( a_{1}a_{2}\cdots a_{i-1}\right) ^{-1}=\left( b_{1}b_{2}\cdots b_{i-1} \right) b_{i}\left( b_{1}b_{2}\cdots b_{i-1}\right) ^{-1}\\ & \ \ \ \ \ \ \ \ \ \ \left( \text{since }a_{g}=b_{g}\text{ for every } g\in\left\{ 1,2,\ldots,i\right\} \right) \\ & =\beta_{i}\ \ \ \ \ \ \ \ \ \ \left( \text{by (\ref{pf.prop.Invsles.b.beta})}\right) . \end{align*} This proves Claim 1. \textit{Proof of Claim 2:} We have \[ \rho_{s,t}=\left( \left( st\right) ^{0}s,\left( st\right) ^{1} s,\ldots,\left( st\right) ^{m_{s,t}-1}s\right) =\left( \left( st\right) ^{0}s,\left( st\right) ^{1}s,\ldots,\left( st\right) ^{m-1}s\right) \] (since $m_{s,t}=m$). Hence, \begin{align*} q\rho_{s,t}q^{-1} & =q\left( \left( st\right) ^{0}s,\left( st\right) ^{1}s,\ldots,\left( st\right) ^{m-1}s\right) q^{-1}\\ & =\left( q\left( st\right) ^{0}sq^{-1},q\left( st\right) ^{1} sq^{-1},\ldots,q\left( st\right) ^{m-1}sq^{-1}\right) . \end{align*} Thus, in order to prove $\left( \alpha_{p+1},\alpha_{p+2},\ldots,\alpha _{p+m}\right) =q\rho_{s,t}q^{-1}$, it suffices to show that $\alpha _{p+i}=q\left( st\right) ^{i-1}sq^{-1}$ for every $i\in\left\{ 1,2,\ldots,m\right\} $. So let us fix $i\in\left\{ 1,2,\ldots,m\right\} $. We have \[ a_{1}a_{2}\cdots a_{p+i-1}=\underbrace{\left( a_{1}a_{2}\cdots a_{p}\right) }_{=q}\underbrace{\left( a_{p+1}a_{p+2}\cdots a_{p+i-1}\right) }_{\substack{=\underbrace{stst\cdots}_{i-1\text{ letters}}\\\text{(by (\ref{pf.prop.Invsles.b.2}))}}}=q\underbrace{stst\cdots}_{i-1\text{ letters} }. \] Hence, \begin{align*} \left( a_{1}a_{2}\cdots a_{p+i-1}\right) ^{-1} & =\left( q\underbrace{stst\cdots}_{i-1\text{ letters}}\right) ^{-1}=\underbrace{\cdots t^{-1}s^{-1}t^{-1}s^{-1}}_{i-1\text{ letters}}q^{-1}\\ & =\underbrace{\cdots tsts}_{i-1\text{ letters}}q^{-1} \ \ \ \ \ \ \ \ \ \ \left( \text{since }s^{-1}=s\text{ and }t^{-1}=t\right) . 
\end{align*} Also, \[ \left( a_{1}a_{2}\cdots a_{p+i-1}\right) a_{p+i}=a_{1}a_{2}\cdots a_{p+i}=\underbrace{\left( a_{1}a_{2}\cdots a_{p}\right) }_{=q} \underbrace{\left( a_{p+1}a_{p+2}\cdots a_{p+i}\right) } _{\substack{=\underbrace{stst\cdots}_{i\text{ letters}}\\\text{(by (\ref{pf.prop.Invsles.b.2}))}}}=q\underbrace{stst\cdots}_{i\text{ letters}}. \] Now, (\ref{pf.prop.Invsles.b.alpha}) (applied to $p+i$ instead of $i$) yields \begin{align*} \alpha_{p+i} & =\underbrace{\left( a_{1}a_{2}\cdots a_{p+i-1}\right) a_{p+i}}_{=q\underbrace{stst\cdots}_{i\text{ letters}}}\underbrace{\left( a_{1}a_{2}\cdots a_{p+i-1}\right) ^{-1}}_{=\underbrace{\cdots tsts} _{i-1\text{ letters}}q^{-1}}=q\underbrace{\underbrace{stst\cdots}_{i\text{ letters}}\underbrace{\cdots tsts}_{i-1\text{ letters}}} _{=\underbrace{stst\cdots s}_{2i-1\text{ letters}}=\left( st\right) ^{i-1} s}q^{-1}\\ & =q\left( st\right) ^{i-1}sq^{-1}. \end{align*} This completes the proof of $\left( \alpha_{p+1},\alpha_{p+2},\ldots ,\alpha_{p+m}\right) =q\rho_{s,t}q^{-1}$. Hence, Claim 2 is proven. \textit{Proof of Claim 3:} In our proof of Claim 2, we have shown that $\left( \alpha_{p+1},\alpha_{p+2},\ldots,\alpha_{p+m}\right) =q\rho _{s,t}q^{-1}$. The same argument (applied to $\overrightarrow{b}$, $\left( b_{1},b_{2},\ldots,b_{k}\right) $, $\left( \beta_{1},\beta_{2},\ldots ,\beta_{k}\right) $, $t$ and $s$ instead of $\overrightarrow{a}$, $\left( a_{1},a_{2},\ldots,a_{k}\right) $, $\left( \alpha_{1},\alpha_{2} ,\ldots,\alpha_{k}\right) $, $s$ and $t$) shows that $\left( \beta _{p+1},\beta_{p+2},\ldots,\beta_{p+m}\right) =q\rho_{t,s}q^{-1}$ (where we now use (\ref{pf.prop.Invsles.b.3}) instead of (\ref{pf.prop.Invsles.b.2}), and use $q=b_{1}b_{2}\cdots b_{p}$ instead of $q=a_{1}a_{2}\cdots a_{p}$). Now, recall that the word $q\rho_{t,s}q^{-1}$ is the reversal of the word $q\rho_{s,t}q^{-1}$. 
Since \newline$\left( \alpha_{p+1},\alpha_{p+2} ,\ldots,\alpha_{p+m}\right) =q\rho_{s,t}q^{-1}$ and $\left( \beta _{p+1},\beta_{p+2},\ldots,\beta_{p+m}\right) =q\rho_{t,s}q^{-1}$, this means that the word $\left( \beta_{p+1},\beta_{p+2},\ldots,\beta_{p+m}\right) $ is the reversal of $\left( \alpha_{p+1},\alpha_{p+2},\ldots,\alpha_{p+m}\right) $. This proves Claim 3. \textit{Proof of Claim 4:} Since $m=m_{s,t}$, we have $\underbrace{stst\cdots }_{m\text{ letters}}=\underbrace{tsts\cdots}_{m\text{ letters}}$ (this is one of the braid relations of our Coxeter group). Let us set $x=\underbrace{stst\cdots}_{m\text{ letters}}=\underbrace{tsts\cdots}_{m\text{ letters}}$. Now, (\ref{pf.prop.Invsles.b.2}) yields $a_{p+1}a_{p+2}\cdots a_{p+m}=\underbrace{stst\cdots}_{m\text{ letters}}=x$. Similarly, from (\ref{pf.prop.Invsles.b.3}), we obtain $b_{p+1}b_{p+2}\cdots b_{p+m}=x$. Let $i\in\left\{ p+m+1,p+m+2,\ldots,k\right\} $. Thus, \begin{align*} a_{1}a_{2}\cdots a_{i-1} & =\underbrace{\left( a_{1}a_{2}\cdots a_{p}\right) }_{=q}\underbrace{\left( a_{p+1}a_{p+2}\cdots a_{p+m}\right) }_{=x}\underbrace{\left( a_{p+m+1}a_{p+m+2}\cdots a_{i-1}\right) }_{\substack{=b_{p+m+1}b_{p+m+2}\cdots b_{i-1}\\\text{(by (\ref{pf.prop.Invsles.b.4}))}}}\\ & =qx\left( b_{p+m+1}b_{p+m+2}\cdots b_{i-1}\right) . \end{align*} Comparing this with \begin{align*} b_{1}b_{2}\cdots b_{i-1} & =\underbrace{\left( b_{1}b_{2}\cdots b_{p}\right) }_{=q}\underbrace{\left( b_{p+1}b_{p+2}\cdots b_{p+m}\right) }_{=x}\left( b_{p+m+1}b_{p+m+2}\cdots b_{i-1}\right) \\ & =qx\left( b_{p+m+1}b_{p+m+2}\cdots b_{i-1}\right) , \end{align*} we obtain $a_{1}a_{2}\cdots a_{i-1}=b_{1}b_{2}\cdots b_{i-1}$. Also, $a_{i}=b_{i}$ (by (\ref{pf.prop.Invsles.b.4})). 
Now, (\ref{pf.prop.Invsles.b.alpha}) becomes \begin{align*} \alpha_{i} & =\left( \underbrace{a_{1}a_{2}\cdots a_{i-1}}_{=b_{1} b_{2}\cdots b_{i-1}}\right) \underbrace{a_{i}}_{=b_{i}}\left( \underbrace{a_{1}a_{2}\cdots a_{i-1}}_{=b_{1}b_{2}\cdots b_{i-1}}\right) ^{-1}=\left( b_{1}b_{2}\cdots b_{i-1}\right) b_{i}\left( b_{1}b_{2}\cdots b_{i-1}\right) ^{-1}\\ & =\beta_{i}\ \ \ \ \ \ \ \ \ \ \left( \text{by (\ref{pf.prop.Invsles.b.beta})}\right) . \end{align*} This proves Claim 4. Hence, all four claims are proven, and the proof of Proposition \ref{prop.Invsles} \textbf{(b)} is complete. \end{proof} The following fact is rather easy (but will be proven in detail in the next section): \begin{proposition} \label{prop.has}Let $w\in W$. Let $s$ and $t$ be two distinct elements of $T$ such that $m_{s,t}<\infty$. Let $\overrightarrow{a}$ be a reduced expression for $w$. \textbf{(a)} The word $\rho_{s,t}$ appears as a subword of $\operatorname*{Invs}\overrightarrow{a}$ at most one time. \textbf{(b)} The words $\rho_{s,t}$ and $\rho_{t,s}$ cannot both appear as subwords of $\operatorname*{Invs}\overrightarrow{a}$. \end{proposition} \begin{proof} [Proof of Proposition \ref{prop.has}.]\textbf{(a)} This follows from the fact that the word $\rho_{s,t}$ has length $m_{s,t}\geq2>0$, and from Proposition \ref{prop.Invsles} \textbf{(a)}. \textbf{(b)} Assume the contrary. Then, both words $\rho_{s,t}$ and $\rho_{t,s}$ appear as a subword of $\operatorname*{Invs}\overrightarrow{a}$. By Proposition \ref{prop.rhost} \textbf{(b)}, this means that both the word $\rho_{s,t}$ and its reversal appear as a subword of $\operatorname*{Invs} \overrightarrow{a}$. Since the word $\rho_{s,t}$ has length $m_{s,t}\geq2$, this means that at least one letter of $\rho_{s,t}$ appears twice in $\operatorname*{Invs}\overrightarrow{a}$. This contradicts Proposition \ref{prop.Invsles} \textbf{(a)}. This contradiction concludes our proof. 
\end{proof} \section{The set $\mathfrak{N}$ and subwords of inversion words} We now let $\mathfrak{N}$ denote the subset $\bigcup\limits_{x\in W}x\mathfrak{M}x^{-1}$ of $T\times T$. Clearly, $\mathfrak{M}\subseteq \mathfrak{N}$. Moreover, for every $\left( s,t\right) \in\mathfrak{N}$, we have $s\neq t$ and $m_{s,t}<\infty$ (because $\left( s,t\right) \in\mathfrak{N}=\bigcup\limits_{x\in W}x\mathfrak{M}x^{-1}$, and because these properties are preserved by conjugation). Thus, for every $\left( s,t\right) \in\mathfrak{N}$, the word $\rho_{s,t}$ is well-defined and has exactly $m_{s,t}$ entries. We define a binary relation $\approx$ on $\mathfrak{N}$ by \[ \left( \left( s,t\right) \approx\left( s^{\prime},t^{\prime}\right) \ \Longleftrightarrow\ \text{there exists a }q\in W\text{ such that } qsq^{-1}=s^{\prime}\text{ and }qtq^{-1}=t^{\prime}\right) . \] It is clear that this relation $\approx$ is an equivalence relation; it thus gives rise to a quotient set $\mathfrak{N}/\approx$. For every pair $P\in\mathfrak{N}$, we denote by $\left[ \left[ P\right] \right] $ the equivalence class of $P$ with respect to this relation $\approx$. The relation $\sim$ on $\mathfrak{M}$ is the restriction of the relation $\approx$ to $\mathfrak{M}$. Hence, every equivalence class $c$ with respect to $\sim$ is a subset of an equivalence class with respect to $\approx$. We denote the latter equivalence class by $c_{\mathfrak{N}}$. Thus, $\left[ P\right] _{\mathfrak{N}}=\left[ \left[ P\right] \right] $ for every $P\in\mathfrak{M}$. We notice that the set $\mathfrak{N}$ is invariant under switching the two elements of a pair (i.e., for every $\left( u,v\right) \in\mathfrak{N}$, we have $\left( v,u\right) \in\mathfrak{N}$). Moreover, the relation $\approx$ is preserved under switching the two elements of a pair (i.e., if $\left( s,t\right) \approx\left( s^{\prime},t^{\prime}\right) $, then $\left( t,s\right) \approx\left( t^{\prime},s^{\prime}\right) $). 
This shall be tacitly used in the following proofs. \begin{definition} \label{def.has}Let $w\in W$. Let $\overrightarrow{a}$ be a reduced expression for $w$. \textbf{(a)} For any $\left( s,t\right) \in\mathfrak{N}$, we define an element $\operatorname*{has}\nolimits_{s,t}\overrightarrow{a}\in\left\{ 0,1\right\} $ by \[ \operatorname*{has}\nolimits_{s,t}\overrightarrow{a}= \begin{cases} 1, & \text{if }\rho_{s,t}\text{ appears as a subword of }\operatorname*{Invs} \overrightarrow{a};\\ 0, & \text{otherwise} \end{cases} . \] (Keep in mind that we are speaking of subwords, not just factors, here.) \textbf{(b)} Consider the free $\mathbb{Z}$-module $\mathbb{Z}\left[ \mathfrak{N}\right] $ with basis $\mathfrak{N}$. We define an element $\operatorname*{Has}\overrightarrow{a}\in\mathbb{Z}\left[ \mathfrak{N} \right] $ by \[ \operatorname*{Has}\overrightarrow{a}=\sumnonlimits\limits_{\left( s,t\right) \in \mathfrak{N}}\operatorname*{has}\nolimits_{s,t}\overrightarrow{a}\cdot\left( s,t\right) \] (where the $\left( s,t\right) $ stands for the basis element $\left( s,t\right) \in\mathfrak{N}$ of $\mathbb{Z}\left[ \mathfrak{N}\right] $). \end{definition} We can now state the main result that we will use to prove Theorem \ref{thm.BCL}: \begin{theorem} \label{thm.has}Let $w\in W$. Let $\left( s,t\right) \in\mathfrak{M}$. Let $\overrightarrow{a}$ and $\overrightarrow{b}$ be two reduced expressions for $w$ such that $\overrightarrow{b}$ is obtained from $\overrightarrow{a}$ by an $\left( s,t\right) $-braid move. Proposition \ref{prop.Invsles} \textbf{(b)} shows that there exists a $q\in W$ such that $\operatorname*{Invs}\overrightarrow{b}$ is obtained from $\operatorname*{Invs}\overrightarrow{a}$ by replacing a particular factor of the form $q\rho_{s,t}q^{-1}$ by its reversal. Consider this $q$. Set $s^{\prime}=qsq^{-1}$ and $t^{\prime}=qtq^{-1}$; thus, $s^{\prime}$ and $t^{\prime}$ are reflections and satisfy $m_{s^{\prime},t^{\prime}} =m_{s,t}<\infty$. 
Also, the definitions of $s^{\prime}$ and $t^{\prime}$ yield $\left( s^{\prime},t^{\prime}\right) =q\underbrace{\left( s,t\right) }_{\in\mathfrak{M}}q^{-1}\in q\mathfrak{M}q^{-1}\subseteq\mathfrak{N}$. Similarly, $\left( t^{\prime},s^{\prime}\right) \in\mathfrak{N}$ (since $\left( t,s\right) \in\mathfrak{M}$). Now, we have \begin{equation} \operatorname*{Has}\overrightarrow{b}=\operatorname*{Has}\overrightarrow{a} -\left( s^{\prime},t^{\prime}\right) +\left( t^{\prime},s^{\prime}\right) . \label{eq.thm.has.a} \end{equation} \end{theorem} Before we prove Theorem \ref{thm.has}, we first show two lemmas. The first one is a crucial property of dihedral subgroups in our Coxeter group: \begin{lemma} \label{lem.dihindih}Let $\left( s,t\right) \in\mathfrak{M}$ and $\left( u,v\right) \in\mathfrak{N}$. Let $q\in W$. Assume that $u\in qD_{s,t}q^{-1}$ and $v\in qD_{s,t}q^{-1}$. Then, $m_{s,t}=m_{u,v}$. \end{lemma} \begin{proof} [Proof of Lemma \ref{lem.dihindih}.]\textit{Claim 1:} Lemma \ref{lem.dihindih} holds in the case when $\left( u,v\right) \in\mathfrak{M}$. \textit{Proof.} Assume that $\left( u,v\right) \in\mathfrak{M}$. Thus, $u,v\in S$. Let $I$ be the subset $\left\{ s,t\right\} $ of $S$. We shall use the notations of \cite[\S 9]{Lusztig-Hecke}. In particular, $l\left( r\right) $ denotes the length of any element $r\in W$. We have $W_{I}=D_{s,t}$. Consider the coset $W_{I}q^{-1}$ of $W_{I}$. From \cite[Lemma 9.7 (a)]{Lusztig-Hecke} (applied to $a=q^{-1}$), we know that this coset $W_{I}q^{-1}$ has a unique element of minimal length. Let $w$ be this element. Thus, $w\in W_{I}q^{-1}$, so that $W_{I}w=W_{I}q^{-1}$. Now, \[ \underbrace{q}_{=\left( q^{-1}\right) ^{-1}}\underbrace{W_{I}}_{=\left( W_{I}\right) ^{-1}}=\left( q^{-1}\right) ^{-1}\left( W_{I}\right) ^{-1}=\left( \underbrace{W_{I}q^{-1}}_{=W_{I}w}\right) ^{-1}=\left( W_{I}w\right) ^{-1}=w^{-1}W_{I}. \] Let $u^{\prime}=wuw^{-1}$ and $v^{\prime}=wvw^{-1}$. 
We have $u\in q\underbrace{D_{s,t}}_{=W_{I}}q^{-1}=q\underbrace{W_{I}q^{-1} }_{=W_{I}w}=\underbrace{qW_{I}}_{=w^{-1}W_{I}}w=w^{-1}W_{I}w$. In other words, $wuw^{-1}\in W_{I}$. In other words, $u^{\prime}\in W_{I}$ (since $u^{\prime }=wuw^{-1}$). Similarly, $v^{\prime}\in W_{I}$. We have $u^{\prime}=wuw^{-1}$, hence $u^{\prime}w=wu$. But \cite[Lemma 9.7 (b)]{Lusztig-Hecke} (applied to $a=q^{-1}$ and $y=u^{\prime}$) shows that $l\left( u^{\prime}w\right) =l\left( u^{\prime}\right) +l\left( w\right) $. Hence, \[ l\left( u^{\prime}\right) +l\left( w\right) =l\left( \underbrace{u^{\prime}w}_{=wu}\right) =l\left( wu\right) =l\left( w\right) \pm1\ \ \ \ \ \ \ \ \ \ \left( \text{since }u\in S\right) . \] Subtracting $l\left( w\right) $ from this equality, we obtain $l\left( u^{\prime}\right) =\pm1$, and thus $l\left( u^{\prime}\right) =1$, so that $u^{\prime}\in S$. Combined with $u^{\prime}\in W_{I}$, this shows that $u^{\prime}\in S\cap W_{I}=I$. Similarly, $v^{\prime}\in I$. We have $u\neq v$ (since $\left( u,v\right) \in\mathfrak{N}$), thus $wuw^{-1}\neq wvw^{-1}$, thus $u^{\prime}=wuw^{-1}\neq wvw^{-1}=v^{\prime}$. Thus, $u^{\prime}$ and $v^{\prime}$ are two distinct elements of the two-element set $I=\left\{ s,t\right\} $. Hence, either $\left( u^{\prime },v^{\prime}\right) =\left( s,t\right) $ or $\left( u^{\prime},v^{\prime }\right) =\left( t,s\right) $. In either of these two cases, we have $m_{u^{\prime},v^{\prime}}=m_{s,t}$. But since $u^{\prime}=wuw^{-1}$ and $v^{\prime}=wvw^{-1}$, we have $m_{u^{\prime},v^{\prime}}=m_{u,v}$. Hence, $m_{s,t}=m_{u^{\prime},v^{\prime}}=m_{u,v}$. This proves Claim 1. \textit{Claim 2:} Lemma \ref{lem.dihindih} holds in the general case. \textit{Proof.} Consider the general case. We have $\left( u,v\right) \in\mathfrak{N}=\bigcup_{x\in W}x\mathfrak{M}x^{-1}$. Thus, there exists some $x\in W$ such that $\left( u,v\right) \in x\mathfrak{M}x^{-1}$. Consider this $x$. 
From $\left( u,v\right) \in x\mathfrak{M}x^{-1}$, we obtain $x^{-1}\left( u,v\right) x\in\mathfrak{M}$. In other words, $\left( x^{-1}ux,x^{-1}vx\right) \in\mathfrak{M}$. Moreover, \[ x^{-1}\underbrace{u}_{\in qD_{s,t}q^{-1}}x\in x^{-1}qD_{s,t}\underbrace{q^{-1} x}_{=\left( x^{-1}q\right) ^{-1}}=x^{-1}qD_{s,t}\left( x^{-1}q\right) ^{-1}, \] and similarly $x^{-1}vx\in x^{-1}qD_{s,t}\left( x^{-1}q\right) ^{-1}$. Hence, Claim 1 (applied to $\left( x^{-1}ux,x^{-1}vx\right) $ and $x^{-1}q$ instead of $\left( u,v\right) $ and $q$) shows that $m_{s,t}=m_{x^{-1} ux,x^{-1}vx}=m_{u,v}$. This proves Claim 2, and thus proves Lemma \ref{lem.dihindih}. \end{proof} Next comes another lemma, bordering on the trivial: \begin{lemma} \label{lem.GandH}Let $G$ be a group. Let $H$ be a subgroup of $G$. Let $u\in G$, $v\in G$ and $g\in\mathbb{Z}$. Assume that $\left( uv\right) ^{g-1}u\in H$ and $\left( uv\right) ^{g}u\in H$. Then, $u\in H$ and $v\in H$. \end{lemma} \begin{proof} [Proof of Lemma \ref{lem.GandH}.]We have $\underbrace{\left( \left( uv\right) ^{g}u\right) }_{\in H}\left( \underbrace{\left( uv\right) ^{g-1}u}_{\in H}\right) ^{-1}\in HH^{-1}\subseteq H$ (since $H$ is a subgroup of $G$). Since \[ \left( \left( uv\right) ^{g}u\right) \underbrace{\left( \left( uv\right) ^{g-1}u\right) ^{-1}}_{=u^{-1}\left( \left( uv\right) ^{g-1}\right) ^{-1}}=\left( uv\right) ^{g}\underbrace{uu^{-1}}_{=1}\left( \left( uv\right) ^{g-1}\right) ^{-1}=\left( uv\right) ^{g}\left( \left( uv\right) ^{g-1}\right) ^{-1}=uv, \] this rewrites as $uv\in H$. However, $\left( uv\right) ^{-g}\left( uv\right) ^{g}u=u$, so that \[ u=\left( \underbrace{uv}_{\in H}\right) ^{-g}\underbrace{\left( uv\right) ^{g}u}_{\in H}\in H^{-g}H\subseteq H \] (since $H$ is a subgroup of $G$). Now, both $u$ and $uv$ belong to the subgroup $H$ of $G$. Thus, so does $u^{-1}\left( uv\right) $. In other words, $u^{-1}\left( uv\right) \in H$, so that $v=u^{-1}\left( uv\right) \in H$. This completes the proof of Lemma \ref{lem.GandH}. 
\end{proof} \begin{proof} [Proof of Theorem \ref{thm.has}.]Conjugation by $q$ (that is, the map $W\rightarrow W,\ x\mapsto qxq^{-1}$) is a group endomorphism of $W$. Hence, for every $i\in\mathbb{N}$, we have \begin{equation} q\left( st\right) ^{i}sq^{-1}=\left( \underbrace{\left( qsq^{-1}\right) }_{=s^{\prime}}\left( \underbrace{qtq^{-1}}_{=t^{\prime}}\right) \right) ^{i}\underbrace{\left( qsq^{-1}\right) }_{=s^{\prime}}=\left( s^{\prime }t^{\prime}\right) ^{i}s^{\prime}. \label{pf.thm.has.qconj} \end{equation} Let $m=m_{s,t}$. We have \[ \rho_{s,t}=\left( \left( st\right) ^{0}s,\left( st\right) ^{1} s,\ldots,\left( st\right) ^{m_{s,t}-1}s\right) =\left( \left( st\right) ^{0}s,\left( st\right) ^{1}s,\ldots,\left( st\right) ^{m-1}s\right) \] (since $m_{s,t}=m$) and thus \begin{align*} q\rho_{s,t}q^{-1} & =q\left( \left( st\right) ^{0}s,\left( st\right) ^{1}s,\ldots,\left( st\right) ^{m-1}s\right) q^{-1}\\ & =\left( q\left( st\right) ^{0}sq^{-1},q\left( st\right) ^{1} sq^{-1},\ldots,q\left( st\right) ^{m-1}sq^{-1}\right) \\ & =\left( \left( s^{\prime}t^{\prime}\right) ^{0}s^{\prime},\left( s^{\prime}t^{\prime}\right) ^{1}s^{\prime},\ldots,\left( s^{\prime} t^{\prime}\right) ^{m-1}s^{\prime}\right) \\ & \ \ \ \ \ \ \ \ \ \ \left( \begin{array} [c]{c} \text{since every }i\in\left\{ 0,1,\ldots,m-1\right\} \text{ satisfies}\\ q\left( st\right) ^{i}sq^{-1}=\left( s^{\prime}t^{\prime}\right) ^{i}s^{\prime}\text{ (by (\ref{pf.thm.has.qconj}))} \end{array} \right) \\ & =\left( \left( s^{\prime}t^{\prime}\right) ^{0}s^{\prime},\left( s^{\prime}t^{\prime}\right) ^{1}s^{\prime},\ldots,\left( s^{\prime} t^{\prime}\right) ^{m_{s^{\prime},t^{\prime}}-1}s^{\prime}\right) \ \ \ \ \ \ \ \ \ \ \left( \text{since }m=m_{s,t}=m_{s^{\prime},t^{\prime} }\right) \\ & =\rho_{s^{\prime},t^{\prime}}\ \ \ \ \ \ \ \ \ \ \left( \text{by the definition of }\rho_{s^{\prime},t^{\prime}}\right) . 
\end{align*} The word $\overrightarrow{b}$ is obtained from $\overrightarrow{a}$ by an $\left( s,t\right) $-braid move. Hence, the word $\overrightarrow{a}$ can be obtained from $\overrightarrow{b}$ by a $\left( t,s\right) $-braid move. From $\left( s^{\prime},t^{\prime}\right) \in\mathfrak{N}$, we obtain $s^{\prime}\neq t^{\prime}$. Hence, $\left( s^{\prime},t^{\prime}\right) \neq\left( t^{\prime},s^{\prime}\right) $. From $s^{\prime}=qsq^{-1}$ and $t^{\prime}=qtq^{-1}$, we obtain $D_{s^{\prime },t^{\prime}}=qD_{s,t}q^{-1}$ (since conjugation by $q$ is a group endomorphism of $W$). Proposition \ref{prop.rhost} \textbf{(c)} shows that the word $q\rho _{t,s}q^{-1}$ is the reversal of the word $q\rho_{s,t}q^{-1}$. Hence, the word $q\rho_{s,t}q^{-1}$ is the reversal of the word $q\rho_{t,s}q^{-1}$. Recall that $\operatorname*{Invs}\overrightarrow{b}$ is obtained from $\operatorname*{Invs}\overrightarrow{a}$ by replacing a particular factor of the form $q\rho_{s,t}q^{-1}$ by its reversal. Since this latter reversal is $q\rho_{t,s}q^{-1}$ (as we have previously seen), this shows that $\operatorname*{Invs}\overrightarrow{b}$ has a factor of $q\rho_{t,s}q^{-1}$ in the place where the word $\operatorname*{Invs}\overrightarrow{a}$ had the factor $q\rho_{s,t}q^{-1}$. Hence, $\operatorname*{Invs}\overrightarrow{a}$ can, in turn, be obtained from $\operatorname*{Invs}\overrightarrow{b}$ by replacing a particular factor of the form $q\rho_{t,s}q^{-1}$ by its reversal (since the reversal of $q\rho_{t,s}q^{-1}$ is $q\rho_{s,t}q^{-1}$). Thus, our situation is symmetric with respect to $s$ and $t$; more precisely, we wind up in an analogous situation if we replace $s$, $t$, $\overrightarrow{a}$, $\overrightarrow{b}$, $s^{\prime}$ and $t^{\prime}$ by $t$, $s$, $\overrightarrow{b}$, $\overrightarrow{a}$, $t^{\prime}$ and $s^{\prime}$, respectively. 
We shall prove the following claims: \textit{Claim 1:} Let $\left( u,v\right) \in\mathfrak{N}$ be such that $\left( u,v\right) \neq\left( s^{\prime},t^{\prime}\right) $ and $\left( u,v\right) \neq\left( t^{\prime},s^{\prime}\right) $. Then, $\operatorname*{has}\nolimits_{u,v}\overrightarrow{b}=\operatorname*{has} \nolimits_{u,v}\overrightarrow{a}$. \textit{Claim 2:} We have $\operatorname*{has}\nolimits_{s^{\prime},t^{\prime }}\overrightarrow{b}=\operatorname*{has}\nolimits_{s^{\prime},t^{\prime} }\overrightarrow{a}-1$. \textit{Claim 3:} We have $\operatorname*{has}\nolimits_{t^{\prime},s^{\prime }}\overrightarrow{b}=\operatorname*{has}\nolimits_{t^{\prime},s^{\prime} }\overrightarrow{a}+1$. \textit{Proof of Claim 1:} Assume the contrary. Thus, $\operatorname*{has} \nolimits_{u,v}\overrightarrow{b}\neq\operatorname*{has}\nolimits_{u,v} \overrightarrow{a}$. Hence, one of the numbers $\operatorname*{has} \nolimits_{u,v}\overrightarrow{b}$ and $\operatorname*{has}\nolimits_{u,v} \overrightarrow{a}$ equals $1$ and the other equals $0$ (since both $\operatorname*{has}\nolimits_{u,v}\overrightarrow{b}$ and $\operatorname*{has}\nolimits_{u,v}\overrightarrow{a}$ belong to $\left\{ 0,1\right\} $). Without loss of generality, we assume that $\operatorname*{has}\nolimits_{u,v}\overrightarrow{a}=1$ and $\operatorname*{has}\nolimits_{u,v}\overrightarrow{b}=0$ (because in the other case, we can replace $s$, $t$, $\overrightarrow{a}$, $\overrightarrow{b}$, $s^{\prime}$ and $t^{\prime}$ by $t$, $s$, $\overrightarrow{b}$, $\overrightarrow{a}$, $t^{\prime}$ and $s^{\prime}$, respectively). The elements $u$ and $v$ are two distinct reflections (since $\left( u,v\right) \in\mathfrak{N}$). Write the tuple $\operatorname*{Invs}\overrightarrow{a}$ as $\left( \alpha_{1},\alpha_{2},\ldots,\alpha_{k}\right) $. 
The tuple $\operatorname*{Invs}\overrightarrow{b}$ has the same length as $\operatorname*{Invs}\overrightarrow{a}$, since $\operatorname*{Invs} \overrightarrow{b}$ is obtained from $\operatorname*{Invs}\overrightarrow{a}$ by replacing a particular factor of the form $q\rho_{s,t}q^{-1}$ by its reversal. Hence, write the tuple $\operatorname*{Invs}\overrightarrow{b}$ as $\left( \beta_{1},\beta_{2},\ldots,\beta_{k}\right) $. From $\operatorname*{has}\nolimits_{u,v}\overrightarrow{a}=1$, we obtain that $\rho_{u,v}$ appears as a subword of $\operatorname*{Invs}\overrightarrow{a}$. In other words, $\rho_{u,v}=\left( \alpha_{i_{1}},\alpha_{i_{2}} ,\ldots,\alpha_{i_{f}}\right) $ for some integers $i_{1},i_{2},\ldots,i_{f}$ satisfying $1\leq i_{1}<i_{2}<\cdots<i_{f}\leq k$. Consider these $i_{1} ,i_{2},\ldots,i_{f}$. From $\operatorname*{has}\nolimits_{u,v} \overrightarrow{b}=0$, we conclude that $\rho_{u,v}$ does not appear as a subword of $\operatorname*{Invs}\overrightarrow{b}$. On the other hand, $\operatorname*{Invs}\overrightarrow{b}$ is obtained from $\operatorname*{Invs}\overrightarrow{a}$ by replacing a particular factor of the form $q\rho_{s,t}q^{-1}$ by its reversal. This factor has $m_{s,t}=m$ letters; thus, it has the form $\left( \alpha_{p+1},\alpha_{p+2} ,\ldots,\alpha_{p+m}\right) $ for some $p\in\left\{ 0,1,\ldots,k-m\right\} $. Consider this $p$. Thus, \[ \left( \alpha_{p+1},\alpha_{p+2},\ldots,\alpha_{p+m}\right) =q\rho _{s,t}q^{-1}=\left( \left( s^{\prime}t^{\prime}\right) ^{0}s^{\prime },\left( s^{\prime}t^{\prime}\right) ^{1}s^{\prime},\ldots,\left( s^{\prime}t^{\prime}\right) ^{m-1}s^{\prime}\right) . \] In other words, \begin{equation} \alpha_{p+i}=\left( s^{\prime}t^{\prime}\right) ^{i-1}s^{\prime }\ \ \ \ \ \ \ \ \ \ \text{for every }i\in\left\{ 1,2,\ldots,m\right\} . 
\label{pf.thm.has.c1.2} \end{equation} We now summarize: \begin{itemize} \item The word $\rho_{u,v}$ appears as the subword $\left( \alpha_{i_{1}},\alpha_{i_{2}},\ldots,\alpha_{i_{f}}\right) $ of $\operatorname*{Invs}\overrightarrow{a}$, but does not appear as a subword of $\operatorname*{Invs}\overrightarrow{b}$. \item The word $\operatorname*{Invs}\overrightarrow{b}$ is obtained from $\operatorname*{Invs}\overrightarrow{a}$ by replacing the factor \newline$\left( \alpha_{p+1},\alpha_{p+2},\ldots,\alpha_{p+m}\right) $ by its reversal. \end{itemize} Thus, replacing the factor $\left( \alpha_{p+1},\alpha_{p+2},\ldots,\alpha_{p+m}\right) $ in $\operatorname*{Invs}\overrightarrow{a}$ by its reversal must mess up the subword $\left( \alpha_{i_{1}},\alpha_{i_{2}},\ldots,\alpha_{i_{f}}\right) $ of $\operatorname*{Invs}\overrightarrow{a}$ badly enough that it no longer appears as a subword (not even in different positions). This can only happen if at least two of the integers $i_{1},i_{2},\ldots,i_{f}$ lie in the interval $\left\{ p+1,p+2,\ldots,p+m\right\} $ (indeed, if at most one of the integers $i_{1},i_{2},\ldots,i_{f}$ lay in this interval, then the reversal would move at most one of the entries $\alpha_{i_{1}},\alpha_{i_{2}},\ldots,\alpha_{i_{f}}$, and this entry would remain inside the interval $\left\{ p+1,p+2,\ldots,p+m\right\} $ while all the other entries kept their positions; hence, $\rho_{u,v}$ would still appear as a subword of $\operatorname*{Invs}\overrightarrow{b}$, contradicting $\operatorname*{has}\nolimits_{u,v}\overrightarrow{b}=0$). Hence, at least two of the integers $i_{1},i_{2},\ldots,i_{f}$ lie in the interval $\left\{ p+1,p+2,\ldots,p+m\right\} $. In particular, there must be a $g\in\left\{ 1,2,\ldots,f-1\right\} $ such that the integers $i_{g}$ and $i_{g+1}$ lie in the interval $\left\{ p+1,p+2,\ldots,p+m\right\} $ (since $i_{1}<i_{2}<\cdots<i_{f}$). Consider this $g$. We have $i_{g}\in\left\{ p+1,p+2,\ldots,p+m\right\} $. In other words, $i_{g}=p+r_{g}$ for some $r_{g}\in\left\{ 1,2,\ldots,m\right\} $. Consider this $r_{g}$. We have $i_{g+1}\in\left\{ p+1,p+2,\ldots,p+m\right\} $. In other words, $i_{g+1}=p+r_{g+1}$ for some $r_{g+1}\in\left\{ 1,2,\ldots,m\right\} $. Consider this $r_{g+1}$. We have $\left( \alpha_{i_{1}},\alpha_{i_{2}},\ldots,\alpha_{i_{f}}\right) =\rho_{u,v}=\left( \left( uv\right) ^{0}u,\left( uv\right) ^{1}u,\ldots,\left( uv\right) ^{m_{u,v}-1}u\right) $ (by the definition of $\rho_{u,v}$).
Hence, $\alpha_{i_{g}}=\left( uv\right) ^{g-1}u$ and $\alpha_{i_{g+1}}=\left( uv\right) ^{g}u$. Now, \begin{align*} \left( uv\right) ^{g-1}u & =\alpha_{i_{g}}=\alpha_{p+r_{g}} \ \ \ \ \ \ \ \ \ \ \left( \text{since }i_{g}=p+r_{g}\right) \\ & =\left( s^{\prime}t^{\prime}\right) ^{r_{g}-1}s^{\prime} \ \ \ \ \ \ \ \ \ \ \left( \text{by (\ref{pf.thm.has.c1.2}), applied to }i=r_{g}\right) \\ & \in D_{s^{\prime},t^{\prime}} \end{align*} and \begin{align*} \left( uv\right) ^{g}u & =\alpha_{i_{g+1}}=\alpha_{p+r_{g+1} }\ \ \ \ \ \ \ \ \ \ \left( \text{since }i_{g+1}=p+r_{g+1}\right) \\ & =\left( s^{\prime}t^{\prime}\right) ^{r_{g+1}-1}s^{\prime} \ \ \ \ \ \ \ \ \ \ \left( \text{by (\ref{pf.thm.has.c1.2}), applied to }i=r_{g+1}\right) \\ & \in D_{s^{\prime},t^{\prime}}. \end{align*} Hence, Lemma \ref{lem.GandH} (applied to $G=W$ and $H=D_{s^{\prime},t^{\prime }}$) yields $u\in D_{s^{\prime},t^{\prime}}$ and $v\in D_{s^{\prime} ,t^{\prime}}$. Furthermore, we have \[ \alpha_{i_{1}}=u\ \ \ \ \ \ \ \ \ \ \text{and}\ \ \ \ \ \ \ \ \ \ \alpha _{i_{f}}=v \] \footnote{\textit{Proof.} From $\left( \alpha_{i_{1}},\alpha_{i_{2}} ,\ldots,\alpha_{i_{f}}\right) =\left( \left( uv\right) ^{0}u,\left( uv\right) ^{1}u,\ldots,\left( uv\right) ^{m_{u,v}-1}u\right) $, we obtain $\alpha_{i_{1}}=\underbrace{\left( uv\right) ^{0}}_{=1}u=u$. \par We have $\left( uv\right) ^{m_{u,v}}=1$, and thus $\left( uv\right) ^{m_{u,v}-1}=\left( uv\right) ^{-1}=v^{-1}u^{-1}$. \par From $\left( \alpha_{i_{1}},\alpha_{i_{2}},\ldots,\alpha_{i_{f}}\right) =\left( \left( uv\right) ^{0}u,\left( uv\right) ^{1}u,\ldots,\left( uv\right) ^{m_{u,v}-1}u\right) $, we obtain $\alpha_{i_{f}} =\underbrace{\left( uv\right) ^{m_{u,v}-1}}_{=v^{-1}u^{-1}}u=v^{-1} u^{-1}u=v^{-1}=v$ (since $v$ is a reflection), qed.}. Now, we have $i_{1}\in\left\{ p+1,p+2,\ldots,p+m\right\} $ (by a simple argument\footnote{\textit{Proof.} The element $u$ is a reflection and lies in $D_{s^{\prime},t^{\prime}}$. 
Hence, Proposition \ref{prop.rhost} \textbf{(a)} (applied to $s^{\prime}$ and $t^{\prime}$ instead of $s$ and $t$) shows that the word $\rho_{s^{\prime},t^{\prime}}$ contains $u$. Since $\rho_{s^{\prime },t^{\prime}}=q\rho_{s,t}q^{-1}=\left( \alpha_{p+1},\alpha_{p+2} ,\ldots,\alpha_{p+m}\right) $, this shows that the word $\left( \alpha _{p+1},\alpha_{p+2},\ldots,\alpha_{p+m}\right) $ contains $u$. In other words, $u=\alpha_{M}$ for some $M\in\left\{ p+1,p+2,\ldots,p+m\right\} $. Consider this $M$. \par But Proposition \ref{prop.Invsles} \textbf{(a)} shows that all entries of the tuple $\operatorname*{Invs}\overrightarrow{a}$ are distinct. In other words, the elements $\alpha_{1},\alpha_{2},\ldots,\alpha_{k}$ are pairwise distinct (since those are the entries of $\operatorname*{Invs}\overrightarrow{a}$). Hence, from $\alpha_{i_{1}}=u=\alpha_{M}$, we obtain $i_{1}=M\in\left\{ p+1,p+2,\ldots,p+m\right\} $. Qed.}) and $i_{f}\in\left\{ p+1,p+2,\ldots ,p+m\right\} $ (by a similar argument, with $v$ occasionally replacing $u$). Thus, all of the integers $i_{1},i_{2},\ldots,i_{f}$ belong to $\left\{ p+1,p+2,\ldots,p+m\right\} $ (since $i_{1}<i_{2}<\cdots<i_{f}$). Now, recall that $f$ is the length of the word $\rho_{u,v}$ (since $\rho _{u,v}=\left( \alpha_{i_{1}},\alpha_{i_{2}},\ldots,\alpha_{i_{f}}\right) $), and thus equals $m_{u,v}$. Thus, $f=m_{u,v}$. But $u\in D_{s^{\prime},t^{\prime}}=qD_{s,t}q^{-1}$ and $v\in D_{s^{\prime },t^{\prime}}=qD_{s,t}q^{-1}$. Hence, Lemma \ref{lem.dihindih} yields $m_{s,t}=m_{u,v}$. Since $m=m_{s,t}$ and $f=m_{u,v}$, this rewrites as $m=f$. Recall that all of the integers $i_{1},i_{2},\ldots,i_{f}$ belong to $\left\{ p+1,p+2,\ldots,p+m\right\} $. Since $i_{1}<i_{2}<\cdots<i_{f}$ and $f=m$, these integers $i_{1},i_{2},\ldots,i_{f}$ form a strictly increasing sequence of length $m$. Thus, $\left( i_{1},i_{2},\ldots,i_{f}\right) $ is a strictly increasing sequence of length $m$ whose entries belong to $\left\{ p+1,p+2,\ldots,p+m\right\} $. 
But the only such sequence is $\left( p+1,p+2,\ldots,p+m\right) $ (because the set $\left\{ p+1,p+2,\ldots ,p+m\right\} $ has only $m$ elements). Thus, $\left( i_{1},i_{2} ,\ldots,i_{f}\right) =\left( p+1,p+2,\ldots,p+m\right) $. In particular, $i_{1}=p+1$ and $i_{f}=p+m$. Now, $\alpha_{i_{1}}=u$, so that \begin{align*} u & =\alpha_{i_{1}}=\alpha_{p+1}\ \ \ \ \ \ \ \ \ \ \left( \text{since }i_{1}=p+1\right) \\ & =\underbrace{\left( s^{\prime}t^{\prime}\right) ^{1-1}}_{=1}s^{\prime }\ \ \ \ \ \ \ \ \ \ \left( \text{by (\ref{pf.thm.has.c1.2}), applied to }i=1\right) \\ & =s^{\prime}. \end{align*} Also, $\alpha_{i_{f}}=v$, so that \begin{align*} v & =\alpha_{i_{f}}=\alpha_{p+m}\ \ \ \ \ \ \ \ \ \ \left( \text{since }i_{f}=p+m\right) \\ & =\underbrace{\left( s^{\prime}t^{\prime}\right) ^{m-1}} _{\substack{=\left( s^{\prime}t^{\prime}\right) ^{-1}\\\text{(since }\left( s^{\prime}t^{\prime}\right) ^{m}=1\\\text{(since }m=m_{s,t}=m_{s^{\prime },t^{\prime}}\text{))}}}s^{\prime}\ \ \ \ \ \ \ \ \ \ \left( \text{by (\ref{pf.thm.has.c1.2}), applied to }i=m\right) \\ & =\left( s^{\prime}t^{\prime}\right) ^{-1}s^{\prime}=t^{\prime}. \end{align*} Combined with $u=s^{\prime}$, this yields $\left( u,v\right) =\left( s^{\prime},t^{\prime}\right) $, which contradicts $\left( u,v\right) \neq\left( s^{\prime},t^{\prime}\right) $. This contradiction proves that our assumption was wrong. Claim 1 is proven. \textit{Proof of Claim 2:} The word $\operatorname*{Invs}\overrightarrow{b}$ is obtained from $\operatorname*{Invs}\overrightarrow{a}$ by replacing a particular factor of the form $q\rho_{s,t}q^{-1}$ by its reversal. Thus, the word $\operatorname*{Invs}\overrightarrow{a}$ has a factor of the form $q\rho_{s,t}q^{-1}$. Since $q\rho_{s,t}q^{-1}=\rho_{s^{\prime},t^{\prime}}$, this means that the word $\operatorname*{Invs}\overrightarrow{a}$ has a factor of the form $\rho_{s^{\prime},t^{\prime}}$. 
Consequently, the word $\operatorname*{Invs}\overrightarrow{a}$ has a subword of the form $\rho_{s^{\prime},t^{\prime}}$. In other words, $\operatorname*{has} \nolimits_{s^{\prime},t^{\prime}}\overrightarrow{a}=1$. The same argument (applied to $t$, $s$, $\overrightarrow{b}$, $\overrightarrow{a}$, $t^{\prime}$ and $s^{\prime}$ instead of $s$, $t$, $\overrightarrow{a}$, $\overrightarrow{b}$, $s^{\prime}$ and $t^{\prime}$) shows that $\operatorname*{has}\nolimits_{t^{\prime},s^{\prime}} \overrightarrow{b}=1$. In other words, the word $\operatorname*{Invs} \overrightarrow{b}$ has a subword of the form $\rho_{t^{\prime},s^{\prime}}$. Hence, the word $\operatorname*{Invs}\overrightarrow{b}$ has no subword of the form $\rho_{s^{\prime},t^{\prime}}$ (because Proposition \ref{prop.has} \textbf{(b)} (applied to $\overrightarrow{b}$, $s^{\prime}$ and $t^{\prime}$ instead of $\overrightarrow{a}$, $s$ and $t$) shows that the words $\rho_{s^{\prime},t^{\prime}}$ and $\rho_{t^{\prime},s^{\prime}}$ cannot both appear as subwords of $\operatorname*{Invs}\overrightarrow{b}$). In other words, $\operatorname*{has}\nolimits_{s^{\prime},t^{\prime}}\overrightarrow{b} =0$. Combining this with $\operatorname*{has}\nolimits_{s^{\prime},t^{\prime} }\overrightarrow{a}=1$, we immediately obtain $\operatorname*{has} \nolimits_{s^{\prime},t^{\prime}}\overrightarrow{b}=\operatorname*{has} \nolimits_{s^{\prime},t^{\prime}}\overrightarrow{a}-1$. Thus, Claim 2 is proven. \textit{Proof of Claim 3:} Applying Claim 2 to $t$, $s$, $\overrightarrow{b}$, $\overrightarrow{a}$, $t^{\prime}$ and $s^{\prime}$ instead of $s$, $t$, $\overrightarrow{a}$, $\overrightarrow{b}$, $s^{\prime}$ and $t^{\prime}$, we obtain $\operatorname*{has}\nolimits_{t^{\prime},s^{\prime}}\overrightarrow{a} =\operatorname*{has}\nolimits_{t^{\prime},s^{\prime}}\overrightarrow{b}-1$. 
In other words, $\operatorname*{has}\nolimits_{t^{\prime},s^{\prime} }\overrightarrow{b}=\operatorname*{has}\nolimits_{t^{\prime},s^{\prime} }\overrightarrow{a}+1$. This proves Claim 3. Now, our goal is to prove that $\operatorname*{Has}\overrightarrow{b} =\operatorname*{Has}\overrightarrow{a}-\left( s^{\prime},t^{\prime}\right) +\left( t^{\prime},s^{\prime}\right) $. But the definition of $\operatorname*{Has}\overrightarrow{b}$ yields \begin{align*} & \operatorname*{Has}\overrightarrow{b}\\ & =\sumnonlimits\limits_{\left( u,v\right) \in\mathfrak{N}}\operatorname*{has} \nolimits_{u,v}\overrightarrow{b}\cdot\left( u,v\right) \\ & =\sumnonlimits\limits_{\substack{\left( u,v\right) \in\mathfrak{N};\\\left( u,v\right) \neq\left( s^{\prime},t^{\prime}\right) ;\\\left( u,v\right) \neq\left( t^{\prime},s^{\prime}\right) }}\underbrace{\operatorname*{has}\nolimits_{u,v} \overrightarrow{b}}_{\substack{=\operatorname*{has}\nolimits_{u,v} \overrightarrow{a}\\\text{(by Claim 1)}}}\cdot\left( u,v\right) +\underbrace{\operatorname*{has}\nolimits_{s^{\prime},t^{\prime} }\overrightarrow{b}}_{\substack{=\operatorname*{has}\nolimits_{s^{\prime },t^{\prime}}\overrightarrow{a}-1\\\text{(by Claim 2)}}}\cdot\left( s^{\prime},t^{\prime}\right) +\underbrace{\operatorname*{has} \nolimits_{t^{\prime},s^{\prime}}\overrightarrow{b}} _{\substack{=\operatorname*{has}\nolimits_{t^{\prime},s^{\prime} }\overrightarrow{a}+1\\\text{(by Claim 3)}}}\cdot\left( t^{\prime},s^{\prime }\right) \\ & \ \ \ \ \ \ \ \ \ \ \left( \text{since }\left( s^{\prime},t^{\prime }\right) \neq\left( t^{\prime},s^{\prime}\right) \right) \\ & =\sumnonlimits\limits_{\substack{\left( u,v\right) \in\mathfrak{N};\\\left( u,v\right) \neq\left( s^{\prime},t^{\prime}\right) ;\\\left( u,v\right) \neq\left( t^{\prime},s^{\prime}\right) }}\operatorname*{has}\nolimits_{u,v} \overrightarrow{a}\cdot\left( u,v\right) +\left( \operatorname*{has} \nolimits_{s^{\prime},t^{\prime}}\overrightarrow{a}-1\right) \cdot\left( 
s^{\prime},t^{\prime}\right) +\left( \operatorname*{has}\nolimits_{t^{\prime },s^{\prime}}\overrightarrow{a}+1\right) \cdot\left( t^{\prime},s^{\prime }\right) \\ & =\sumnonlimits\limits_{\substack{\left( u,v\right) \in\mathfrak{N};\\\left( u,v\right) \neq\left( s^{\prime},t^{\prime}\right) ;\\\left( u,v\right) \neq\left( t^{\prime},s^{\prime}\right) }}\operatorname*{has}\nolimits_{u,v} \overrightarrow{a}\cdot\left( u,v\right) +\operatorname*{has} \nolimits_{s^{\prime},t^{\prime}}\overrightarrow{a}\cdot\left( s^{\prime },t^{\prime}\right) -\left( s^{\prime},t^{\prime}\right) +\operatorname*{has}\nolimits_{t^{\prime},s^{\prime}}\overrightarrow{a} \cdot\left( t^{\prime},s^{\prime}\right) +\left( t^{\prime},s^{\prime }\right) \\ & =\underbrace{\sumnonlimits\limits_{\substack{\left( u,v\right) \in\mathfrak{N};\\\left( u,v\right) \neq\left( s^{\prime},t^{\prime}\right) ;\\\left( u,v\right) \neq\left( t^{\prime},s^{\prime}\right) }}\operatorname*{has}\nolimits_{u,v} \overrightarrow{a}\cdot\left( u,v\right) +\operatorname*{has} \nolimits_{s^{\prime},t^{\prime}}\overrightarrow{a}\cdot\left( s^{\prime },t^{\prime}\right) +\operatorname*{has}\nolimits_{t^{\prime},s^{\prime} }\overrightarrow{a}\cdot\left( t^{\prime},s^{\prime}\right) } _{\substack{=\sumnonlimits\limits_{\left( u,v\right) \in\mathfrak{N}}\operatorname*{has} \nolimits_{u,v}\overrightarrow{a}\cdot\left( u,v\right) \\\text{(since }\left( s^{\prime},t^{\prime}\right) \neq\left( t^{\prime},s^{\prime }\right) \text{)}}}-\left( s^{\prime},t^{\prime}\right) +\left( t^{\prime },s^{\prime}\right) \\ & =\underbrace{\sumnonlimits\limits_{\left( u,v\right) \in\mathfrak{N}}\operatorname*{has} \nolimits_{u,v}\overrightarrow{a}\cdot\left( u,v\right) } _{=\operatorname*{Has}\overrightarrow{a}}-\left( s^{\prime},t^{\prime }\right) +\left( t^{\prime},s^{\prime}\right) =\operatorname*{Has} \overrightarrow{a}-\left( s^{\prime},t^{\prime}\right) +\left( t^{\prime },s^{\prime}\right) . \end{align*} This proves Theorem \ref{thm.has}. 
\end{proof} \section{The proof of Theorem \ref{thm.BCL}} We are now ready to establish Theorem \ref{thm.BCL}: \begin{proof} [Proof of Theorem \ref{thm.BCL}.]We shall use the \textit{Iverson bracket notation}: i.e., if $\mathcal{A}$ is any logical statement, then we shall write $\left[ \mathcal{A}\right] $ for the integer $ \begin{cases} 1, & \text{if }\mathcal{A}\text{ is true};\\ 0, & \text{if }\mathcal{A}\text{ is false} \end{cases} $. For every $z\in\mathbb{Z}\left[ \mathfrak{N}\right] $ and $n\in\mathfrak{N} $, we let $\operatorname*{coord}\nolimits_{n}z\in\mathbb{Z}$ be the $n$-coordinate of $z$ (with respect to the basis $\mathfrak{N}$ of $\mathbb{Z}\left[ \mathfrak{N}\right] $). For every $z\in\mathbb{Z}\left[ \mathfrak{N}\right] $ and $N\subseteq \mathfrak{N}$, we set $\operatorname*{coord}\nolimits_{N}z=\sumnonlimits\limits_{n\in N}\operatorname*{coord}\nolimits_{n}z$. We have $c=\left[ \left( s,t\right) \right] $, thus $c_{\mathfrak{N} }=\left[ \left[ \left( s,t\right) \right] \right] $ and $c^{\operatorname{op}}=\left[ \left( t,s\right) \right] $. From the latter equality, we obtain $\left( c^{\operatorname*{op}}\right) _{\mathfrak{N} }=\left[ \left[ \left( t,s\right) \right] \right] $. Let $\overrightarrow{c_{1}},\overrightarrow{c_{2}},\ldots ,\overrightarrow{c_{k}},\overrightarrow{c_{k+1}}$ be the vertices on the cycle $C$ (listed in the order they are encountered when we traverse the cycle, starting at some arbitrarily chosen vertex on the cycle and going until we return to the starting point). Thus: \begin{itemize} \item We have $\overrightarrow{c_{k+1}}=\overrightarrow{c_{1}}$. \item There is an arc from $\overrightarrow{c_{i}}$ to $\overrightarrow{c_{i+1}}$ for every $i\in\left\{ 1,2,\ldots,k\right\} $. \end{itemize} Fix $i\in\left\{ 1,2,\ldots,k\right\} $. Then, there is an arc from $\overrightarrow{c_{i}}$ to $\overrightarrow{c_{i+1}}$. 
In other words, there exists some $\left( s_{i},t_{i}\right) \in\mathfrak{M}$ such that $\overrightarrow{c_{i+1}}$ is obtained from $\overrightarrow{c_{i}}$ by an $\left( s_{i},t_{i}\right) $-braid move. Consider this $\left( s_{i} ,t_{i}\right) $. Thus, \begin{equation} \text{the color of the arc from }\overrightarrow{c_{i}}\text{ to }\overrightarrow{c_{i+1}}\text{ is }\left[ \left( s_{i},t_{i}\right) \right] . \label{pf.thm.BCL.a.color} \end{equation} Proposition \ref{prop.Invsles} \textbf{(b)} (applied to $\overrightarrow{c_{i} }$, $\overrightarrow{c_{i+1}}$, $s_{i}$ and $t_{i}$ instead of $\overrightarrow{a}$, $\overrightarrow{b}$, $s$ and $t$) shows that there exists a $q\in W$ such that $\operatorname*{Invs}\overrightarrow{c_{i+1}}$ is obtained from $\operatorname*{Invs}\overrightarrow{c_{i}}$ by replacing a particular factor of the form $q\rho_{s_{i},t_{i}}q^{-1}$ by its reversal. Let us denote this $q$ by $q_{i}$. Set $s_{i}^{\prime}=q_{i}s_{i}q_{i}^{-1}$ and $t_{i}^{\prime}=q_{i}t_{i}q_{i}^{-1}$. Thus, $s_{i}^{\prime}\neq t_{i} ^{\prime}$ (since $s_{i}\neq t_{i}$) and $m_{s_{i}^{\prime},t_{i}^{\prime} }=m_{s_{i},t_{i}}<\infty$ (since $\left( s_{i},t_{i}\right) \in\mathfrak{M} $). Also, the definitions of $s_{i}^{\prime}$ and $t_{i}^{\prime}$ yield $\left( s_{i}^{\prime},t_{i}^{\prime}\right) =\left( q_{i}s_{i}q_{i} ^{-1},q_{i}t_{i}q_{i}^{-1}\right) =q_{i}\underbrace{\left( s_{i} ,t_{i}\right) }_{\in\mathfrak{M}}q_{i}^{-1}\in q_{i}\mathfrak{M}q_{i} ^{-1}\subseteq\mathfrak{N}$. From $s_{i}^{\prime}=q_{i}s_{i}q_{i}^{-1}$ and $t_{i}^{\prime}=q_{i}t_{i}q_{i}^{-1}$, we obtain $\left( s_{i}^{\prime} ,t_{i}^{\prime}\right) \approx\left( s_{i},t_{i}\right) $. 
We shall now show that \begin{equation} \operatorname*{coord}\nolimits_{c_{\mathfrak{N}}}\left( \operatorname*{Has} \overrightarrow{c_{i+1}}-\operatorname*{Has}\overrightarrow{c_{i}}\right) =\left[ \left[ \left( s_{i},t_{i}\right) \right] =c^{\operatorname*{op} }\right] -\left[ \left[ \left( s_{i},t_{i}\right) \right] =c\right] . \label{pf.thm.BCL.a.hasdiff1} \end{equation} \textit{Proof of (\ref{pf.thm.BCL.a.hasdiff1}):} We have the following chain of logical equivalences: \begin{align*} & \ \left( \left( t_{i}^{\prime},s_{i}^{\prime}\right) \in \underbrace{c_{\mathfrak{N}}}_{=\left[ \left[ \left( s,t\right) \right] \right] }\right) \\ & \Longleftrightarrow\ \left( \left( t_{i}^{\prime},s_{i}^{\prime}\right) \in\left[ \left[ \left( s,t\right) \right] \right] \right) \ \Longleftrightarrow\ \left( \left( t_{i}^{\prime},s_{i}^{\prime}\right) \approx\left( s,t\right) \right) \ \Longleftrightarrow\ \left( \left( s_{i}^{\prime},t_{i}^{\prime}\right) \approx\left( t,s\right) \right) \\ & \Longleftrightarrow\ \left( \left( s_{i},t_{i}\right) \approx\left( t,s\right) \right) \ \ \ \ \ \ \ \ \ \ \left( \text{since }\left( s_{i}^{\prime},t_{i}^{\prime}\right) \approx\left( s_{i},t_{i}\right) \right) \\ & \Longleftrightarrow\ \left( \left( s_{i},t_{i}\right) \sim\left( t,s\right) \right) \ \ \ \ \ \ \ \ \ \ \left( \text{since the restriction of the relation }\approx\text{ to }\mathfrak{M}\text{ is }\sim\right) \\ & \Longleftrightarrow\ \left( \left( s_{i},t_{i}\right) \in \underbrace{\left[ \left( t,s\right) \right] }_{=c^{\operatorname*{op}} }\right) \ \Longleftrightarrow\ \left( \left( s_{i},t_{i}\right) \in c^{\operatorname*{op}}\right) \ \Longleftrightarrow\ \left( \left[ \left( s_{i},t_{i}\right) \right] =c^{\operatorname*{op}}\right) . \end{align*} Hence, \begin{equation} \left[ \left( t_{i}^{\prime},s_{i}^{\prime}\right) \in c_{\mathfrak{N} }\right] =\left[ \left[ \left( s_{i},t_{i}\right) \right] =c^{\operatorname*{op}}\right] . 
\label{pf.thm.BCL.a.hasdiff1.eq1} \end{equation} Also, we have the following chain of logical equivalences: \begin{align*} & \ \left( \left( s_{i}^{\prime},t_{i}^{\prime}\right) \in \underbrace{c_{\mathfrak{N}}}_{=\left[ \left[ \left( s,t\right) \right] \right] }\right) \\ & \Longleftrightarrow\ \left( \left( s_{i}^{\prime},t_{i}^{\prime}\right) \in\left[ \left[ \left( s,t\right) \right] \right] \right) \ \Longleftrightarrow\ \left( \left( s_{i}^{\prime},t_{i}^{\prime}\right) \approx\left( s,t\right) \right) \\ & \Longleftrightarrow\ \left( \left( s_{i},t_{i}\right) \approx\left( s,t\right) \right) \ \ \ \ \ \ \ \ \ \ \left( \text{since }\left( s_{i}^{\prime},t_{i}^{\prime}\right) \approx\left( s_{i},t_{i}\right) \right) \\ & \Longleftrightarrow\ \left( \left( s_{i},t_{i}\right) \sim\left( s,t\right) \right) \ \ \ \ \ \ \ \ \ \ \left( \text{since the restriction of the relation }\approx\text{ to }\mathfrak{M}\text{ is }\sim\right) \\ & \Longleftrightarrow\ \left( \left( s_{i},t_{i}\right) \in \underbrace{\left[ \left( s,t\right) \right] }_{=c}\right) \ \Longleftrightarrow\ \left( \left( s_{i},t_{i}\right) \in c\right) \ \Longleftrightarrow\ \left( \left[ \left( s_{i},t_{i}\right) \right] =c\right) . \end{align*} Hence, \begin{equation} \left[ \left( s_{i}^{\prime},t_{i}^{\prime}\right) \in c_{\mathfrak{N} }\right] =\left[ \left[ \left( s_{i},t_{i}\right) \right] =c\right] . \label{pf.thm.BCL.a.hasdiff1.eq2} \end{equation} Applying (\ref{eq.thm.has.a}) to $\overrightarrow{c_{i}}$, $\overrightarrow{c_{i+1}}$, $s_{i}$, $t_{i}$, $q_{i}$, $s_{i}^{\prime}$ and $t_{i}^{\prime}$ instead of $\overrightarrow{a}$, $\overrightarrow{b}$, $s$, $t$, $q$, $s^{\prime}$ and $t^{\prime}$, we obtain $\operatorname*{Has} \overrightarrow{c_{i+1}}=\operatorname*{Has}\overrightarrow{c_{i}}-\left( s_{i}^{\prime},t_{i}^{\prime}\right) +\left( t_{i}^{\prime},s_{i}^{\prime }\right) $. 
In other words, $\operatorname*{Has}\overrightarrow{c_{i+1} }-\operatorname*{Has}\overrightarrow{c_{i}}=\left( t_{i}^{\prime} ,s_{i}^{\prime}\right) -\left( s_{i}^{\prime},t_{i}^{\prime}\right) $. Thus, \begin{align*} & \operatorname*{coord}\nolimits_{c_{\mathfrak{N}}}\left( \operatorname*{Has}\overrightarrow{c_{i+1}}-\operatorname*{Has} \overrightarrow{c_{i}}\right) \\ & =\operatorname*{coord}\nolimits_{c_{\mathfrak{N}}}\left( \left( t_{i}^{\prime},s_{i}^{\prime}\right) -\left( s_{i}^{\prime},t_{i}^{\prime }\right) \right) =\underbrace{\operatorname*{coord} \nolimits_{c_{\mathfrak{N}}}\left( t_{i}^{\prime},s_{i}^{\prime}\right) }_{\substack{=\left[ \left( t_{i}^{\prime},s_{i}^{\prime}\right) \in c_{\mathfrak{N}}\right] \\=\left[ \left[ \left( s_{i},t_{i}\right) \right] =c^{\operatorname*{op}}\right] \\\text{(by (\ref{pf.thm.BCL.a.hasdiff1.eq1}))}}}-\underbrace{\operatorname*{coord} \nolimits_{c_{\mathfrak{N}}}\left( s_{i}^{\prime},t_{i}^{\prime}\right) }_{\substack{=\left[ \left( s_{i}^{\prime},t_{i}^{\prime}\right) \in c_{\mathfrak{N}}\right] \\=\left[ \left[ \left( s_{i},t_{i}\right) \right] =c\right] \\\text{(by (\ref{pf.thm.BCL.a.hasdiff1.eq2}))}}}\\ & =\left[ \left[ \left( s_{i},t_{i}\right) \right] =c^{\operatorname*{op}}\right] -\left[ \left[ \left( s_{i},t_{i}\right) \right] =c\right] . \end{align*} This proves (\ref{pf.thm.BCL.a.hasdiff1}). Now, let us forget that we fixed $i$. Thus, for every $i\in\left\{ 1,2,\ldots,k\right\} $, we have defined $\left( s_{i},t_{i}\right) \in\mathfrak{M}$ satisfying (\ref{pf.thm.BCL.a.color}) and (\ref{pf.thm.BCL.a.hasdiff1}). 
We have $\operatorname*{coord}\nolimits_{c_{\mathfrak{N}}}\left( \operatorname*{Has}\overrightarrow{c_{i+1}}-\operatorname*{Has}\overrightarrow{c_{i}}\right) =\operatorname*{coord}\nolimits_{c_{\mathfrak{N}}}\left( \operatorname*{Has}\overrightarrow{c_{i+1}}\right) -\operatorname*{coord}\nolimits_{c_{\mathfrak{N}}}\left( \operatorname*{Has}\overrightarrow{c_{i}}\right) $ for all $i\in\left\{ 1,2,\ldots,k\right\} $. Hence,
\begin{align*}
& \sumnonlimits\limits_{i=1}^{k}\operatorname*{coord}\nolimits_{c_{\mathfrak{N}}}\left( \operatorname*{Has}\overrightarrow{c_{i+1}}-\operatorname*{Has}\overrightarrow{c_{i}}\right) \\
& =\sumnonlimits\limits_{i=1}^{k}\left( \operatorname*{coord}\nolimits_{c_{\mathfrak{N}}}\left( \operatorname*{Has}\overrightarrow{c_{i+1}}\right) -\operatorname*{coord}\nolimits_{c_{\mathfrak{N}}}\left( \operatorname*{Has}\overrightarrow{c_{i}}\right) \right) =0
\end{align*}
(by the telescope principle, since $\overrightarrow{c_{k+1}}=\overrightarrow{c_{1}}$ and thus the sum telescopes to $\operatorname*{coord}\nolimits_{c_{\mathfrak{N}}}\left( \operatorname*{Has}\overrightarrow{c_{k+1}}\right) -\operatorname*{coord}\nolimits_{c_{\mathfrak{N}}}\left( \operatorname*{Has}\overrightarrow{c_{1}}\right) =0$). Hence,
\begin{align*}
0 & =\sumnonlimits\limits_{i=1}^{k}\operatorname*{coord}\nolimits_{c_{\mathfrak{N}}}\left( \operatorname*{Has}\overrightarrow{c_{i+1}}-\operatorname*{Has}\overrightarrow{c_{i}}\right) \\
& =\sumnonlimits\limits_{i=1}^{k}\left( \left[ \left[ \left( s_{i},t_{i}\right) \right] =c^{\operatorname*{op}}\right] -\left[ \left[ \left( s_{i},t_{i}\right) \right] =c\right] \right) \ \ \ \ \ \ \ \ \ \ \left( \text{by (\ref{pf.thm.BCL.a.hasdiff1})}\right) \\
& =\sumnonlimits\limits_{i=1}^{k}\left[ \left[ \left( s_{i},t_{i}\right) \right] =c^{\operatorname*{op}}\right] -\sumnonlimits\limits_{i=1}^{k}\left[ \left[ \left( s_{i},t_{i}\right) \right] =c\right] .
\end{align*} Comparing this with \begin{align*} & \left( \text{the number of arcs colored }c^{\operatorname*{op}}\text{ appearing in }C\right) \\ & \ \ \ \ \ \ \ \ \ \ -\left( \text{the number of arcs colored }c\text{ appearing in }C\right) \\ & =\sumnonlimits\limits_{i=1}^{k}\left[ \left( \text{the color of the arc from }\overrightarrow{c_{i}}\text{ to }\overrightarrow{c_{i+1}}\right) =c^{\operatorname*{op}}\right] \\ & \ \ \ \ \ \ \ \ \ \ -\sumnonlimits\limits_{i=1}^{k}\left[ \left( \text{the color of the arc from }\overrightarrow{c_{i}}\text{ to }\overrightarrow{c_{i+1}}\right) =c\right] \\ & =\sumnonlimits\limits_{i=1}^{k}\left[ \left[ \left( s_{i},t_{i}\right) \right] =c^{\operatorname*{op}}\right] -\sumnonlimits\limits_{i=1}^{k}\left[ \left[ \left( s_{i},t_{i}\right) \right] =c\right] \ \ \ \ \ \ \ \ \ \ \left( \text{by (\ref{pf.thm.BCL.a.color})}\right) , \end{align*} we obtain \begin{align*} & \left( \text{the number of arcs colored }c^{\operatorname*{op}}\text{ appearing in }C\right) \\ & \ \ \ \ \ \ \ \ \ \ -\left( \text{the number of arcs colored }c\text{ appearing in }C\right) \\ & =0. \end{align*} In other words, the number of arcs colored $c$ appearing in $C$ equals the number of arcs colored $c^{\operatorname*{op}}$ appearing in $C$. This proves Theorem \ref{thm.BCL} \textbf{(a)}. \textbf{(b)} If $c\neq c^{\operatorname*{op}}$, then Theorem \ref{thm.BCL} \textbf{(b)} follows immediately from Theorem \ref{thm.BCL} \textbf{(a)}. Thus, for the rest of this proof, assume that $c=c^{\operatorname*{op}}$ (without loss of generality). We have $\left[ \left( s,t\right) \right] =c=c^{\operatorname*{op} }=\left[ \left( t,s\right) \right] $, so that $\left( t,s\right) \sim\left( s,t\right) $. Hence, $\left( t,s\right) \approx\left( s,t\right) $ (since $\sim$ is the restriction of the relation $\approx$ to $\mathfrak{M}$). Fix some total order on the set $S$. 
Let $d$ be the subset $\left\{ \left( u,v\right) \in c_{\mathfrak{N}}\ \mid\ u<v\right\} $ of $c_{\mathfrak{N}}$. Fix $i\in\left\{ 1,2,\ldots,k\right\} $. We shall now show that \begin{equation} \operatorname*{coord}\nolimits_{d}\left( \operatorname*{Has} \overrightarrow{c_{i+1}}-\operatorname*{Has}\overrightarrow{c_{i}}\right) \equiv\left[ \left[ \left( s_{i},t_{i}\right) \right] =c\right] \operatorname{mod}2. \label{pf.thm.BCL.b.hasdiff1} \end{equation} \textit{Proof of (\ref{pf.thm.BCL.b.hasdiff1}):} Define $q_{i}$, $s_{i}^{\prime}$ and $t_{i}^{\prime}$ as before. We have $s_{i}^{\prime}\neq t_{i}^{\prime}$. Hence, either $s_{i}^{\prime}<t_{i}^{\prime}$ or $t_{i}^{\prime}<s_{i}^{\prime}$. We have the following equivalences: \begin{align} \left( \left( t_{i}^{\prime},s_{i}^{\prime}\right) \in c_{\mathfrak{N} }\right) \ & \Longleftrightarrow\ \left( \left( t_{i}^{\prime} ,s_{i}^{\prime}\right) \in\left[ \left[ \left( s,t\right) \right] \right] \right) \ \ \ \ \ \ \ \ \ \ \left( \text{since }c_{\mathfrak{N} }=\left[ \left[ \left( s,t\right) \right] \right] \right) \nonumber\\ & \Longleftrightarrow\ \left( \left( t_{i}^{\prime},s_{i}^{\prime}\right) \approx\left( s,t\right) \right) \ \Longleftrightarrow\ \left( s_{i}^{\prime},t_{i}^{\prime}\right) \approx\left( t,s\right) \ \Longleftrightarrow\ \left( \left( s_{i},t_{i}\right) \approx\left( s,t\right) \right) \nonumber\\ & \ \ \ \ \ \ \ \ \ \ \ \left( \text{since }\left( s_{i}^{\prime} ,t_{i}^{\prime}\right) \approx\left( s_{i},t_{i}\right) \text{ and }\left( t,s\right) \approx\left( s,t\right) \right) \nonumber\\ & \Longleftrightarrow\ \left( \left( s_{i},t_{i}\right) \sim\left( s,t\right) \right) \label{pf.thm.BCL.b.hasdiff1.pf.equiv1} \end{align} (since the restriction of the relation $\approx$ to $\mathfrak{M}$ is $\sim$) and \begin{align} \left( \left( s_{i}^{\prime},t_{i}^{\prime}\right) \in c_{\mathfrak{N} }\right) \ & \Longleftrightarrow\ \left( \left( s_{i}^{\prime} ,t_{i}^{\prime}\right) \in\left[ \left[ 
\left( s,t\right) \right] \right] \right) \ \ \ \ \ \ \ \ \ \ \left( \text{since }c_{\mathfrak{N} }=\left[ \left[ \left( s,t\right) \right] \right] \right) \nonumber\\ & \Longleftrightarrow\ \left( \left( s_{i}^{\prime},t_{i}^{\prime}\right) \approx\left( s,t\right) \right) \ \Longleftrightarrow\ \left( \left( s_{i},t_{i}\right) \approx\left( s,t\right) \right) \nonumber\\ & \Longleftrightarrow\ \left( \left( s_{i},t_{i}\right) \sim\left( s,t\right) \right) . \label{pf.thm.BCL.b.hasdiff1.pf.equiv2} \end{align} Applying (\ref{eq.thm.has.a}) to $\overrightarrow{c_{i}}$, $\overrightarrow{c_{i+1}}$, $s_{i}$, $t_{i}$, $q_{i}$, $s_{i}^{\prime}$ and $t_{i}^{\prime}$ instead of $\overrightarrow{a}$, $\overrightarrow{b}$, $s$, $t$, $q$, $s^{\prime}$ and $t^{\prime}$, we obtain $\operatorname*{Has} \overrightarrow{c_{i+1}}=\operatorname*{Has}\overrightarrow{c_{i}}-\left( s_{i}^{\prime},t_{i}^{\prime}\right) +\left( t_{i}^{\prime},s_{i}^{\prime }\right) $. In other words, $\operatorname*{Has}\overrightarrow{c_{i+1} }-\operatorname*{Has}\overrightarrow{c_{i}}=\left( t_{i}^{\prime} ,s_{i}^{\prime}\right) -\left( s_{i}^{\prime},t_{i}^{\prime}\right) $. 
Thus, \begin{align*} & \operatorname*{coord}\nolimits_{d}\left( \operatorname*{Has} \overrightarrow{c_{i+1}}-\operatorname*{Has}\overrightarrow{c_{i}}\right) \\ & =\operatorname*{coord}\nolimits_{d}\left( \left( t_{i}^{\prime} ,s_{i}^{\prime}\right) -\left( s_{i}^{\prime},t_{i}^{\prime}\right) \right) =\operatorname*{coord}\nolimits_{d}\left( t_{i}^{\prime} ,s_{i}^{\prime}\right) -\operatorname*{coord}\nolimits_{d}\left( s_{i}^{\prime},t_{i}^{\prime}\right) \\ & =\left[ \left( t_{i}^{\prime},s_{i}^{\prime}\right) \in d\right] -\left[ \left( s_{i}^{\prime},t_{i}^{\prime}\right) \in d\right] \\ & \equiv\left[ \left( t_{i}^{\prime},s_{i}^{\prime}\right) \in d\right] +\left[ \left( s_{i}^{\prime},t_{i}^{\prime}\right) \in d\right] \\ & =\left[ \left( t_{i}^{\prime},s_{i}^{\prime}\right) \in c_{\mathfrak{N} }\text{ and }t_{i}^{\prime}<s_{i}^{\prime}\right] +\left[ \left( s_{i}^{\prime},t_{i}^{\prime}\right) \in c_{\mathfrak{N}}\text{ and } s_{i}^{\prime}<t_{i}^{\prime}\right] \\ & \ \ \ \ \ \ \ \ \ \ \left( \text{since a pair }\left( u,v\right) \text{ belongs to }d\text{ if and only if }\left( u,v\right) \in c_{\mathfrak{N} }\text{ and }u<v\right) \\ & =\left[ \left( s_{i},t_{i}\right) \sim\left( s,t\right) \text{ and }t_{i}^{\prime}<s_{i}^{\prime}\right] +\left[ \left( s_{i},t_{i}\right) \sim\left( s,t\right) \text{ and }s_{i}^{\prime}<t_{i}^{\prime}\right] \\ & \ \ \ \ \ \ \ \ \ \ \left( \text{by the equivalences (\ref{pf.thm.BCL.b.hasdiff1.pf.equiv1}) and (\ref{pf.thm.BCL.b.hasdiff1.pf.equiv2})}\right) \\ & =\left[ \left( s_{i},t_{i}\right) \sim\left( s,t\right) \right] \ \ \ \ \ \ \ \ \ \ \left( \text{because either }s_{i}^{\prime}<t_{i} ^{\prime}\text{ or }t_{i}^{\prime}<s_{i}^{\prime}\right) \\ & =\left[ \left[ \left( s_{i},t_{i}\right) \right] =\left[ \left( s,t\right) \right] \right] =\left[ \left[ \left( s_{i},t_{i}\right) \right] =c\right] \operatorname{mod}2\ \ \ \ \ \ \ \ \ \ \left( \text{since }\left[ \left( s,t\right) \right] =c\right) . 
\end{align*} This proves (\ref{pf.thm.BCL.b.hasdiff1}). Now, $\operatorname*{coord}\nolimits_{d}\left( \operatorname*{Has} \overrightarrow{c_{i+1}}-\operatorname*{Has}\overrightarrow{c_{i}}\right) =\operatorname*{coord}\nolimits_{d}\left( \operatorname*{Has} \overrightarrow{c_{i+1}}\right) -\operatorname*{coord}\nolimits_{d}\left( \operatorname*{Has}\overrightarrow{c_{i}}\right) $ for each $i\in\left\{ 1,2,\ldots,k\right\} $; hence, \[ \sumnonlimits\limits_{i=1}^{k}\operatorname*{coord}\nolimits_{d}\left( \operatorname*{Has} \overrightarrow{c_{i+1}}-\operatorname*{Has}\overrightarrow{c_{i}}\right) =\sumnonlimits\limits_{i=1}^{k}\left( \operatorname*{coord}\nolimits_{d}\left( \operatorname*{Has}\overrightarrow{c_{i+1}}\right) -\operatorname*{coord} \nolimits_{d}\left( \operatorname*{Has}\overrightarrow{c_{i}}\right) \right) =0 \] (by the telescope principle). Hence, \begin{align*} 0 & =\sumnonlimits\limits_{i=1}^{k}\operatorname*{coord}\nolimits_{d}\left( \operatorname*{Has}\overrightarrow{c_{i+1}}-\operatorname*{Has} \overrightarrow{c_{i}}\right) \\ & \equiv\sumnonlimits\limits_{i=1}^{k}\left[ \left[ \left( s_{i},t_{i}\right) \right] =c\right] \ \ \ \ \ \ \ \ \ \ \left( \text{by (\ref{pf.thm.BCL.b.hasdiff1} )}\right) \\ & =\sumnonlimits\limits_{i=1}^{k}\left[ \left( \text{the color of the arc from }\overrightarrow{c_{i}}\text{ to }\overrightarrow{c_{i+1}}\right) =c\right] \ \ \ \ \ \ \ \ \ \ \left( \text{by (\ref{pf.thm.BCL.a.color})}\right) \\ & =\left( \text{the number of arcs colored }c\text{ appearing in }C\right) \operatorname{mod}2. \end{align*} Thus, the number of arcs colored $c$ appearing in $C$ is even. In other words, the number of arcs whose color belongs to $\left\{ c\right\} $ appearing in $C$ is even. In other words, the number of arcs whose color belongs to $\left\{ c,c^{\operatorname*{op}}\right\} $ appearing in $C$ is even (since $\left\{ c,\underbrace{c^{\operatorname*{op}}}_{=c}\right\} =\left\{ c,c\right\} =\left\{ c\right\} $). 
This proves Theorem \ref{thm.BCL} \textbf{(b)}. \end{proof} \section{Open questions} Theorem \ref{thm.BCL} is a statement about reduced expressions. As with all such statements, one can wonder whether a generalization to \textquotedblleft non-reduced\textquotedblright\ expressions would still be true. If $w$ is an element of $W$, then an \textit{expression} for $w$ means a $k$-tuple $\left( s_{1},s_{2},\ldots,s_{k}\right) $ of elements of $S$ such that $w=s_{1} s_{2}\cdots s_{k}$. Definition \ref{def.braid} can be applied verbatim to arbitrary expressions, leading to the concept of an $\left( s,t\right) $-braid move. Finally, for every $w\in W$, we define a directed graph $\mathcal{E}\left( w\right) $ in the same way as we defined $\mathcal{R} \left( w\right) $ in Definition \ref{def.R}, but with the word \textquotedblleft reduced\textquotedblright\ removed everywhere. This directed graph $\mathcal{E}\left( w\right) $ will be infinite (in general) and consist of many connected components (one of which is $\mathcal{R}\left( w\right) $), but we can still inquire about its cycles. We conjecture the following generalization of Theorem \ref{thm.BCL}: \begin{conjecture} \label{conj.E(w)}Let $w\in W$. Theorem \ref{thm.BCL} is still valid if we replace $\mathcal{R}\left( w\right) $ by $\mathcal{E}\left( w\right) $. \end{conjecture} A further, slightly lateral, generalization concerns a kind of \textquotedblleft spin extension\textquotedblright\ of a Coxeter group: \begin{conjecture} \label{conj.spin}For every $\left( s,t\right) \in\mathfrak{M}$, let $c_{s,t}$ be an element of $\left\{ 1,-1\right\} $. Assume that $c_{s,t}=c_{s^{\prime},t^{\prime}}$ for any two elements $\left( s,t\right) $ and $\left( s^{\prime},t^{\prime}\right) $ of $\mathfrak{M}$ satisfying $\left( s,t\right) \sim\left( s^{\prime},t^{\prime}\right) $. Assume furthermore that $c_{s,t}=c_{t,s}$ for each $\left( s,t\right) \in\mathfrak{M}$. 
Let $W^{\prime}$ be the group with the following generators and relations:
\textit{Generators:} the elements $s\in S$ and an extra generator $q$.
\textit{Relations:}
\begin{align*}
s^{2} & =1\ \ \ \ \ \ \ \ \ \ \text{for every }s\in S;\\
q^{2} & =1;\\
qs & =sq\ \ \ \ \ \ \ \ \ \ \text{for every }s\in S;\\
\left( st\right) ^{m_{s,t}} & =1\ \ \ \ \ \ \ \ \ \ \text{for every }\left( s,t\right) \in\mathfrak{M}\text{ satisfying }c_{s,t}=1;\\
\left( st\right) ^{m_{s,t}} & =q\ \ \ \ \ \ \ \ \ \ \text{for every }\left( s,t\right) \in\mathfrak{M}\text{ satisfying }c_{s,t}=-1.
\end{align*}
There is clearly a surjective group homomorphism $\pi:W^{\prime}\rightarrow W$ sending each $s\in S$ to $s$, and sending $q$ to $1$. There is also an injective group homomorphism $\iota:\mathbb{Z}/2\mathbb{Z}\rightarrow W^{\prime}$ which sends the generator of $\mathbb{Z}/2\mathbb{Z}$ to $q$. Then, the sequence
\begin{equation}
1\longrightarrow\mathbb{Z}/2\mathbb{Z}\overset{\iota}{\longrightarrow}W^{\prime}\overset{\pi}{\longrightarrow}W\longrightarrow1
\label{eq.conj.spin.seq}
\end{equation}
is exact. Equivalently, $\left\vert \operatorname*{Ker}\pi\right\vert =2$.
\end{conjecture}
(Note that exactness of the sequence (\ref{eq.conj.spin.seq}) at $W^{\prime}$ and at $W$ is easy.)
If Conjecture \ref{conj.spin} holds, then so does Conjecture \ref{conj.E(w)} \textbf{(b)} (that is, Theorem \ref{thm.BCL} \textbf{(b)} holds with $\mathcal{R}\left( w\right) $ replaced by $\mathcal{E}\left( w\right) $). Indeed, assume Conjecture \ref{conj.spin} to hold. Let $c\in\mathfrak{M}/\sim$ be an equivalence class. For any $\left( u,v\right) \in\mathfrak{M}$, define
\[
c_{u,v}=
\begin{cases}
-1, & \text{if }\left( u,v\right) \in c\text{ or }\left( v,u\right) \in c;\\
1, & \text{otherwise}
\end{cases}
.
\]
Thus, a group $W^{\prime}$ is defined. Pick any section $\mathbf{s}:W\rightarrow W^{\prime}$ (in the category of sets) of the projection $\pi:W^{\prime}\rightarrow W$.
If $w\in W$, and if $\left( s_{1},s_{2} ,\ldots,s_{k}\right) $ is an expression of $w$, then the product $s_{1} s_{2}\cdots s_{k}$ formed in $W^{\prime}$ will either be $\mathbf{s}\left( w\right) $ or $q\mathbf{s}\left( w\right) $; and these latter two values are distinct (by Conjecture \ref{conj.spin}). We can then define the \textit{sign} of the expression $\left( s_{1},s_{2},\ldots,s_{k}\right) $ to be $ \begin{cases} 1, & \text{if }s_{1}s_{2}\cdots s_{k}=\mathbf{s}\left( w\right) ;\\ -1, & \text{if }s_{1}s_{2}\cdots s_{k}=q\mathbf{s}\left( w\right) \end{cases} \in\left\{ 1,-1\right\} $. The sign of an expression switches when we apply a braid move whose arc's color belongs to $\left\{ c,c^{\operatorname*{op} }\right\} $, but stays unchanged when we apply a braid move of any other color. Theorem \ref{thm.BCL} \textbf{(b)} then follows by a simple parity argument. The construction of $W^{\prime}$ in Conjecture \ref{conj.spin} generalizes the construction of one of the two \textit{spin symmetric groups} (up to a substitution). We suspect that Conjecture \ref{conj.spin} could be proven by constructing a \textquotedblleft regular representation\textquotedblright, and this would then yield an alternative proof of Theorem \ref{thm.BCL} \textbf{(b)}. \end{document}
\begin{document} \title{Experimental Comparison of PC-Trees and PQ-Trees} \begin{abstract} PQ-trees and PC-trees are data structures that represent sets of linear and circular orders, respectively, subject to constraints that specific subsets of elements have to be consecutive. While equivalent to each other, PC-trees are conceptually much simpler than PQ-trees; updating a PC-trees so that a set of elements becomes consecutive requires only a single operation, whereas PQ-trees use an update procedure that is described in terms of nine transformation templates that have to be recursively matched and applied. Despite these theoretical advantages, to date no practical PC-tree implementation is available. This might be due to the original description by Hsu and McConnell~\cite{Hsu2003} in some places only sketching the details of the implementation. In this paper, we describe two alternative implementations of PC-trees. For the first one, we follow the approach by Hsu and McConnell, filling in the necessary details and also proposing improvements on the original algorithm. For the second one, we use a different technique for efficiently representing the tree using a Union-Find data structure. In an extensive experimental evaluation we compare our implementations to a variety of other implementations of PQ-trees that are available on the web as part of academic and other software libraries. Our results show that both PC-tree implementations beat their closest fully correct competitor, the PQ-tree implementation from the OGDF library~\cite{Chimani2014,Leipert1997}, by a factor of 2 to 4, showing that PC-trees are not only conceptually simpler but also fast in practice. Moreover, we find the Union-Find-based implementation, while having a slightly worse asymptotic runtime, to be twice as fast as the one based on the description by Hsu and McConnell. 
\end{abstract} \section{Introduction} PQ-trees represent linear orders of a ground set subject to constraints that require specific subsets of elements to be consecutive. Similarly, PC-trees do the same for circular orders subject to consecutivity constraints. PQ-trees were developed by Booth and Lueker~\cite{Booth1976} to solve the consecutive ones problem, which asks whether the columns of a Boolean matrix can be permuted such that the 1s in each row are consecutive. PC-trees are a more recent generalization introduced by Shih and Hsu~\cite{Shih1999} to solve the circular consecutive ones problem, where the 1s in each row only have to be circularly consecutive. Though PQ-trees represent linear orders and PC-trees represent circular orders, Haeupler and Tarjan~\cite{Haeupler2008} show that in fact PC-trees and PQ-trees are equivalent, i.e., one can use one of them to implement the other without affecting the asymptotic running time. The main difference between PQ-trees and PC-trees lies in the update procedure. The update procedure takes as input a PQ-tree (a PC-tree) $T$ and a subset $U$ of its leaves and produces a new PQ-tree (PC-tree) $T'$ that represents exactly the linear orders (circular orders) represented by $T$ where the leaves in $U$ appear consecutively. The update procedure for PC-trees consists only of a single operation that is applied independently of the structure of the tree. In contrast, the update of the PQ-tree is described in terms of a set of nine template transformations that have to be recursively matched and applied. PQ-trees have numerous applications, e.g., in planarity testing~\cite{Booth1976, Shih1999}, recognition of interval graphs~\cite{Booth1976} and genome sequencing~\cite{Benzer1959}. Nevertheless, PC-trees have been adopted more widely, e.g., for constrained planarity testing problems~\cite{Blaesius2016,br-pclp-17} due to their simpler update procedure. 
Despite their wide applications and frequent use in theoretical algorithms, few PQ-tree implementations and even fewer PC-tree implementations are available. \Cref{tab:eval-impls} shows an overview of all PC/PQ-tree implementations that we are aware of, though not all of them are working. In this paper we describe the first correct and generic implementations of PC-trees. \Cref{sec:prelim} contains an overview of the update procedure for applying a new restriction to a PC-tree. In \Cref{sec:impl}, we describe the main challenge when implementing PC-trees and how our two implementations take different approaches to solving it. In \Cref{ch:evaluation}, we present an extensive experimental evaluation, where we compare the performance of our implementations with the implementations of PC-trees and PQ-trees from \Cref{tab:eval-impls}. Our experiments show that, concerning running time, PC-trees following Hsu and McConnell's original approach beat their closest competitor, the PQ-tree implementation from the OGDF library~\cite{Chimani2014}, by roughly a factor of 2. Our second implementation using Union-Find is another 50\% faster than this first one, thus beating the OGDF implementation by a factor of up to 4.
\section{The PC-tree}\label{sec:prelim}
A \emph{PC-tree} $T$ is a tree without degree-2 vertices whose inner nodes are partitioned into \emph{P-nodes} and \emph{C-nodes}. Edges incident to C-nodes have a circular order that is fixed up to reversal, whereas edges incident to P-nodes can be reordered arbitrarily. Traversing the tree according to fixed orders around the inner nodes determines a circular ordering of the leaves $L$ of the tree. Any circular permutation of $L$ that can be obtained from $T$ after arbitrarily reordering the edges around P-nodes and reversing orders around C-nodes is a \emph{valid permutation} of $L$. In this way a PC-tree represents a set of circular permutations of $L$.
When applying a \emph{restriction} $R\subseteq L$ to $T$, we seek a new tree that represents exactly the valid permutations of $L$ where the leaves in $R$ appear consecutively. We call a restriction \emph{impossible} if there is no valid permutation of $L$ where the leaves in $R$ are consecutive. Thus, restriction $R$ is possible if and only if the edges incident to P-nodes can be rearranged and orders of edges incident to C-nodes can be reversed in such a way that all leaves in $R$ are consecutive. Updating a PC-tree to enforce the new restriction can thus be done by identifying and adapting the nodes that decide about the consecutivity of the elements of $R$ and then changing the tree to ensure that this consecutivity can no longer be broken. Let a leaf $x\in L$ be \emph{full} if $x \in R$ and \emph{empty} otherwise. We call an edge \emph{terminal} if the two subtrees separated by the edge both contain at least one empty and at least one full leaf. Exactly the endpoints of all terminal edges need to be ``synchronized'' to ensure that all full leaves are consecutive. Hsu and McConnell~\cite{Hsu2003,Hsu2004} show that $R$ is possible if and only if the terminal edges form a path and all nodes of this path can be flipped so that all full leaves are on one side and all empty leaves are on the other. This path is called the \emph{terminal path}, the two nodes at the ends of the terminal path are the \emph{terminal nodes}. Observe that each node in $T$ that is adjacent to two subtrees of which one only contains full leaves and the other contains only empty leaves is contained in the terminal path. Figure~\ref{fig:terminalPathPrelim} illustrates the terminal path. 
\begin{figure}
\caption{\textbf{(a)} An example of applying a restriction to a PC-tree.}% NOTE(review): the caption text was truncated in the source after ``\textbf{(a)}'' -- restore the full original caption
\label{fig:updatePrelim}
\end{figure}
When updating $T$ in order to apply the restriction, every node on the terminal path is split into two nodes, one of which holds all edges to neighbors of the original node whose subtree has only full leaves, the other holds all edges to empty neighbors, while terminal edges are deleted. A new central C-node $c$ is created that is adjacent to all the split nodes in such a way that it preserves the order of the neighbors around the terminal path. Contracting all edges to the split C-nodes incident to $c$ and contracting all nodes with degree two results in the updated tree that represents the new restriction~\cite{Hsu2003,Hsu2004}. \Cref{fig:updatePrelim} shows an example of this update, while \Cref{fig:updateStep} details changes made to the terminal path.
\begin{figure}
\caption{Left: The terminal path with all full subtrees shown in black on top and all empty subtrees shown in white on the bottom. Middle: The updated PC-tree, where all terminal edges were deleted, all nodes on the terminal path were split into a full and an empty half, and all new nodes were connected to a new C-node $c$. Right: The PC-tree after contracting all new C-nodes and all degree-2 P-nodes into $c$.}
\label{fig:updateStep}
\end{figure}
It remains to efficiently find the terminal edges, and thus the subtrees with mixed full and empty leaves. To do so, Hsu and McConnell first choose an arbitrary node of the tree as root. They also assign labels to the inner nodes of the tree, marking an inner node (and conceptually the subtree below it) \emph{partial} if at least one of its neighbors (i.e.\ children or parent) is full, \emph{full} if all its neighbors except one (which usually is the parent) are full, and \emph{empty} otherwise. Then, an edge is terminal if and only if it lies on a path between two partial nodes~\cite{Hsu2003,Hsu2004}.
Assigning the labels and subsequently finding the terminal edges can be done by two bottom-up traversals of the tree. We summarize these steps in the following, more fine-granular description of Hsu and McConnell's algorithm for updating the PC-tree \cite[Algorithm 32.2]{Hsu2004}: \subparagraph{Algorithm for Applying Restrictions.}\label{main-alg} To add a new restriction $R$ to a PC-tree $T$: \begin{enumerate} \item\label[step]{step:alg-label} Label all partial and full nodes by searching the tree bottom-up from all full leaves. \item\label[step]{step:alg-tp} Find the terminal path by walking the tree upwards from all partial nodes in parallel. \item\label[step]{step:alg-flip} Perform flips of C-nodes and modify the cyclic order of edges incident to P-nodes so that all full leaves lie on one side of the path. \item\label[step]{step:alg-split} Split each node on the path into two nodes, one incident to all edges to full leaves and one incident to all edges to empty leaves. \item\label[step]{step:alg-delete} Delete the edges of the path and replace them with a new C-node $c$, adjacent to all split nodes, whose cyclic order preserves the order of the nodes on this path. \item\label[step]{step:alg-contract} Contract all edges from $c$ to adjacent C-nodes, and contract any node that has only two neighbors. \end{enumerate} \section{Our Implementations}\label{sec:impl} The main challenge posed to the data structure for representing the PC-tree is that, in \cref{step:alg-contract}, it needs to be able to merge arbitrarily large C-nodes in constant time for the overall algorithm to run in linear time. This means that, whenever C-nodes are merged, updating the pointer to a persistent C-node object on every incident edge would be too expensive. 
Hsu and McConnell (see \cite[Definition 32.1]{Hsu2004}) solve this problem by using C-nodes that, instead of having a permanent node object, are only represented by the doubly-linked list of their incident half-edges, which we call \emph{arcs}. This complicates various details of the implementation, like finding the parent pointer of a C-node, which are only superficially covered in the initial work of Hsu and McConnell~\cite{Hsu2003}. These issues are in part remedied by the so called \emph{block-spanning pointers} introduced in the later published book chapter~\cite{Hsu2004}, which are related to the pointer borrowing strategy introduced by Booth and Lueker~\cite{Booth1976}. These block-spanning pointers link the first and last arc of a consecutive block of full arcs (i.e. the arcs to full neighbors) around a C-node and can be accompanied by temporary C-node objects, see the blue dashed arcs in \Cref{fig:cNodeTPCase-fullI,fig:cNodeTPCase-fullA,fig:fullBlocks} in the Appendix for an example. Whenever a neighbor of a C-node becomes full, either a new block is created for the corresponding arc of the C-node (\Cref{fig:fullBlocksAppend} left), an adjacent block grows by one arc (\Cref{fig:fullBlocksAppend} right), or the two blocks that now became adjacent are merged (\Cref{fig:fullBlocksMerge}). Using this data structure, Hsu and McConnell show that the addition of a single new restriction $R$ takes $O(p + |R|)$ time, where $p$ is the length of the terminal path, and that applying restrictions $R_1,\ldots,R_k$ takes $\Theta(|L|+\sum_{i=1}^k|R_i|)$ time~\cite{Hsu2003,Hsu2004}. Especially for \cref{step:alg-label,step:alg-tp}, they only sketch the details of the implementation, making it hard to directly put it into practice. In \Cref{sec:algo}, we fill in the necessary details for these steps and also refine their runtime analysis, showing that \cref{step:alg-label} can be done in $O(|R|)$ time and \cref{step:alg-tp} can be done in $O(p)$ time. 
Using the original procedures by Hsu and McConnell, \cref{step:alg-flip,step:alg-split} can be done in $O(|R|)$ time and \cref{step:alg-delete,step:alg-contract} can be done in $O(p)$ time. For our first implementation, which we call HsuPC, we directly implemented these steps in C++, using the data structure without permanent C-node objects as described by Hsu and McConnell. During the evaluation, we realized that traversals of the tree are expensive. This is plausible, as they involve a lot of pointer-dereferencing to memory segments that are not necessarily close-by, leading to cache misses. To avoid additional traversals for clean-up purposes, we store information that is valid only during the update procedure with a timestamp. Furthermore, we found that keeping separate objects for arcs and nodes and the steps needed to work around the missing C-node objects pose a non-negligible overhead. To remove this overhead, we created a second version of our implementation, which we call UFPC, using a Union-Find tree for representing C-node objects: Every C-node is represented by an entry in the Union-Find tree and every incident child edge stores a reference to this entry. Whenever two C-nodes are merged, we apply \texttt{union} to both entries and only keep the object of the entry that survives. This leads to every lookup of a parent C-node object taking amortized $O(\alpha(|L|))$ time, where $\alpha$ is the inverse Ackermann function. Although this makes the overall runtime super-linear, the experimental evaluation following in the next section shows that this actually improves the performance in practice. As a second change, the UFPC no longer requires separate arc and node objects, allowing us to use a doubly-linked tree consisting entirely of nodes that store pointers to their parent node, left and right sibling node, and first and last child node. Edges are represented implicitly by the child node whose parent is the other end of the edge. 
Note that of the five stored pointers, a lookup in the Union-Find data structure is only needed for resolving the parent of a node. In \Cref{sec:algo}, we describe our algorithmic improvements and differences of both implementations. We use the Union-Find data structure from the OGDF~\cite{Chimani2014} and plan to merge our UFPC implementation into the OGDF. Furthermore, both implementations should also be usable stand-alone with a custom Union-Find implementation. The source code for both implementations, our evaluation harness and all test data are available on GitHub (see \Cref{tab:eval-impls}). \section{Evaluation} \label{ch:evaluation} In this section, we experimentally evaluate our PC-tree implementations by comparing the running time for applying a restriction with that of various PQ- and PC-tree implementations that are publicly available. In the following we describe our method for generating test cases, our experimental setup and report our results. \subsection{Test Data Generation} To generate PQ-trees and restrictions on them, we make use of the planarity test by Booth and Lueker~\cite{Booth1976}, one of the initial applications of PQ-trees. This test incrementally processes vertices one by one according to an $st$-ordering. Running the planarity test on a graph with $n$ vertices applies $n-1$ restrictions to PQ-trees of various sizes. Since not all implementations provide the additional modification operations necessary to implement the planarity test, we rather export, for each step of the planarity test, the current PQ-tree and the restriction that is applied to it as one instance of our test set. We note that the use of $st$-orderings ensures that the instances do not require the ability of the PC-tree to represent circular permutations, making them good test cases for comparing PC-trees and PQ-trees. 
In this way, we create one test set \texttt{SER-POS}\xspace consisting of only PQ-trees with possible restrictions by exporting the instances from running the planarity test on a randomly generated biconnected planar graph for each vertex count $n$ from $1000$ to $20,000$ in steps of $1000$ and each edge count $m \in \{2n,3n-6\}$. To avoid clutter, to remedy the tendency of the planarity test to produce restrictions with very few leaves, and to avoid bias from trivial optimizations such as filtering trivial restrictions with $|R| \in \{1,|L|-1,|L|\}$, which is present in some of the implementations, we filter test instances where $|R|$ lies outside the interval $[5,|L|-2]$. Altogether, this test set contains $199,831$ instances, whose distribution with regards to tree and restriction size is shown in \Cref{fig:distplot_full_leaves_tree_size_possible}. To guard against overly permissive implementations, we also create a small test set \texttt{SER-IMP}\xspace of impossible restrictions. It is generated in the same way, by adding randomly chosen edges to the graphs from above until they become non-planar. In this case the planarity test fails with an impossible restriction at some point; we include these $3,800$ impossible restrictions in the set, see \Cref{fig:distplot_full_leaves_tree_size_impossible}. As most of the available implementations have no simple means to store and load a PQ-/PC-tree, we serialize each test instance as a set of restrictions that create the tree, together with the additional new restriction. When running a test case, we then first apply all the restrictions to reobtain the tree, and then measure the time to apply the new restriction from the test case. The prefix \texttt{SER-} in the name of both sets emphasizes this serialization. To be able to conduct a more detailed comparison of the most promising implementations, we also generate a third test set with much larger instances. 
As deserializing a PC- or PQ-tree is very time-consuming, we directly use the respective implementations in the planarity test by Booth and Lueker~\cite{Booth1976}, thus calling the set \texttt{DIR-PLAN}\xspace. We generated 10 random planar graphs with $n$ vertices and $m$ edges for each $n$ ranging from $100,000$ to $1,000,000$ in steps of $100,000$ and each $m\in\{2n, 3n-6\}$, yielding 200 graphs in total. The planarity test then yields one possible restriction per node. As we only want to test big restrictions, we filter out restrictions with fewer than 25 full leaves, resulting in \texttt{DIR-PLAN}\xspace containing $564,300$ instances.
\begin{figure}
\caption{Distribution of tree and restriction size for the data sets \textbf{(a)} \texttt{SER-POS}\xspace and \textbf{(b)} \texttt{SER-IMP}\xspace.}% NOTE(review): caption reconstructed -- the source text was truncated after ``(a)''; confirm against the original figure caption
\label{fig:distplots}
\end{figure}
\subsection{Experimental Setup}
% NOTE(review): the source contained \newcommand{PQR-Tree\footnotemark[1]}{PQR-Tree\footnotemark[1]}, which is malformed -- the first argument of \newcommand must be a control sequence
\newcommand{\PQRTreeFn}{PQR-Tree\footnotemark[1]}
\begin{sidewaystable}[p]
\centering
\begin{tabular}{l|c|l|c|c|r|p{7cm}}
\textbf{Name} & \textbf{Type} & \textbf{Context} & \textbf{Language} & \textbf{Correct} & \textbf{Errors} & \textbf{URL} \\ \hline
HsuPC & PC-Tree & our impl., based on \cite{Hsu2004} & C++ & \checkmark & 0 & \url{https://github.com/N-Coder/pc-tree/tree/HsuPCSubmodule} \\ \hline
UFPC & PC-Tree & our impl. using Union-Find & C++ & \checkmark & 0 & \url{https://github.com/N-Coder/pc-tree} \\ \hline
Luk\&Zhou & PC-Tree & student course project & C++ & $-$ & $-$ & \url{https://github.com/kwmichaelluk/pc-tree} \\ \hline
Hsu \cite{Hsu2003a} & PC-Tree & planarity test prototype & C++ & n.a. & $-$ & \url{http://qa.iis.sinica.edu.tw/graphtheory} \\ \hline
Noma \cite{Boyer2004} & PC-Tree & planarity test evaluation & C++ & n.a.
& $-$ & \url{https://www.ime.usp.br/~noma/sh} \\ \hline OGDF \cite{Leipert1997} & PQ-Tree & planarity testing & C++ & \checkmark & 0 & \url{https://ogdf.github.io} \\ \hline Gregable & PQ-Tree & biclustering & C++ & \checkmark & 0 & \url{https://gregable.com/2008/11/pq-tree-algorithm.html} \\ \hline BiVoC \cite{Grothaus2006} & PQ-Tree & automatic layout of biclusters & C++ & \xmark & 71 & \url{https://bioinformatics.cs.vt.edu/~murali/papers/BiVoC} \\ \hline Reisle & PQ-Tree & student project & C++ & \xmark & 236 & \url{https://github.com/creisle/pq-trees} \\ \hline GraphSet \cite{EstrellaBalderrama2009} & PQ-Tree & visual graph editor & C++ & \xmark & 580 & \url{http://graphset.cs.arizona.edu} \\ \hline Zanetti \cite{Zanetti2012} & PQR-Tree\footnotemark[1] & extension of PQ-Trees & Java & \xmark & 454 & \url{https://github.com/jppzanetti/PQRTree} \\ \hline CppZanetti & PQR-Tree\footnotemark[1] & our C++ conversion of Zanetti & C++ & \xmark & 454 & \url{https://github.com/N-Coder/pc-tree#installation} \\ \hline JGraphEd \cite{Harris2004} & PQ-Tree & visual graph editor & Java & \xmark & 11 & \url{https://www3.cs.stonybrook.edu/~algorith/implement/jgraphed/implement.shtml} \\ \hline GTea \cite{Cregten2017} & PQ-Tree & visual graph theory tool & Java & $-$ & $-$ & \url{https://github.com/rostam/GTea} \\ \hline TryAlgo & PQ-Tree & consecutive-ones testing & Python & $-$ & $-$ & \url{https://tryalgo.org/en/data structures/2017/12/15/pq-trees} \\ \hline SageMath & PQ-Tree & interval graph detection & Python & \checkmark & 0 & \url{https://doc.sagemath.org/html/en/reference/graphs/sage/graphs/pq_trees.html} \\ \end{tabular} \caption{ Implementations considered for the evaluation. Implementations that are entirely unusable as they are incomplete or crash/produce incorrect results on almost all inputs (marked with $-$) and those where no stand-alone PC-/PQ-tree implementation could be extracted (marked with n.a.) could not be evaluated. 
Correct implementations are marked with~\checkmark and implementations that are functional, but do not always produce correct results are marked with \xmark. These two categories are included in our experimental evaluation. The last column shows how many of $203,630$ restrictions in the sets \texttt{SER-POS}\xspace and \texttt{SER-IMP}\xspace failed. } \label{tab:eval-impls} \justify \noindent\rule{5cm}{0.4pt} \noindent \begin{minipage}[t]{.013\textwidth} \footnotemark[1] \end{minipage}\begin{minipage}[t]{.987\textwidth} \footnotesize PQR-Trees are a variant of PQ-Trees that can also represent impossible restrictions, replacing any node that would make a restriction impossible by an R-node (again allowing arbitrary permutation). To make the implementations comparable, we abort early whenever an impossible restriction is detected and an R-node would be generated. \end{minipage} \end{sidewaystable} \stepcounter{footnote} \Cref{tab:eval-impls} gives an overview of all implementations we are aware of, although not all implementations could be considered for the evaluation. The three existing implementations of PC-trees we found are incomplete and unusable (Luk\&Zhou) or tightly intertwined with a planarity test in such a way that we were not able to extract a generic implementation of PC-trees (Hsu, Noma). We further exclude two PQ-tree implementations as they either crash or produce incorrect results on almost all inputs (GTea) or have an excessively poor running time (TryAlgo). Among the remaining PQ-tree implementations only three correctly handle all our test cases (OGDF, Gregable, SageMath). Several other implementations have smaller correctness issues: After applying a fix to prevent segmentation faults in a large number of cases for BiVoC, the remaining implementations crash (BiVoC, GraphSet, Zanetti) and/or produce incorrect results (Reisle, JGraphEd, Zanetti) on a small fraction of our tests; compare the last column of \Cref{tab:eval-impls}. 
We nevertheless include them in our evaluation. We changed the data structure responsible for mapping the input to the leaves of the tree for BiVoC and Gregable from \texttt{std::map} to \texttt{std::vector} to make them competitive. Moreover, BiVoC, Gregable and GraphSet use a rather expensive cleanup step that has to be executed after each update operation. As this could probably largely be avoided by the use of timestamps, we do not include the cleanup time in their reported running times. For SageMath the initial implementation turned out to be quadratic, which we improved to linear by removing unnecessary recursion. As Zanetti turned out to be a close competitor to our implementation in terms of running time, we converted the original Java implementation to C++ to allow a fair comparison. This decreased the runtime by one third while still producing the exact same results. All other non-C++ implementations were much slower or had other issues, making a direct comparison of their running times within the same language environment as our implementations unnecessary. Further details on the implementations can be found in~\Cref{sec:impl-details}. Each experiment was run on a single core of an Intel Xeon E5-2690v2 CPU (3.00 GHz, 10 Cores) with 64 GiB of RAM, running Linux Kernel version 4.19. Implementations in C++ were compiled with GCC 8.3.0 and optimization \texttt{-O3 -march=native -mtune=native}. Java implementations were executed on OpenJDK 64-Bit Server VM 11.0.9.1 and Python implementations were run with CPython 3.7.3. For the Java implementations we ran each experiment several times, only measuring the last one to remove startup effects and to facilitate optimization by the JIT compiler. We used OGDF version 2020.02 (Catalpa) to generate the test graphs.
\subsection{Results} \begin{figure} \caption{Runtime for \texttt{SER-POS} \label{fig:relplot_possible} \end{figure} \begin{figure} \caption{ \textbf{(a)} \end{figure} \begin{figure} \caption{ Runtime for \texttt{SER-IMP} \label{fig:relplot_impossible} \end{figure} Our experiments show that SageMath, even with the improvements mentioned above, is on average 30 to 100 times slower than all other implementations.\footnote{Part of this might be due to the overhead of running the code with CPython. As the following analysis shows, SageMath also has other issues, allowing us to safely exclude it.} For the sake of readability, we scale our plots to focus on the other implementations. As the main application of PC-/PQ-trees is applying possible restrictions, we first evaluate on the dataset \texttt{SER-POS}\xspace. \Cref{fig:relplot_possible} shows the runtime for individual restrictions based on the size of the restriction (i.e. the number of full leaves) and the overall size of the tree. \Cref{fig:relplot_time_full_leaves_possible} clearly shows that for all implementations the runtime is linear in the size of the restriction. \Cref{fig:relplot_time_tree_size_possible} suggests that the runtime of Reisle and GraphSet does not solely depend on the restriction size, but also on the size of the tree. To verify this, we created for each implementation a heatmap that indicates the average runtime depending on both the tree size and the restriction size, shown in \Cref{fig:heatmap_time_possible_buckets}. The diagonal pattern shown by SageMath, Reisle, and GraphSet confirms the dependency on the tree size. All other implementations exhibit vertical stripes, which shows that their runtime does not depend on the tree size. Finally, \Cref{fig:relplot_time_tp_length_possible} shows the runtime compared to the terminal path length.
As expected, all implementations show a linear dependency on the terminal path length, with comparable results to \Cref{fig:relplot_time_full_leaves_possible}. \Cref{fig:relplot_impossible} shows the performance on the dataset \texttt{SER-IMP}\xspace. The performance is comparable with that on \texttt{SER-POS}\xspace. Noteworthy is that Zanetti performs quite a bit worse, which is due to its implementation not being able to detect failure during a labeling step. It always performs updates until a so-called R-node would be generated. Altogether, the data from \texttt{SER-POS}\xspace and \texttt{SER-IMP}\xspace shows that the implementations GraphSet, OGDF, Zanetti, HsuPC and UFPC are clearly superior to the others. In the following, we conduct a more detailed comparison of these implementations by integrating them into a planarity test and running them on much larger instances, i.e., the data set \texttt{DIR-PLAN}\xspace. In addition to an update method, this requires a method for replacing the now-consecutive leaves by a P-node with a given number of child leaves. Adding the necessary functionality would be a major effort for most of the implementations, which is why we only adapted the most efficient implementations to run this set. We also exclude GraphSet from this experiment; the fact that it scales linearly with the tree size causes the planarity test to run in quadratic time (see also \Cref{sec:impl-details}). \Cref{fig:planaritytest_scatter_size} again shows the runtime of individual restrictions depending on the restriction size. Curiously, Zanetti produces incorrect results for nearly all graphs with $m=2n$ in \Cref{fig:planaritytest_scatter_size_2n}. As the initial tests already showed, the implementation has multiple flaws; one major issue is already described in an issue on GitHub, while we give a small example of another independent error in \Cref{fig:zanettiBroken} in the appendix. 
Both plots show that HsuPC is more than twice as fast as OGDF and that UFPC is again close to two times faster than HsuPC. Zanetti's runtime is roughly the same as that of HsuPC, while converting its Java code to C++ brings the runtime down close to that of UFPC. As OGDF is the slowest, we use it as baseline to calculate the speedup of the other implementations. \Cref{fig:planaritytest_speedup_size} shows that the runtime improvement for all three implementations is the smallest for small restrictions, quickly increasing to the final values of roughly $0.4$ times the runtime of OGDF for HsuPC and $0.25$ for both CppZanetti and UFPC. \Cref{fig:planaritytest_speedup_tp_length} shows the speedup depending on the length of the terminal path. For very short terminal paths (which are common in our datasets, see \Cref{fig:planaritytest_distplot_size_tp_length} in the appendix), both implementations are again close; but already for slightly longer terminal paths UFPC quickly speeds up to being roughly 20\% faster than CppZanetti. This might be because creating the central node in \cref{step:alg-delete} is more complicated for UFPC, as the data structure without edge objects does not allow arbitrarily adding and removing edges (which is easier for HsuPC) and allowing circular restrictions forces UFPC to also pay attention to various special cases (which are not necessary for PQ-trees). \begin{figure} \caption{ Runtime of individual restrictions of \texttt{DIR-PLAN} \label{fig:planaritytest_scatter_size} \end{figure} \begin{figure} \caption{ Median performance increase depending on \textbf{(a)} \label{fig:planaritytest_speedup} \end{figure} \section{Conclusion} \label{ch:conclusion} In this paper we have presented the first fully generic and correct implementations of PC-trees. 
One implementation follows the original description of Hsu and McConnell~\cite{Hsu2003,Hsu2004}, which contains several subtle mistakes in the description of the labeling and the computation of the terminal path. This may be the reason why no fully generic implementation has been available so far. A corrected version that also includes several small simplifications is given in the appendix. Furthermore, we provided a second, alternative implementation, using Union-Find to replace many of the complications of Hsu and McConnell's original approach. Technically, this increases the runtime to $O((|R|+p)\cdot\alpha(|L|))$, where $\alpha$ is the inverse Ackermann function. In contrast, our evaluations show that the Union-Find-based approach is even faster in practice, despite the worse asymptotic runtime. Our experimental evaluation with a variety of other implementations reveals that surprisingly few of them seem to be fully correct. Only three other implementations have correctly handled all our test cases. The fastest of them is the PQ-tree implementation of OGDF, which our Union-Find-based PC-tree implementation beats by roughly a factor of 4. Interestingly, the Java implementation of PQR-trees by Zanetti achieves a similar speedup once ported to C++. However, Zanetti's Java implementation is far from correct and it is hard to say whether it is possible to fix it without compromising its performance. Altogether, our results show that PC-trees are not only conceptually simpler than PQ-trees but also perform well in practice, especially when combined with Union-Find. To put the speedup of factor 4 into context, we compared the OGDF implementations of the planarity test by Booth and Lueker and the one by Boyer and Myrvold on our graph instances. The Boyer and Myrvold implementation was roughly 40\% faster than the one by Booth and Lueker.
Replacing the PQ-trees, which are the core part of Booth and Lueker's algorithm, by an implementation that is 4 times faster might make this planarity test run faster than the one by Boyer and Myrvold. We leave a detailed evaluation, also taking into account the embedding generation, which our PC-tree-based planarity test does not yet provide, for future work. \appendix \begin{figure} \caption{Distribution of tree and restriction size for \texttt{DIR-PLAN} \label{fig:planaritytest_distplot_size_tp_length} \end{figure} \section{Details About Evaluated Implementations}\label{sec:impl-details} \begin{description} \item[BiVoC, Gregable] In the implementations of \emph{BiVoC} and \emph{Gregable}, we improved the mapping from the input to the tree's leaves by replacing \texttt{std::map} with \texttt{std::vector}, as suggested in the code's comments. As a result, this mapping now takes constant time. The \texttt{Bubble} method of BiVoC caused segmentation faults due to undefined behavior, because a set iterator is dereferenced and incremented after its corresponding element has been removed. We resolved this issue for our evaluation. Still, the method \texttt{qNextChild} of BiVoC rarely caused program hangs due to undefined behavior, when the past-the-end iterator of an empty set is incremented. In the Gregable repository, the author notes that the code ``is known to be buggy on some rare inputs. A believed to be correct, but harder to use version of this code can be found as a library within BiVoC''. However, in contrast to BiVoC, Gregable's implementation always produced the expected results in our evaluation. \item[GraphSet] In the implementation of \emph{GraphSet}, we removed the entanglement with Microsoft Foundation Classes by replacing its data structures with their corresponding variants from the standard library. We were unable to get GraphSet's \texttt{Bubble} method to work for our tests.
Instead, we used the approach from their quadratic-time variant of Booth and Lueker's planarity test, where they traverse the entire tree before each reduction in order to find and prepare the pertinent subtree. Still, GraphSet produced segmentation faults due to null pointer dereferencing in Template Q3. \item[TryAlgo] In June 2020, the authors of the \emph{TryAlgo} implementation noted on their website that they ``have problems implementing this data structure, and cannot provide at this point a correct implementation in tryalgo''. Furthermore, they note that ``the current implementation has a complexity in the order of $n*m$, however an implementation in $O(n+m+s)$ is possible''. As we thus assumed their implementation to be neither correct nor linear-time, we excluded it from our evaluation. \item[SageMath] The main routine \texttt{set\_contiguous} of the PQ-tree of \emph{SageMath} recursively traverses the tree starting from its root as follows: it first calls \texttt{set\_contiguous} recursively on all children of the current node, then calls \texttt{flatten}, calls \texttt{set\_contiguous} recursively on all children again and then proceeds to sort the children depending on whether they are full, partial, or empty. The \texttt{flatten} function for removing degree-2 nodes is implemented to recurse itself on all children in the subtree, making the runtime of \texttt{set\_contiguous} quadratic in the tree size. We modified the implementation to only flatten the current level and dropped the second recursive call to \texttt{set\_contiguous}, improving the runtime to linear in the tree size without generating incorrect results. \item[Zanetti] We found that \emph{Zanetti}'s data structures became inconsistent after some restrictions, which was also already independently reported on GitHub\punctuationfootnote{\url{https://github.com/jppzanetti/PQRTree/issues/2}}. This happened mostly after restrictions having a terminal path length greater than 1. 
As the restrictions generated when serializing a PC-tree only have very short terminal paths and the inconsistency is usually only found when modifying the same area of the tree again, only a few of these cases surfaced in our tests on \texttt{SER-POS}\xspace. Only when applying multiple bigger restrictions consecutively did these issues surface more often, i.e. at some point during the planarity test for close to all graphs with $m=2n$ and also some of the graphs with $m=3n-6$. We also found a second, independent issue, where Zanetti's implementation generates C-nodes with their children in the wrong order. An example where this happens is shown in \Cref{fig:zanettiBroken}. As Zanetti's Java implementation still has a very good runtime in practice, we decided to port its Java code to C++ to be able to perform a direct comparison with the other C++ implementations. As the implementation uses close to no Java-specific features, the conversion mostly involved replacing Java Object variables with C pointers and Java utility classes with their C++ stdlib equivalents. The only non-trivial change was that, because Zanetti stores the Union-Find information directly in the nodes and not in an external array, we had to implement reference counting for Zanetti's tree nodes to ensure that the lifetime of nodes which are no longer part of the tree, but still referenced in the Union-Find data structure, is handled properly. We made sure that both the Java and the C++ version not only produced equivalent output, but actually kept the same PQR-tree state in memory. Where both implementations differ is that Java immediately reports inconsistencies of the data structure, e.g. by throwing a \texttt{NullPointerException}, while the \texttt{SIGSEGFAULT} of C++ might not be immediately triggered. This generates a few more data points with an invalid result, where the Java implementation already crashed.
\end{description} \begin{figure} \caption{ Left: The PQR-tree \texttt{[0 1 2 (3 (4 5)) 6]} \label{fig:zanettiBroken} \end{figure} \section{Algorithmic Improvements}\label{sec:algo} In this section, we describe in further detail how our implementations differ from the description given by Hsu and McConnell and explain the corrections needed for a working implementation. \Cref{sec:step-label} describes the labeling procedure used by HsuPC and UFPC. Note that the technical complications involving the arcs and block-spanning pointers only concern the missing C-node objects of HsuPC. UFPC uses direct references to the concerned nodes instead of arcs and doesn't maintain block-spanning pointers, which is why those parts are not relevant for our second implementation, which is otherwise based on HsuPC. The same holds for \Cref{sec:step-tp}, where we give a corrected algorithm for enumerating the terminal path. \Cref{sec:step-tp-impossible} then describes the generic steps needed to detect impossible restrictions. Lastly, \Cref{sec:step-delete-contract-uf} explains the differing update step of UFPC, while the update step of HsuPC follows Hsu and McConnell's original description. \subsection{Efficiently Labeling and Finding Partial Nodes}\label{sec:step-label} In our description of the labeling step, we follow the general procedure of Hsu and McConnell~\cite{Hsu2004}, using a bottom-up traversal of the tree, starting at the full leaves. To implement this, we keep a queue of unprocessed non-full arcs of full nodes, which is initialized with the parent arcs of all full leaves. If the front of the queue knows its node object, this has to be a P-node and we can simply append the incoming arc to the node's list of full children. Recall that each full node has exactly one non-full neighbor. Thus, if this list reaches a size one smaller than the node's degree, we enqueue the parent pointer of the node, if it exists and the parent is not yet full.
The other case, when the now-full node is the root or when the parent has become full before its child, is missing in the description by Hsu and McConnell. Here, we need to search all children for the single non-full node, and queue this arc instead. This case is rare and the search of all full children can still be done in time linear in the number of full leaves, thus not affecting the overall runtime. If the arc does not point to a permanent (P-)node, we need to maintain (i.e. create/append/merge) the block-spanning pointers around the respective C-node $x$. The merging of full blocks is illustrated in \Cref{fig:fullBlocks}, see the book chapter by Hsu and McConnell~\cite{Hsu2004} for more details. If the new endpoints of the C-node's full block now share a common non-full neighbor, the C-node is full and we queue this neighbor $z$. Note that similar to the case of P-nodes, this node is most often but not necessarily (i.e. for the root) the parent arc. Still, there is no explicit search required, as we already know the arc to the only non-full neighbor. \begin{figure} \caption{\textbf{(a)} \label{fig:fullBlocks} \end{figure} In their definition of the data structure Hsu and McConnell note that ``[n]o two C nodes are adjacent, so each of these edges [incident to a C-node] has one end that identifies a neighbor of the C node, and another end that indicates that the end is incident to a C node, without identifying the C node''~\cite[page 32-10]{Hsu2004}. Considering the PC-tree with 6 leaves representing the restrictions $\{\{1,2\}, \{2,3\}, \{4,5\}, \{5,6\}\}$, which consists of two adjacent C-nodes and (up to reversal and circular shifting) only has the two valid permutations $[1,2,3,4,5,6]$ and $[1,2,3,6,5,4]$, this statement is obviously untrue. 
Hsu and McConnell use this property within their planarity test and when they test whether the endpoints of a full block are adjacent to the same arc: ``[i]f $x$ passes this test, it is full, and the full-neighbor counter of $z$ is incremented''~\cite[page 32-11]{Hsu2004}. According to their argumentation, $z$ has to be a P-node as no two C nodes are adjacent, which is incorrect. Still, the important information is not the type of the nodes, but that the neighbor $x$ of non-full node $z$ became full, and our queue-based approach also handles the case of a chain of multiple C-nodes becoming full consecutively. \subsection{Efficiently Finding the Terminal Path}\label{sec:step-tp} There are two possible structures for the terminal path, assuming the restriction is possible. Let the \emph{apex} be the highest node on the terminal path, i.e., the node that is an ancestor of all other nodes on the terminal path. The two cases can now be differentiated based on the position of the apex, which in turn depends on the position of the root node: \begin{description} \item[I-Shaped:] If the apex lies on one of the ends of the terminal path and is therefore a terminal node at the same time, the terminal path extends from the other endpoint of the path upwards to the apex, as shown in Figure~\ref{fig:tpIshaped}. In this case, every node on the terminal path has exactly one child on the terminal path, except for the lower terminal node $t_{1}$. This also covers the special case where the terminal path has length 0 and there is only a single terminal node $t_{1}=t_{2}$. \item[A-Shaped:] If the apex does not lie on one of the ends of the terminal path, two ascending paths join in the apex, as shown in Figure~\ref{fig:tpAshaped}. In this case, the apex $a$ has two children on the terminal path, the terminal nodes $t_{1}$ and $t_{2}$ have none, and all other nodes on the terminal path have exactly one child that is also on the terminal path. 
\end{description} \begin{figure} \caption{ Two examples of a PC-tree with root $r$. \textbf{(a)} \label{fig:tpIshaped} \label{fig:tpAshaped} \end{figure} Hsu and McConnell show that an edge is terminal if and only if it lies on a path in the tree between two partial nodes. This allows them to conduct parallel searches, starting at every partial node and extending ascending paths through their ancestors at the same rate~\cite{Hsu2004}. Whenever an already processed node is encountered, expansion of the current path is stopped and the path is instead merged into the path of the already processed node. This bottom-up traversal can easily be done for P-nodes, but again as C-nodes have no object registered with their incident arcs (i.e. except for a temporary one stored at the two endpoints of their full block), finding their parent arc can be difficult. Hsu and McConnell distinguish two cases: Either they arrived at the C-node $x$ via an arc from a partial or empty node, or via an arc from a full node. In the former case, they ``look at the two neighbors of the child edge in the cyclic order, and one of them must be the parent edge. This takes $O(1)$ time.'' In the latter case, they ``cycle clockwise and counterclockwise through the edges to full neighbors. Of the first edges clockwise and counter-clockwise that are not to full neighbors, one of these is the parent. In this case, the cost of finding the parent is proportional to the number of full neighbors $x$''~\cite[page 32-12]{Hsu2004}. Their procedure for the first case is incorrect, as $x$ could also be the apex where both incident terminal arcs are adjacent to each other, but not to the parent arc (note that if the apex $x$ is also the root, it doesn't even have a parent arc; see \Cref{fig:cNodeTPCase-emptyA}). 
The procedure for the second case is also incorrect, as $x$ could again be the apex, where the parent arc (if it exists) is not necessarily adjacent to the full block, as shown in \Cref{fig:cNodeTPCase-fullA}. Moreover, we can never arrive at $x$ via an arc from a full node, as full nodes cannot be part of the terminal path. To correctly implement this step, we again use a queue of unprocessed arcs that may be part of the terminal path. We initialize the queue with the parent arcs of all partial nodes found in the previous step, which is easy to do for partial P-nodes. To find the parent arc of a C-node $x$, only knowing the two endpoints of a full block from the labeling step, we fix the second case of Hsu and McConnell's case distinction as follows: If one of the two non-full arcs adjacent to the full block is indeed the parent arc $p$ (see \Cref{fig:cNodeTPCase-fullI}), we simply continue the path in that direction by queuing $p$. Otherwise, we store $x$ as apex candidate as we do not need to ascend any further. This holds for the case when the parent arc $p$ is within the full block, as a full node cannot be part of the terminal path. If $p$ is located somewhere else in the empty block, continuing the terminal path via that node would make it impossible to later split the C-node $x$ into an (exclusively) empty and an (exclusively) full half. We now process the queue step by step, seeking the parent arc $p$ of the node $x$ to which the removed arc $a$ points. When we encounter the root, we store it as apex candidate and continue with the next entry of the queue. We also never process a node twice, proceeding to the next entry if we detect this case. The hard part now is again finding the parent pointer of a C-node $x$. We first check whether the arcs next to the incoming arc $a$ are part of a full block. If both are part of the same block, we can stop ascending as we ran into a full node (the same holds for full P-nodes).
If both are part of two different full blocks, we abort and report an impossible restriction, as there is no flip of $x$ that makes all full leaves consecutive. If only one is part of a full block, we can use the procedure for initialization described in the previous paragraph to find the parent arc. Now consider the case where there is no full block adjacent to $a$, i.e. the former case of Hsu and McConnell's case distinction. If the parent arc $p$ is actually adjacent to the incoming arc $a$, we follow that path and queue $p$ (see \Cref{fig:cNodeTPCase-emptyI}). Otherwise, the terminal path may continue as shown in \Cref{fig:cNodeTPCase-emptyA}, where one of the arcs adjacent to $a$ also belongs to the terminal path and the current node $x$ is the apex. As we don't know which of the neighboring edges is terminal, we mark $a$ and store $x$ (identified by its incident arc $a$) as the apex candidate. We can resolve this situation, identify the second predecessor of $x$ and fix $x$ as actual apex if we later process an arc that is adjacent to the accordingly marked arc $a$. Observe that for each processed arc, we now either found a following parent arc or marked the corresponding node as apex candidate. If we marked a node as apex candidate and encounter it a second time when ascending from a different node, we can be sure that the terminal path is A-shaped and the current node actually is the apex. Otherwise, we can't be sure that the apex candidate is the actual apex if it is empty, as we might be extending an I-shaped path of empty nodes above the actual apex. Thus, if the apex candidate is empty and has only one predecessor, we find the actual apex by ``backtracking'' to the next partial node below the apex candidate.
Furthermore, if there is only one element left in the queue and we haven't found an apex (candidate) yet, all parts of the terminal path have already converged into a single ascending I-shaped path (instead of having met in an A-shape and stopped at the apex) and we are again extending the terminal path above the previously unidentified apex. We stop processing the queue and backtrack to the next partial node below the last remaining element of the queue. Otherwise, the queue processing also terminates if an impossible restriction is detected (see \Cref{sec:step-tp-impossible}) or the queue runs empty, already having identified the actual apex. The backtracking needed to find the actual apex can be done in constant time by storing, for each empty node on the terminal path, the highest partial node in its subtree. Special care needs to be taken as an empty apex candidate may collide with a later-found actual apex. In this case, we again extended an I-shaped path of empty nodes above the previously unidentified apex and only later found the second terminal path child of the apex, indicating that the terminal path is actually A-shaped. As long as both the apex and the apex candidate have the same node as highest partial node in their subtree, the restriction is valid and the apex candidate can simply be discarded. Observe that the number of arcs on the terminal path is proportional to the length $p$ of the terminal path. Furthermore, we only check up to two neighbors of each arc and any apex candidate requiring backtracking is at most $p$ nodes above the actual apex. Thus, the overall runtime of our search for the terminal path is in $O(p)$. Note that this slightly refines the analysis of Hsu and McConnell~\cite{Hsu2004}, who sometimes scan the full children of a node and thus have a runtime in $O(p+|R|)$. 
\begin{figure}
% NOTE(review): the four subfigure captions below arrived truncated in this copy
% (unclosed braces, labels detached); their text is reconstructed from the labels
% and the surrounding discussion -- confirm against the original figure.
\caption{An \emph{empty} node crossed by an I-shaped terminal path.}
\label{fig:cNodeTPCase-emptyI}
\caption{An \emph{empty} node crossed by an A-shaped terminal path.}
\label{fig:cNodeTPCase-emptyA}
\caption{A \emph{full} node crossed by an I-shaped terminal path.}
\label{fig:cNodeTPCase-fullI}
\caption{A \emph{full} node crossed by an A-shaped terminal path.}
\label{fig:cNodeTPCase-fullA}
\caption{ Different cases of the terminal path crossing a C-node. Empty, partial, and full nodes are drawn in white, gray, and black, respectively. The edges are oriented towards the root node. The green edges are part of the terminal path, the blue dashed half-arcs depict the block-spanning pointers. In cases (c) and (d), the node can be the final node of the terminal path, i.e. a terminal node. Thus there might be zero, one or two incident terminal edges. }
\end{figure} \subsection{Efficiently Detecting Impossible Restrictions}\label{sec:step-tp-impossible} Recall that a restriction is possible if and only if all terminal edges form a path and all nodes on the terminal path can be flipped or rearranged such that their empty and full children are consecutive while separated by the terminal edges. The only way to violate the first property is when a node has more than two incident terminal edges. P-nodes can detect directly when this case occurs, while a C-node with more than two incident terminal edges either leads to multiple apex candidates or detects the usage of one of its arcs in multiple paths. As P-nodes allow arbitrary arrangements of their children, only C-nodes can violate the second property by either having multiple distinct full blocks, by having a full block that is not adjacent to all incident terminal edges, or by having no full block and two non-adjacent terminal edges. All three cases lead to a disconnected terminal path and thus multiple apex (candidate) nodes, if the impossible restriction is not detected right away because an arc would be part of two different paths. 
Thus, we know that the restriction is impossible if and only if we either find a second, incompatible apex (candidate) or a node has more than two incident terminal edges. \subsection{Deletion and Contraction}\label{sec:step-delete-contract-uf} Deleting and inserting new edges is simple when using the arc-based tree representation described by Hsu and McConnell. When using a doubly-linked tree structure similar to the one used by UFPC, no explicit edge objects exist and they are instead encoded by the child-parent relationship of the nodes. This means that in \cref{step:alg-delete,step:alg-contract} of the update procedure, the child-parent relationship needs to be set immediately and correctly for every change and cannot easily be updated later, as done by Hsu and McConnell. Thus, UFPC uses a different approach for these two steps: First, the central node is created and we make sure that it already has its final neighbors. This is trivial if the apex is a C-node and thus can simply be reused as is. Otherwise, we create a new C-node and add up to 4 neighbors: the apex's first child on the terminal path, a newly created P-node that was reassigned as parent of all full children of the apex, the second child on the terminal path, and finally the apex with all its empty children remaining. Note that not all four neighbors might exist or be required, e.g. when the apex has no full or empty children. Furthermore, the root of the tree is either among the full or the empty children and thus the node that is still connected to the root needs to be installed as parent of the new central node. Second, we iteratively contract a child of the central node that is part of the terminal path into the central node. C-nodes can again be simply merged, while a P-node $x$ needs to be split into a full and empty node. P-node $x$ is then replaced by the full node and the empty node with the other terminal path neighbor of $x$ in between, if the latter exists. \end{document}
\begin{document} \draft \title{Quantitative~wave-particle~duality and~non-erasing~quantum~erasure} \author{Peter D. D. Schwindt,$^{\dagger}$ Paul G. Kwiat,$^{\dagger}$ and Berthold-Georg Englert$^{\ddagger,\ast}$} \address{$^{\dagger}$Physics Division, P-23, Los Alamos National Laboratory, Los Alamos, NM 87545\\ $^{\ddagger}$Max-Planck-Institut f\"{u}r Quantenoptik, Hans-Kopfermann-Str.~1, 85748 Garching, Germany\\ $^{\ast}$ Department of Physics, Texas A\&M University, College Station, TX 77843-4242} \date{Received 3 May 1999} \wideabs{ \maketitle \begin{abstract}\hspace*{1em} The notion of wave-particle duality may be quantified by the inequality $V^2+K^2\le1$, relating interference fringe visibility $V$ and path knowledge $K$. With a single-photon interferometer in which polarization is used to label the paths, we have investigated the relation for various situations, including pure, mixed, and partially-mixed input states. A quantum eraser scheme has been realized that recovers interference fringes even when no which-way information is available to erase. \end{abstract} \pacs{03.65.Bz, 42.50.-p, 07.60.Ly} } \thispagestyle{inprint} \enlargethispage{0.8\baselineskip} Wave-particle duality (WPD) dates back to Einstein's seminal paper on the photoelectric effect \cite{ref:Einst}, and is a striking manifestation of Bohr's complementarity principle \cite{ref:Bohr} (for a formal definition see Ref.~\cite{ref:SEWnat}). The familiar phrase ``each experiment must be described either in terms of particles or in terms of waves'' emphasizes the extreme cases and disregards intermediate situations, in which particle and wave aspects coexist. Theoretical investigations \cite{ref:WZ,ref:WPDbge1}, supplemented by a few experimental studies \cite{ref:WPDexp1,ref:Rempe1}, have led to a quantitative formulation of WPD [Eq.\ (\ref{eq:WPDineq}) below]. Here we report an experiment using a single-photon Mach-Zehnder interferometer in which polarization marks the path. 
We investigated the entire scope of the duality relation, for pure, mixed, and partially-mixed input states, and found {\em absolute\/} agreement at the percent level \cite{ref:Rempe2}. We also realized a novel quantum eraser scheme, whereby interference is recoverable although no which way (WW) information was available to erase. In view of the kinematical equivalence of all binary degrees of freedom, our results are directly applicable whenever an interfering particle is entangled with a 2-state quantum system. To quantify WPD, one needs quantitative, measurable characteristics for the wave-like and particle-like behavior of quanta. In an interferometer, the former is naturally quantified by the {\em Visibility\/} $V$ of the observed interference fringes. The quantification of the latter is based on the {\em Likelihood\/} $L$ of correctly guessing the path taken by a particular quantum --- the better one can guess, the more pronounced are the particle aspects. A random guess gives $L=\frac{1}{2}$, whereas $L=1$ indicates that the way is known with certainty. The actual WW {\em Knowledge\/} $K$ is given by $K=2L-1$, with ${0\leq K\leq1}$. In an {\em asymmetric\/} interferometer one way is more likely than the other to begin with (${L_{\rm a~priori}>\frac{1}{2}}$); we call WW knowledge of this kind {\em Predictability\/} (${P=2L_{\rm a~priori}-1}$). The statement $V^2+P^2\leq1$ has been known for some time, implicitly or explicitly, in various physical contexts \cite{ref:WZ,ref:WPDexp1}. Since one cannot lose {\em a priori\/} knowledge, ${P\leq K}$; in fact, $P\approx 0$ in our experiments. Nevertheless, owing to an {\em entanglement\/} of the system wave function with the wave function of some WW marker (WWM), the Knowledge can still be as large as $1$. 
The actual value of $K$ depends on the ``betting strategy'' employed; the optimal strategy maximizes $K$ and identifies the {\em Distinguishability\/} $D=\max\{K\}$ --- it is the maximum amount of WW Knowledge available, although a non-optimal measurement may yield less, or even zero. (Experimental inaccessibility of some crucial degrees of freedom may force the experimenter to settle for a non-optimal measurement; see Ref.~\cite{ref:BjKa} for further remarks.) Except where noted, our measurements were suitably optimized to maximize $K$. The {\em Duality Relation\/} accessible to experimental test then becomes \cite{ref:WPDbge1,ref:BjKa}: \begin{equation}\label{eq:WPDineq} V^2 + K^2 \leq 1 \;. \end{equation} The equality holds for pure initial states of the WWM, while the inequality applies to (partially-)mixed states. \enlargethispage{0.8\baselineskip} \section{Experimental setup and procedure}\label{sec:setup} In our experiments single photons (at $670\,{\rm nm}$) were directed into a compressed Mach-Zehnder interferometer \cite{ref:compressed} (see Fig.~\ref{fig:setup}). An adjustable half waveplate (HWP) in path 1 was used to entangle the photon's path with its polarization (i.e., with the WWM) thus yielding WW Knowledge \cite{ref:waveplate}. Our adjustable analysis system --- quarter waveplate (QWP), HWP, and calcite prism (PBS) --- allowed the polarization WWM to be measured in any arbitrary basis. The photons were detected using Geiger-mode avalanche photodiodes --- Single Photon Counting Modules (EG\&G \#SPCM-AQ, efficiency $\sim60\%$). The input source, described below, was greatly attenuated so that the maximum detection rates were always less than $50,000{\rm s}^{-1}$; for the interferometer passage time of $1\,{\rm ns}$, this means that on average fewer than $10^{-4}$ photons were in the interferometer at any time. 
\begin{figure}\label{fig:setup} \end{figure} The probability for having no photon at all is close to unity at any arbitrary instant, but state reduction removes this part a posteriori as soon as a detector ``clicks.'' The reduced state is virtually indistinguishable from a one-photon Fock state because the probability for two or more photons is negligibly small. This one-photon-at-a-time operation is essential to allow sensible discussion of the likely path taken by an individual light quantum~\cite{ref:QKrypt}. Perhaps unnecessarily, we emphasize that our experiment is not intended to be a direct proof of the quantum nature of light. Rather, we accept the existence of photons as an established experimental fact \cite{ref:photon1}. The quantized electromagnetic field has a classical limit as a field (unlike other quantum fields that have, at best, a limit in terms of particles), and some properties of the quantum field have close classical analogs. In particular, the counting rates of single-photon interferometers, such as the one used in our experiment, are proportional to the intensities of the corresponding classical electromagnetic field. But there is no allowance for individual detector clicks in Maxwell's equations \cite{ref:clicks}, nor for the quantum entanglement of photonic degrees of freedom that we exploit. And clearly, the trajectory of a light quantum through the interferometer is a concept alien to classical electrodynamics, as is the experimenter's knowledge $K$ about this trajectory. For Visibility measurements the polarization analyzer was lowered out of the beam path, and the maximum and minimum count rates on detector 1 were measured as the length of path 2 was varied slightly (via a piezoelectric transducer). 
After subtracting out the separately-measured detector background (i.e., the count rate when the input to the interferometer was blocked, typically 100--400${{\rm s}^{-1}}$), the visibility was calculated in the standard manner: $V=({\rm Max}-{\rm Min})/({\rm Max}+{\rm Min})$. For the determination of the Likelihood, and hence the Knowledge, the following procedure was used. With the polarization analyzer in place, and path 2 blocked, the counts on the two detectors were measured. Detector 1 (2) looked at polarization $\lambda$ ($\lambda^{\perp}$), determined by the analysis settings. After subtracting the backgrounds measured for each detector, the count rates from detector 1 were scaled by the relative efficiency of the two detectors: $\eta_2/\eta_1{=1.11\pm0.01}$. (In this way our calculated value of the Knowledge corresponds to what would have been measured if our detectors had been identical and noiseless.) Call the resulting scaled rates $R_{1\lambda}\equiv R(\mbox{path 1}, \mbox{polarization $\lambda$})$ and $R_{1\lambda^{\perp}}\equiv R(\mbox{path 1}, \mbox{polarization $\lambda^{\perp}$})$. Next, we measure the corresponding quantities for path 2: $R_{2\lambda}$ and $R_{2\lambda^{\perp}}$. The betting strategy is the one introduced by Wootters and Zurek \cite{ref:WZ} and optimized in Ref.~\cite{ref:WPDbge1}: Pick the path which contributes most to the probability of triggering the detector that has actually fired. The Likelihood is then \begin{equation} L = \frac {\max\{R_{1\lambda},R_{2\lambda}\} + \max\{R_{1\lambda^{\perp}},R_{2\lambda^{\perp}}\}} {R_{1\lambda} + R_{2\lambda} + R_{1\lambda^{\perp}} + R_{2\lambda^{\perp}}} \,. \end{equation} \section{Experimental results}\label{sec:results} \subsection{Wave-particle duality for pure states}\label{ssec:WPDpure} Figure \ref{fig:pureVK} shows the results when a pure vertical-polarization state ({\sf V}) was input to the interferometer, as a function of the internal HWP's orientation. 
As expected, when the HWP is aligned to the vertical ($\theta_{\rm{HWP}} = 0$), therefore leaving the polarization unchanged, we see nearly complete Visibility and get no WW Knowledge. The measured values of $V$ are slightly lower than the theoretical curve because the intrinsic visibility of the interferometer (even without the HWP) is only $\sim 98\%$, due to nonideal optics \cite{ref:visibility}. Conversely, with the HWP set (at $\theta_{\rm{HWP}} = 45^{\circ}$) to rotate the polarization in path 1 to horizontal ({\sf H}), the Visibility is essentially zero, and the Knowledge nearly equal to 1. Formally, the spatial wave function and the polarization WWM wave function are completely entangled by the HWP: ${|\psi\rangle \propto |1\rangle|{\sf H}\rangle_{{\rm WWM}} + e^{i \phi}|2\rangle|{\sf V}\rangle_{{\rm WWM}}}$, where $\phi$ is the relative phase between paths 1 and 2. Tracing over the WWM effectively removes the coherence between the spatial modes. That a small visibility persists in our results can be explained by slight residual polarization transformations by the interferometer mirrors and beam splitters, so that the polarizations from the two paths are not completely orthogonal; and by the remarkable robustness of interference --- both theoretically and experimentally, $V>4.4\%$ even though $L>99.9\%$! \begin{figure}\label{fig:pureVK} \end{figure} In Fig.\ \ref{fig:pureVK} we also display two sets of Knowledge data, one taken in the optimal basis \cite{ref:optimal}, the other fixed in the horizontal-vertical basis. These data demonstrate that Knowledge can depend on the measurement technique. With the optimal basis, the value of ${V^2 + K^2}$ is always very close to the predicted unit value; our experiment is the first to verify this. The average of all the data points in Fig.~\ref{fig:pureVK} gives ${0.976\pm0.017}$. 
The slight discrepancy with the predicted value of $1$ is mostly due to the intrinsic visibility of the interferometer --- for the minimum-visibility arrangement, ${V^2+K^2=0.998}$. \subsection{Wave-particle duality for (partially-)mixed states} \label{ssec:WPDmix} Using photons from an attenuated quartz halogen lamp that was spectrally-filtered with a narrow-band interference filter (centered at $670\,{\rm nm}$, $1.5\,{\rm nm}$ FWHM) and spatially-filtered via a single-mode optical fiber, we explored Eq.\ (\ref{eq:WPDineq}) for {\it mixed} states (slight polarizing effects from the fiber actually led to $\sim4\%$ residual net polarization). The measurements of Visibility and Knowledge for this nearly completely-mixed input state have values close to the theoretical prediction of $0$ (Fig.\ \ref{fig:mixedVK}a). $K \rightarrow 0$ for a completely-mixed WWM state because any unitary transformations on an unpolarized input also yield an unpolarized state (the density matrix is unaffected), so there is no WW information. That $V \rightarrow 0$ can be understood by examining the behavior of orthogonal pure WWM states, with no definite phase relationship between them. In the basis where the HWP rotates the WWM states by $90^{\circ}$, the orthogonal polarizations from paths 1 and 2 cannot interfere; in the basis aligned with the HWP's axes, each polarization individually interferes, but the interference patterns are shifted relatively by $180^{\circ}$ (due to the birefringence of the HWP), so the sum is a fringeless constant. \begin{figure}\label{fig:mixedVK} \end{figure} To enable production of an even more mixed input, and to allow generation of arbitrary partially-mixed states, we used a ``tunable'' diode-laser scheme (see Fig.\ \ref{fig:setup}b). By rotating the (pure linear) polarization input to the first polarizing beam splitter, one can control the relative contribution of horizontal and vertical components. 
For example, for incident photons at $45^{\circ}$, one has equal {\sf H} and {\sf V} amplitudes which are then added together with a random and rapidly varying phase to produce an effectively completely mixed state of polarization \cite{ref:LLP}. With 5 times more vertical than horizontal, the state is then 1/3 completely-mixed to 2/3 pure. This case is shown in Fig.~\ref{fig:mixedVK}b. Note that the maximum Visibility (and Knowledge) is numerically equal to the state purity, as the mixed-component displays no interference (and contains no WW information). The data taken for various input states show excellent agreement with theoretical predictions (Fig. \ref{fig:mixedVK}c). \subsection{Quantum erasure (erasing and non-erasing)}\label{ssec:QE} In contrast to many interference situations where the WW information may be inaccessible, the quantum state of our WWM is easily manipulated. One can then in fact ``erase'' the distinguishing information and recover interference \cite{ref:SEWnat,ref:Scully2} (though this simple physical picture fails when non-pure WWM states are considered). In our experiments, such an erasure consists of using a polarization analysis to reduce or remove the WW labeling. For example, if the path 1 and 2 polarizations are horizontal and vertical, respectively, analysis at $\pm45^{\circ}$ will recover complete fringes; any photon transmitted through a $45^{\circ}$ polarizer is equally likely to have come from either path. \begin{figure}\label{fig:QE} \end{figure} Figure \ref{fig:QE}a shows quantum eraser data under the condition that a pure vertical photon is input to the interferometer, and rotated by the HWP in path 1 by either $90^{\circ}$ or $20^{\circ}$. The visibility on detector 1 {\em after\/} the analyzer can assume any value from $0$ to $1$, the latter case being a complete quantum erasure. Even for a completely-mixed state, it is still possible to recover interference (Fig.~\ref{fig:QE}b). 
With no WW information to erase, this {\em non-erasing quantum erasure\/} may seem quite remarkable at first. However, the essential feature of quantum erasure is not that it destroys the possibly available WW information, but that it sorts the photons into sub-ensembles (depending on the quantum state of the WWM) each exhibiting high-visibility fringes. Complete interference is recoverable by analyzing along the eigenmodes of the internal HWP --- along one axis we see fringes, along the other we see ``anti-fringes,'' shifted by $180^{\circ}$ \cite{ref:quartz}. More generally, one post-selects one of the WWM eigenstates as determined by the interaction Hamiltonian of the interfering quantum system and the WWM \cite{ref:ZNatf}. For a partially-mixed WWM state, just as the value of ${V^2 + K^2}$ lies partway between $1$ (pure state) and $0$ (completely-mixed state), the analysis angles yielding zero visibility also fall between those for pure and mixed states (Fig.~\ref{fig:QE}c). Quantitatively, for a fractional purity $s$, the angles are at $\theta_{\rm HWP} \pm 1/2\arccos\biglb(s\cos(2\theta_{\rm HWP})\bigrb)$; consult Ref.\ \cite{ref:Garda} for further details. A convenient geometrical visualization of our results can be had by considering the polarization analysis in the Poincar\'{e} sphere representation \cite{ref:Poincare}, in which all linear polarizations lie on the equator of the sphere, circular polarizations lie on the poles, and arbitrary elliptical states lie in between. Any two orthogonal states lie diametrically opposed on the sphere. For pure polarization input states to our interferometer, there are in general exactly two points on the sphere for which the interference visibility will be exactly equal to zero. These correspond to the polarizations where a detector sees light from only one of the interferometer paths. Along the entire great circle that bisects the line connecting these two points, the quantum eraser will yield perfect visibility. 
Curiously, the situation for a completely-mixed input state is reversed. Here there are in general exactly two polarization states for which the quantum eraser recovers unit visibility, corresponding to the eigenmodes of the polarization elements inside the interferometer; on the great circle equidistant from these two points the Visibility vanishes. For example, in some mixed-state experiments described in Ref.\ \cite{ref:Garda}, the eigenmodes are the poles on the Poincar\'{e} sphere, and the great circle corresponds to the equator --- no visibility is observed for any linear polarization analysis. \section{Discussion}\label{sec:discuss} Our results demonstrate the validity of Eq.\ (\ref{eq:WPDineq}) at the percent level. Moreover, they highlight some features associated with mixed states, which may not have been widely appreciated. Namely, that it is possible for both the interference visibility and the path distinguishability to equal zero. We have also seen that in some cases where the visibility is intrinsically equal to zero, it is possible to perform quantum erasure on the photons, and recover the interference. Remarkably, this is true even when the input state is completely mixed, and there exists no WW information to erase. The operation of the polarizer is essentially to {\em select\/} a sub-ensemble of photons. Depending on how this selection is performed, we may recover fringes, anti-fringes, no fringes, or any intermediate case. The WW labeling in our experiment arose from an {\em entanglement\/} between the photon's spatial mode and polarization state. It could just as well have been with another photon altogether, as in the experiments in \cite{ref:QErasers1}, or even with a different kind of quantum system \cite{ref:Eichmann}. The same results are predicted, as long as the WW information is stored in a 2-state quantum system (e.g., internal energy states, polarization, spin, etc.). 
More generally, our findings are extendible to analogous experiments with quanta of different kinds such as, for example, interferometers with electrons \cite{ref:Buks}, neutrons \cite{ref:neutron1}, or atoms \cite{ref:Rempe1,ref:Rempe2}. To counter a possible misunderstanding let us note that, quite generally, entanglement concerns different degrees of freedom (DoF's), not different particles. For certain purposes, such as quantum dense coding \cite{ref:denscod} or quantum teleportation \cite{ref:teleport}, it is essential that the entangled DoF's be carried by different particles and can thus be manipulated at a distance. For other purposes, however, one can just as well entangle an internal DoF of the interfering particle itself with its center-of-mass DoF \cite{ref:Grover}. In our experiment the photon's polarization DoF is entangled with the spatial mode DoF represented by the binary alternative ``reflected at the entry beam splitter, or transmitted?'' Analogously, hyperfine levels of an atom were used to mark its path in the experiments of Refs.\ \cite{ref:Rempe1,ref:Rempe2}. In the extreme situation of perfect WW distinguishability, the entangled state is of the form stated in Sec.~\ref{ssec:WPDpure}, namely ${|\psi\rangle \propto |1\rangle|{\sf H}\rangle + e^{i \phi}|2\rangle|{\sf V}\rangle}$. Appropriate measurements on the spatial DoF (defined by $|1\rangle$ and $|2\rangle$) and the polarization DoF ($|{\sf H}\rangle$ and $|{\sf V}\rangle$) would show that the entanglement is indeed so strong that Bell's inequality \cite{ref:Bell} is violated --- clear evidence that a description based solely on classical electrodynamics cannot account for all features of our experiment. 
Of course, inasmuch as one cannot satisfy the implicit assumption that the measurements on the entangled subsystems be space-like separated, this violation of Bell's inequality implies nothing about the success or failure of local-hidden-variable theories; however, this is not relevant here. Finally we'd like to mention that further progress was made since the completion of the work reported here. Experimental tests of more sophisticated inequalities than (\ref{eq:WPDineq}) were performed \cite{ref:DR99}, and there was progress in theory as well \cite{ref:EB99}. In particular, the quantitative aspects of quantum erasure were investigated beyond the initial stage reached in Ref.\ \cite{ref:BjKa}. \section*{Acknowledgments} BGE is grateful to Helmut Rauch and collaborators for their hospitality at the Atominstitut in Vienna, where part of this work was done, and he thanks the Technical University of Vienna for financial support. PGK and PDDS acknowledge Andrew White for helpful discussions and assistance. Correspondence should be addressed to PGK (email: [email protected]). \begin{references} \bibitem{ref:Einst} A. Einstein, Ann.\ Physik {\bf 17}, 132 (1905); English translation in {\it The World of the Atom\/}, edited by H. A. Boorse and L. Motz (Basic Books, New York, 1966). \bibitem{ref:Bohr} N. Bohr, \nat {\bf 121}, 580 (1928). \bibitem{ref:SEWnat} M. O. Scully, B.-G. Englert, and H. Walther, \nat {\bf 351}, 111 (1991). \bibitem{ref:WZ} W. Wootters and W. Zurek, \prd {\bf 19}, 473 (1979); R. Glauber, Ann.\ N. Y. Acad.\ Sci.\ {\bf 480}, 336 (1986); D. Greenberger and A. Yasin, \pl {\bf A128}, 391 (1988); L. Mandel, \ol {\bf 16}, 1882 (1991); G. Jaeger, A. Shimony, and L. Vaidman, \pra {\bf 51}, 54 (1995). \bibitem{ref:WPDbge1} B.-G. Englert, \prl {\bf 77}, 2154 (1996). \bibitem{ref:WPDexp1} H. Rauch and J. Summhammer, \pl {\bf A104}, 44 (1984); J. Summhammer, H. Rauch, and D. Tuppinger, \pra {\bf 36}, 4447 (1987); P. Mittelstaedt, A. Prieur, and R. 
Schieder, Found.\ Phys.\ {\bf 17}, 891 (1987); F. De Martini {\it et al.\/} \pra {\bf 45}, 5144 (1992). \bibitem{ref:Rempe1} S. D\"{u}rr, T. Nonn, and G. Rempe, \nat {\bf 395}, 33 (1998). \bibitem{ref:Rempe2} With internal atomic degrees of freedom employed for the path marking, a recent atom-interferometry experiment investigated the {\it equality} of (1), with {\em scaled\/} results constant to within $10\%$, but achieving unscaled values of only $\sim0.6$ [S. D\"{u}rr, T. Nonn, and G. Rempe, \prl {\bf 81}, 5705 (1998)].\\ For the record we note that our work was simultaneous with and independent of D\"urr, Nonn, and Rempe's although their published account appeared earlier. \bibitem{ref:BjKa} G. Bj\"ork and A. Karlsson, \pra {\bf 58}, 3477 (1998). \bibitem{ref:compressed} The angle of incidence on the beam splitter was set to $10^{\circ}$, in order to minimize polarization variations in the reflection and transmission amplitudes -- the resulting beamsplitter reflectivities and transmittivities were found to lie in the range $0.49$ to $0.51$ for all polarizations. \bibitem{ref:waveplate} A HWP {\em reflects\/} linear polarization about the optic axis direction, effectively rotating the polarization by twice the angle between the incident polarization and this axis. \bibitem{ref:QKrypt} Note that this same feature enables quantum cryptography to be performed with attenuated coherent states; see, for instance, B. Huttner, N. Imoto, N. Gisin, and T. Mor, \pra {\bf 51}, 1863 (1995). \bibitem{ref:photon1} J. F. Clauser, \prd {\bf 9}, 853 (1974); P. Grangier, G. Roger, and A. Aspect, Europhys.\ Lett.\ {\bf 1}, 173 (1986). \bibitem{ref:clicks} It is sometimes claimed that the quantization of matter is all one needs to explain detector clicks and that a semiclassical theory (quantized matter interacting with classical Maxwell fields) is capable of giving a full account. 
However, experiments (like those in \cite{ref:photon1}) have proved that such a model incorrectly predicts physical results in some situations. Rather than adopting a semiclassical approach (which we know eventually fails) for some experiments and the quantum approach (which {\em always\/} works) for others, we feel compelled to use the latter picture throughout. Moreover, a semiclassical description is unavoidably inconsistent for theoretical reasons. Either the charged quantized matter would have to act as a source for the classical electromagnetic field or, if this is avoided by construction, action would not be properly paired with reaction. \bibitem{ref:visibility} This distinguishing WW knowledge in the {\em spatial\/} wave functions could possibly be extracted using a suitable measurement that included spatial-mode information. \bibitem{ref:optimal} For linear polarization states, the optimal Knowledge-measurement axes lie exactly between the axes which would equalize the amplitudes from the two paths (and for which the Visibility is maximum), i.e., if the light coming from paths 1 and 2 is polarized at $\phi_1$ and $\phi_2$, then the optimal Knowledge basis is at $(\phi_1+\phi_2)/2 \pm 45^{\circ}$. \enlargethispage{-29.5\baselineskip} \bibitem{ref:LLP} {\em Individual\/} photons thus manipulated are unpolarized; consult J. Lehner, U. Leonhard, and H. Paul, \pra {\bf 53}, 2727 (1996), for unpolarized {\em multi\/}photon states. \bibitem{ref:Scully2} M. O. Scully and K. Dr\"{u}hl, \pra {\bf 25}, 2208 (1982). \bibitem{ref:quartz} In another series of measurements \cite{ref:Garda}, the internal HWP was replaced by quartz rotators, relying only on optical activity, whose net effect was to rotate the relative polarizations in the two paths by $90^{\circ}$. When a linear polarization at $\theta$ was input, there was basically never any interference without quantum erasure. 
Complete visibility could be restored using a linear analysis at $\theta$, or a circular-polarization analysis. For a completely mixed state input, however, {\em only\/} the circular analysis (i.e., along the eigenmodes of the quartz) recovered complete visibility. \bibitem{ref:ZNatf} B.-G. Englert, Z. Naturforsch.\ {\bf 54a}, 11 (1999). \bibitem{ref:Garda} P.~G. Kwiat, P.~D.~D. Schwindt, and B.-G. Englert, in {\it Mysteries, Puzzles, and Paradoxes in Quantum Mechanics}, edited by R. Bonifacio (CP461, American Institute of Physics, Woodbury, 1999), p.~69. \bibitem{ref:Poincare} G.~N. Ramachandran and S. Ramaseshan, in {\it Handbuch der Physik}, Vol.\ 25, Part I (Springer, Berlin, 1961). \bibitem{ref:QErasers1} A.~G. Zajonc {\it et al.\/} \nat {\bf 353}, 507 (1991); P.~G. Kwiat, A.~M. Steinberg, and R.~Y. Chiao, \pra {\bf 45}, 7729 (1992); T. Herzog, {\it et al.\/} \prl {\bf 75}, 3034 (1995). \bibitem{ref:Eichmann} U. Eichmann {\it et al.\/} \pra {\bf 57}, 2359 (1993). \bibitem{ref:Buks} E. Buks {\it et al.\/} \nat {\bf 391}, 871 (1998). \bibitem{ref:neutron1} G. Badurek, H. Rauch, and J. Summhammer, Physica B\&C {\bf 151}, 82 (1988); G. Badurek {\it et al.\/} Nucl.\ Instrum.\ Methods A (to be published). \bibitem{ref:denscod} C.~H. Bennett and S.~J. Wiesner, \prl {\bf 69}, 2881 (1992); K. Mattle, H. Weinfurter, P.~G. Kwiat, and A. Zeilinger, \prl {\bf 76}, 4656 (1996). \bibitem{ref:teleport} C.~H. Bennett {\it et~al.}, \prl {\bf 70}, 1895 (1993); D. Bouwmeester {\it et~al.}, \nat {\bf 390}, 575 (1997); D. Boschi {\it et~al.}, \prl {\bf 80}, 1121 (1998); A. Furasawa {\it et~al.}, Science {\bf 282}, 706 (1998). \bibitem{ref:Grover} For example, such single-particle entanglement recently allowed the realization of a quantum search algorithm in an optical system with only passive linear elements [P.~G. Kwiat, J.~R. Mitchell, P.~D.~D. Schwindt, and A.~G. White, \jmo (to be published)]. \enlargethispage{-29.5\baselineskip} \bibitem{ref:Bell} J. 
Bell, Physics {\bf 1}, 195 (1964). \bibitem{ref:DR99} S. D\"urr and G. Rempe, \oc (submitted). \bibitem{ref:EB99} B.-G. Englert and J.~A. Bergou, \oc (submitted). \end{references} \end{document}
\begin{document} \title{Adapt or Die: Polynomial Lower Bounds for Non-Adaptive Dynamic Data Structures} \begin{abstract} In this paper, we study the role \emph{non-adaptivity} plays in maintaining dynamic data structures. Roughly speaking, a data structure is non-adaptive if the memory locations it reads and/or writes when processing a query or update depend only on the query or update and not on the contents of previously read cells. We study such non-adaptive data structures in the cell probe model. This model is one of the least restrictive lower bound models and in particular, cell probe lower bounds apply to data structures developed in the popular word-RAM model. Unfortunately, this generality comes at a high cost: the highest lower bound proved for any data structure problem is only polylogarithmic. Our main result is to demonstrate that one can in fact obtain polynomial cell probe lower bounds for non-adaptive data structures. To shed more light on the seemingly inherent polylogarithmic lower bound barrier, we study several different notions of non-adaptivity and identify key properties that must be dealt with if we are to prove polynomial lower bounds without restrictions on the data structures. Finally, our results also unveil an interesting connection between data structures and depth-2 circuits. This allows us to translate conjectured hard data structure problems into good candidates for high circuit lower bounds; in particular, in the area of linear circuits for linear operators. Building on lower bound proofs for data structures in slightly more restrictive models, we also present a number of properties of linear operators which we believe are worth investigating in the realm of circuit lower bounds. \end{abstract} \thispagestyle{empty} \setcounter{page}{1} \section{Introduction} \label{sec:intro} Proving lower bounds on the performance of data structures has been an important line of research for decades. 
Over time, numerous computational models have been proposed, of which the \emph{cell probe model} of Yao~\cite{yao:cellprobe} is the least restrictive. Lower bounds proved in this model apply to essentially any imaginable data structure, including those developed in the most popular upper bound model, the word-RAM. Much effort has therefore been spent on deriving cell probe lower bounds for natural data structure problems. Nevertheless, the highest lower bound that has been proved for \emph{any} data structure problem remains just polylogarithmic. In this paper, we consider a natural restriction of data structures, namely \emph{non-adaptivity}. Roughly speaking, a non-adaptive data structure is a data structure for which the memory locations read when answering a query or processing an update depend only on the query or update itself, and not on the contents of the previously read memory locations. Surprisingly, we are able to derive polynomially high cell probe lower bounds for such data structures. \subsection{The Cell Probe Model} In the cell probe model, a data structure consists of a collection of memory cells, each storing $w$ bits. Each cell has an integer address amongst $[2^w]=\{1,\dots,2^w\}$, i.e. we assume any cell has enough bits to address any other cell. When a data structure is presented with a query, the query algorithm starts reading, or \emph{probing}, cells of the memory. The cell probed at each step may depend arbitrarily on the query and the contents of all cells probed so far. After probing a number of cells, the query algorithm terminates with the answer to the query. A dynamic data structure in the cell probe model must also support updates. When presented with an update, the update algorithm similarly starts reading and/or writing cells of the data structures. We refer jointly to reading or writing a cell as probing the cell. 
The cell probed at each step, and the contents written to a cell at each step, may again depend arbitrarily on the update operation and the cells probed so far. The query and update times of a cell probe data structure are defined as the number of cells probed when answering a query or update respectively. The space usage is simply defined as the largest address used by any cell of the data structure. \subsection{Previous Cell Probe Lower Bound Techniques} \label{sec:previous} As mentioned, the state-of-the-art techniques for proving cell probe lower bounds unfortunately yield just polylogarithmic bounds. In the following, we give a brief overview of the highest lower bounds that have been achieved since the introduction of the model, and also the most promising line of attack towards polynomial lower bounds. \paragraph{Static Data Structures.} One of the most important early papers on cell probe lower bounds for static data structures is the paper of Miltersen et al.~\cite{milt:asym}. They demonstrated an elegant reduction to data structures from an asymmetric communication game. This connection allowed them to obtain lower bounds of the form $t_q = \Omega(\lg m/\lg S)$, where $m$ denotes the number of queries to the data structure problem, $S$ the space usage in number of cells and $t_q$ the query time. Note however that this bound is insensitive to polynomial changes in $S$ and cannot give super-constant lower bounds for problems where the number of possible queries is just polynomial in the input size (which is true for most natural problems). This barrier was overcome in the seminal work of P{\v a}tra{\c s}cu and Thorup~\cite{patrascu10higher}, who extended the communication game of Miltersen et al.~\cite{milt:asym} and obtained lower bounds of $t_q = \Omega(\lg m/\lg(St_q/n))$, which peaks at $t_q = \Omega(\lg m/\lg \lg m)$ for data structures using $n \textrm{poly}(\lg m)$ space. 
An alternative approach to static lower bounds was given by Panigrahy et al.~\cite{pani:metric}. Their method is based on sampling the cells of a data structure and showing that many queries can be answered from a small set of cells if the query time is too small (we note that similar ideas have been used for succinct data structure lower bounds, see e.g.~\cite{gal:succinct}). The maximum lower bounds that can be obtained from this technique are of the form $t_q = \Omega(\lg m/\lg(S/n))$, see~\cite{larsen:staticloga}. For linear space, this reaches $t_q = \Omega(\lg m)$, which remains the highest static lower bound to date. \paragraph{Dynamic Data Structures.} The first technique for proving lower bounds on dynamic data structures was the \emph{chronogram technique} of Fredman and Saks~\cite{Fredman:chrono}. This technique gives lower bounds of the form $t_q = \Omega(\lg n/\lg(wt_u))$ and plays a fundamental role in all later techniques for proving dynamic data structure lower bounds. P{\v a}tra{\c s}cu and Demaine~\cite{Patrascu:loga} extended the technique of Fredman and Saks with their \emph{information transfer} technique. This extension allowed for lower bounds of $\max\{t_q,t_u\} = \Omega(\lg n)$. Very recently, Larsen~\cite{larsen:dynamic_count} combined the chronogram technique of Fredman and Saks with the cell sampling method of Panigrahy et al. to obtain a lower bound of $t_q=\Omega((\lg n/\lg(wt_u))^2)$, which remains the highest lower bound achieved so far. \paragraph{Conditional Lower Bounds.} Examining all of the above results, we observe that no lower bound has yet exceeded $\max\{t_u,t_q\}=\Omega((\lg n/\lg \lg n)^2)$ in the most natural case of polynomially many queries, i.e. $m=\textrm{poly}(n)$. In an attempt to overcome this barrier, P{\v a}tra{\c s}cu~\cite{patrascu10mp-3sum} defined a dynamic version of a set disjointness problem, named the \emph{multiphase problem}. 
We study problems that are closely related to the multiphase problem, so we summarize it here: \paragraph{The Multiphase Problem.} This problem consists of three phases: \begin{itemize} \item \textbf{Phase I:} In this phase, we receive $k$ sets $S_1,\dots,S_k$, all subsets of a universe $[n]$. We are allowed to preprocess these sets into a data structure using time $O(\tau k n)$. \item \textbf{Phase II:} We receive another set $T \subseteq [n]$ and have time $O(\tau n)$ to read and update cells of the data structure constructed in Phase I. \item \textbf{Phase III:} We receive an index $i \in [k]$ and have time $O(\tau)$ to read cells of the data structure constructed during Phase I and II in order to determine whether $S_i \cap T = \emptyset$. \end{itemize} P{\v a}tra{\c s}cu conjectured that there exist constants $\mu > 1$ and $\varepsilon>0$ such that any solution for the multiphase problem must have $\tau = \Omega(n^\varepsilon)$ when $k = n^\mu$, i.e. for the right relationship between $n$ and $k$, any data structure must have either polynomial preprocessing time, update time or query time. Furthermore, he reduced the multiphase problem to a number of natural data structure problems, including e.g. the following problems. \begin{itemize} \item\textbf{Reachability in Directed Graphs.} In a preprocessing phase, we are given a directed graph with $n$ nodes and $m$ edges. We are then to support inserting directed edges into the graph. A query is finally specified by two nodes of the graph, $u$ and $v$, and the goal is to determine whether there exists a directed path from $u$ to $v$. \item\textbf{Subgraph Connectivity.} In a preprocessing phase, we are given an undirected graph with $n$ nodes and $m$ edges. We are then to turn nodes on and off. A query is finally specified by two nodes of the graph, $u$ and $v$, and the goal is to determine whether there exists a path from $u$ to $v$ using only \emph{on} nodes. 
\end{itemize} We also mention the following problem, which was shown in~\cite{chan:mode} to solve the multiphase problem. \begin{itemize} \item\textbf{Range Mode.} In a preprocessing phase, we are given an array $A[1 : n]=\{A[1],\dots,A[n]\}$ of integers and are to support value updates $A[i] \gets A[i] + x$. Queries are specified by two indices $i$ and $j$, and the goal is to find the most frequently occurring integer in the subarray $A[i:j]$. \end{itemize} These reductions imply polynomial lower bounds for the above problems, if the multiphase problem has a polynomial lower bound. Thus it seems fair to say that studying the multiphase problem is the most promising direction for obtaining polynomial data structure lower bounds. \subsection{Non-Adaptivity} \label{sec:nonadapt} Given that we are generally clueless about how to prove polynomial lower bounds in the cell probe model, it is natural to investigate under which circumstances such bounds can be achieved. In this paper, we study the performance of data structures that are non-adaptive. To make the notion of non-adaptivity precise, we define it in the following: \begin{itemize} \item \textbf{Non-Adaptive Query Algorithm.} A cell probe data structure has a non-adaptive query algorithm, if the cells it probes when answering a query depend only on the query, and not on the contents of previously probed cells. \item \textbf{Non-Adaptive Update Algorithm.} Similarly, a cell probe data structure has a non-adaptive update algorithm, if the cells it probes when processing an update depend only on the update, and not on the contents of previously probed cells. \item \textbf{Memoryless Update Algorithm.} In this paper, we also study a slightly more restrictive type of update algorithm. 
A cell probe data structure has a memoryless update algorithm, if the update algorithm is both non-adaptive, and furthermore, the contents written to a cell during an update depend only on the update and the current contents of the cell, i.e., they may not depend on the contents of other cells probed during the update operation.\footnote{A caveat on the semantics of updates: in this work, we assume updates specify how data \emph{changes} (e.g. updates are of the form $A[k] \leftarrow A[k] + \Delta$) as opposed to specifying new values for data (e.g. updates of the form $A[k] \leftarrow v$). The latter notion goes against the notion of non-adaptive updates, since to rewrite a cell, one must know how an update changes data. One solution is to assume that the data structure stores raw data directly, and to allow memoryless updates to depend on the current contents of a cell, the update, and the previous value of the update. We view this issue as largely semantic, and do not discuss it further.} \item \textbf{Linear Data Structures.} Finally, we study a sub-class of the data structures with a memoryless update algorithm, which we refer to as linear data structures. These data structures are defined for problems where the input can be interpreted as an array $A$ of $n$ bits and an update operation can be interpreted as flipping a bit of $A$ (from $0$ to $1$ or $1$ to $0$). A linear data structure has non-adaptive query and update algorithms. Furthermore, when processing an update, the contents of all probed cells are simply flipped, and on a query, the data structure returns the XOR of the bits stored in all the probed cells. Note that these data structures use only a word size of $w=1$ bit, every cell stores a linear combination over the bits of $A$ (mod 2) and a query again computes a linear combination over the stored linear combinations (mod 2). 
\end{itemize} While linear data structures might appear to be severely restrictive, for many data structure problems (particularly in the area of range searching), natural solutions are in fact linear. An example is the well-studied \emph{prefix sum problem}, where the goal is to dynamically maintain an array $A$ of bits under flip operations, and a query asks for the XOR of elements in a prefix range $A[1\ldots k]$. One-dimensional range trees are linear data structures that solve prefix sum with update and query time $O(\lg n)$. This is optimal when memory cells store only single bits~\cite{Patrascu:loga}, even for adaptive data structures. More elaborate problems in range searching would be: Given a fixed set $P$ of $n$ points in $d$-dimensional space, support deleting and re-inserting points of $P$ while answering queries of the form ``what is the parity of the number of points inside a given query range?''. Here query ranges could be axis-aligned rectangles, halfspaces, simplices etc. We note that all the known data structures for range counting can easily be modified to yield linear data structures when given a fixed set of points $P$, and still, this setting seems to capture the hardness of range counting. The main difference between non-adaptive and memoryless update algorithms is that non-adaptive update algorithms may \emph{move} the information about an update operation around the data structure, even on later updates. As an example, consider a data structure with a non-adaptive update algorithm and two possible updates, say updates $u_1$ and $u_2$. Even if the data structure only probes the first memory cell on update $u_1$, information about $u_1$ can be stored in many other places in the data structure. Imagine the data structure initially stores the value $0$ in the first memory cell. Whenever update $u_1$ is performed, the data structure increments the contents of the first memory cell by one. 
On update $u_2$, the data structure copies the contents of the first memory cell to the second memory cell. Clearly both operations are non-adaptive, and we observe that whenever we have performed update $u_2$, the second memory cell stores the number of times update $u_1$ has been performed, even though $u_1$ never probes the cell. For memoryless updates, information about an update is only stored in cells that are actually probed when processing the update operation. Linear data structures are inherently memoryless. However, some features possible with memoryless updates are not available to linear data structures. For example, memoryless update algorithms can support cells that maintain a count of the total number of updates executed. This is not possible with linear data structures, since the contents of each cell is a \emph{fixed} linear combination of the data being stored. \subsection{Our Results} \label{sec:our} The main result of this paper is to demonstrate that polynomial cell probe lower bounds can be achieved when we restrict data structures to be non-adaptive. In Section~\ref{sec:lower} we also prove lower bounds for data structures where only the query algorithm is non-adaptive. The concrete data structure problem that we study in this setting is the following indexing problem. \paragraph{Indexing Problem.} In a preprocessing phase, we receive a set of $k$ binary strings $S_1,\dots,S_k$, each of length $n$. We are then to support updates, consisting of an index $j \in [n]$, which we think of as an index into the strings $S_1,\dots,S_k$. A query is finally specified by an index $i \in [k]$ and the goal is to return the $j$'th bit of $S_i$. \begin{theorem} \label{thm:nonquery} Any cell probe data structure solving the indexing problem with a non-adaptive query algorithm must either have $t_q = \Omega(n/w)$ or $t_u = \Omega(k/w)$, regardless of the preprocessing time and space usage. 
\end{theorem} Examining this problem, one quickly observes that it is a special case of the multiphase problem presented in Section~\ref{sec:previous}, thus by setting the parameters in the reductions of~\cite{patrascu10mp-3sum,chan:mode} correctly we obtain, amongst others, the following lower bounds as an immediate corollary of our lower bound for the indexing problem: \begin{corollary} Any cell probe data structure that uses a non-adaptive query algorithm to solve (i) reachability in directed graphs or (ii) subgraph connectivity must either have $t_q = \Omega(n/w)$ or $t_u = \Omega(n/w)$. Any cell probe data structure that solves range mode with a non-adaptive query algorithm must have $t_qt_u = \Omega(n/w^2)$. \end{corollary} In Section~\ref{sec:lower}, we prove lower bounds for data structures where the query algorithm is allowed to be adaptive, but the update algorithm is memoryless. Again, we prove our lower bound for a special case of the multiphase problem: \paragraph{Set Disjointness Problem.} In a preprocessing phase, we receive a subset $S$ of a universe $[n]$. We are then to support inserting elements $x \in [n]$ into an initially empty set $T$. Finally a query simply asks to return whether $S \cap T = \emptyset$, i.e. the problem has just one query. \begin{theorem} \label{thm:memoryless} Any cell probe data structure solving the set disjointness problem with a memoryless update algorithm must have $t_q = \Omega(n/w)$, regardless of the preprocessing time, space usage and update time. \end{theorem} Again, using the reductions of~\cite{patrascu10mp-3sum,chan:mode}, we obtain the following lower bounds as a corollary of our lower bound for the set disjointness problem: \begin{corollary} Any cell probe data structure that uses a memoryless update algorithm to solve (i) reachability in directed graphs, (ii) subgraph connectivity, or (iii) range mode must have $t_q = \Omega(n/w)$. 
\end{corollary} Finally, in Section~\ref{sec:circuit}, we show a strong connection between nonadaptive data structures and the wire complexity of depth-2 circuits. In these circuits, gates have \emph{unbounded} fan-in and fan-out and compute \emph{arbitrary} functions. Thus, trivial bounds on the number of gates exist. Instead, the size of a circuit $s(C)$ is defined to be the number of wires. Proving lower bounds on the size of circuits computing explicit operators $F:\{0,1\}^n \rightarrow \{0,1\}^m$ has been studied in several works. In particular, Valiant~\cite{Valiant77} showed that an $\omega(n^2/(\lg\lg n))$ bound for circuits computing $F$ implies that $F$ cannot be computed by log-depth, linear size, bounded fan-in circuits. Currently, the best bounds known for an explicit operator are $\Omega(n^{3/2})$. Cherukhin~\cite{Cherukhin05} gave such a bound for circuits computing cyclic convolutions. Jukna~\cite{Jukna10} gave a similar lower bound for circuits computing matrix multiplication, and developed a general technique for proving such lower bounds, formalizing the intuition in~\cite{Cherukhin05}. First, we show how to use simple encoding arguments common to data structure lower bounds to achieve circuit lower bounds, using matrix multiplication as an example. Our bound matches the result from~\cite{Jukna10}, but yields a simpler argument. We discuss Jukna's technique in more detail in Section~\ref{sec:circuit}. \begin{theorem}[\cite{Jukna10}]\label{thm:mm-encoding-intro} Any circuit computing matrix multiplication has size at least $n^{3/2}$. \end{theorem} Depth-2 circuits computing explicit \emph{linear} operators are of particular interest. Currently, the best lower bound for an explicit linear operator is the recent $\Theta(n (\lg n/\lg \lg n)^2)$ bound of G\'{a}l et al.~\cite{Gal12} for circuits that compute error correcting codes. 
Another interesting question is whether general circuits are more powerful than \emph{linear} circuits for computing linear operators. Linear circuits use only XOR gates; i.e., each gate outputs a linear combination in $\textbf{GF}(2)$ over its inputs. We show a generic connection between linear data structures and linear circuits. Define a problem $\mathcal{P}$ as a mapping $F_\mathcal{P} = (f_1,\dots,f_m) : \{0,1\}^n \to \{0,1\}^m$, where each $f_j : \{0,1\}^n \to \{0,1\}$. For linear data structures, think of the domain $\{0,1\}^n$ as the input array $A$ with $n$ bits, and view each $f_j$ as a query, where $f_j(A)$ is the answer to the query $f_j$ on the input $A$. A linear data structure hence solves $\mathcal{P}$, if after any sequence of updates to $A$, it holds for all $1 \leq j \leq m$ that answering the query $f_j$ returns the value $f_j(A)$. \begin{lemma}\label{lem:circuit-intro} If there is a linear data structure for a problem $\mathcal{P}$ with query time of $t_q$ and update time $t_u$, then there exists a depth-2 linear circuit $C$ computing $F_\mathcal{P}$ with size $s(C) \leq n t_u + mt_q$. If there is a depth-2 linear circuit $C$ that computes $F_\mathcal{P}$, then there is a linear data structure for $\mathcal{P}$ with \emph{average} query time at most $s(C)/m$ and average update time at most $s(C)/n$. \end{lemma} Lemma~\ref{lem:circuit-intro} thus gives a new way to attack circuit lower bounds. We believe the connection between non-adaptive data structures and depth-2 circuits has the potential to yield strong insight to this problem, and that several linear operators conjectured to have strong data structure lower bounds are good candidates for hard circuit problems (for linear or general circuits). Apart from being interesting lower bounds in their own right, we believe our results shed much light on the inherent difficulties of proving polynomial lower bounds in the cell probe model. 
In particular the \emph{movement} of data when performing updates (see the discussion in Section~\ref{sec:nonadapt}) appears to be a major obstacle. We conclude in Section~\ref{sec:concl} with a discussion of our results and potential directions for future research. \section{Lower Bounds} \label{sec:lower} In this section, we first prove lower bounds for data structures where only the query algorithm is assumed non-adaptive. The problem we study is the indexing problem defined in Section~\ref{sec:our}. \begin{theorem} [Restatement of Theorem~\ref{thm:nonquery}] Any cell probe data structure solving the indexing problem with a non-adaptive query algorithm must either have $t_q = \Omega(n/w)$ or $t_u = \Omega(k/w)$, regardless of the preprocessing time and space usage. Here $t_q$ denotes the query time, $t_u$ the update time and $w$ the cell size in bits. \end{theorem} We prove this using an encoding argument. Specifically, consider a game between an encoder and a decoder. The encoder receives as input $k$ binary strings $S_1,\dots,S_k$, each of length $n$ and must from this send a message to the decoder. From the message alone, the decoder must uniquely recover all the strings $S_1,\dots,S_k$. If the strings $S_1,\dots,S_k$ are drawn from a distribution, then the expected length of the message must be at least $H(S_1 \cdots S_k)$, or we have reached a contradiction. Here $H(\cdot)$ denotes Shannon entropy. The idea in our proof is to assume for contradiction that a data structure for the indexing problem exists with a non-adaptive query algorithm that simultaneously has $t_q=o(n/w)$ and $t_u=o(k/w)$. Using this data structure as a black box, we construct a message that is shorter than $H(S_1 \cdots S_k)$, but at the same time, the decoder can recover $S_1,\dots,S_k$ from the message, i.e. we have reached the contradiction. We let the $k$ strings $S_1,\dots,S_k$ given as input to the encoder be uniform random bit strings of length $n$. 
Clearly $H(S_1 \cdots S_k) = kn$. \paragraph{Encoding Procedure.} When given the strings $S_1,\dots,S_k$ as input, the encoder first runs the preprocessing algorithm of the claimed data structure on $S_1,\dots,S_k$. He then examines every possible query index $i \in [k]$, and for each $i$, collects the set of addresses of the cells probed on query $i$. Since the query algorithm is non-adaptive, these sets of addresses are independent of $S_1,\dots,S_k$ and any updates we might perform on the data structure. Letting $C$ denote the set containing all these addresses for all $i$, the encoder starts by writing down the concatenation of the contents of all cells with an address in $C$. This constitutes the first part of the message. The encoder now runs through every possible update $j\in [n]$. For each $j$, he runs the update algorithm as if update $j$ was performed on the data structure. While running update $j$, the encoder appends the contents of the probed cells (as they are when the update reads the cells, not after potential changes) to the constructed message. After processing all $j$'s, the encoder finally sends the constructed message to the decoder. This completes the encoding procedure. \paragraph{Decoding Procedure.} The decoder receives as input the message consisting first of the contents of all cells with an address in $C$ after preprocessing $S_1,\dots,S_k$. Since the query algorithm is non-adaptive, the decoder knows the addresses of all these cells simply by examining the query algorithm of the claimed data structure. The decoder will now run the update algorithm of every $j \in [n]$. While doing this, he maintains the contents of all cells in $C$ and all cells probed during the updates. Specifically, the decoder does the following: For each $j=1,\dots,n$ in turn, he starts to run the update algorithm for $j$. 
Observe that the contents of each probed cell (before potential changes) can be recovered from the message (the contents appear one after another in the message). This allows the decoder to completely simulate the update algorithm for each $j=1,\dots,n$. Note furthermore that for each cell that is probed during these updates, the address can also be recovered simply by examining the update algorithm. In this way, the decoder always knows the contents of all cells in $C$ and all cells probed by the update algorithm as they would have been after preprocessing $S_1,\dots,S_k$ and performing the updates after this preprocessing. While processing the updates $j=1,\dots,n$, the decoder also executes a number of queries: After having completely processed an update $j$, the decoder runs the query algorithm for every $i \in [k]$. Note that the decoder knows the contents of all the probed cells as if the preprocessing on $S_1,\dots,S_k$ had been performed, followed by updates $j'=1,\dots,j$. This implies that the simulation of the query algorithm for each $i \in [k]$ terminates precisely with the answer being the $j$'th bit of $S_i$. It follows immediately that the decoder can recover every bit of every $S_i$ from the message. \paragraph{Analysis.} What remains is to analyze the size of the message. Since by assumption, the query time is $t_q=o(n/w)$, the first part of the message has $t_qkw=o(kn)$ bits. Similarly, we assumed $t_u=o(k/w)$, thus the second part of the message has $t_unw=o(kn)$ bits. Thus the entire message has $o(kn)$ bits. Since $H(S_1 \cdots S_k) = kn$, we have reached our contradiction. This completes the proof of Theorem~\ref{thm:nonquery}.\\\\ Next, we prove lower bounds for data structures where only the update algorithm is assumed to be memoryless, that is, we allow the query algorithm to be adaptive. 
In this setting, we study the set disjointness problem defined in Section~\ref{sec:our}: \begin{theorem} [Restatement of Theorem~\ref{thm:memoryless}] Any cell probe data structure solving the set disjointness problem with a memoryless update algorithm must have $t_q = \Omega(n/w)$, regardless of the preprocessing time, space usage and update time. Here $t_q$ denotes the query time and $w$ the cell size in bits. \end{theorem} Again, we prove this using an encoding argument. In this encoding proof, we let the input of the encoder be a uniform random set $S \subseteq [n]$. Clearly $H(S)=n$ bits. We now assume for contradiction that there exists a data structure for the set disjointness problem with a memoryless update algorithm and at the same time it has query time $t_q = o(n/w)$. The encoder uses this data structure to send a message encoding $S$ in less than $n$ bits, i.e. a contradiction. \paragraph{Encoding Procedure.} When the encoder receives $S$, he runs the preprocessing algorithm of the claimed data structure. Then, he computes $\bar{S} = [n]\setminus S$ and inserts $\bar{S}$ into the data structure as the set $T$. Finally, the encoder runs the query algorithm and notes the set of cells $C$ probed. Note that by the choice of $\bar{S}$, the query algorithm will output \emph{disjoint}, and furthermore, $\bar{S}$ is the largest possible set that will result in a \emph{disjoint} answer. The encoding consists of three parts\footnote{In fact, it is possible for the decoder to recover $C$ from the second two parts of the encoding, so the first part is unnecessary. However, this does not materially affect our lower bound, so we omit the details.}: (i) the addresses of the cells in $C$, (ii) the contents of the cells in $C$ after preprocessing but before inserting $\bar{S}$, and (iii) the contents of the cells in $C$ after inserting $\bar{S}$. \paragraph{Decoding Procedure.} The decoder iterates over all sets $S' \subseteq [n]$. 
Each time, the decoder initializes the contents of cells in $C$ to match the second part of the encoder's message. Then, he inserts each element of $S'$ into the data structure, changing the contents of any cell in $C$ where appropriate. When a cell outside of $C$ is to be changed, the decoder does nothing. Since the update algorithm is memoryless, this procedure ends with all cells in $C$ having the same contents as they would have had after preprocessing $S$ and inserting elements of $S'$. Moreover, if the contents match the contents written down in the third part of the encoding, then it must be that $S$ and $S'$ are disjoint (we know that the query answers \emph{disjoint} when the contents of $C$ are like that). When $S' = \bar{S}$, the contents of $C$ will match the last part of the encoding, and it is trivially the largest set to do so. Thus, the decoder selects the largest set $S^*$ whose updates to $C$ match the contents written down in the third part of the encoding. In this way, the decoder recovers $S = [n] \setminus S^*$. \paragraph{Analysis.} Finally, we analyze the size of the encoding. Since we assumed $t_q = o(n/w)$, the encoding has size $3t_qw = o(n)$ bits. But $H(S) = n$, thus we have reached a contradiction. \section{Circuits and Non-Adaptive Data Structures}\label{sec:circuit} In this section, we demonstrate a strong connection between non-adaptive data structures and the wire complexity of depth-2 circuits. A depth-2 circuit computing $F = (f_1,\ldots, f_m) : \{0,1\}^n \rightarrow \{0,1\}^m$ is a directed graph with three layers of vertices. The first layer consists of $n$ input nodes, labeled $x_1,\ldots, x_n \in \{0,1\}$. Vertices in the second layer are interior gates and output boolean values. The last layer consists of $m$ output gates, labeled $z_1,\ldots, z_m \in \{0,1\}$. There are edges between input nodes and interior gates, and between interior gates and output gates. 
Each gate computes an \emph{arbitrary} function of its inputs. Since non-input nodes compute arbitrary functions, $F$ can be trivially computed using $m$ gates. Instead, we define the \emph{size} $s(C)$ of a depth-2 circuit $C$ as the total number of wires in it; i.e., the number of edges in the graph. First, we show how to use the encoding technique common to data structure lower bounds to achieve size bounds for depth-2 circuits. As a proof of concept, we prove such a lower bound for matrix multiplication. We say that a circuit computes matrix multiplication if there are $n = 2m$ inputs, each corresponding to an entry in one of two $\sqrt{n} \times \sqrt{n}$ binary matrices $A$ and $B$, and each output gate computes an entry in the product $A\cdot B$. Arithmetic is in $\textbf{GF}(2)$. Jukna~\cite{Jukna10} considered depth-$2$ circuits and gave an $n^{3/2}$ lower bound for circuits computing boolean matrix multiplication. At a high level, his proof proceeds in the following fashion. \begin{enumerate} \item Partition input nodes into sets $I_1,\ldots, I_t$ and output gates into sets $J_1,\ldots, J_t$. \item Prove that for each $1 \leq \ell \leq t$, the number of wires leaving inputs from $I_\ell$ plus the number of wires entering outputs in $J_\ell$ must be large.\label{foo:part} \item Conclude a large lower bound by summing the terms from Step~\ref{foo:part}. \end{enumerate} Note that since $\{I_\ell\}$ and $\{J_\ell\}$ are partitions, they induce a partition on the wires in the circuit. Jukna proves Step~\ref{foo:part} by proving lower bounds on what he calls the \emph{entropy} of an operator. He proves a lower bound on the entropy of an operator by carefully analyzing subfunctions of the operator. In the case of matrix multiplication, subfunctions are created by fixing entries in $B$ to be all zero, except for a single cell $B[k,\ell]$. Each $I_\ell,J_\ell$ represents a column in $B$ and in $A \cdot B$ respectively.
By ranging over different $k,\ell$, Jukna is able to argue that the entropy of matrix multiplication is high. The details of this argument are technical. We give a new proof for Step~\ref{foo:part} using an encoding argument. The encoder exploits the circuit operations to encode a $\sqrt{n} \times \sqrt{n}$ matrix $A$. The encoded message has length precisely equal to the number of outgoing wires in $I_\ell$ and incoming wires to $J_\ell$. The argument is very similar to the arguments in Section~\ref{sec:lower}; we leave it to the full version of the paper for lack of space. \begin{theorem}\label{thm:mm-encoding} Any circuit $C$ computing boolean matrix multiplication has size $s(C) \geq n^{3/2}$. \end{theorem} Finally, we provide a strong connection between depth-2 linear circuits and linear data structures. The connection is almost immediately established: \begin{lemma}[Restatement of Lemma~\ref{lem:circuit-intro}]\label{lem:circuit} If there is a linear data structure for a problem $\mathcal{P}$ with query time of $t_q$ and update time $t_u$, then there exists a depth-2 linear circuit $C$ computing $F_\mathcal{P}$ with size $s(C) \leq n t_u + mt_q$. If there is a depth-2 linear circuit $C$ computing $F_\mathcal{P}$, then there is a linear data structure for $\mathcal{P}$ with \emph{average} query time at most $s(C)/m$ and average update time at most $s(C)/n$. \end{lemma} \begin{proof} First, suppose there exists a linear data structure solving $\mathcal{P}$. We construct the corresponding depth-2 circuit directly. Input nodes correspond to the $n$ bits of the input (the array $A$ in the definition of linear data structures). Output nodes correspond to the $m$ possible queries, and there is an interior node for each cell in the database. For each update $1\leq i \leq n$ (flip an entry of $A$), add edges from $x_i$ to each of the cells updated by the data structure.
Similarly, add wires $(c_i,z_j)$ whenever the $j$th query probes the $i$th cell in the data structure. Correctness follows immediately. Finally, note that since updates and queries probe at most $t_u$ and $t_q$ cells respectively, the total number of wires in the circuit is bounded by $s(C) \leq nt_u + mt_q$. Constructing a linear data structure from a linear depth-2 circuit $C$ is similar. Letting $t_{u,i}$ and $t_{q,j}$ denote the number of cells probed during the $i$th update and $j$th query respectively, it is easy to see that $s(C) = \sum_{i=1}^n t_{u,i} + \sum_{j=1}^m t_{q,j}$. It follows that the average update time is at most $\frac{1}{n}\sum t_{u,i} \leq s(C)/n$, and similarly that the average query time is at most $\frac{1}{m}\sum t_{q,j} \leq s(C)/m$. \end{proof} The main contribution of Lemma~\ref{lem:circuit} is a new range of candidate hard problems for linear circuits, all inspired by data structure problems. As mentioned in Section~\ref{sec:nonadapt}, linear data structures most naturally occur in the field of range searching. Furthermore, these data structure problems turn out to correspond precisely to linear operators: Let $P=\{p_1,\dots,p_n\}$ be a fixed set of $n$ points in $\mathbb{R}^d,$ and let $\mathcal{R}$ be a set of query ranges, where each $R_i \in \mathcal{R}$ is a subset of $\mathbb{R}^d$. $P$ and $\mathcal{R}$ naturally define a linear operator $A(P,\mathcal{R}) \in \{0,1\}^{|\mathcal{R}| \times |P|}$, where the $i$th row of $A(P,\mathcal{R})$ has a $1$ in the $j$th column if $p_j \in R_i$ and $0$ otherwise. In the light of Lemma~\ref{lem:circuit}, assume a linear data structure solves the following range counting problem: Given the fixed set of points $P$, each assigned a weight in $\{0,1\}$, support flipping the weights of the points (intuitively inserting/deleting the points) while also supporting to efficiently compute the parity of the weights assigned to the points inside a query range $R_i \in \mathcal{R}$. 
Then that linear data structure immediately translates into a linear circuit for the linear operator $A(P,\mathcal{R})$ and vice versa. Thus we expect that hard range searching problems of the above form also provide hard linear operators for linear circuits. The seemingly hardest range searching problem is \emph{simplex range searching}, where we believe that the following holds: \begin{conjecture} \label{conj:simplex} There exists a constant $\varepsilon>0$, a set $\mathcal{R}$ of $\Theta(n)$ simplices in $\mathbb{R}^d$ and a set of $n$ points in $\mathbb{R}^d$, such that any data structure solving the above range counting problem (flip weights, parity queries), must have average query and update time $t_u t_q = \Omega(n^\varepsilon)$. \end{conjecture} We have toned down Conjecture~\ref{conj:simplex} somewhat, since the community generally believe $\varepsilon$ can be replaced by $1-1/d$, but to be on the safe side we only conjecture the above. In the circuit setting, this conjecture translates to \begin{corollary} If Conjecture~\ref{conj:simplex} is true for linear data structures, then there exists a constant $\delta>0$, a set $\mathcal{R}$ of $\Theta(n)$ simplices in $\mathbb{R}^d$ and a set $P$ of $n$ points, such that any linear circuit computing the linear operator $A(P,\mathcal{R})$ must have $\Omega(n^{1+\delta})$ wires. \end{corollary} Furthermore, the research on data structure lower bounds also provide a lot of insight into which concrete sets $P$ and $\mathcal{R}$ that might be difficult. More specifically, polynomial lower bounds for simplex range searching has been proved for: range reporting in the pointer machine~\cite{chazelle:simplex,Afshani.simplex} and I/O-model~\cite{Afshani.simplex}, range searching in the semi-group model~\cite{chazelle:polytope} and range searching in the group model~\cite{larsen:group,larsen:improved}. The group model comes closest in spirit to linear data structures. 
A data structure in the group model is essentially a linear data structure, where instead of storing linear combinations over $\textbf{GF}(2)$, we store linear combinations with integer coefficients (and no mod operations). Similarly, queries are answered by computing linear combinations over the stored elements, but with integer coefficients and not over $\textbf{GF}(2)$. The properties used to drive home range searching lower bounds in the group model are: \begin{itemize} \item If $A(P,\mathcal{R})$ has polynomial red-blue discrepancy, then any group model data structure must have $t_u t_q = \Omega(n^\varepsilon)$ for some constant $\varepsilon>0$. \item If $A(P,\mathcal{R})$ has $\Omega(n)$ eigenvalues that are polynomial, then any group model data structure must have $t_u t_q = \Omega(n^\varepsilon)$ for some constant $\varepsilon>0$. \item If $|R_i \cap P|$ is polynomial for all $R_i \in \mathcal{R}$ and $|R_i \cap R_j \cap P|=O(1)$ for all $i \neq j$, then any group model data structure must have $t_u t_q = \Omega(n^\varepsilon)$ for some constant $\varepsilon>0$. \end{itemize} The last property directly translates to $A(P,\mathcal{R})$ having rows and columns with polynomially many $1$s and any two rows/columns having a constant number of $1$s in common. Given the tight correspondence between group model data structures and linear data structures, we believe these properties are worth investigating in the circuit setting. Furthermore, a concrete set of $n$ points $P$ and a set of $\Theta(n)$ simplices $\mathcal{R}$, with all three properties, is known even in $\mathbb{R}^2$. This example can be found in~\cite{chazelle:fracLB}, where it is stated for $\mathcal{R}$ being lines (i.e. degenerate simplices). Note that the lower bound in~\cite{chazelle:fracLB} is for range reporting in the pointer machine, but using the observations in~\cite{larsen:group,larsen:improved} it is easily seen that all the above properties hold. 
Even if these properties are not enough to obtain lower bounds for linear operators, we believe the geometric approach might be useful in its own right. \section{Conclusion} \label{sec:concl} In this paper, we have studied the role non-adaptivity plays in dynamic data structures. Surprisingly, we were able to prove polynomially high lower bounds for such data structures. Perhaps more importantly, we believe our results shed much new light on the current polylogarithmic barriers if we do not make any restrictions on data structures. We also presented an interesting connection between data structures and depth-2 circuits. The connection between linear operators and range searching is particularly intriguing, revealing a number of new properties to investigate further in the realm of circuit lower bounds. \appendix \section{A Lower Bound Proof for Matrix Multiplication} \begin{theorem}[Restatement of Theorem~\ref{thm:mm-encoding}] Any circuit $C$ computing boolean matrix multiplication has size $s(C) \geq n^{3/2}$. \end{theorem} \begin{proof} Fix a circuit $C$. Let $P = A\cdot B$. For $1 \leq \ell \leq \sqrt{n}$, let $I_\ell$ denote the $\ell$th column of $B$; that is, $I_\ell$ consists of all inputs corresponding to $B[k,\ell]$ for some $k$. Similarly, $J_\ell$ is the set of all outputs corresponding to the $\ell$th column of $P$; that is, all outputs given by $P[k,\ell]$ for some $k$. Let $t_{u,\ell}$ denote the number of wires leaving inputs in $I_\ell$. Similarly, let $t_{q,\ell}$ denote the number of wires entering outputs in $J_\ell$. \begin{claim}\label{claim:mm-encoding} For any $\ell$, we have $t_{u,\ell} + t_{q,\ell} \geq n$. \end{claim} Before proving this claim, note that Theorem~$\ref{thm:mm-encoding}$ follows directly, since there are $\sqrt{n}$ pairs $(I_\ell, J_\ell)$ and the wires corresponding to each pair are disjoint. \end{proof} \begin{proof}[Proof of Claim~\ref{claim:mm-encoding}] This proof will involve an encoding argument. 
The encoder will receive a $\sqrt{n} \times \sqrt{n}$ boolean matrix $M$, where $M$ is drawn uniformly amongst all such boolean matrices. He will then use the matrix multiplication circuit to encode $M$ in such a way that the size of the encoding depends on the wires leaving $I_\ell$ and entering $J_\ell$. \paragraph{Encoding Procedure.} The encoder receives $M$. As a first step, he sets $A[i,j] \leftarrow M[i,j]$ for all $i,j$; he also sets all entries in $B$ to zero. He then writes down the output of all interior gates adjacent to an output in $J_\ell$. In the second step, for each $1 \leq k \leq \sqrt{n}$, the encoder performs the following: he sets $B[k,\ell] \leftarrow 1$ and sets all other entries in $B$ to zero. He then writes down the output of all interior gates adjacent to $B[k,\ell]$. This completes the encoding procedure. \paragraph{Decoding Procedure.} Note that $P[i,\ell] = \sum_j A[i,j]B[j,\ell]$. In particular, when $B$ consists of a $1$ in entry $[k,\ell]$ and zero in all other entries, then the $\ell$th column of $P$ corresponds to the $k$th column of $A$. The decoder thus recovers the $k$th column of $M$ by using $C$ to compute the $\ell$th column of $P$, i.e., by querying all outputs in $J_\ell$. For each output gate in $J_\ell$, she looks at all interior gates adjacent to it. For each of \emph{these} gates $g$, the decoder checks to see if $g$ is adjacent to the input gate $B[k,\ell]$. If so, then she recovers the correct output value of this gate from the second part of the encoding. Otherwise, she recovers the correct output from the first part (noting that in this case, changing the value of $B[k,\ell]$ does not affect $g$). In this way, the decoder recovers the $\ell$th column of $P$, which is also the $k$th column of $A$, which is again the $k$th column of $M$. Doing this for all $k$ completes the decoding.
\paragraph{Analysis.} The first part of the encoding consists of the output of each interior gate adjacent to at least one output in $J_\ell$. Thus, the first part of the encoding can be described in at most $t_{q,\ell}$ bits. The second part of the encoding consists of the output of each interior gate adjacent to each input node in $I_\ell$. This requires at most $t_{u,\ell}$ bits. Thus, the total length of the encoding is at most $t_{u,\ell} + t_{q,\ell}$. The decoder recovers all of $M$ from this message. Since each entry of $M$ is independent and uniform, $H(M) = n$. Thus, $t_{u,\ell} + t_{q,\ell} \geq n$. \end{proof} \paragraph{Remark.} As mentioned previously, Jukna proves his lower bounds by defining the \emph{entropy} of an operator. He lower bounds the wire complexity of a circuit by the entropy of the operator it computes. He proves a lower bound on the entropy of an operator by carefully analyzing subfunctions of the operator, created by fixing subsets of the variables to specific values and considering the induced function on the remaining variables. Parts of Jukna's proof are similar in spirit to ours. In particular, the way we encode $M$ by fixing the matrix $B$ to be one in entry $[k,\ell]$ and zero elsewhere corresponds to the subfunctions Jukna considers in his proof. In fact, we believe that \emph{any} lower bound provable using Jukna's technique can also be proved using our method. Our advantage is in replacing Jukna's technical and somewhat complicated machinery with a simple encoding argument. \end{document}
\begin{document} \begin{frontmatter} \title{\textbf{A new proof for the number of lozenge tilings of quartered hexagons}} \author{Tri Lai\corref{cor1}\fnref{myfootnote1}} \address{Institute for Mathematics and its Applications\\ University of Minnesota\\ Minneapolis, MN 55455} \fntext[myfootnote1]{This research was supported in part by the Institute for Mathematics and its Applications with funds provided by the National Science Foundation.} \cortext[cor1]{Corresponding author, email: [email protected], tel: 612-626-8319} \begin{abstract} It has been proven that the lozenge tilings of a quartered hexagon on the triangular lattice are enumerated by a simple product formula. In this paper we give a new proof for the tiling formula by using Kuo's graphical condensation. Our result generalizes a theorem of Proctor on the enumeration of plane partitions contained in a ``maximal staircase''. \end{abstract} \begin{keyword} Tilings\sep perfect matchings \sep plane partitions \sep graphical condensation. \MSC[2010] 05A15\sep05C70 \sep 05E99 \end{keyword} \end{frontmatter} \section{Introduction} A plane partition is a rectangular array of non-negative integers with weakly decreasing rows and columns. The number of plane partitions contained in a $b\times c$ rectangle with entries at most $a$ is given by MacMahon's formula $\prod_{i=1}^{a}\prod_{j=1}^{b}\prod_{k=1}^{c}\frac{i+j+k-1}{i+j+k-2}$ (see \cite{Mac}). As a variation of this, Proctor proved a simple product formula for the number of plane partitions with entries at most $a$ which are contained in a shape with row lengths $b, b-1,\dotsc,b-c+1$ (see Corollary 4.1 in \cite{Proctor}). \begin{figure} \caption{Obtaining $P_{a,b,c}$.}\label{KuoQAR5} \end{figure} A \textit{lozenge tiling} of a region on the triangular lattice is a covering of the region by unit rhombi (or lozenges) so that there are no gaps or overlaps.
We use the notation $\operatorname{L}(R)$ for the number of lozenge tilings of a region $R$ ($\operatorname{L}(\emptyset):=1$). The plane partitions contained in a $b \times c$ rectangle with entries at most $a$ are in bijection with lozenge tilings of the hexagon $H_{a,b,c}$ of sides $a,b,c,a,b,c$ (in cyclic order, starting from the north side). In view of this, we have an equivalent form of Proctor's result as follows. \begin{thm}[Proctor \cite{Proctor}]\label{proctor} Assume that $a,b,c$ are non-negative integers such that $b\geq c$. Let $P_{a,b,c}$ be the region obtained from the hexagon $H_{a,b,c}$ by removing the ``maximal staircase'' from its east corner (see Figure \ref{KuoQAR5} for $P_{3,6,4}$). Then \begin{equation} \operatorname{L}(P_{a,b,c})=\prod_{i=1}^{c}\left[\prod^{b-c+1}_{j=1}\frac{a+i+j-1}{i+j-1}\prod^{b-c+i}_{j=1}\frac{2a+i+j-1}{i+j-1}\right], \end{equation} where empty products are equal to $1$ by convention. \end{thm} One can find more variations and generalizations of Proctor's result in \cite{CK}. We consider next a different generalization of Theorem \ref{proctor}. \begin{figure} \caption{Obtaining the region $R_{a,b,c}$.}\label{KuoQAR6} \end{figure} Let $R_{a,b,c}$ be the region described as in Figure \ref{KuoQAR6}. To be precise, $R_{a,b,c}$ consists of all unit triangles on the right of the vertical symmetry axis of the hexagon of sides $2a+1,b,c,2a+b-c+1,c,b$ (in cyclic order, starting from the north side). Figure \ref{KuoQAR6}(a) illustrates the region $R_{2,6,3}$ and Figure \ref{KuoQAR6}(b) shows the region $R_{2,5,3}$ (see the ones restricted by the bold contours). We are interested in the region $R_{a,b,c}$ with $k$ up-pointing unit triangles removed from the base ($k=\lfloor\frac{b-c+1}{2}\rfloor$). If the positions of the triangles removed are $s_1,s_2,\dotsc,s_k$, then we denote by $R_{a,b,c}(s_1,s_2,\dotsc,s_k)$ the resulting region (see Figures \ref{KuoQAR6}(b) and (c) for $R_{2,6,3}(2,3)$ and $R_{2,5,3}(2)$, respectively).
The number of lozenge tilings of the region $R_{a,b,c}(s_1,\dotsc,s_k)$ is given by the theorem stated below. \begin{thm}\label{main} Assume $a,b,c$ are non-negative integers. If $b-c=2k-1$ for some non-negative integer $k$, then \begin{equation}\label{eq1} \operatorname{L}\big(R_{a,b,c}(s_1,s_2,\dotsc,s_k)\big)=\prod_{1\leq i<j\leq k+c}\frac{s_j-s_i}{j-i}\frac{s_i+s_j-1}{i+j-1}, \end{equation} where $s_{k+i}:= a+\frac{b-c+1}{2}+i$, for $i=1,2,\dotsc,c$. If $b-c=2k$ for some non-negative integer $k$, then \begin{equation}\label{eq2} \operatorname{L}\big(R_{a,b,c}(s_1,s_2,\dotsc,s_k)\big)=\prod_{i=1}^{k+c}\frac{s_i}{2i-1}\prod_{1\leq i<j\leq k+c}\frac{s_j-s_i}{j-i}\frac{s_i+s_j}{i+j}, \end{equation} where $s_{k+i}:= a+\frac{b-c}{2}+i$, for $i=1,2,\dotsc,c$. \end{thm} We note that by specializing $k=b-c$ and $s_i=i$, for $i=1,2,\dotsc,k$, the region $P_{a,b,c}$ is obtained from $R_{a,b,c}(1,2,\dotsc,k)$ by removing forced lozenges on the lower-left corner (see Figure \ref{KuoQAR7}). Thus \[\operatorname{L}\big(R_{a,b,c}(1,2,\dotsc,k)\big)=\operatorname{L} \big(P_{a,b,c}\big),\] and the Proctor's Theorem \ref{proctor} follows from Theorem \ref{main}. \begin{rmk} We enumerated the lozenge tilings of the region $R_{a,b,c}(s_1,s_2,\dotsc,s_k)$ in \cite{Tri} under the name \textit{quartered hexagon} (see Theorem 3.1, equations (3.1) and (3.2)). In particular, the region $R_{a,b,c}(s_1,s_2,\dotsc,s_k)$ is obtained from the quartered hexagon\\ $QH_{b+c,n}(s_1,s_2,\dotsc,s_k,n-c+1,n-c+2,\dotsc,n)$ ($n:=\lfloor\frac{2a+1+b+c}{2}\rfloor$) by removing several forced lozenges (see Figures 2.8 and 2.9(a) in \cite{Tri}). Thus, we still call our $R_{a,b,c}$-type regions \emph{quartered hexagons}. In \cite{Tri}, we identified the lozenge tilings of $R_{a,b,c}(s_1,s_2,\dotsc,s_k)$ with certain families of non-intersecting paths on $\mathbb{Z}^2$, then used Lindstr\"{o}m-Gessel-Viennot Theorem (see e.g. 
\cite{Lind}, Lemma 1; or \cite{Stem}, Theorem 1.2) to turn the number of path families to the determinant of a matrix whose entries are binomial coefficients, and evaluated the determinant. \end{rmk} \begin{figure} \caption{Obtaining the region $P_{a,b,c}$.}\label{KuoQAR7} \end{figure} A \textit{perfect matching} of a graph $G$ is a collection of edges so that each vertex of $G$ is incident to exactly one selected edge. The \textit{dual graph} $G$ of a region $R$ on the triangular lattice is the graph whose vertices are unit triangles in $R$ and whose edges connect precisely two unit triangles sharing an edge. We have a bijection between the tilings of a region $R$ and the perfect matchings of its dual graph $G$. We use the notation $\operatorname{M}(G)$ for the number of perfect matchings of a graph $G$. In this paper, we give a new inductive proof of Theorem \ref{main} by using Proctor's Theorem \ref{proctor} as a base case. The method that we use in the proof is the \textit{graphical condensation method} first introduced by Eric Kuo \cite{Kuo}. In particular, we will employ the following theorem in our proof. \begin{thm}[Kuo \cite{Kuo}]\label{Kuothm} Let $G=(V_1\cup V_2, E)$ be a planar bipartite graph, and $V_1$ and $V_2$ its vertex classes. Assume that $x,y,z,t$ are four vertices appearing on a face of $G$ in a cyclic order. Assume in addition that $x,y,z\in V_1$, $t\in V_2$, and $|V_1|=|V_2|+1$. Then \begin{equation}\label{kuoeq} \operatorname{M}(G-\{y\})\operatorname{M}(G-\{x,z,t\})=\operatorname{M}(G-\{x\})\operatorname{M}(G-\{y,z,t\})+\operatorname{M}(G-\{z\})\operatorname{M}(G-\{x,y,t\}). \end{equation} \end{thm} \section{Proof of Theorem \ref{main}} We only prove (\ref{eq1}), as (\ref{eq2}) can be obtained in a perfectly analogous manner. It is easy to see that if $a=0$ then the region $R_{a,b,c}(s_1,s_2,\dotsc,s_k)$ has only one tiling (see Figure \ref{KuoQAR8}(a)).
On the other hand, since now $\{s_1,\dotsc,s_k\}=[k]$\footnote{We use the notation $[k]$ for the set $\{1,2,\dotsc,k\}$ of all positive integers not exceeding $k$.}, the right hand side of the equality (\ref{eq1}) is also equal to $1$, then (\ref{eq1}) holds for $a=0$. Moreover, if $b=0$, then $c=1$, $k=0$, and the region has the form as in Figure \ref{KuoQAR8}(b). In this case, the region has also a unique tiling; and it is easy to verify that the right hand side of (\ref{eq1}) equals 1. Thus, we can assume in the remainder of the proof that $a,b\geq 1$. We will prove (\ref{eq1}) by induction on $a+b$. \begin{figure} \caption{Several special cases of $R_{a,b,c}$.}\label{KuoQAR8} \end{figure} If $a+b\leq2$, then we have $a=b=1$. It is easy to see that there are only 2 possible shapes for the region $R_{a,b,c}$ (i.e., the region before triangles are removed from the base) as in Figures \ref{KuoQAR8}(c) and (d). Then it is routine to verify (\ref{eq1}) for $a=b=1$. For the induction step, we assume that (\ref{eq1}) is true for any region with $a+b<l$, for some $l\geq3$, then we need to show that it is also true for any region $R_{a,b,c}(s_1,\dotsc,s_k)$ with $a+b=l$. Let $A:=\{s_1,s_2,\dotsc,s_k\}$ be a set of positive integers; we define the operators $\Delta$ and $\bigstar$ by setting \begin{equation} \Delta(A):=\prod_{1\leq i<j\leq k}(s_j-s_i) \end{equation} and \begin{equation} \bigstar(A):=\prod_{1\leq i<j\leq k}(s_i+s_j-1). \end{equation} Then one can re-write (\ref{eq1}) in terms of the above operators as: \begin{equation}\label{eq1a} \operatorname{L}\big(R_{a,b,c}(s_1,\dotsc,s_k)\big)=\frac{\Delta(S)}{\Delta([k+c])}\frac{\bigstar(S)}{\bigstar([k+c])}, \end{equation} where $S:=\{s_1,s_2,\dotsc,s_{k+c}\}$ and $[k+c]:=\{1,2,\dotsc,k+c\}$. From this stage we will work on this new form of the equality (\ref{eq1}).
We first consider three special cases as follows: \begin{enumerate} \item[(i)] If $c=0$, then by considering forced lozenges as in Figure \ref{KuoQAR8}(f), we get \begin{equation}\label{eq3} \operatorname{L}\big(R_{a,b,0}(s_1,s_2,\dotsc,s_k)\big)=\operatorname{L}\big(R_{a-q,b-1,1}(s_1,s_2,\dotsc,s_k)\big), \end{equation} where $q=a+\frac{b-c+1}{2}-s_k$. Then (\ref{eq1a}) follows from the induction hypothesis for the region on the right hand side of (\ref{eq3}). \item[(ii)] If $k=0$, then $b=c-1$; and we get that the region $P_{a,b,b}$ is obtained from the region $R_{a,b,b+1}(\emptyset)$ by removing forced lozenges along its base. Thus, (\ref{eq1a}) follows from Proctor's Theorem \ref{proctor}. \item[(iii)] Let $d:=a+\frac{b-c+1}{2}$ (so $s_{k+i}=d+i$). We consider one more special case when $s_k=d$. By removing forced lozenges again, one can transform our region into the region $R_{a,b-1,c+1}(s_1,\dotsc,s_{k-1})$, then we get again (\ref{eq1a}) by induction hypothesis for the latter region (see Figure \ref{KuoQAR8}(e)). \end{enumerate} From now on, we assume that our region $R_{a,b,c}(s_1,\dotsc,s_k)$ has the two parameters $k$ and $c$ positive (so $b=c+2k-1\geq 2$), and that $s_k < d$. Now we consider the region $R$ obtained from $R_{a,b,c}(s_1,\dotsc,s_k)$ by recovering the unit triangle at the position $s_1$ on the base. We now apply Kuo's Theorem \ref{Kuothm} to the dual graph $G$ of $R$, where the unit triangles corresponding to the four vertices $x,y,z,t$ are chosen as in Figure \ref{KuoQAR4} (see the shaded triangles). In particular, the triangles corresponding to $x$ and $y$ are at the positions $s_1$ and $d$ on the base; and the ones corresponding to $z,t$ are on the upper-right corner of the region.
\begin{figure} \caption{Region to which we apply Kuo's graphical condensation} \label{KuoQAR4} \end{figure} \begin{figure} \caption{Obtaining the recurrence for $\operatorname{L}$.}\label{KuoQAR9} \end{figure} One readily sees that the six regions that have dual graphs appearing in the equation (\ref{kuoeq}) of Kuo's Theorem have some lozenges, which are forced into any tilings. Luckily, by removing such forced lozenges, we still get new regions of $R_{a,b,c}$-type. In particular, after removing forced lozenges from the region corresponding to $G-\{x\}$, we get the region $R_{a,b-1,c+1}(s_2,s_3,\dotsc,s_{k})$ (see the region restricted by bold contour in Figure \ref{KuoQAR9}(a)). This implies that \begin{equation}\label{eqn1} \operatorname{M}(G-\{x\})=\operatorname{L}\big(R_{a,b-1,c+1}(s_2,s_3,\dotsc,s_{k})\big). \end{equation} Similarly, we have five more equalities corresponding to other graphs in (\ref{kuoeq}): \begin{equation}\label{eqn2} \operatorname{M}(G-\{y\})=\operatorname{L}\big(R_{a,b,c}(s_1,s_2,\dotsc,s_{k})\big) \text{ (see Figure \ref{KuoQAR9}(b))}, \end{equation} \begin{equation}\label{eqn3} \operatorname{M}(G-\{z\})=\operatorname{L}\big(R_{a+1,b-2,c}(s_2,s_3,\dotsc,s_{k})\big) \text{ (see Figure \ref{KuoQAR9}(c))}, \end{equation} \begin{equation}\label{eqn4} \operatorname{M}(G-\{y,z,t\})=\operatorname{L}\big(R_{a,b-2,c-1}(s_1,s_2,\dotsc,s_{k})\big) \text{ (see Figure \ref{KuoQAR9}(d))}, \end{equation} \begin{equation}\label{eqn5} \operatorname{M}(G-\{x,z,t\})=\operatorname{L}\big(R_{a,b-2,c}(s_2,s_3,\dotsc,s_{k})\big) \text{ (see Figure \ref{KuoQAR9}(e))}, \end{equation} \begin{equation}\label{eqn6} \operatorname{M}(G-\{x,y,t\})=\operatorname{L}\big(R_{a-1,b,c}(s_1,s_2,\dotsc,s_{k})\big) \text{ (see Figure \ref{KuoQAR9}(f))}.
\end{equation} Plugging the above six equalities (\ref{eqn1}) -- (\ref{eqn6}) in (\ref{kuoeq}), we have the following recurrence \begin{align}\label{recu1} \operatorname{L}\big(R_{a,b,c}&(s_1,s_2,\dotsc,s_{k})\big)\operatorname{L}\big(R_{a,b-2,c}(s_2,s_3,\dotsc,s_{k})\big)\notag\\&=\operatorname{L}\big(R_{a,b-1,c+1}(s_2,s_3,\dotsc,s_{k})\big)\operatorname{L}\big(R_{a,b-2,c-1}(s_1,s_2,\dotsc,s_{k})\big)\notag\\&\quad +\operatorname{L}\big(R_{a+1,b-2,c}(s_2,s_3,\dotsc,s_{k})\big)\operatorname{L}\big(R_{a-1,b,c}(s_1,s_2,\dotsc,s_{k})\big). \end{align} The five regions other than $R_{a,b,c}(s_1,s_2,\dotsc,s_{k})$ in the above recurrence (\ref{recu1}) have their $(a+b)$-parameter less than $l$. Therefore, by induction hypothesis, we get \begin{equation}\label{eqm1} \operatorname{L}\big(R_{a,b-1,c+1}(s_2,\dotsc,s_k)\big)=\frac{\Delta(S\cup\{d\}-\{s_1\})}{\Delta([k+c])}\frac{\bigstar(S\cup\{d\}-\{s_1\})}{\bigstar([k+c])}, \end{equation} \begin{equation}\label{eqm2} \operatorname{L}\big(R_{a,b-2,c}(s_1,\dotsc,s_k)\big)=\frac{\Delta(S-\{s_{k+c}\})}{\Delta([k+c-1])}\frac{\bigstar(S-\{s_{k+c}\})}{\bigstar([k+c-1])}, \end{equation} \begin{equation}\label{eqm3} \operatorname{L}\big(R_{a+1,b-2,c}(s_2,\dotsc,s_k)\big)=\frac{\Delta(S-\{s_1\})}{\Delta([k+c-1])}\frac{\bigstar(S-\{s_1\})}{\bigstar([k+c-1])}, \end{equation} \begin{equation}\label{eqm4} \operatorname{L}\big(R_{a-1,b,c}(s_1,\dotsc,s_k)\big)=\frac{\Delta(S\cup\{d\}-\{s_{k+c}\})}{\Delta([k+c])}\frac{\bigstar(S\cup\{d\}-\{s_{k+c}\})}{\bigstar([k+c])}, \end{equation} \begin{equation}\label{eqm5} \operatorname{L}\big(R_{a,b-2,c}(s_2,\dotsc,s_k)\big)=\frac{\Delta(S\cup\{d\}-\{s_1,s_{k+c}\})}{\Delta([k+c-1])}\frac{\bigstar(S\cup\{d\}-\{s_1,s_{k+c}\})}{\bigstar([k+c-1])}.
\end{equation} By the above five equalities (\ref{eqm1}) -- (\ref{eqm5}) and the recurrence (\ref{recu1}), we only need to show that \begin{align}\label{eqfinal} 1&= \frac{\Delta(S\cup\{d\}-\{s_1\})\Delta(S-\{s_{k+c}\})}{\Delta(S)\Delta(S\cup\{d\}-\{s_1,s_{k+c}\})}\frac{\bigstar(S\cup\{d\}-\{s_1\})\bigstar(S-\{s_{k+c}\})}{\bigstar(S)\bigstar(S\cup\{d\}-\{s_1,s_{k+c}\})}\notag\\ &\quad\quad+\frac{\Delta(S\cup\{d\}-\{s_{k+c}\})\Delta(S-\{s_{1}\})}{\Delta(S)\Delta(S\cup\{d\}-\{s_1,s_{k+c}\})}\frac{\bigstar(S\cup\{d\}-\{s_{k+c}\})\bigstar(S-\{s_{1}\})}{\bigstar(S)\bigstar(S\cup\{d\}-\{s_1,s_{k+c}\})}, \end{align} and (\ref{eq1a}) follows. First, we want to simplify the first ratio in the first term on the right-hand side of (\ref{eqfinal}). We re-write it as \begin{equation} \frac{\Delta(S\cup\{d\}-\{s_1\})\Delta(S-\{s_{k+c}\})}{\Delta(S)\Delta(S\cup\{d\}-\{s_1,s_{k+c}\})}=\frac{\frac{\Delta(S\cup\{d\}-\{s_1\})}{\Delta(S)} \frac{\Delta(S-\{s_{k+c}\})}{\Delta(S)}}{\frac{\Delta(S\cup\{d\}-\{s_1,s_{k+c}\})}{\Delta(S)}} \end{equation} The ratio $\frac{\Delta(S\cup\{d\}-\{s_1\})}{\Delta(S)}$ has its numerator and denominator almost the same, except for some terms involving $s_1$ or $d$. 
Canceling out all common terms of the numerator and the denominator, we have \[\dfrac{\Delta(S\cup\{d\}-\{s_1\})}{\Delta(S)}=\frac{\prod_{i=2}^{k}(d-s_i)\prod_{i=1}^{c}(s_{k+i}-d)}{\prod_{i=2}^{k+c}(s_i-s_1)}.\] Similarly, we get \[\frac{\Delta(S-\{s_{k+c}\})}{\Delta(S)}=\frac{1}{\prod_{i=1}^{k+c-1}(s_{k+c}-s_i)}\] and \[\frac{\Delta(S\cup\{d\}-\{s_1,s_{k+c}\})}{\Delta(S)}=\frac{\prod_{i=2}^{k}(d-s_i)\prod_{i=1}^{c-1}(s_{k+i}-d)}{\prod_{i=2}^{k+c}(s_i-s_1)\prod_{i=2}^{k+c-1}(s_{k+c}-s_i)}.\] Thus, we obtain \begin{equation}\label{eqsimple1} \frac{\Delta(S\cup\{d\}-\{s_1\})\Delta(S-\{s_{k+c}\})}{\Delta(S)\Delta(S\cup\{d\}-\{s_1,s_{k+c}\})}=\frac{\frac{\Delta(S\cup\{d\}-\{s_1\})}{\Delta(S)} \frac{\Delta(S-\{s_{k+c}\})}{\Delta(S)}}{\frac{\Delta(S\cup\{d\}-\{s_1,s_{k+c}\})}{\Delta(S)}}=\frac{s_{k+c}-d}{s_{k+c}-s_1}. \end{equation} By the same trick, we can simplify the first ratio in the second term on the right-hand side of (\ref{eqfinal}) as \begin{equation}\label{eqsimple2} \frac{\Delta(S\cup\{d\}-\{s_{k+c}\})\Delta(S-\{s_{1}\})}{\Delta(S)\Delta(S\cup\{d\}-\{s_1,s_{k+c}\})}=\frac{\frac{\Delta(S\cup\{d\}-\{s_{k+c}\})}{\Delta(S)}\frac{\Delta(S-\{s_{1}\})}{\Delta(S)}}{\frac{\Delta(S\cup\{d\}-\{s_1,s_{k+c}\})}{\Delta(S)}}=\frac{d-s_1}{s_{k+c}-s_1}. \end{equation} Next, we simplify the second ratio in each term on the right-hand side of (\ref{eqfinal}). By replacing the operator $\Delta$ by the operator $\bigstar$, the whole simplifying-process works in the same way with each term $(s_j-s_i)$ replaced by $(s_i+s_j-1)$. Thus, we get \begin{equation}\label{eqsimple3} \frac{\bigstar(S\cup\{d\}-\{s_1\})\bigstar(S-\{s_{k+c}\})}{\bigstar(S)\bigstar(S\cup\{d\}-\{s_1,s_{k+c}\})}=\frac{s_{k+c}+d-1}{s_{k+c}+s_1-1} \end{equation} and \begin{equation}\label{eqsimple4} \frac{\bigstar(S\cup\{d\}-\{s_{k+c}\})\bigstar(S-\{s_{1}\})}{\bigstar(S)\bigstar(S\cup\{d\}-\{s_1,s_{k+c}\})}=\frac{d+s_1-1}{s_{k+c}+s_1-1}.
\end{equation} Finally, by (\ref{eqsimple1})--(\ref{eqsimple4}), we can simplify the equation (\ref{eqfinal}) to \begin{equation} 1=\frac{s_{k+c}-d}{s_{k+c}-s_1}\frac{s_{k+c}+d-1}{s_{k+c}+s_1-1}+\frac{d-s_1}{s_{k+c}-s_1}\frac{d+s_1-1}{s_{k+c}+s_1-1}, \end{equation} which is obviously true with $s_{k+c}=d+c$. This means that (\ref{eqfinal}) holds, and so does (\ref{eq1}). \end{document}
\begin{document} \title{QuEST and High Performance Simulation of Quantum Computers} \author{Tyson Jones} \affiliation{Department of Materials, University of Oxford, Parks Road, Oxford OX1 3PH, United Kingdom} \author{Anna Brown} \author{Ian Bush} \affiliation{Oxford e-Research Centre, Department of Engineering Science, University of Oxford, Keble Road, Oxford OX1 3PH, United Kingdom} \author{Simon Benjamin} \affiliation{Department of Materials, University of Oxford, Parks Road, Oxford OX1 3PH, United Kingdom} \date{\today} \begin{abstract} We introduce \href{https://quest.qtechtheory.org/}{QuEST\xspace}, the Quantum Exact Simulation Toolkit, and compare it to ProjectQ\xspace~\cite{projq_whitepaper}, qHipster\xspace~\cite{qhipster} and a recent distributed implementation~\cite{distributed_quantum_plus_plus} of Quantum++\xspace~\cite{quantum_plus_plus}. QuEST\xspace is the first open source, OpenMP\xspace and MPI\xspace hybridised, GPU\xspace accelerated simulator {of universal quantum circuits}. {Embodied as a C library, it is designed so that a user's code can be deployed seamlessly to any platform from a laptop to a supercomputer.} {QuEST is} capable of simulating generic quantum circuits of general single-qubit gates and multi-qubit controlled gates, \newedit{on pure and mixed states, represented as state-vectors and density matrices, and under the presence of decoherence}. Using the ARCUS Phase-B\xspace and ARCHER\xspace supercomputers, we benchmark QuEST\xspace's simulation of random circuits of up to 38 qubits, distributed over up to 2048 compute nodes, each with up to 24 cores. We directly compare QuEST\xspace's performance to ProjectQ\xspace's on single machines, and discuss the differences in distribution strategies of QuEST\xspace, qHipster\xspace and Quantum++\xspace. QuEST\xspace shows excellent scaling, both strong and weak, on multicore and distributed architectures. 
\end{abstract} \maketitle \section{Introduction} Classical simulation of quantum computation is vital for the study of new algorithms and architectures. As experimental researchers move closer to realising quantum computers of sufficient complexity to be useful, their work must be guided by an understanding of what tasks we can hope to perform. This in turn means we must explore an algorithm's scaling, its robustness versus errors and imperfections, and the relevance of limitations of the underlying hardware. {Because of these requirements simulation tools are needed on many different classical architectures; while a workstation may be sufficient for the initial stages of examining an algorithm, further study of scaling and robustness may require more powerful computational resources.} {Flexible, multi-platform supporting simulators of quantum computers are therefore essential.} {Further it} is important these simulations are very efficient since they are often repeated many times, for example to study the influence of many parameters, or the behaviour of circuits under noise. But it is expensive to exactly simulate a quantum system using a classical system, since a high-dimensional complex vector must be maintained with high fidelity. Both the memory requirements, and the time required to simulate an elementary circuit operation, grow exponentially with the number of qubits. A quantum computer of only 50 qubits is already too large to be comprehensively simulated by our best classical computers~\cite{49_qb_sim_intel}, and is barely larger than the 49 qubit computers in development by Intel and Google~\cite{intel_49qb,google_aim_for_49qb}. To simulate quantum computers even of the size already experimentally realised, it is necessary that a classical simulator take full advantage of the performance optimisations possible of high performance classical computing. { It is also equally important that the research community have access to an ecosystem of simulators. 
Verification of complex simulations is a non-trivial task, one that is much eased by having the facility to compare the results of simulations performed by multiple packages. } {The number of single compute node generic}~\cite{microsoft_liquid,pyquil_whitepaper,microsoft_q_sharp} {and specialised}~\cite{graph_based_qc_simmer,qtorch_tensor_qc_simmer,clifford_t_polynomial_algorithm,quantum_network_simmer} {simulators is rapidly growing.} {However} despite many reported distributed simulators~\cite{apparently_massive_parallel_qc_sim,first_parallel_qc_simmer,parallel_qc_simmer_noisy,qhipster,haener_45qb_sim,qx_high_performance_qasm_simulator,distributed_quantum_plus_plus,big_64qb_mpi_sim_approx} and proposals for GPU\xspace accelerated simulators~\cite{haener_45qb_sim,gpu_qc_sim_proposal,distributed_gpu_qc_sim_proposal,gpu_qc_sim_review,shor_sim_on_GPU}, QuEST\xspace is the first open source simulator available to offer {\textit{both} facilities}, and the only simulator to offer {support on all hardware platforms commonly used in the classical simulation of quantum computation}. \section{Background} \subsection{Target Platforms and Users} {Simulations of quantum computation are performed on a wide variety of classical computational platforms, from standard laptops to the most powerful supercomputers in the world, and on standard CPUs or on accelerators such as GPUs. Which is most suitable for the simulation of a given circuit will depend upon the algorithm being studied and the size of the quantum computer being modelled. To date this has resulted in a number of simulators which typically target one, or a small number, of these architectures. While this leads to a very efficient exploitation of a given architecture, it does mean that should a research project need to move from one architecture to another, for instance due to the need to simulate more qubits, a different simulation tool is required.
This may require a complete rewrite of the simulation code, which is time consuming and makes verification across platforms difficult. In this article we describe QuEST which runs efficiently on \textit{all} architectures typically available to a researcher, thus facilitating the seamless deployment of the researcher's code. This universal support also allows the researcher to easily compare the performance of the different architectures available to them, and so pick that most suitable for their needed simulations.} {In the rest of this section we shall examine the nature of the architectures that are available, cover briefly how codes exploit them efficiently, and show how QuEST, the universal simulator, compares with the more platform specific implementations.} \subsection{Simulator Optimisations} Classical simulators of quantum computation can make good use of several performance optimisations. For instance, the data parallel task of modifying the state vector under a quantum operation can be sped up with single-instruction-multiple-data (SIMD\xspace) execution. SIMD\xspace instructions, like Intel's advanced vector extensions (AVX), operate on multiple operands held in vector registers to concurrently modify multiple array elements~\cite{intel_avx_whitepaper}, like state vector amplitudes. Task parallelism can be achieved through multithreading, taking advantage of the multiple cores found in modern CPU s. Multiple CPU s can cooperate through a shared NUMA\xspace memory space, which simulators can interface with through OpenMP\xspace~\cite{openmp_versions}. Simulators can defer the expensive exchange of data in a CPU{}'s last level cache (LLC\xspace) with main memory through careful data access; a technique known as cache blocking~\cite{cache_blocking}. Quantum computing simulators can cache block by combining sequential operations on adjacent qubits before applying them, a technique referred to as \textit{gate fusion}~\cite{qhipster,haener_45qb_sim}. 
For instance, gates represented as matrices can be fused by computing their tensor product. Machines on a network can communicate and cooperate through message passing. Simulators can partition the state vector and operations upon it between distributed machines, for example through MPI\xspace, to achieve both parallelisation and greater aggregate memory. Such networks are readily scalable, and are necessary for simulating many qubit circuits~\cite{haener_45qb_sim}. With the advent of general-purpose graphical processing units (GPGPU{}s), the thousands of linked cores of a GPU\xspace can work to parallelise scientific code. Simulators can make use of NVIDIA's compute unified device architecture (CUDA\xspace) to achieve massive speedup on cheap, discrete hardware, when simulating circuits of a limited size~\cite{gpu_qc_sim_review}. We mention too a recent proposal to utilise multi-GPU\xspace nodes for highly parallel simulation of many qubit quantum circuits~\cite{distributed_gpu_qc_sim_proposal}. \subsubsection{Single node} ProjectQ\xspace is an open-source quantum computing framework featuring a compiler targeting quantum hardware and a C++\xspace quantum computer simulator behind a Python\xspace interface~\cite{projq_whitepaper}. In this text, we review the performance of its simulator, which supports AVX{} instructions, employs OpenMP\xspace and cache blocking for efficient parallelisation on single-node shared-memory systems, and emulation to take computational shortcuts \cite{projq_emulator_abilities}. QuEST\xspace is a new open source simulator developed in ISO standard conformant C\xspace \cite{ISO_C_99}, and released under the open source MIT license. Both OpenMP\xspace and MPI\xspace based parallelisation strategies are supported, and they may be used together in a so-called hybrid strategy. This provides {seamless} support for both {single-node} shared-memory and distributed systems. 
QuEST\xspace also employs CUDA\xspace for GPU\xspace{} acceleration, and offers the same interface on single-node, distributed and GPU\xspace{} platforms. Though QuEST\xspace does not use cache blocking or emulation, we find QuEST\xspace performs equally or better than ProjectQ\xspace on multicore systems, and can use its additional message-passing facilities for faster and bigger simulations on distributed memory architectures. { ProjectQ offers a high-level Python interface, but can therefore be difficult to install and run on supercomputing architectures, though containerisation may make this process easier in future}~\cite{projectqOnSupercomputersGuide,projq_cache_anomaly_haener}. {Conversely, Quest is light-weight, stand-alone, and tailored for high-performance resources - its low-level C interface can be compiled directly to a native executable and run on personal laptops and supercomputers. } Both QuEST\xspace and ProjectQ\xspace maintain a pure state in $2^n$ complex floating point numbers for a system of $n$ qubits, with (by default) double precision in each real and imaginary component; QuEST\xspace can otherwise be configured to use single or quad precision. Both simulators store the state in C\xspace/C++\xspace primitives, and so (by default) consume $16\times 2^n$\,B~\cite{MSDN_C_prim_sizes} in the state vector alone. However ProjectQ\xspace incurs a $\times 1.5$ memory overhead during state allocation, and QuEST\xspace clones the state vector in distributed applications. Typical memory costs of both simulators on a single thread are shown in Figure\xspace~\ref{fig:quest_projq_memory}, which vary insignificantly from their multithreaded costs. 
While QuEST\xspace allows direct read and write access to the state-vector, ProjectQ\xspace's single amplitude fetching has a {Python overhead, and writing is only supported in batch which is memory expensive due to Python objects consuming more memory than a comparable C primitive - as much as $3\times$}~\cite{pythonDataObjects,python3xPrims}. {Iterating the state-vector in ProjectQ is therefore either very slow, or comes with an appreciable memory cost.} \begin{figure} \caption{Memory consumption of QuEST\xspace's C\xspace and ProjectQ\xspace's Python\xspace processes, as reported by Linux\xspace's \texttt{/proc/self/status} \label{fig:quest_projq_memory} \end{figure} QuEST\xspace applies a single-qubit gate (a $2\times2$ matrix $G$) on qubit $q$ of an $N$-qubit \newedit{pure state-vector} $\ket{\psi} = \sum \limits_{n=0}^{2^N-1} \alpha_n \ket{n}$, represented as the complex vector $\vec{\alpha}$, by updating vector elements \begin{align} \begin{pmatrix} \alpha_{n_i} \\ \alpha_{n_i + 2^q} \end{pmatrix} \mapsto G \begin{pmatrix} \alpha_{n_i} \\ \alpha_{n_i + 2^q} \end{pmatrix} \end{align} where $n_i = \left\lfloor i / {2^q} \right\rfloor 2^{q+1} + (i \mod 2^q)$ for every integer $i \in [0, 2^{N-1} - 1]$. This applies $G$ via $2^{N}$ computations of $a b + c d$ for complex $a, b, c, d$ and avoids having to compute and matrix-multiply a full $2^N \times 2^N$ unitary on the state-vector. This has a straight-forward generalisation to multi-control single-target gates, and lends itself to parallelisation. \newedit{ We leverage the same hardware-optimised code to enact gates on $N$-qubit density matrices, by storing them as $2N$-qubit state-vectors, } \begin{align} \rho = \sum \limits_{j=0}^{2^N-1} \sum\limits_{k=0}^{2^N-1} \alpha_{j,k} \ket{j}\bra{k} \ \ \rightarrow \ \ \rho^\prime=\sum \limits_{n=0}^{2^{2N}-1} \alpha_n' \ket{n}. \end{align} \newedit{ Here the object $\rho^\prime$ does not, in general, respect the constraint $\sum |\alpha^\prime_n|^2=1$. 
An operation $G_q \rho G_q^\dagger$, that is a gate on qubit $q$, can then be effected on $\rho^\prime$ as $ G_{q + N}^* G_q \rho^\prime, $ by exploiting the Choi--Jamiolkowski isomorphism} ~\cite{choi1975completely} \newedit{This holds also for multi-qubit gates. The distribution of the density matrix in this form lends itself well to the parallel simulation of dephasing and depolarising noise channels. } \subsubsection{Distributed} How simulators partition the state vector between processes and communicate over the network is key to their performance on distributed memory architectures. All simulators we have found so far employ a simple partitioning scheme; the memory to represent a state vector is split equally between all processes holding that vector. A common strategy to then evaluate a circuit is to pair nodes such that upon applying a single qubit gate, every process must send and receive the entirety of its portion of the state vector to its paired process~\cite{parallel_qc_simmer_noisy,qhipster,distributed_quantum_plus_plus}. The number of communications between paired processes, the amount of data sent in each and the additional memory incurred on the compute nodes form a tradeoff. A small number of long messages will ensure that the communications are bandwidth limited, which leads to best performance in the communications layer. However this results in a significant memory overhead, due to the process having to store buffers for both the data it is sending and receiving, and in an application area so memory hungry as quantum circuit simulation this may limit the size of circuit that can be studied. On the other hand many short messages will minimise the memory overhead as the message buffers are small, but will lead to message latency limited performance as the bandwidth of the network fabric will not be saturated. 
This in turn leads to poor parallel scaling, and hence again limits the size of the circuit under consideration, but now due to time limitations. Note that the memory overhead is at most a factor 2, which, due to the exponential scaling of the memory requirements, means only 1 less qubit may be studied. Some communication strategies and their memory overheads are visualised in Figure\xspace~\ref{fig:distributed_memory_schemes}. \begin{figure} \caption{An illustration of strategies to distribute the state vector between two 64\,GiB nodes. A complete cloning ($\times 2$ memory) of the partition on each node is wasteful. Half the partition can be cloned, at the cost of twice as many MPI messages, to fit another qubit into memory~\cite{parallel_qc_simmer_noisy}. \label{fig:distributed_memory_schemes} \end{figure} QuEST\xspace partitions the state vector equally between the processes within the job, and the message passing between the process pairs is so organised as to absolutely minimise the number of communications during the operation of a single gate. Thus parallel performance should be good, but there will be a significant memory overhead; in practice a factor of 2 as described above. For $n$ qubits distributed over $2^k$ nodes, these communications occur when operating on qubits with index $\ge n - k$, indexing from $0$. An alternative strategy is to clone, send and receive only half of each node's data in two exchanges~\cite{parallel_qc_simmer_noisy}, incurring instead a $1.5\times$ memory cost. This often leaves room to simulate an additional qubit, made clear in Figure\xspace~\ref{fig:distributed_memory_schemes}.
This strategy can be recursed further to reduce the memory overhead even more, and negligible additional memory cost can be achieved by communicating every amplitude separately as in~\cite{distributed_quantum_plus_plus}, though this comes at a significant communication cost, since a message passing pattern is latency dominated and will exhibit poor scaling with process count. However an improvement made possible by having two exchanges is to overlap the communication of the first message with the computation on the second half of the state vector, an optimisation implemented in qHipster\xspace~\cite{qhipster}. This depends on the network effectively supporting asynchronous communications. We also mention recent strategies for further reducing network traffic by optimising the simulated circuit through gate fusion, state reordering~\cite{qhipster,haener_45qb_sim} and rescheduling operations~\cite{haener_45qb_sim}, though opportunities for such optimisations may be limited. In terms of the functionality implemented in the simulation packages we note that while qHipster\xspace is limited to single and two-qubit controlled gates, QuEST\xspace additionally allows the distributed operation of any-qubit controlled gates. \subsubsection{GPU\xspace} Though MPI\xspace distribution can be used for scalable parallelisation, networks are expensive and are overkill for deep circuits of few qubits. Simulations limited to 29 qubits can fit into a 12\,GB GPU\xspace which offers high parallelisation at low cost. In our testing, QuEST\xspace running a single Tesla K40m GPU\xspace (retailing currently for $\sim$3.6\,k\,USD) outperforms $8$ distributed 12-core Xeon E5-2697 v2 series processors, currently retailing at $\sim$21\,k\,USD total, ignoring the cost of the network. 
QuEST\xspace is the first available simulator of both state-vectors and density matrices which can run on a CUDA\xspace enabled GPU\xspace, {offering speedups of $\sim$5$\times$} over {already highly-parallelised 24-threaded single-node simulation}. We mention QCGPU~\cite{QCGPU} which is a recent GPU\xspace-accelerated {single-node} simulator being developed with \newedit{Python} and OpenCL, and Quantumsim, a CUDA-based simulator of density matrices~\cite{quantumsimrepo}. \subsubsection{Multi-platform} { QuEST is the only simulator which supports all of the above classical architectures. A simulation written in QuEST can be immediately deployed to all environments, from a laptop to a national-grade supercomputer, performing well at all simulation scales. } {We list the facilities supported by other state-of-the-art simulators in} Table\xspace \ref{tab:facility_table}. \begin{table*}[htbp!] { \begin{tabular}{|c|c|c|c|c|c|} \hline Simulator & multithreaded & distributed & GPU accelerated & stand-alone & density matrices \\ \hline QuEST\xspace & \checkmark & \checkmark & \checkmark & \checkmark & \checkmark \\ \hline qHipster\xspace & \checkmark & \checkmark & & \checkmark & \\ \hline Quantum++\xspace & \checkmark & & & ? & \\ \hline Quantumsim & & & \checkmark & & \checkmark \\ \hline QCGPU & \checkmark & & \checkmark & & \\ \hline ProjectQ\xspace & \checkmark & & & &\\ \hline \end{tabular} \caption{{A comparison of the facilities offered by some publicly available, state-of-the-art simulators. Note the distributed adaptation of Quantum++}\cite{distributed_quantum_plus_plus} {is not currently publicly available}. Here, density matrices refers to the ability to precisely represent mixed states. } } \label{tab:facility_table} \end{table*} \subsection{Algorithm} We compare QuEST\xspace and ProjectQ\xspace performing simulations of universal psuedo-random quantum circuits of varying depth and number of qubits. 
A random circuit contains a random sequence of gates, in our case with gates from the universal set $\{H$, $T$, $C(Z)$, $X^{1/2}$, $Y^{1/2}\}$. These are the Hadamard, $\pi/8$, controlled-phase and root Pauli X and Y gates. Being computationally hard to simulate, random circuits are a natural algorithm for benchmarking simulators~\cite{rand_quant_circuits}. We generate our random circuits by the algorithm in~\cite{rand_quant_circuits}, which fixes the topology for a given depth and number of qubits, though randomises the sequence of single qubit gates. An example is shown in Figure\xspace~\ref{fig:randcircexample}. The total number of gates (single plus control) goes like $\mathcal{O}(n d)$ for an $n$ qubit, depth $d$ random circuit, and the ratio of single to control gates is mostly fixed at $1.2 \pm 0.2$, so we treat these gates as equal in our runtime averaging. For measure, a depth 100 circuit of 30 qubits features 1020 single qubit gates and 967 controlled phase gates. \begin{figure} \caption{An example of a depth 10 random circuit on 5 qubits, of the linear topology described in \cite{rand_quant_circuits} \label{fig:randcircexample} \end{figure} Though we here treat performance in simulating a random circuit as an indication of the general performance of the simulator, we acknowledge that specialised simulators may achieve better performance on particular classes of circuits. For example, ProjectQ\xspace can utilise topological optimisation or classical emulation to shortcut the operation of particular subcircuits, such as the quantum Fourier transform~\cite{projq_emulator_abilities}. We additionally study QuEST\xspace's communication efficiency by measuring the time to perform single qubit rotations on distributed hardware. 
\section{Setup} \subsection{\label{sec:hardware}Hardware} We evaluate the performance of QuEST\xspace and ProjectQ\xspace using Oxford's computing facilities, specifically the ARCUS Phase-B\xspace supercomputer, and the UK National Supercomputing facility ARCHER\xspace. QuEST\xspace and ProjectQ\xspace are compared on single nodes with 1--16 threads, on ARCUS Phase-B\xspace with nodes of 64, 128 and 256 GiB memory (simulating 1--31, 32 and 33 qubits respectively), each with two 8-core Intel Xeon E5-2640 V3 processors and a collective last level cache (LLC\xspace) size of 41\,MB between two NUMA\xspace banks. We furthermore benchmark QuEST\xspace on ARCUS Phase-B\xspace Tesla K40m GPU\xspace nodes, which, with 12\,GB global memory over 2880 CUDA\xspace cores, can simulate up to 29 qubit circuits. QuEST\xspace and ProjectQ\xspace are also compared on ARCHER\xspace, a CRAY XC30 supercomputer. ARCHER\xspace contains both 64 and 128 GiB compute nodes, each with two 12-core Intel Xeon E5-2697 v2 series processors linked by two QuickPath Interconnects, and a collective LLC\xspace of 61\,MB between two NUMA\xspace banks. Thus a single node is capable of simulating up to 32 qubits with 24 threads. We furthermore evaluate the scalability of QuEST\xspace when distributed over up to 2048 ARCHER\xspace compute nodes, linked by a Cray Aries interconnect, which supports an MPI latency of $\sim 1.4\pm 0.1\,\mu$s and a bisection bandwidth of 19\,TB/s. \subsection{Software} \subsubsection{Installation} On ARCUS Phase-B\xspace, we compile both single-node QuEST\xspace v0.10.0 and ProjectQ\xspace's C++\xspace backend with GCC\xspace 5.3.0, which supports OpenMP\xspace 4.0~\cite{openmp_versions} for parallelisation among threads. For GPU\xspace use, QuEST\xspace GPU v0.6.0 is compiled with NVIDIA CUDA\xspace 8.0. ProjectQ\xspace v0.3.5 is run with Python\xspace 3.5.4, inside an Anaconda\xspace 4.3.8 environment.
On ARCHER\xspace, ProjectQ\xspace v0.3.6 is compiled with GCC\xspace 5.3.0, and run in Python\xspace 3.5.3 inside an Anaconda\xspace 4.0.6. QuEST\xspace is compiled with ICC\xspace 17.0.0 which supports OpenMP\xspace 4.5 \cite{openmp_versions}, and is distributed with the MPICH3 implementation of the MPI\xspace 3.0 standard, optimised for the Aries interconnect. \subsubsection{\label{sec:projq_config}Configuration} We attempt to optimise ProjectQ\xspace when simulating many qubits by enabling gate fusion only for multithreaded simulations~\cite{projq_cache_anomaly_haener}. \begin{codebox} \begin{minted}{python} from projectq import MainEngine from projectq.backends import Simulator MainEngine( backend=Simulator( gate_fusion=(threads > 1))) \end{minted} \end{codebox} We found that ProjectQ\xspace's multithreaded simulation of few qubit random circuits can be improved by disabling all compiler engines, to reduce futile time spent optimising the circuit in Python\xspace. \begin{codebox} \begin{minted}{python} MainEngine( backend=Simulator(gate_fusion=True), engine_list=[]) \end{minted} \end{codebox} However, this disables ProjectQ\xspace's ability to perform classical emulation and gate decomposition, and so is not explored in our benchmarking. We studied ProjectQ\xspace's performance for different combinations of compiler engines, number of gates considered in local optimisation and having gate fusion enabled, and found the above configurations gave the best performance for random circuits on our tested hardware. Our benchmarking measures the runtime of strictly the code responsible for simulating the sequence of gates, and excludes the time spent allocating the state vector, instantiating or freeing objects or other one-time overheads. 
In ProjectQ\xspace, this looks like: \begin{codebox} \begin{minted}{python} # prepare the simulator sim = Simulator(gate_fusion=(threads > 1)) engine = MainEngine(backend=sim) qubits = engine.allocate_qureg(num_qubits) engine.flush() # ensure we're in the 0 state sim.set_wavefunction([1], qubits) sim.collapse_wavefunction( qubits, [0]*num_qubits) engine.flush() # start timing, perform circuit # ensure cache is empty engine.flush() sim._simulator.run() # stop timing \end{minted} \end{codebox} and in QuEST\xspace: \begin{codebox} \begin{minted}{cpp} // prepare the simulator QuESTEnv env = createQuESTEnv(); Qureg qubits = createQureg(num_qubits, env); // ensure we're in the 0 state initZeroState(&qubits); // start timing, perform circuit // ensure distributed work finishes syncQuESTEnv(env); // stop timing \end{minted} \end{codebox} \section{Results} \subsection{Single Node Performance} \begin{figure} \caption{Comparison of QuEST\xspace and ProjectQ\xspace when simulating random circuits over 1, 16 (on ARCUS Phase-B\xspace) and 24 (on ARCHER\xspace) threads (top to bottom). Coloured lines indicate the mean, with shaded regions indicating a standard deviation either side, over a total of $\sim$77\,k simulations of varying depth. Vertical dashed lines indicate the maximum number of qubits for which the entire state vector fits into the LLC\xspace. The speedup subgraphs show the ratio of ProjectQ\xspace to QuEST\xspace runtime.} \label{fig:quest_projq_runtime_comp} \end{figure} \begin{figure} \caption{ Single-node strong scaling achieved when parallelising (through OpenMP\xspace) 30 qubit random circuits across a varying number of threads on a 16-CPU{} \label{fig:quest_projq_thread_scaling} \end{figure} \begin{figure} \caption{QuEST\xspace's single-node performance using multithreading and GPU\xspace acceleration to parallelise random circuit simulations. 
The subplot shows the speedup (ratio of runtimes) that a GPU\xspace of 2880 CUDA\xspace cores on ARCUS Phase-B\xspace achieves against 24 threads on ARCHER\xspace.} \label{fig:quest_gpu} \end{figure} The runtime performance of QuEST\xspace and ProjectQ\xspace, presented in Figure\xspace~\ref{fig:quest_projq_runtime_comp}, varies with the architecture on which they are run, and the system size they simulate. Anomalous slowdown of ProjectQ\xspace at 22 qubits may be explained by the LLC\xspace becoming full, due to its use of cache blocking through gate fusion~\cite{projq_cache_anomaly_haener}. For fewer than $\sim$22 qubits, ProjectQ\xspace's Python\xspace overhead is several orders of magnitude slower than QuEST\xspace's C\xspace overhead, independent of circuit depth. The Python\xspace overhead can be reduced by disabling some simulation facilities - see Section~\ref{sec:projq_config}. For larger systems, the time spent in ProjectQ\xspace's C++\xspace backend operating on the state vector dominates total runtime, and the time per gate of both simulators grows exponentially with increasing number of qubits. On a single ARCUS-B thread, ProjectQ\xspace becomes twice as fast as QuEST\xspace, attributable to its sophisticated circuit evaluation. However, these optimisations appear to scale poorly; QuEST\xspace outperforms ProjectQ\xspace on 16 threads on ARCUS-B, and on ARCHER both simulation packages are equally fast on 24 threads. This is made explicit in the strong scaling over threads shown in Figure\xspace~\ref{fig:quest_projq_thread_scaling}, which reveals ProjectQ\xspace's scaling is not monotonic. Performance suffers with the introduction of more than 8 threads, though is restored at 16. We demonstrate QuEST\xspace's utilisation of a GPU\xspace for highly parallelised simulation in Figure\xspace~\ref{fig:quest_gpu}, achieving a speedup of $\sim$5$\times$ from QuEST\xspace and ProjectQ\xspace on 24 threads. 
\subsection{Distributed Performance} Strong scaling of QuEST\xspace simulating a 30 and 38 qubit random circuit, distributed over 1 to 2048 ARCHER\xspace nodes, is shown in Figure\xspace~\ref{fig:quest_mpi_rc_strong_scaling}. In all cases one MPI process per node was employed, each with 24 threads. Recall that QuEST\xspace's communication strategy involves cloning the state vector partition stored on each node. The 30 qubit (38 qubit) simulations therefore demand 32\,GiB (8\,TiB) memory (excluding overhead), and require at least 1 node (256 nodes), whereas qHipster\xspace{}'s strategy would fit a 31 qubit (39 qubit) simulation on the same hardware~\cite{qhipster}. \begin{figure} \caption{QuEST\xspace multinode strong scaling when distributing (through MPI\xspace) a depth 100 (depth 10) random circuit simulation of 30 qubits (38 qubits) across many 24-thread 64\,GiB ARCHER\xspace nodes. } \label{fig:quest_mpi_rc_strong_scaling} \end{figure} \begin{figure} \caption{ QuEST\xspace multinode weak scaling of a single qubit rotation, distributed on \{16, 32, 64, 128, 256\} \label{fig:quest_weak_rotation_scaling} \end{figure} Communication cost is shown in Figure\xspace~\ref{fig:quest_weak_rotation_scaling} as the time to rotate a single qubit when using just enough nodes to store the state-vector; the size of the partition on each node is constant for increasing nodes and qubits. QuEST\xspace shows excellent weak scaling, and moving from 34 to 37 simulated qubits slows QuEST\xspace by a mere $\approx 9\%$. It is interesting to note that the equivalent results for qHipster\xspace show a slowdown of $\approx 148\%$ \cite{qhipster}, but this is almost certainly a reflection of the different network used in generating those results, rather than in any inherent weakness in qHipster\xspace itself. 
QuEST\xspace and qHipster\xspace show comparable $\sim 10^1$ slowdown when operating on qubits which require communication against operations on qubits which do not (shown in the bottom subplot of Figure\xspace~\ref{fig:quest_weak_rotation_scaling}). Though such slowdown is also network dependent, it is significantly smaller than the $\sim 10^6$ slowdown reported by the Quantum++\xspace adaptation on smaller systems~\cite{distributed_quantum_plus_plus}, and reflects a more efficient communication strategy. We will discuss these network and other hardware dependencies further in future work, and also intend to examine qHipster\xspace on ARCHER\xspace so a true like-with-like comparison with QuEST\xspace can be made. \section{Summary} This paper introduced QuEST\xspace, a new high performance open source framework for simulating universal quantum computers. We demonstrated QuEST\xspace shows good strong scaling over OpenMP\xspace threads, competitive with a state of the art single-node simulator ProjectQ\xspace when performing multithreaded simulations of random circuits. We furthermore parallelised QuEST\xspace on a GPU\xspace for a $5 \times$ speedup over a 24 threaded simulation, and a $40 \times$ speedup over single threaded simulation. QuEST\xspace also supports distributed memory architectures via message passing with MPI\xspace, and we've shown QuEST\xspace to have excellent strong and weak scaling over multiple nodes. This behaviour has been demonstrated for up to 2048 nodes and has been used to simulate a 38 qubit random circuit. Despite its relative simplicity, we found QuEST\xspace's communication strategy yields comparable performance to qHipster\xspace's, and strongly outperforms the distributed adaptation of Quantum++\xspace. QuEST\xspace can be downloaded from Reference~\cite{quest_site}. \end{document}
\begin{document} \title{Utilizing machine learning to improve the precision of fluorescence imaging of cavity-generated spin squeezed states} \author{Benjamin K. Malia} \affiliation{Department of Physics, Stanford University, Stanford, California 94305, USA} \author{Yunfan Wu} \affiliation{Department of Applied Physics, Stanford University, Stanford, California 94305, USA} \author{Juli\'{a}n Mart\'{i}nez-Rinc\'{o}n} \affiliation{Department of Physics, Stanford University, Stanford, California 94305, USA} \author{Mark A. Kasevich} \email[]{[email protected]} \affiliation{Department of Physics, Stanford University, Stanford, California 94305, USA} \affiliation{Department of Applied Physics, Stanford University, Stanford, California 94305, USA} \date{\today} \begin{abstract} We present a supervised learning model to calibrate the photon collection rate during the fluorescence imaging of cold atoms. The linear regression model finds the collection rate at each location on the sensor such that the atomic population difference equals that of a highly precise optical cavity measurement. This 192 variable regression results in a measurement variance 27\% smaller than our previous single variable regression calibration. The measurement variance is now in agreement with the theoretical limit due to other known noise sources. This model efficiently trains in less than a minute on a standard personal computer's CPU, and requires less than 10 minutes of data collection. Furthermore, the model is applicable across large changes in population difference and across data collected on different days. \end{abstract} \pacs{} \maketitle \section{Introduction} Machine learning (ML) is becoming an increasingly important tool for analysis of scientific data due to its ability to handle large data sets with many dependent variables. 
In supervised learning, a regression model can be trained on previously collected data to find relationships between an experimental result and potentially dozens of parameters. A successful ML model will be able to accurately predict future data from these many parameters. A variety of ML algorithms are able to process high resolution images. In experiments involving fluorescence imaging, ML has recently been applied to denoise images~\cite{Wang2021}, classify objects in images~\cite{Sagar2020}, classify spectral signatures~\cite{Ju2019}, and quantify fluorescent decay lifetimes~\cite{Mannam2020,Smith2019}. In cold atom experiments, fluorescence imaging is used for atom number measurement to determine the populations of different quantum states. A variety of challenges, including scattered light, sensor read noise, and inhomogeneous photon collection, increase the difficulty of obtaining accurate measurements~\cite{Rocco_2014}. The need for accurate imaging is vital in the context of quantum metrology. In these experiments, imaging imperfections fundamentally limit the efficacies of quantum protocols. For example, Qu et al.~\cite{Qu2020} used principal component analysis to correct for background light in order to fully characterize a quantum state. \section{Spin Squeezing Measurements} Atomic clocks and sensors operate through measurement of population differences between quantum states. For two spin states $\ket{\uparrow}$ and $\ket{\downarrow}$, the collective spin is defined as $J_z = (N_{\ket{\uparrow}}-N_{\ket{\downarrow}})/2$ where $N = N_{\ket{\uparrow}} + N_{\ket{\downarrow}}$ is the total number of atoms in the sensor. The changes in $J_z$ are measured through two consecutive measurements, one before and one after the sensor has detected a field. We define the population difference as $J_z^{(1,2)}= J_z^{(2)}-J_z^{(1)}$. 
In the absence of environmental fields, a single measurement of $J_z^{(1,2)}$ should be close to, but not exactly equal to, zero due to the Heisenberg uncertainty principle. In a typical sensor, without spin-squeezing, the quantum projection noise (QPN) limits the variance of the measurement difference to $\big(\Delta J_z^{(1,2)}\big)^2=N/4$~\cite{Hosten_2016,Cox_2016,Schleier-Smith_2010b}. If the quantum state is squeezed with a quantum non-demolition (QND) measurement, as in our experiment, then the variance in population difference is reduced such that $\Delta J_z^{(1,2)}$ surpasses the QPN. While $J_z^{(1)}$ is a QND measurement, $J_z^{(2)}$ can be either another QND measurement or a fluorescence measurement. Squeezing allows for increased sensor precision when increasing atom number or interrogation time is not possible. Here we employ ML to identify and correct for a class of imperfections associated with non-uniform spatial imaging profiles. Potential sources which may contribute to non-uniform spatial imaging are the angle of light entering the objective lens~\cite{Catrysse2002}, inhomogeneous transparency of vacuum chamber walls, and an inhomogeneous imaging beam power. As we show below, these imperfections, inferred to be $\sim$10\% across a mm-scale spatially extended cloud, led to a 33\% increase in the noise variance in the work of Ref.~\cite{Malia2020}. To evaluate the performance of the system as a sensor, we define this difference in terms of angular difference $\theta = J_z^{(1,2)}/C(N/2)$ on the multiparticle Bloch sphere, where $C$ is the contrast in a Ramsey sequence. In the case of the data in~\cite{Malia2020}, which is the subject of our analysis in this paper, $C=0.92$ and the QPN is defined as $\Delta\theta = 1/\sqrt{N}$. As an example for a sensor with $N=390\,000$, the QPN is $\Delta\theta = 1.6~\text{mrad}$. Eliminating extra noise sources is crucial to taking advantage of the noise reduction from spin squeezing. 
The relevant details of our experiment~\cite{Malia2020} are summarized here. ${}^{87}\text{Rb}$ atoms are cooled and trapped in an optical lattice. The QND measurement is first performed by detecting a cavity-QED dispersive readout phase using an optical light pulse. The lattice is then turned off and the atoms free fall. To perform population spectroscopy at the end of the free fall time, an on-resonance laser pulse imparts a momentum kick to the atoms in the $\ket{\uparrow}$ state. After the two states spatially separate, a retroreflected laser induces fluorescence in both states. The light is collected with an objective lens with a numerical aperture of 0.25 and is sent to a CMOS camera. Each state takes up approximately half of the sensor region. $J_z^{(2)}$ is determined by the difference in total counts between states $\ket{\uparrow}$ and $\ket{\downarrow}$. As a result of the mechanisms discussed earlier, the average number of counts per atom in each state is several percent different. Due to shot-to-shot fluctuations in the atoms' positions, the spatial inhomogeneity in the scattering rate directly leads to fluctuations in the inferred atomic populations. This additional noise is large enough that, uncorrected, it results in $\Delta J_z$ larger than the QPN. \section{Machine Learning Model} The supervised learning model estimates $J_z^{(2)}$ by first dividing the fluorescence image into 192 ``superpixels'' (Figure~\ref{fig:counts}). These superpixels are simply $128\times128$ pixel squares grouped into blocks. Our new regression model now has $n=192$ variables to fit to. Each superpixel is then assigned a weight $\beta_j$ which is multiplied by its total counts $c_j$ to estimate the correct atom number for that pixel. 
The estimated $\tilde{J}_z^{(2),(i)}$ are determined by subtracting the estimated atoms in the left half of the image from the ones in the right half: \begin{equation} \tilde{J}_z^{(2),(i)} = \beta_0 + \frac{1}{2}\sum_{j=1}^{n/2} c^{(i)}_j\beta_j- \frac{1}{2}\sum_{j=n/2+1}^n c^{(i)}_j\beta_j \end{equation} where $i$ indexes the images and $\beta_0$ is a bias term. We can also determine the total atom number $N$ from these $\beta_j$: \begin{equation} \tilde{N}^{(i)} = \sum_{j=1}^{n} c^{(i)}_j\beta_j \end{equation} The inhomogeneity must at least vary on length scales smaller than the atom cloud to make a significant impact on the precision of the measurement. The maximum spatial frequency accessible to the model is determined by the resolution of the images. We therefore chose a superpixel resolution large enough to capture most of the spatial variation, but small enough to allow for fast training on small sample sizes. As described below, we enforce a penalty term which puts an upper bound on the spatial frequency of the model's result. Consecutive low noise cavity measurements show that the detection limit of the squeezed state is $\Delta \theta=310~\mu \text{rad}$, which is lower than the 690~$\mu$rad theoretical limit of the fluorescence detection (see~\cite{Malia2020} supplement for details on these noise sources). We therefore take $J_z^{(1)}$ as the target for $J_z^{(2)}$ when training our regression model. We calculate the optimal $\beta_j$ by minimizing the cost function, $G$, with the Broyden–Fletcher–Goldfarb–Shanno algorithm~\cite{Broyden1970} over $m$ samples. Specifically, the \emph{fminunc} function in MATLAB is supplied with both $G$ and its gradient. $G$ is defined as a weighted least squares with regularization~\cite{Neumaier1998}: \begin{equation} G = \frac{1}{2m_c}\sum_{i=1}^{m} w^{(i)} \big(\tilde{J}_z^{(2),(i)}-\tilde{J}_z^{(1),(i)}\big)^2 + \frac{\lambda}{2m_c}P_{NN}. 
\label{eq:cost} \end{equation} Here, the weights $w^{(i)}$ are defined by the Heaviside step function $H$ and the magnitudes of $J^{(1),(i)}_z$: \begin{equation} w^{(i)} = H\big(|J_z^{(1),(i)}| - J_z^\text{cutoff}\big), \end{equation} $m_c$ is the total number of samples with non-zero weight. $\tilde{J}_z^{(1),(i)}$ is the frequency-corrected cavity measurement (\cite{Malia2020} supplement, Equation S8) \begin{equation} \tilde{J}_z^{(1),(i)} = J^{(1),(i)}_z + \frac{\delta^{(i)}}{2\Delta}\tilde{N}^{(i)} \end{equation} where $\delta^{(i)}$ is the difference in frequency between the probe laser and the target frequency (detuned $\Delta$ from the $\ket{\uparrow}\rightarrow\ket{e}$ transition). The nearest neighbor penalty is \begin{equation} P_{NN} = \sum_{<a,b>} (\beta_b-\beta_a)^2 \end{equation} where $<a,b>$ represents the set of all unique nearest neighbor pairs of superpixels and $\lambda$ is the regularization hyperparameter. The $(\Delta\theta)^2$ predicted by this model is the variance of $\tilde{\theta} = (\tilde{J}_z^{(2)}-\tilde{J}_z^{(1)})/(C \tilde{N}/2)$. \section{Results and Discussion} We train the model on a set of 500 images taken from the data collected for Ref.~\cite{Malia2020}. 120 images remain after $J_z^\text{cutoff}$ is applied. The hyperparameters, $J_z^\text{cutoff}$ and $\lambda$, are chosen to give the smallest $\Delta\theta$ in a separate validation set of 50 images. Figures~\ref{fig:lambda} and~\ref{fig:cutoff} show the results of training with a range of values for each hyperparameter. The optimal values are $J_z^\text{cutoff}=200$ and $\lambda = 20$. Note that samples with $|J_z^{(1)}|$ this large are typically excluded when the system is used as a sensor as those $J_z^{(1)}$ closest to zero are most accurate. Therefore, the validation set will have larger $\Delta\theta$ than that of the final result. The $\beta_j$ determined with these parameters are shown in Figure~\ref{fig:map}. 
$\beta_0 < 10^{-3}$ and does not significantly contribute to $\tilde{J}_z^{(2)}$. Figure~\ref{fig:gap} shows the learning curves as a function of nonzero weight sample size ($m_c$). Relatively few data points are needed for the model to minimize the difference of mean least squares error (the first term in Equation~\ref{eq:cost}) between the training and validation sets. The maximum number of points used ($m_c = 117$) corresponds to approximately 8 minutes of data collection and 1 minute of training. Increasing the model complexity by increasing the number of superpixels does not significantly change the gap between the training and validation set errors at maximum $m_c$. It also does not change the qualitative structure of the $\beta$ distribution. On the other hand, a model with less complexity fails to minimize the gap. At the cost of sample size, using the Heaviside function, $H$, to set the weights prevents the model from optimizing on values near zero. This eliminates a $\tilde{J}^{(1)}_z$ dependent trend in the residual $\tilde{J}_z^{(2)}-\tilde{J}_z^{(1)}$ and an unreasonably small $\tilde{N}^{(i)}$. When weights are included, the mean of predicted atom numbers $\tilde{N}^{(i)}$ are within 2\% of the previously calibrated mean value. The nearest neighbor penalty acts similarly to ridge regularization~\cite{Neumaier1998} in that it prevents the model from overfitting and keeps a relatively similar contribution from each $\beta_j$. The optimal $\lambda$ reveals the characteristic spatial frequency of the inhomogeneity. Despite the model fitting only on samples with $|J_z^{(1),(i)}|$ larger than the cutoff, it extrapolates successfully onto those with values smaller than the cutoff. Applying the model to the remaining data (samples with $|J_z^{(1),(i)}|<J_z^\text{cutoff}$) results in an angular resolution of $\Delta\theta = 691\pm60~\mu\text{rad}$. This variance is 27\% smaller than our previous reported result (see Table ~\ref{tab:testValues}). 
It is also consistent with our previously predicted limit given the known noise sources. In terms of metrologically useful squeezing, this is $7.2\pm0.3$ dB below the QPN limit. This collection efficiency map is independent of the mean $J_z$ as well as the mean position of the atoms and the total atom number. It is therefore versatile and can be applied to data sets taken on different days with different parameters. For example, with measurements where the atomic state is rotated along the polar angle of the Bloch sphere by 25 mrad and the total atom number is reduced by 40\%, the model now predicts $\Delta\theta = 880\pm60~\mu\text{rad}$ ($7.3\pm0.3$ dB below the QPN), a 27\% reduction in variance compared to the previous method (see Table~\ref{tab:testValues}). The collection inhomogeneity across the cloud can also lead to a degradation in the correlation between a single image and its corresponding QND measurement~\cite{Hu2015}. In this work, the estimated increase in variance due to this noise source is 0.5\%~\cite{Wu2020}, which is negligible. The ML model demonstrated here primarily reduces the variance from shot-to-shot fluctuations in the atom cloud distributions coupled with the detection inhomogeneity. In our previously reported results we applied a single weight, determined by the mean position of the atoms, to all of the counts from the $\ket{\uparrow}$ state. Although this method provided significant improvement (Table~\ref{tab:testValues}), the atoms are spread out over most of the image so applying the same factor to all counts is an over-simplification. ML provides the capability of increasing the complexity of our model to accurately calibrate the measurement. \section{Conclusion} In this work, we have shown that supervised learning is capable of characterizing inhomogeneous fluorescence measurements when prior low noise measurements are available for training the model. 
This model is robust to changes in spatial arrangement of the atomic populations and correctly predicts the total atom number. This method could be extended to absorption imaging of cold atoms. Absorption imaging, especially high intensity imaging, requires careful calibration to accurately count atoms~\cite{Gross2012,Hueck2017}. A ML approach could circumvent traditional calibration techniques to convert pixel-by-pixel intensity changes to atom numbers in spin squeezing experiments~\cite{Huang2020}. Other ML techniques have the potential to further improve our result. In Ness et al.~\cite{Ness2020}, deep learning is used to predict the background noise based on the signal surrounding the atoms. A similar approach may allow our procedure to forgo subtracting the second background image. This would reduce the variance introduced by the background by up to half of its current value. \section{Acknowledgments} \begin{acknowledgments} This work is supported by the Department of Energy (DE-SC0019174-0001) and the Vannevar Bush Faculty Fellowship. 
\end{acknowledgments} \begin{thebibliography}{20} \makeatletter \providecommand \@ifxundefined [1]{ \@ifx{#1\undefined} } \providecommand \@ifnum [1]{ \ifnum #1\expandafter \@firstoftwo \else \expandafter \@secondoftwo \fi } \providecommand \@ifx [1]{ \ifx #1\expandafter \@firstoftwo \else \expandafter \@secondoftwo \fi } \providecommand \natexlab [1]{#1} \providecommand \enquote [1]{``#1''} \providecommand \bibnamefont [1]{#1} \providecommand \bibfnamefont [1]{#1} \providecommand \citenamefont [1]{#1} \providecommand \href@noop [0]{\@secondoftwo} \providecommand \href [0]{\begingroup \@sanitize@url \@href} \providecommand \@href[1]{\@@startlink{#1}\@@href} \providecommand \@@href[1]{\endgroup#1\@@endlink} \providecommand \@sanitize@url [0]{\catcode `\\12\catcode `\$12\catcode `\&12\catcode `\#12\catcode `\^12\catcode `\_12\catcode `\%12\relax} \providecommand \@@startlink[1]{} \providecommand \@@endlink[0]{} \providecommand \url [0]{\begingroup\@sanitize@url \@url } \providecommand \@url [1]{\endgroup\@href {#1}{\urlprefix }} \providecommand \urlprefix [0]{URL } \providecommand \Eprint [0]{\href } \providecommand \doibase [0]{http://dx.doi.org/} \providecommand \selectlanguage [0]{\@gobble} \providecommand \bibinfo [0]{\@secondoftwo} \providecommand \bibfield [0]{\@secondoftwo} \providecommand \translation [1]{[#1]} \providecommand \BibitemOpen [0]{} \providecommand \bibitemStop [0]{} \providecommand \bibitemNoStop [0]{.\EOS\space} \providecommand \EOS [0]{\spacefactor3000\relax} \providecommand \BibitemShut [1]{\csname bibitem#1\endcsname} \let\auto@bib@innerbib\@empty \bibitem [{\citenamefont {Wang}\ \emph {et~al.}(2021)\citenamefont {Wang}, \citenamefont {Pinkard}, \citenamefont {Khwaja}, \citenamefont {Zhou}, \citenamefont {Waller},\ and\ \citenamefont {Huang}}]{Wang2021} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {Y.}~\bibnamefont {Wang}}, \bibinfo {author} {\bibfnamefont {H.}~\bibnamefont {Pinkard}}, \bibinfo {author} {\bibfnamefont 
{E.}~\bibnamefont {Khwaja}}, \bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {Zhou}}, \bibinfo {author} {\bibfnamefont {L.}~\bibnamefont {Waller}}, \ and\ \bibinfo {author} {\bibfnamefont {B.}~\bibnamefont {Huang}},\ }\href {\doibase 10.1101/2021.02.01.429188} {\bibfield {journal} {\bibinfo {journal} {bioRxiv}\ } (\bibinfo {year} {2021}),\ 10.1101/2021.02.01.429188}\BibitemShut {NoStop} \bibitem [{\citenamefont {Sagar}\ \emph {et~al.}(2020)\citenamefont {Sagar}, \citenamefont {Cheng}, \citenamefont {Ouellette}, \citenamefont {Williams}, \citenamefont {Watters},\ and\ \citenamefont {Eliceiri}}]{Sagar2020} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {M.~A.~K.}\ \bibnamefont {Sagar}}, \bibinfo {author} {\bibfnamefont {K.~P.}\ \bibnamefont {Cheng}}, \bibinfo {author} {\bibfnamefont {J.~N.}\ \bibnamefont {Ouellette}}, \bibinfo {author} {\bibfnamefont {J.~C.}\ \bibnamefont {Williams}}, \bibinfo {author} {\bibfnamefont {J.~J.}\ \bibnamefont {Watters}}, \ and\ \bibinfo {author} {\bibfnamefont {K.~W.}\ \bibnamefont {Eliceiri}},\ }\href {\doibase 10.3389/fnins.2020.00931} {\bibfield {journal} {\bibinfo {journal} {Frontiers in Neuroscience}\ }\textbf {\bibinfo {volume} {14}},\ \bibinfo {pages} {931} (\bibinfo {year} {2020})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Ju}\ \emph {et~al.}(2019)\citenamefont {Ju}, \citenamefont {Lyu}, \citenamefont {Hao}, \citenamefont {Shen},\ and\ \citenamefont {Cui}}]{Ju2019} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {L.}~\bibnamefont {Ju}}, \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Lyu}}, \bibinfo {author} {\bibfnamefont {H.}~\bibnamefont {Hao}}, \bibinfo {author} {\bibfnamefont {W.}~\bibnamefont {Shen}}, \ and\ \bibinfo {author} {\bibfnamefont {H.}~\bibnamefont {Cui}},\ }\href {\doibase 10.1021/acs.analchem.9b01315} {\bibfield {journal} {\bibinfo {journal} {Analytical Chemistry}\ }\textbf {\bibinfo {volume} {91}} (\bibinfo {year} {2019}),\ 
10.1021/acs.analchem.9b01315}\BibitemShut {NoStop} \bibitem [{\citenamefont {Mannam}\ \emph {et~al.}(2020)\citenamefont {Mannam}, \citenamefont {Zhang}, \citenamefont {Yuan}, \citenamefont {Ravasio},\ and\ \citenamefont {Howard}}]{Mannam2020} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {V.}~\bibnamefont {Mannam}}, \bibinfo {author} {\bibfnamefont {Y.}~\bibnamefont {Zhang}}, \bibinfo {author} {\bibfnamefont {X.}~\bibnamefont {Yuan}}, \bibinfo {author} {\bibfnamefont {C.}~\bibnamefont {Ravasio}}, \ and\ \bibinfo {author} {\bibfnamefont {S.~S.}\ \bibnamefont {Howard}},\ }\href {\doibase 10.1088/2515-7647/abac1a} {\bibfield {journal} {\bibinfo {journal} {Journal of Physics: Photonics}\ }\textbf {\bibinfo {volume} {2}},\ \bibinfo {pages} {042005} (\bibinfo {year} {2020})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Smith}\ \emph {et~al.}(2019)\citenamefont {Smith}, \citenamefont {Yao}, \citenamefont {Sinsuebphon}, \citenamefont {Rudkouskaya}, \citenamefont {Un}, \citenamefont {Mazurkiewicz}, \citenamefont {Barroso}, \citenamefont {Yan},\ and\ \citenamefont {Intes}}]{Smith2019} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {J.~T.}\ \bibnamefont {Smith}}, \bibinfo {author} {\bibfnamefont {R.}~\bibnamefont {Yao}}, \bibinfo {author} {\bibfnamefont {N.}~\bibnamefont {Sinsuebphon}}, \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Rudkouskaya}}, \bibinfo {author} {\bibfnamefont {N.}~\bibnamefont {Un}}, \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Mazurkiewicz}}, \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Barroso}}, \bibinfo {author} {\bibfnamefont {P.}~\bibnamefont {Yan}}, \ and\ \bibinfo {author} {\bibfnamefont {X.}~\bibnamefont {Intes}},\ }\href {\doibase 10.1073/pnas.1912707116} {\bibfield {journal} {\bibinfo {journal} {Proceedings of the National Academy of Sciences}\ }\textbf {\bibinfo {volume} {116}},\ \bibinfo {pages} {24019} (\bibinfo {year} {2019})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Rocco}\ 
\emph {et~al.}(2014)\citenamefont {Rocco}, \citenamefont {Palmer}, \citenamefont {Valenzuela}, \citenamefont {Boyer}, \citenamefont {Freise},\ and\ \citenamefont {Bongs}}]{Rocco_2014} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {E.}~\bibnamefont {Rocco}}, \bibinfo {author} {\bibfnamefont {R.~N.}\ \bibnamefont {Palmer}}, \bibinfo {author} {\bibfnamefont {T.}~\bibnamefont {Valenzuela}}, \bibinfo {author} {\bibfnamefont {V.}~\bibnamefont {Boyer}}, \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Freise}}, \ and\ \bibinfo {author} {\bibfnamefont {K.}~\bibnamefont {Bongs}},\ }\href {\doibase 10.1088/1367-2630/16/9/093046} {\bibfield {journal} {\bibinfo {journal} {New Journal of Physics}\ }\textbf {\bibinfo {volume} {16}},\ \bibinfo {pages} {93046} (\bibinfo {year} {2014})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Qu}\ \emph {et~al.}(2020)\citenamefont {Qu}, \citenamefont {Evrard}, \citenamefont {Dalibard},\ and\ \citenamefont {Gerbier}}]{Qu2020} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Qu}}, \bibinfo {author} {\bibfnamefont {B.}~\bibnamefont {Evrard}}, \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Dalibard}}, \ and\ \bibinfo {author} {\bibfnamefont {F.}~\bibnamefont {Gerbier}},\ }\href {\doibase 10.1103/PhysRevLett.125.033401} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. 
Lett.}\ }\textbf {\bibinfo {volume} {125}},\ \bibinfo {pages} {033401} (\bibinfo {year} {2020})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Hosten}\ \emph {et~al.}(2016)\citenamefont {Hosten}, \citenamefont {Engelsen}, \citenamefont {Krishnakumar},\ and\ \citenamefont {Kasevich}}]{Hosten_2016} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {O.}~\bibnamefont {Hosten}}, \bibinfo {author} {\bibfnamefont {N.~J.}\ \bibnamefont {Engelsen}}, \bibinfo {author} {\bibfnamefont {R.}~\bibnamefont {Krishnakumar}}, \ and\ \bibinfo {author} {\bibfnamefont {M.~A.}\ \bibnamefont {Kasevich}},\ }\href {\doibase 10.1038/nature16176} {\bibfield {journal} {\bibinfo {journal} {Nature}\ }\textbf {\bibinfo {volume} {529}},\ \bibinfo {pages} {505} (\bibinfo {year} {2016})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Cox}\ \emph {et~al.}(2016)\citenamefont {Cox}, \citenamefont {Greve}, \citenamefont {Weiner},\ and\ \citenamefont {Thompson}}]{Cox_2016} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {K.~C.}\ \bibnamefont {Cox}}, \bibinfo {author} {\bibfnamefont {G.~P.}\ \bibnamefont {Greve}}, \bibinfo {author} {\bibfnamefont {J.~M.}\ \bibnamefont {Weiner}}, \ and\ \bibinfo {author} {\bibfnamefont {J.~K.}\ \bibnamefont {Thompson}},\ }\href {\doibase 10.1103/PhysRevLett.116.093602} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. 
Lett.}\ }\textbf {\bibinfo {volume} {116}},\ \bibinfo {pages} {093602} (\bibinfo {year} {2016})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Leroux}\ \emph {et~al.}(2010)\citenamefont {Leroux}, \citenamefont {Schleier-Smith},\ and\ \citenamefont {Vuleti\ifmmode~\acute{c}\else \'{c}\fi{}}}]{Schleier-Smith_2010b} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {I.~D.}\ \bibnamefont {Leroux}}, \bibinfo {author} {\bibfnamefont {M.~H.}\ \bibnamefont {Schleier-Smith}}, \ and\ \bibinfo {author} {\bibfnamefont {V.}~\bibnamefont {Vuleti\ifmmode~\acute{c}\else \'{c}\fi{}}},\ }\href {\doibase 10.1103/PhysRevLett.104.073602} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. Lett.}\ }\textbf {\bibinfo {volume} {104}},\ \bibinfo {pages} {073602} (\bibinfo {year} {2010})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Catrysse}\ and\ \citenamefont {Wandell}(2002)}]{Catrysse2002} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {P.~B.}\ \bibnamefont {Catrysse}}\ and\ \bibinfo {author} {\bibfnamefont {B.~A.}\ \bibnamefont {Wandell}},\ }\href {\doibase 10.1364/JOSAA.19.001610} {\bibfield {journal} {\bibinfo {journal} {Journal of the Optical Society of America}\ }\textbf {\bibinfo {volume} {19}} (\bibinfo {year} {2002}),\ 10.1364/JOSAA.19.001610}\BibitemShut {NoStop} \bibitem [{\citenamefont {Malia}\ \emph {et~al.}(2020)\citenamefont {Malia}, \citenamefont {Martínez-Rincón}, \citenamefont {Wu}, \citenamefont {Hosten},\ and\ \citenamefont {Kasevich}}]{Malia2020} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {B.}~\bibnamefont {Malia}}, \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Martínez-Rincón}}, \bibinfo {author} {\bibfnamefont {Y.}~\bibnamefont {Wu}}, \bibinfo {author} {\bibfnamefont {O.}~\bibnamefont {Hosten}}, \ and\ \bibinfo {author} {\bibfnamefont {M.~A.}\ \bibnamefont {Kasevich}},\ }\href {\doibase 10.1103/PhysRevLett.125.043202} {\bibfield {journal} {\bibinfo {journal} {Physical Review Letters}\ }\textbf 
{\bibinfo {volume} {125}} (\bibinfo {year} {2020}),\ 10.1103/PhysRevLett.125.043202}\BibitemShut {NoStop} \bibitem [{\citenamefont {Broyden}(1970)}]{Broyden1970} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {C.~G.}\ \bibnamefont {Broyden}},\ }\href {\doibase 10.1093/imamat/6.1.76} {\bibfield {journal} {\bibinfo {journal} {Journal of the Institute of Mathematics and Its Applications}\ }\textbf {\bibinfo {volume} {6}},\ \bibinfo {pages} {222} (\bibinfo {year} {1970})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Neumaier}(1998)}]{Neumaier1998} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Neumaier}},\ }\href {\doibase 10.1137/S0036144597321909} {\bibfield {journal} {\bibinfo {journal} {SIAM Review}\ }\textbf {\bibinfo {volume} {40}},\ \bibinfo {pages} {636} (\bibinfo {year} {1998})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Hu}\ \emph {et~al.}(2015)\citenamefont {Hu}, \citenamefont {Chen}, \citenamefont {Vendeiro}, \citenamefont {Zhang},\ and\ \citenamefont {Vuleti\ifmmode~\acute{c}\else \'{c}\fi{}}}]{Hu2015} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Hu}}, \bibinfo {author} {\bibfnamefont {W.}~\bibnamefont {Chen}}, \bibinfo {author} {\bibfnamefont {Z.}~\bibnamefont {Vendeiro}}, \bibinfo {author} {\bibfnamefont {H.}~\bibnamefont {Zhang}}, \ and\ \bibinfo {author} {\bibfnamefont {V.}~\bibnamefont {Vuleti\ifmmode~\acute{c}\else \'{c}\fi{}}},\ }\href {\doibase 10.1103/PhysRevA.92.063816} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. 
A}\ }\textbf {\bibinfo {volume} {92}},\ \bibinfo {pages} {063816} (\bibinfo {year} {2015})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Wu}\ \emph {et~al.}(2020)\citenamefont {Wu}, \citenamefont {Krishnakumar}, \citenamefont {Martínez-Rincón}, \citenamefont {Malia}, \citenamefont {Hosten},\ and\ \citenamefont {Kasevich}}]{Wu2020} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {Y.}~\bibnamefont {Wu}}, \bibinfo {author} {\bibfnamefont {R.}~\bibnamefont {Krishnakumar}}, \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Martínez-Rincón}}, \bibinfo {author} {\bibfnamefont {B.~K.}\ \bibnamefont {Malia}}, \bibinfo {author} {\bibfnamefont {O.}~\bibnamefont {Hosten}}, \ and\ \bibinfo {author} {\bibfnamefont {M.~A.}\ \bibnamefont {Kasevich}},\ }\href {\doibase 10.1103/PhysRevA.102.012224} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. A}\ }\textbf {\bibinfo {volume} {102}} (\bibinfo {year} {2020}),\ 10.1103/PhysRevA.102.012224}\BibitemShut {NoStop} \bibitem [{\citenamefont {Gross}(2012)}]{Gross2012} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {C.}~\bibnamefont {Gross}},\ }\href {\doibase 10.1088/0953-4075/45/10/103001} {\bibfield {journal} {\bibinfo {journal} {Journal of Physics B: Atomic, Molecular and Optical Physics}\ }\textbf {\bibinfo {volume} {45}},\ \bibinfo {pages} {103001} (\bibinfo {year} {2012})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Hueck}\ \emph {et~al.}(2017)\citenamefont {Hueck}, \citenamefont {Luick}, \citenamefont {Sobirey}, \citenamefont {Siegl}, \citenamefont {Lompe}, \citenamefont {Moritz}, \citenamefont {Clark},\ and\ \citenamefont {Chin}}]{Hueck2017} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {K.}~\bibnamefont {Hueck}}, \bibinfo {author} {\bibfnamefont {N.}~\bibnamefont {Luick}}, \bibinfo {author} {\bibfnamefont {L.}~\bibnamefont {Sobirey}}, \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Siegl}}, \bibinfo {author} {\bibfnamefont {T.}~\bibnamefont {Lompe}}, \bibinfo 
{author} {\bibfnamefont {H.}~\bibnamefont {Moritz}}, \bibinfo {author} {\bibfnamefont {L.~W.}\ \bibnamefont {Clark}}, \ and\ \bibinfo {author} {\bibfnamefont {C.}~\bibnamefont {Chin}},\ }\href {\doibase 10.1364/OE.25.008670} {\bibfield {journal} {\bibinfo {journal} {Opt. Express}\ }\textbf {\bibinfo {volume} {25}},\ \bibinfo {pages} {8670} (\bibinfo {year} {2017})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Huang}\ \emph {et~al.}(2020)\citenamefont {Huang}, \citenamefont {de~la Paz}, \citenamefont {Mazzoni}, \citenamefont {Ott}, \citenamefont {Sinatra}, \citenamefont {Alzar},\ and\ \citenamefont {Reichel}}]{Huang2020} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {M.-Z.}\ \bibnamefont {Huang}}, \bibinfo {author} {\bibfnamefont {J.~A.}\ \bibnamefont {de~la Paz}}, \bibinfo {author} {\bibfnamefont {T.}~\bibnamefont {Mazzoni}}, \bibinfo {author} {\bibfnamefont {K.}~\bibnamefont {Ott}}, \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Sinatra}}, \bibinfo {author} {\bibfnamefont {C.~L.~G.}\ \bibnamefont {Alzar}}, \ and\ \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Reichel}},\ }\href {https://arxiv.org/abs/2007.01964} {\ (\bibinfo {year} {2020})},\ \Eprint {http://arxiv.org/abs/2007.01964} {arXiv:2007.01964 [quant-ph]} \BibitemShut {NoStop} \bibitem [{\citenamefont {Ness}\ \emph {et~al.}(2020)\citenamefont {Ness}, \citenamefont {Vainbaum}, \citenamefont {Shkedrov}, \citenamefont {Florshaim},\ and\ \citenamefont {Sagi}}]{Ness2020} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {G.}~\bibnamefont {Ness}}, \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Vainbaum}}, \bibinfo {author} {\bibfnamefont {C.}~\bibnamefont {Shkedrov}}, \bibinfo {author} {\bibfnamefont {Y.}~\bibnamefont {Florshaim}}, \ and\ \bibinfo {author} {\bibfnamefont {Y.}~\bibnamefont {Sagi}},\ }\href {\doibase 10.1103/PhysRevApplied.14.014011} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. 
Applied}\ }\textbf {\bibinfo {volume} {14}},\ \bibinfo {pages} {014011} (\bibinfo {year} {2020})}\BibitemShut {NoStop} \end{thebibliography} \section{Figures} \begin{figure} \caption{Relative counts for each pixel. Full resolution image with $3.1\times 10^6$ pixels (upper) and counts binned into 192 superpixels (lower). The left half of the image ($\ket{\downarrow} \label{fig:counts} \end{figure} \begin{figure} \caption{Relative values for the weights $\beta_j$ for each superpixel under the parameters $J_z^\text{cutoff} \label{fig:map} \end{figure} \begin{figure} \caption{The evaluation metric $\Delta\theta$ is used to choose the weight $\lambda$ of the nearest neighbor penalty. The optimal regularization is $\lambda=20$ when $J_z^\text{cutoff} \label{fig:lambda} \end{figure} \begin{figure} \caption{The evaluation metric $\Delta\theta$ is used to choose the hyperparameter $J_z^\text{cutoff} \label{fig:cutoff} \end{figure} \begin{figure} \caption{Mean least squares error (first term of the cost function $G$) vs number of samples with nonzero weight ($m_c$). Optimal values of $J_z^\text{cutoff} \label{fig:gap} \end{figure} \section{Tables} \begingroup \squeezetable \begin{table}[h] \begin{tabularx}{0.5\linewidth}{c @{\hskip 0.35cm}c @{\hskip 0.35cm}c@{\hskip 0.35cm} c} \hline \hline \begin{tabular}{c}mean $\tilde{\theta}^{(2)}$ \\(mrad)\end{tabular} & \begin{tabular}{c}no correction\\(mrad) \end{tabular}&\begin{tabular}{c} mean position\\ correction (mrad)\end{tabular} & \begin{tabular}{c}supervised \\learning (mrad)\end{tabular}\\ \hline 0 & $1.86\pm0.09$ &$0.81\pm0.06$ &$0.69\pm0.06$ \\ 25 &$2.23\pm0.15$ &$1.03\pm0.06$&$0.88\pm0.06$ \\ \hline \hline \end{tabularx} \caption{Angular resolution $\Delta\theta$ of the test sets for different analysis methods. The set where the fluorescence measurement has a mean value of $\tilde{\theta}^{(2)}=\tilde{J}_z^{(2)}/C(\tilde{N}/2)=0$ contains $N=390\,000$ (QPN limit is $\Delta\theta=1.60~\text{mrad}$). 
The set with $\tilde{\theta}^{(2)}=25~\text{mrad}$ contains $N=240\,000$ (QPN limit is $\Delta\theta=2.04~\text{mrad}$).} \label{tab:testValues} \end{table} \endgroup \section{Pseudocode for training model} \noindent \textcolor{red}{BEGIN SCRIPT}\\ \newline \noindent\textcolor{green}{\% Import experimental results}\\ \noindent \textcolor{blue}{SET} \textcolor{magenta}{CavityJz} to the Jz values from the QND measurement of the sample dataset\\ \noindent \textcolor{blue}{SET} \textcolor{magenta}{PixelValuesReal} to the counts in the images corresponding to each \textcolor{magenta}{CavityJz}\\ \noindent \textcolor{blue}{SET} \textcolor{magenta}{FrequencyShift} to the difference in probe frequency between the measured value and the target value\\ \noindent \textcolor{blue}{DIVIDE} \textcolor{magenta}{FrequencyShift} by half of the hyperfine transition frequency\\ \newline \noindent\textcolor{green}{\% Create superpixels}\\ \noindent \textcolor{blue}{FOR} each image\\ \indent \indent \textcolor{blue}{FOR} each 128x128 section of real pixels\\ \indent \indent \indent \indent \textcolor{blue}{SET} \textcolor{magenta}{PixelValues} to sum of \textcolor{magenta}{PixelValuesReal} inside the section\\ \indent\indent \textcolor{blue}{END FOR}\\ \noindent \textcolor{blue}{END FOR}\\ \newline \noindent\textcolor{green}{\% Segment experimental data}\\ \noindent\textcolor{blue}{SORT} \textcolor{magenta}{CavityJz}, \textcolor{magenta}{PixelValues}, and \textcolor{magenta}{FrequencyShift} into Training, Validation, and Test datasets\\ \newline \noindent\textcolor{green}{\% Initialize and minimize beta}\\ \noindent \textcolor{blue}{SET} \textcolor{magenta}{InitialBetaBias} to 0\\ \noindent \textcolor{blue}{SET} all \textcolor{magenta}{InitialBetaTrain} to 1\\ \newline \noindent \textcolor{blue}{CALL} \textcolor{cyan}{fminunc} with \textcolor{cyan}{EvaluateCost}, \textcolor{magenta}{InitialBetaBias}, \textcolor{magenta}{InitialBetaTrain}, \textcolor{magenta}{PixelValuesTrain}, 
\textcolor{magenta}{CavityJzTrain}, \textcolor{magenta}{FrequencyShiftTrain}\\ \indent\indent \textcolor{blue}{RETURNING} \textcolor{magenta}{FinalBiasBeta}, \textcolor{magenta}{FinalBeta}, \textcolor{magenta}{FinalCost}\\ \newline \textcolor{green}{\% Find the model results for the Validation and Test sets}\\ \noindent \textcolor{blue}{CALL} \textcolor{cyan}{EvaluateModel} with \textcolor{magenta}{FinalBiasBeta}, \textcolor{magenta}{FinalBeta}, \textcolor{magenta}{PixelValuesValidation}\\ \indent \indent \textcolor{blue}{RETURNING} \textcolor{magenta}{JzValidation}, \textcolor{magenta}{NValidation}\\ \noindent \textcolor{blue}{SET} \textcolor{magenta}{TargetJzValidation} to \textcolor{magenta}{CavityJzValidation} plus half the product of \textcolor{magenta}{FrequencyShiftValidation} and \textcolor{magenta}{NValidation}\\ \noindent \textcolor{blue}{SET} \textcolor{magenta}{VarianceJzValidation} to the variance of the difference between \textcolor{magenta}{JzValidation} and \textcolor{magenta}{TargetJzValidation}\\ \newline \noindent \textcolor{blue}{CALL} \textcolor{cyan}{EvaluateModel} with \textcolor{magenta}{FinalBiasBeta}, \textcolor{magenta}{FinalBeta}, \textcolor{magenta}{PixelValuesTest}\\ \indent \indent \textcolor{blue}{RETURNING} \textcolor{magenta}{JzTest}, \textcolor{magenta}{NTest}\\ \noindent \textcolor{blue}{SET} \textcolor{magenta}{TargetJzTest} to \textcolor{magenta}{CavityJzTest} plus half the product of \textcolor{magenta}{FrequencyShiftTest} and \textcolor{magenta}{NTest}\\ \noindent \textcolor{blue}{SET} \textcolor{magenta}{VarianceJzTest} to the variance of the difference between \textcolor{magenta}{JzTest} and \textcolor{magenta}{TargetJzTest}\\ \textcolor{red}{END SCRIPT}\\ \newline \newline \textcolor{red}{FUNCTION} \textcolor{cyan}{fminunc}\\ \noindent \textcolor{blue}{PASS IN} cost function and its arguments (\textcolor{cyan}{EvaluateCost}, \textcolor{magenta}{BetaBias}, \textcolor{magenta}{BetaTrain}, \textcolor{magenta}{PixelValues}, 
\textcolor{magenta}{CavityJz}, \textcolor{magenta}{FrequencyShift})\\ \newline \noindent\textcolor{green}{\% This is a built in MATLAB function based on the BFGS algorithm.}\\ \noindent\textcolor{green}{\% The options used are:}\\ \noindent\textcolor{green}{\% TolX = 1E-7}\\ \noindent\textcolor{green}{\% TolFun = 1E-7}\\ \noindent\textcolor{green}{\% MaxIter 1E5}\\ \noindent\textcolor{green}{\% MaxFunction = 5E5}\\ \newline \noindent\textcolor{blue}{WHILE} model error is larger than tolerances\\ \indent\indent \textcolor{blue}{CALL} \textcolor{cyan}{EvaluateCost} with \textcolor{magenta}{BetaBias}, \textcolor{magenta}{BetaTrain}, \textcolor{magenta}{PixelValues}, \textcolor{magenta}{CavityJz}, \textcolor{magenta}{FrequencyShift}\\ \indent\indent\indent\indent \textcolor{blue}{RETURNING} \textcolor{magenta}{Cost}, \textcolor{magenta}{GradientCost}, \textcolor{magenta}{GradientCostBias}\\ \indent\indent \textcolor{blue}{COMPUTE} BFGS algorithm\\ \indent\indent\textcolor{blue}{UPDATE} \textcolor{magenta}{BetaBias} and \textcolor{magenta}{BetaTrain}\\ \textcolor{blue}{END WHILE}\\ \newline \noindent \textcolor{blue}{PASS OUT} \textcolor{magenta}{BetaBias}, \textcolor{magenta}{BetaTrain}, \textcolor{magenta}{Cost}\\ \textcolor{red}{END FUNCTION}\\ \newline \noindent\textcolor{red}{FUNCTION} \textcolor{cyan}{EvaluateCost}\\ \noindent \textcolor{blue}{PASS IN} \textcolor{magenta}{BetaBias}, \textcolor{magenta}{BetaTrain}, \textcolor{magenta}{PixelValues}, \textcolor{magenta}{CavityJz}, \textcolor{magenta}{FrequencyShift}\\ \newline \noindent\textcolor{green}{\% Initialize variables}\\ \noindent \textcolor{blue}{SET} all \textcolor{magenta}{JzWeights} to 0\\ \noindent \textcolor{blue}{SET} \textcolor{magenta}{NeigborPenalty} to 0\\ \noindent \textcolor{blue}{SET} all \textcolor{magenta}{NeighborGradients} to 0\\ \noindent \textcolor{blue}{SET} \textcolor{magenta}{JzNorm} to the mean of \textcolor{magenta}{CavityJz}\\ \noindent \textcolor{blue}{SET} 
\textcolor{magenta}{Cost} to 0\\ \noindent \textcolor{blue}{SET} all \textcolor{magenta}{GradientCost} to 0\\ \noindent \textcolor{blue}{SET} \textcolor{magenta}{SubsetSize} to 0\\ \newline \noindent\textcolor{green}{\% Define hyperparameters}\\ \noindent \textcolor{blue}{SET} \textcolor{magenta}{NeighborWeight} to 20\\ \noindent \textcolor{blue}{SET} \textcolor{magenta}{JzCutoff} to 200\\ \newline \noindent \textcolor{blue}{FOR} each sample\\ \indent\indent\textcolor{green}{\% Find this iteration's Jz and N values for the Test set}\\ \indent \indent \textcolor{blue}{CALL} \textcolor{cyan}{EvaluateModel} with \textcolor{magenta}{BetaBias}, \textcolor{magenta}{BetaTrain}, and \textcolor{magenta}{PixelValues} \\ \indent\indent\indent\indent \textcolor{blue}{RETURNING} \textcolor{magenta}{TestJz}, \textcolor{magenta}{TestN}\\ \newline \indent\indent\textcolor{green}{\% Apply frequency correction and find error}\\ \indent\indent \textcolor{blue}{SET} \textcolor{magenta}{TargetJz} to \textcolor{magenta}{CavityJz} plus half the product of \textcolor{magenta}{FrequencyShift} and \textcolor{magenta}{TestN}\\ \indent\indent \textcolor{blue}{SET} \textcolor{magenta}{JzError} to the difference of \textcolor{magenta}{TestJz} and \textcolor{magenta}{TargetJz}\\ \indent \indent \textcolor{blue}{SET} \textcolor{magenta}{JzNorm} to the mean of \textcolor{magenta}{CavityJz}\\ \indent \indent \textcolor{blue}{SET} \textcolor{magenta}{JzErrorNorm} to \textcolor{magenta}{JzError} divided by \textcolor{magenta}{JzNorm}\\ \newline \indent\indent\textcolor{blue}{IF} \textcolor{magenta}{TargetJz} is less than \textcolor{magenta}{JzCutoff}\\ \indent\indent\indent\indent \textcolor{blue}{SET} \textcolor{magenta}{JzWeights} for this point to be 0\\ \indent\indent\textcolor{blue}{ELSE} \\ \indent\indent\indent\indent \noindent \textcolor{blue}{SET} \textcolor{magenta}{JzWeights} for this point to be 1\\ \indent\indent\indent\indent\textcolor{blue}{INCREMENT} \textcolor{magenta}{SubsetSize} 
by 1\\ \indent\indent\textcolor{blue}{END IF}\\ \indent\indent\textcolor{green}{\% Find neighbor penalty and its gradient}\\ \indent\indent \textcolor{blue}{FOR} each edge between neighboring pixels\\ \indent\indent\indent\indent\textcolor{blue}{ADD} the square of the difference in \textcolor{magenta}{BetaTrain} (corresponding to the pixels touching this edge) to \textcolor{magenta}{NeigborPenalty}\\ \indent\indent \textcolor{blue}{END FOR} \\ \newline \indent\indent\noindent \textcolor{blue}{FOR} each pixel\\ \indent\indent \indent\indent\textcolor{blue}{ADD} the difference in \textcolor{magenta}{BetaTrain} to \textcolor{magenta}{NeighborGradients} for all edges touching this pixel\\ \indent\indent\noindent \textcolor{blue}{END FOR}\\ \newline \indent\indent\textcolor{green}{\% Calculate cost function and its gradient}\\ \indent\indent \textcolor{blue}{ADD} the product of \textcolor{magenta}{JzWeights} and the square of \textcolor{magenta}{JzError} to \textcolor{magenta}{Cost}\\ \indent\indent\textcolor{blue}{SET} \textcolor{magenta}{GradientCostBias} to the product of \textcolor{magenta}{JzWeights} and \textcolor{magenta}{JzError}\\ \indent\indent \textcolor{blue}{FOR} each pixel corresponding to the left side of image\\ \indent\indent\indent\indent\textcolor{blue}{SUBTRACT} the product of \textcolor{magenta}{JzWeights}, \textcolor{magenta}{JzError}, and \textcolor{magenta}{PixelValues(pixel)} from \textcolor{magenta}{GradientCost}\\ \indent\indent \textcolor{blue}{END FOR}\\ \indent\indent \textcolor{blue}{FOR} each pixel corresponding to the right side of image\\ \indent\indent\indent\indent \textcolor{blue}{ADD} the product of \textcolor{magenta}{JzWeights}, \textcolor{magenta}{JzError}, and \textcolor{magenta}{PixelValues(pixel)} to \textcolor{magenta}{GradientCost}\\ \indent\indent \textcolor{blue}{END FOR}\\ \noindent \textcolor{blue}{END FOR}\\ \newline \noindent\textcolor{green}{\% add regularization and normalize results}\\ \textcolor{blue}{ADD} the 
product of \textcolor{magenta}{NeigborPenalty} and \textcolor{magenta}{NeighborWeight} to \textcolor{magenta}{Cost}\\ \noindent \textcolor{blue}{DIVIDE} \textcolor{magenta}{Cost} by twice the \textcolor{magenta}{SubsetSize}\\ \newline \noindent \textcolor{blue}{FOR} every sample\\ \indent\indent \textcolor{blue}{FOR} every pixel\\ \indent\indent \indent\indent \textcolor{blue}{ADD} the product of twice \textcolor{magenta}{NeighborWeight} and \textcolor{magenta}{NeighborGradients} to \textcolor{magenta}{GradientCost}\\ \indent\indent \textcolor{blue}{END FOR}\\ \noindent \textcolor{blue}{END FOR}\\ \noindent\textcolor{blue}{DIVIDE} all \textcolor{magenta}{GradientCost} by twice the \textcolor{magenta}{SubsetSize}\\ \newline \noindent \textcolor{blue}{PASS OUT} \textcolor{magenta}{Cost}, \textcolor{magenta}{GradientCost}, \textcolor{magenta}{GradientCostBias}\\ \textcolor{red}{END FUNCTION} \\ \newline \textcolor{red}{FUNCTION} \textcolor{cyan}{EvaluateModel}\\ \noindent \textcolor{blue}{PASS IN} \textcolor{magenta}{BetaBias}, \textcolor{magenta}{BetaTrain}, \textcolor{magenta}{PixelValues}\\ \newline \noindent\textcolor{green}{\% Initialize Variables}\\ \noindent \textcolor{blue}{SET} \textcolor{magenta}{Jz} to \textcolor{magenta}{BetaBias}\\ \noindent \textcolor{blue}{SET} \textcolor{magenta}{N} to 0\\ \newline \noindent\textcolor{green}{\% Calculate N}\\ \noindent \textcolor{blue}{FOR} each pixel\\ \indent\indent\textcolor{blue}{ADD} product of \textcolor{magenta}{PixelValues(pixel)} and \textcolor{magenta}{BetaTrain(pixel)} to \textcolor{magenta}{N}\\ \noindent \textcolor{blue}{END FOR}\\ \newline \noindent\textcolor{green}{\% Calculate Jz}\\ \noindent \textcolor{blue}{FOR} each pixel corresponding the the left side of image\\ \indent\indent\textcolor{blue}{SUBTRACT} product of \textcolor{magenta}{PixelValues(pixel)} and \textcolor{magenta}{BetaTrain(pixel)} from \textcolor{magenta}{Jz}\\ \noindent \textcolor{blue}{END FOR}\\ \noindent \textcolor{blue}{FOR} each 
pixel corresponding to the right side of image\\ \indent\indent\textcolor{blue}{ADD} product of \textcolor{magenta}{PixelValues(pixel)} and \textcolor{magenta}{BetaTrain(pixel)} to \textcolor{magenta}{Jz}\\ \noindent \textcolor{blue}{END FOR}\\ \indent\indent\textcolor{blue}{DIVIDE} \textcolor{magenta}{Jz} by 2\\ \newline \noindent \textcolor{blue}{PASS OUT} \textcolor{magenta}{Jz}, \textcolor{magenta}{N}\\ \textcolor{red}{END FUNCTION} \end{document}
\begin{document} \title{Long-term stability of interacting Hawkes processes on random graphs} \begin{abstract} We consider a population of Hawkes processes modeling the activity of $N$ interacting neurons. The neurons are regularly positioned on the segment $[0,1]$, and the connectivity between neurons is given by a random possibly diluted and inhomogeneous graph where the probability of presence of each edge depends on the spatial position of its vertices through a spatial kernel. The main result of the paper concerns the long-time stability of the synaptic current of the population, as $N\to\infty$, in the subcritical regime in case the synaptic memory kernel is exponential, up to time horizons that are polynomial in $N$. \end{abstract} \noindent {\sc {\bf Keywords.}} Multivariate nonlinear Hawkes processes, Mean-field systems, Neural Field Equation, Spatially extended system, $W$-Random graph.\\ \noindent {\sc {\bf AMS Classification.}} 60F15, 60G55, 44A35, 92B20. \section{Introduction} \subsection{Hawkes processes in neuroscience} In the present paper we study the large time behavior of a population of interacting and spiking neurons, as the size of the population $N$ tends to infinity. We model the activity of a neuron by a point process where each point represents the time of a spike: $Z_{N,i}(t)$ counts the number of spikes during the time interval $[0,t]$ of the $i$th neuron of the population. Its intensity at time $t$ conditioned on the past $[0, t)$ is given by $\lambda_{N,i}(t)$, in the sense that $$\mathbf{P}\left( Z_{N,i} \text{ jumps between} (t,t+dt) \vert \mathcal{F}_t\right)= \lambda_{N,i}(t)dt,$$ where $\mathcal{F}_t:=\sigma\left( Z_{N,i}(s), s\leq t, 1\leq i\leq N\right)$. For the choice of $\lambda_{N,i}$, we want to account for the dependence of the activity of a neuron on the past of the whole population : the spike of one neuron can trigger others spikes. \textit{Hawkes processes} are then a natural choice to emphasize this interdependency. 
A generic choice is \begin{equation}\label{eq:def_lambda_generic} \lambda_{N,i}(t)=\mu(t,x_i)+f\left( v(t,x_i)+\dfrac{1}{N}\sum_{j=1}^N w_{ij}^{(N)} \int_0^{t-} h(t-s) dZ_{N,j}(s)\right). \end{equation} Here, with the $i$th neuron at position $x_i=\frac{i}{N}\in I:=[0,1]$, $f ~:~ \mathbb{R} \longrightarrow \mathbb{R}_+$ represents the synaptic integration, $\mu(t,\cdot)~:~ I \longrightarrow \mathbb{R}_+$ a spontaneous activity of the neuron at time $t$, $v(t,\cdot)~:~ I \longrightarrow \mathbb{R}$ a past activity and $h~:~ \mathbb{R}_+ \longrightarrow \mathbb{R}$ a memory function which models how a past jump of the system affects the present intensity. The term $w_{ij}^{(N)}$ represents the random inhomogeneous interaction between neurons $i$ and $j$, that will be modeled here in terms of the realization of a random graph. Since the seminal works of \cite{HAWKES1971, Hawkes1974}, there has been a renewed interest in the use of Hawkes processes, especially in neuroscience. A common simplified framework is to consider an interaction on the complete graph, that is taking $w_{ij}^{(N)}=1$ in \eqref{eq:def_lambda_generic}, as done in \cite{delattre2016}. In this case, a very simple instance of \eqref{eq:def_lambda_generic} concerns the so called \emph{linear case}, when $f(x)=x$,$\mu(t,x)=\mu$ and $v=0$, that is $\lambda_{N,i}(t)=\lambda_N(t)=\mu+\frac{1}{N}\sum_{j=1}^N \int_0^{t-} h(t-s) dZ_{N,j}(s)$, with $h\geq 0$ (see \cite{delattre2016}). The biological evidence \cite{Bosking1997,Mountcastle1997} of a spatial organisation of neurons in the brain has led to more elaborate Hawkes models with spatial interaction (see \cite{Touboul2014,Ditlevsen2017,CHEVALLIER20191}), possibly including inhibition (see \cite{Pfaffelhuber2022}). 
This would correspond in \eqref{eq:def_lambda_generic} to take $w_{ij}^{(N)}=W(x_i,x_j)$, where $W$ is a macroscopic interaction kernel, usual examples being the exponential distribution on $\mathbb{R}$, $W(x,y)=\dfrac{1}{2\sigma}\exp\left( -\dfrac{\vert x-y\vert}{\sigma}\right)$ or the ``Mexican hat'' distribution $W(x,y)=e^{-\vert x-y\vert} - Ae^{\frac{-\vert x-y\vert}{\sigma}}$, $A\in \mathbb{R},~\sigma>0$. The macroscopic limit of the multivariate Hawkes process \eqref{eq:def_lambda_generic} is then given by a family of spatially extended inhomogeneous Poisson processes whose intensities $(\lambda_t(x))_{x\in I}$ solve the convolution equation \begin{equation}\label{eq:def_lambda_lim_generic} \lambda_t(x)=\mu_t(x)+f\left( v_t(x)+\int_I W(x,y) \int_0^{t} h(t-s) \lambda_s(y)dsdy\right). \end{equation} A crucial example is the exponential case, that is when $h(t)=e^{-\alpha t}$ for some $\alpha>0$. In this case, the Hawkes process with intensity \eqref{eq:def_lambda_generic} is Markovian (see \cite{Ditlevsen2017}). Denoting in \eqref{eq:def_lambda_lim_generic} $u_t(x):=v_t(x)+\int_I W(x,y) \int_0^{t} h(t-s) \lambda_s(y)dsdy$ as the potential of a neuron (the synaptic current) localised in $x$ at time $t$ (so that \eqref{eq:def_lambda_lim_generic} becomes $\lambda_t(x)=f(u_t(x))$), an easy computation (see \cite{CHEVALLIER20191}) gives that, when $v_t(x)=e^{-\alpha t}v_0(x)$ for some $v_0$, $u$ solves the \emph{Neural Field Equation} (NFE) \begin{equation}\label{eq:NFE} \dfrac{\partial u_t(x)}{\partial t}=-\alpha u_t(x)+\int_I W(x,y)f(u_t(y))dy+ I_t(x), \end{equation} with source term $I_t(x):=\int_I W(x,y)\mu_t(y)dy$. Equation \eqref{eq:NFE} has been extensively studied in the literature, mostly from a phenomenological perspective \cite{Wilson1972,Amari1977}, and is an important example of macroscopic neural dynamics with non-local interactions (we refer to \cite{Bressloff2011} for an extensive review on the subject). 
In a previous work \cite{agathenerine2021multivariate}, we give a microscopic interpretation of the macroscopic kernel $W$ in terms of an inhomogeneous graph of interaction. We consider $w_{ij}^{(N)}=\xi_{ij}^{(N)} \kappa_i$ in \eqref{eq:def_lambda_generic}, where $\left(\xi_{ij}^{(N)}\right)_{1\leq i,j\leq N}$ is a collection of independent Bernoulli variables, with individual parameter $W(x_i,x_j)$: the probability that two neurons are connected depends on their spatial positions. The term $\kappa_i$ is a suitable local renormalisation parameter, to ensure that the interaction remains of order $1$. This modeling constitutes a further difficulty in the analysis as we are no longer in a mean-field framework: contrary to the case $w_{ij}^{(N)}=1$, the interaction \eqref{eq:def_lambda_generic} is no longer a functional of the empirical measure of the particles $\left(Z_{N,1},\cdots, Z_{N,N}\right)$. A recent interest has been shown to similar issues in the case of diffusions interacting on random graphs (first in the homogeneous Erd\H{o}s-R\'enyi case \cite{DelattreGL2016,Coppini2019,Coppini_Lucon_Poquet2022,Coppini2022}, and secondly for inhomogenous random graph \cite{Luon2020,bayraktar2021graphon,bet2020weakly}). A common motivation between \cite{agathenerine2021multivariate} in the case of Hawkes processes and \cite{Luon2020,bayraktar2021graphon,bet2020weakly} in the case of diffusions is to understand how the inhomogeneity of the underlying graph may or may not influence the long time dynamics of the system. An issue common to all mean-field models (and their perturbations) is that there is, in general, no possibility to interchange the limits $N\to \infty $ and $t\to\infty$. 
More precisely, restricting to Hawkes processes, a usual propagation of chaos result (see \cite[Theorem 8]{delattre2016}, \cite[Theorem 1]{CHEVALLIER20191}, \cite[Theorem 3.10]{agathenerine2021multivariate}) may be stated as follows: for fixed $T>0$, there exists some $C(T)>0$ such that \begin{equation}\label{eq:chaos_generic} \sup_{1\leq i \leq N} \mathbf{E}\left(\sup_{s\in [0,T]} \left\vert Z_{N,i}(s) - \overline{Z}_{i}(s) \right\vert \right) \leq \dfrac{C(T)}{\sqrt{N}}, \end{equation} where $\overline{Z}_{i}$ is a Poisson process with intensity $(\lambda_t(x_i))_{t\geq 0}$ defined in \eqref{eq:def_lambda_lim_generic} suitably coupled to $Z_{N,i}$, see the above references for details. Generically, $C(T)$ is of the form $\exp(CT)$, such that \eqref{eq:chaos_generic} remains only relevant up to $T \sim c \log N$ with $c$ sufficiently small. In the pure mean-field linear case ($w_{ij}^{(N)}=1$, $f(x)=x$), there is a well known phase transition \cite[Theorems 10,11]{delattre2016} when $\Vert h \Vert_1=\int_0^\infty h(t) dt<1$ (\emph{subcritical case}), $\lambda_t\xrightarrow[t\to\infty]{}\dfrac{\mu}{1-\Vert h \Vert_1}$, whereas when $\Vert h \Vert_1>1$ (\emph{supercritical case}), $\lambda_t\xrightarrow[t\to\infty]{}\infty$. This phase transition has been extended to the inhomogeneous case in \cite{agathenerine2021multivariate}. In the subcritical case, one can actually improve \eqref{eq:chaos_generic} in the sense that $C(T)$ is now linear in $T$ so that \eqref{eq:chaos_generic} remains relevant up to $T=o(\sqrt{N})$. A natural question is to ask if this approximation remains valid beyond this time scale. 
The purpose of the present work is to address this question: we show that, in the whole generality of \eqref{eq:def_lambda_generic}, in the subcritical regime and exponential case (see details below), the macroscopic intensity \eqref{eq:def_lambda_lim_generic} converges to a finite limit when $t\to\infty$ and that the microscopic system remains close to this limit up to polynomial times in $N$. \subsection{Notation} We denote by $C_{\text{parameters}}$ a constant $C>0$ which only depends on the parameters inside the lower index. These constants can change from line to line or inside the same equation; we choose just to highlight the dependency they contain. When it is not relevant, we just write $C$. For any $d\geq 1$, we denote by $\vert x\vert$ and $x \cdot y$ the Euclidean norm and scalar product of elements $x,y\in \mathbb{R}^d$. For $(E,\mathcal{A},\mu)$ a measured space, for a function $g$ in $L^p(E,\mu)$ with $p\geq 1$, we write $\Vert g \Vert_{E,\mu,p}:=\left( \int_E \vert g \vert^p d\mu \right)^\frac{1}{p}$. When $p=2$, we denote by $\langle \cdot,\cdot \rangle$ the Hermitian scalar product in $L^2(E)$. Without ambiguity, we may omit the subscript $(E,\mu)$ or $\mu$. For a real-valued bounded function $g$ on a space $E$, we write $\Vert g \Vert _\infty := \Vert g \Vert _{E,\infty}=\sup_{x\in E} \vert g(x) \vert$. For $(E,d)$ a metric space, we denote by $ \Vert g \Vert_L = \sup_{x\neq y} \vert g(x) - g(y) \vert / d(x,y)$ the Lipschitz seminorm of a real-valued function $g$ on $E$. We denote by $\mathcal{C}(E,\mathbb{R})$ the space of continuous functions from $E$ to $\mathbb{R}$, and $\mathcal{C}_b(E,\mathbb{R})$ the space of continuous bounded ones. For any $T>0$, we denote by $\mathbb{D}\left([0,T],E\right)$ the space of c\`adl\`ag (right continuous with left limits) functions defined on $[0,T]$ and taking values in $E$. For any integer $N\geq 1$, we denote by $\llbracket 1, N \rrbracket$ the set $\left\{1,\cdots,N\right\}$. 
For any $p\in [0,1]$, $\mathcal{B}(p)$ denotes the Bernoulli distribution with parameter $p$. \subsection{The model} First, let us focus on the interaction between the particles. The graph of interaction for \eqref{eq:def_lambda_generic} is constructed as follows: \begin{deff}\label{def:espace_proba_bb} On a common probability space $\left(\widetilde{\Omega}, \widetilde{\mathcal{F}},\mathbb{P}\right)$, we consider a family of random variables $\xi^{(N)}=\left( \xi^{(N)}_{ij}\right)_{N\geq 1, i,j \in \llbracket 1,N \rrbracket}$ on $\widetilde{\Omega}$ such that under $\mathbb{P}$, for any $N\geq 1$ and $i,j \in \llbracket 1,N \rrbracket$, $\xi^{(N)}$ is a collection of mutually independent Bernoulli random variables such that for $1\leq i,j \leq N$, $\xi_{ij}^{(N)}$ has parameter $W_N(\frac{i}{N},\frac{j}{N})$, where \begin{equation}\label{eq:def_WN_P} W_N(x,y):= \rho_N W(x,y), \end{equation} with $\rho_N$ some dilution parameter and $W:I^2\to [0,1]$ a macroscopic interaction kernel. We assume that the particles in \eqref{eq:def_lambda_generic} are connected according to the oriented graph $\mathcal{G}_N= \left( \left\{1,\cdots,N\right\} , \xi^{(N)}\right)$. For any $i$ and $j$, $\xi^{(N)}_{ij}=1$ encodes for the presence of the edge $j\to i$ and $\xi^{(N)}_{ij}=0$ for its absence. The interaction in \eqref{eq:def_lambda_generic} is fixed as \begin{equation}\label{eq:def_wij} w_{ij}^{(N)}=\dfrac{\xi_{ij}^{(N)}}{\rho_N}, \end{equation} so that the interaction term remains of order 1 as $N\to\infty$. \end{deff} The class \eqref{eq:def_WN_P} of inhomogenous graphs falls into the framework of $W$-random graphs, see \cite{Lovsz2006,borgs2008,borgs2012}. One distinguishes the \textbf{dense case} when $\lim_{N\to\infty} \rho_N= \rho>0$ and the \textbf{diluted case} when $\rho_N \to 0$. 
We now fix these sequences, and work on a filtered probability space $\left(\Omega,\mathcal{F},\left(\mathcal{F}_t\right)_{t\geq 0},\mathbf{P}\right)$ rich enough that all the following processes can be defined. We denote by $\mathbf{E}$ the expectation under $\mathbf{P}$ and $\mathbb{E}$ the expectation w.r.t. $\mathbb{P}$. In the following definitions, $N$ is fixed and the particles are regularly located on the segment $I=[0,1]$. We denote by $x_i=\frac{i}{N}$ the position of the $i$-th neuron in the population of size $N$. We also divide $I$ into $N$ segments $B_{N,i}=\left(\frac{i-1}{N},\frac{i}{N}\right)$ of equal length.\\ We can now formally define our process of interest. \begin{deff}\label{def:H2} Let $\left(\pi_i(ds,dz)\right)_{1\leq i \leq N}$ be a sequence of i.i.d. Poisson random measures on $\mathbb{R}_+\times \mathbb{R}_+$ with intensity measure $dsdz$. A $\left(\mathcal{F}_t\right)$-adapted multivariate counting process $\left(Z_{N,1}\left(t\right),...,Z_{N,N}\left(t\right)\right)_{t\geq 0}$ defined on $\left(\Omega,\mathcal{F},\left(\mathcal{F}_t\right)_{t\geq 0},\mathbf{P}\right)$ is called \emph{a multivariate Hawkes process} with the set of parameters $\left(N,F,\xi^{(N)},W_N,\eta,h\right)$ if $\mathbf{P}$-almost surely, for all $t\geq 0$ and $i \in \llbracket 1, N \rrbracket$: \begin{equation}\label{eq:def_ZiN} Z_{N,i}(t) = \int_0^t \int_0^\infty \mathbf{1}_{\{z\leq \lambda_{N,i}(s)\}} \pi_i(ds,dz) \end{equation} with $\lambda_{N,i}(t)$ defined by \begin{equation}\label{eq:def_lambdaiN_intro} \lambda_{N,i}(t)= F(X_{N,i}(t-), \eta_t(x_i)), \end{equation} where \begin{equation}\label{eq:def_UiN} X_{N,i}(t)=\sum_{j=1}^N \dfrac{w_{ij}^{(N)}}{N}\int_0^{t} h(t-s) dZ_{N,j}(s), \end{equation} $\eta~:~[0, +\infty)\times I\longrightarrow \mathbb{R}^d$ for some $d \geq 1$ and $F ~:~ \mathbb{R}\times \mathbb{R}^d \longrightarrow \mathbb{R}^+$. 
\end{deff} Our main focus is to study the quantity $\left(X_{N,i}\right)_{1\leq i \leq N}$ defined in \eqref{eq:def_UiN} as $N\to\infty$, and more precisely the random profile defined for all $x\in I$ by: \begin{equation}\label{eq:def_UN} X_N(t)(x):=\sum_{i=1}^N X_{N,i}(t) \mathbf{1}_{x\in\left(\frac{i-1}{N}, \frac{i}{N}\right]}. \end{equation} As $N \to \infty$, an informal Law of Large Numbers (LLN) argument shows that the empirical mean in \eqref{eq:def_lambdaiN_intro} becomes an expectation w.r.t. the candidate limit for $Z_{N,i}$: we can replace the sum in \eqref{eq:def_UiN} by the integral, the microscopic interaction term $w_{ij}^{(N)}$ in \eqref{eq:def_lambdaiN_intro} by the macroscopic term $W(x,y)$ (where $y$ describes the macroscopic distribution of the positions), and the past activity of the neuron $dZ_{N,j}(s)$ by its intensity in large population. In other words, the macroscopic spatial profile will be described by \begin{equation}\label{eq:def_utx} X_t(x)=\int_{I} W(x,y)\int_0^th(t-s) \lambda_s(y)ds~ dy, \end{equation} where the macroscopic intensity of a neuron at position $x\in I$ denoted by $\lambda_t(x)=F(X_t(x),\eta_t(x))$ solves \begin{equation}\label{eq:def_lambdabarre} \lambda_t(x)=F\left(\int_{I} W(x,y)\int_0^t h(t-s) \lambda_s(y)dsdy,\eta_t(x)\right). \end{equation} Such informal law of large number on a bounded time interval has been made rigorous under various settings, we refer for further references to \cite{delattre2016,CHEVALLIER20191} and more especially to \cite{agathenerine2021multivariate} which exactly incorporates the present hypotheses. \begin{remark}\label{rem:F-ou-f} In the expression \eqref{eq:def_lambdaiN_intro} of the intensity $\lambda_{N, i}$, $X_{N, i}$ given in \eqref{eq:def_UiN} accounts for the stochastic influence of the other interacting neurons, whereas $\eta_t$ represents the deterministic part of the intensity $\lambda_{N, i}$. 
Having in mind the generic example given in \eqref{eq:def_lambda_generic}, a typical choice would correspond to taking $d=2$ with $\eta:=(\mu, v)$ and \begin{equation} \label{eq:gen_F} F(X, \eta)= F(X, \mu, v)= \mu + f(v + X). \end{equation} Once again, $\mu$ here corresponds to the spontaneous Poisson activity of the neuron and one may see $v$ as a deterministic part in the evolution of the membrane potential of neuron $i$. Note that we generalize here slightly the framework considered in \cite{CHEVALLIER20191} in the sense that \cite{CHEVALLIER20191} considered \eqref{eq:gen_F} for $\mu\equiv 0$ and $v_t(x)= e^{-\alpha t} v_0(x)$ for some initial membrane potential $v_0(x)$. In the case of \eqref{eq:gen_F}, one retrieves the expression of the macroscopic intensity $\lambda_t(x)$ given in \eqref{eq:def_lambda_lim_generic}. Typical choices of $f$ in \eqref{eq:gen_F} are $f(x)=x$ (the so-called linear model) or some sigmoid function. Note that there will be an intrinsic mathematical difficulty in dealing with the linear case in this paper, as $f$ is not bounded in this case. As already mentioned in the introduction, for the choice of $h(t)= e^{-\alpha t}$ and $v_t(x)=e^{-\alpha t}v_0(x)$, a straightforward calculation shows that $u_t(x):= v_t(x)+ X_t(x)$ solves the scalar neural field equation \eqref{eq:NFE} with source term $I_t(x)= \int_I W(x,y)\mu(t,y)dy$. We choose here to work with the generic expression \eqref{eq:def_lambdaiN_intro} instead of \eqref{eq:def_lambda_generic} not only for conciseness of notation, but also to emphasize that the result does not intrinsically depend on the specific form of the function $F$. 
\end{remark} \section{Hypotheses and main results} \subsection{Hypotheses} \begin{hyp}\label{hyp_globales} We assume that \begin{itemize} \item $F$ is Lipschitz continuous : there exists $\Vert F \Vert_{L}$ such that for any $x, x'\in \mathbb{R}$, $\eta,\eta'\in \mathbb{R}^d$, we have $\vert F(x,\eta) - F(x',\eta') \vert \leq \Vert F \Vert_{L} \left( \vert x-x'\vert + \vert \eta-\eta'\vert \right)$. \item $F$ is non decreasing in the first variable, that is for any $\eta\in \mathbb{R}^d$, for any $x, x'\in \mathbb{R}$ such that $x\leq x'$, one has $F(x,\eta)\leq F(x',\eta)$. Moreover, we assume that $F$ is $\mathcal{C}^2$ on $\mathbb{R}^{d+1}$ with bounded derivatives. We denote by $\partial_x F$ and $\partial_x^2 F$ the partial derivatives of $F$ w.r.t. $x$ and (with some slight abuse of notation) $\partial_\eta F= \left(\partial_{\eta_k}F\right)_{k=1, \ldots d}$ as the gradient of $F$ w.r.t. the variable $\eta\in \mathbb{R}^d$ as well as $\partial_{x, \eta}^2 F= \left(\partial_{x, \eta_k}^2 F\right)_{k=1, \ldots d}$ and $\partial_\eta^2 F= \left(\partial^2_{\eta_k, \eta_l}F\right)_{k,l=1, \ldots d}$ the Hessian of $F$ w.r.t. the variable $\eta$. \item $\left(\eta_t(x)\right)_{t\geq 0,x\in I}$ is uniformly bounded in $(t,x)$. We also assume that there exists $\eta_\infty$ Lipschitz continuous on $I$ such that \begin{equation}\label{eq:def_delta_s} \delta_t:=\sup_{x\in I} \left| \eta_t(x)-\eta_\infty(x)\right| \xrightarrow[t\to\infty]{}0. \end{equation} \item The memory kernel $h$ is nonnegative and integrable on $[0,+\infty)$. \item We assume that $W:I^2\to [0,1]$ is continuous. We refer nonetheless to Section \ref{S:extension} where we show that the results of the paper remain true under weaker hypotheses on $W$. 
\end{itemize} \end{hyp} It has been shown in \cite{agathenerine2021multivariate} that the process defined in \eqref{eq:def_ZiN} is well-posed, and that the large population limit intensity \eqref{eq:def_lambdabarre} is well defined in the following sense. \begin{prop} \label{prop:exis_H_N} Under Hypothesis \ref{hyp_globales}, for a fixed realisation of the family $\left(\pi_i\right)_{1\leq i \leq N}$, there exists a pathwise unique multivariate Hawkes process (in the sense of Definition \ref{def:H2}) such that for any $T<\infty$, $\sup_{t\in [0,T]} \sup_{1\leq i \leq N} \mathbf{E}[Z_{N,i}(t)] <\infty$. \end{prop} \begin{prop} \label{prop:exis_lambda_barre} Let $T>0$. Under Hypothesis \ref{hyp_globales}, there exists a unique solution $\lambda$ in $\mathcal{C}_b([0,T]\times I, \mathbb{R})$ to \eqref{eq:def_lambdabarre} and this solution is nonnegative. \end{prop} Both Propositions \ref{prop:exis_H_N} and \ref{prop:exis_lambda_barre} can be found in \cite{agathenerine2021multivariate} as Propositions 2.5 and 2.7 respectively, where $F$ is chosen as $\eta=(\mu, v)$ and $F(x,\eta)=f(x+v)$ with $f$ a Lipschitz function. The same proofs work for our general case $F$. Proposition \ref{prop:exis_lambda_barre} also implies that the limiting spatial profile $X_t$ solving \eqref{eq:def_utx} is well defined.\\ Before writing our next hypothesis, we need to introduce the following integral operator. \begin{prop}\label{prop:proprietes_TW} Under Hypothesis \ref{hyp_globales}, the integral operator \begin{equation*} \begin{array}{rrcl} T_W:& H & \longrightarrow & H \\ & g & \longmapsto & \left(T_Wg : x \longmapsto \int_I W(x,y) g(y) dy \right) \end{array} \end{equation*} is continuous in both cases $H=L^\infty(I)$ and $H=L^2(I)$. When $H=L^2(I)$, $T_W$ is compact, its spectrum is the union of $\{0\}$ and a discrete sequence of eigenvalues $(\mu_{n})_{ n\geq1}$ such that $ \mu_{n}\to0$ as $n\to\infty$. 
Denote by $r_{\infty}=r_\infty(T_W)$, respectively $r_2=r_2(T_W)$ the spectral radii of $T_W$ in $L^\infty(I)$ and $L^2(I)$ respectively. Moreover, we have that \begin{equation} \label{eq:spectral_radii_equal} r_{ 2}(T_{ W})= r_{ \infty}(T_W). \end{equation} \end{prop} The proof can be found in Section \ref{S:proof_det_ut}. \begin{hyp}\label{hyp:subcritical} In the whole article, we are in the subcritical case defined by \begin{equation}\label{eq:def_subcritical} \left\Vert \partial_x F \right\Vert_\infty \Vert h \Vert _1 r_\infty < 1. \end{equation} \end{hyp} Note that in the complete mean-field case, $W\equiv 1$ and $r_\infty=1$ so that one retrieves the usual subcritical condition as in \cite{delattre2016}. In the linear case $\eta=\mu$ and $F(x, \eta)= \mu +x$, \eqref{eq:def_subcritical} is exactly the subcritical condition stated in \cite{agathenerine2021multivariate}. The aim of the paper is twofold: firstly, we state a general convergence result as $t\to\infty$ of $X_t$ defined in \eqref{eq:def_utx} (or equivalently $ \lambda_t$ in \eqref{eq:def_lambdabarre}), see Theorem~\ref{thm:large_time_cvg_u_t}. This result is valid for any general kernel $h$ satisfying Hypothesis~\ref{hyp_globales}. Secondly, we address the long-term stability of the microscopic profile $X_N$ defined in \eqref{eq:def_UN}, see Theorem~\ref{thm:long_time}. Contrary to the first one, this second result is stated for the particular choice of the exponential kernel $h$ defined as \begin{equation}\label{eq:def_exponential} h(t)=e^{-\alpha t}, \text{with }\alpha>0. \end{equation} The parameter $\alpha>0$ is often referred to as the leakage rate. The main advantage of this choice is that the process $X_N$ then becomes Markovian (see e.g. \cite[Section~5]{Ditlevsen2017}). This will turn out to be particularly helpful for the proof of Theorem~\ref{thm:long_time}. 
As already mentioned in the introduction, \eqref{eq:def_exponential} is the natural framework in which to observe the NFE \eqref{eq:NFE} as a macroscopic limit, recall Remark~\ref{rem:F-ou-f}. Note that in the exponential case \eqref{eq:def_exponential}, the subcritical case \eqref{eq:def_subcritical} reads \begin{equation}\label{eq:def_sub_exp} \left\Vert \partial_x F \right\Vert_\infty r_\infty < \alpha. \end{equation} For our second result (Theorem~\ref{thm:long_time}), we also need some hypotheses on the dilution of the graph. Recall the definition of $\rho_N$ in Definition~\ref{def:espace_proba_bb}. \begin{hyp}\label{hyp:scenarios} The dilution parameter $\rho_N \in [0, 1]$ satisfies the following dilution condition: there exists $\tau\in (0,\frac{1}{2})$ such that \begin{equation}\label{eq:dilution} N^{1-2\tau}\rho_N^4\xrightarrow[N\to\infty]{}\infty. \end{equation} If one supposes further that $F$ is bounded, we assume the weaker condition \begin{equation}\label{eq:dilution_Fbounded} N\rho_N^2\xrightarrow[N\to\infty]{}\infty. \end{equation} \end{hyp} \begin{rem} Hypothesis~\ref{hyp:scenarios} is stronger than $ \frac{N\rho_N}{\log N}\xrightarrow[N\to\infty]{} \infty$, which is a dilution condition commonly met in the literature concerning LLN results on bounded time intervals for interacting particles on random graphs: it is the same as in \cite{DelattreGL2016,Coppini2019} (and slightly stronger than the optimal $N\rho_N\to +\infty$ obtained in \cite{Coppini_Lucon_Poquet2022} in the case of diffusions and as in \cite{agathenerine2021multivariate} in the case of Hawkes processes). \end{rem} \subsection{Main results} Our first result, Theorem \ref{thm:large_time_cvg_u_t}, studies the limit as $t\to\infty$ of the macroscopic profile $X_t$ (as an element of $\mathcal{C}(I)$) defined in \eqref{eq:def_utx}. 
Our second result, Theorem \ref{thm:long_time}, focuses on the large time behaviour of $X_N(t)$ defined in \eqref{eq:def_UN} on any time interval of polynomial length. \subsubsection{Asymptotic behavior of $(X_t)$} Recall the definition of $X_t$ in \eqref{eq:def_utx}. \begin{thm}\label{thm:large_time_cvg_u_t} Under Hypotheses \ref{hyp_globales} and \ref{hyp:subcritical}, \begin{enumerate}[label=(\roman*)] \item there exists a unique continuous function $X_\infty:I\mapsto \mathbb{R}^+$ solution of \begin{equation}\label{eq:def_u_infty} X_\infty=\Vert h \Vert_1 T_W F\left( X_\infty,\eta_\infty\right). \end{equation} \item $\left(X_t\right)_{t\geq 0}$ converges uniformly on $I$ when $t\to\infty$ towards $X_\infty$. \end{enumerate} \end{thm} \begin{remark}\label{rem:correspondance_Xt_ell} Translating the result of Theorem~\ref{thm:large_time_cvg_u_t} in terms of the macroscopic intensity $\lambda_t$ defined in \eqref{eq:def_lambdabarre} gives immediately that $ \lambda_t$ converges uniformly to $ \ell$ solution to \begin{equation} \label{eq:def_l_lim} \ell= F \left(\Vert h\Vert_1 T_W \ell, \eta_\infty\right) \end{equation} The correspondence between $X_\infty$ and $\ell$ (recall \eqref{eq:def_utx}) is simply given by $X_\infty= \Vert h \Vert_1 T_W \ell$. \end{remark} \begin{remark}\label{rem:sys_lin} In the particular case of an exponential memory kernel \eqref{eq:def_sub_exp}, as a straightforward consequence of the expression of $X_t$ in \eqref{eq:def_utx} and $X_\infty$ in \eqref{eq:def_u_infty}, we have the following differential equation \begin{equation}\label{eq:dynamic_ut_uinfty} \partial_t \left( X_t-X_\infty\right)=-\alpha\left( X_t-X_\infty\right) + T_W \left( F(X_t,\eta_t) - F(X_\infty,\eta_\infty)\right). 
\end{equation} A simple Taylor expansion of $X_t$ around $X_\infty$ shows that the linearised system associated to the nonlinear \eqref{eq:dynamic_ut_uinfty} is then \begin{equation}\label{eq:sys_lin_Y_t} \partial_t Y_t = -\alpha Y_t + T_W \left( G Y_t \right), \end{equation} where \begin{equation}\label{eq:def_G} G:=\partial_x F(X_\infty,\eta_\infty). \end{equation} \end{remark} The subcritical condition \eqref{eq:def_sub_exp} translates into the existence of a spectral gap for the linear dynamics \eqref{eq:sys_lin_Y_t}, which makes the stationary point $X_\infty$ linearly stable. More precisely, \begin{prop}\label{prop:operateur_L} Assume that the memory kernel $h$ is exponential \eqref{eq:def_sub_exp}. Define the linear operator \begin{equation} \label{eq:def_operator_L} \begin{array}{rrcl} \mathcal{L}:& L^2 (I) & \longrightarrow &L^2 (I) \\ & g & \longmapsto & \mathcal{L}(g)=-\alpha g + T_W( Gg). \end{array} \end{equation} Then under Hypotheses \ref{hyp_globales} and \ref{hyp:subcritical}, $\mathcal{L}$ generates a contraction semi-group on $L^2(I)$ $\left(e^{t\mathcal{L}}\right)_{t\geq 0}$ such that for any $g\in L^2(I)$ \begin{equation}\label{eq:contraction_sg} \Vert e^{t\mathcal{L}} g \Vert_2 \leq e^{-t\gamma} \Vert g \Vert_2, \end{equation} where \begin{equation}\label{eq:def_gamma} \gamma:=\alpha-r_\infty \left\Vert \partial_uF\right\Vert_\infty>0. \end{equation} \end{prop} \subsubsection{Long-term stability of the microscopic spatial profile} From now on, we place ourselves in the exponential case \eqref{eq:def_sub_exp}. We first state a convergence result of $X_N$ towards the macroscopic $X$ on a bounded time interval $[0, T]$. \begin{prop}\label{prop:finite_time} Let $T>0$. Under Hypotheses \ref{hyp_globales}, \ref{hyp:subcritical} and \ref{hyp:scenarios}, for any $\varepsilon>0$, $ \mathbb{ P}$-a.s. 
\begin{equation}\label{eq:finite_time} \mathbf{P}\left( \sup_{t\in [0,T]}\left\Vert X_N(t)-X_t \right\Vert_2\geq \varepsilon\right) \xrightarrow[N\to\infty]{}0. \end{equation} \end{prop} Note that Proposition~\ref{prop:finite_time} slightly generalises \cite[Prop. 3.17]{agathenerine2021multivariate} (see also \cite[Cor.~2]{CHEVALLIER20191} for a similar result) where it is proven that $\mathbf{E}\left[ \int_0^T\int_I \left\vert X_N(t)(x)-X_t(x)\right\vert dx~ dt\right]\xrightarrow[N\to\infty]{}0$ for any $T>0$. Here, we are more precise as we show uniform convergence of $X_N(t)$ in $L^2(I)$ instead of $L^1(I)$. We are now in position to state the main result of the paper: the proximity stated in Proposition~\ref{prop:finite_time} is not only valid on a bounded time interval, but propagates to arbitrary polynomial times in $N \rho_N$. \begin{thm}\label{thm:long_time} Choose some $t_{f}>0$ and $m\geq 1$. Then, under Hypotheses \ref{hyp_globales}, \ref{hyp:scenarios} and \ref{hyp:subcritical}, $ \mathbb{ P}$-a.s. for $ \varepsilon>0$ small enough, \begin{equation}\label{eq:long_time_pol} \mathbf{ P} \left( \sup_{ t\in \left[ t_{ \varepsilon}, (N \rho_{ N})^{ m} t_{ f}\right]} \left\Vert X_{ N}(t) - X_{ \infty}\right\Vert_2\geq \varepsilon\right) \xrightarrow[ N\to\infty]{}0. \end{equation} for some $ t_{\varepsilon}>0$ independent on $N$. \end{thm} Since $F$ is Lipschitz and $\lambda_{N,i}(t)= F(X_{N,i}(t-), \eta_t(x_i))$ by \eqref{eq:def_lambdaiN_intro}, it is straightforward to derive from Theorem~\ref{thm:long_time} a similar result for the profile of densities \begin{equation}\label{eq:def_lambdaN} \lambda_{N}(t)(x):=\sum_{i=1}^N \lambda_{N,i}(t) \mathbf{1}_{x\in\left(\frac{i-1}{N}, \frac{i}{N}\right]},\ x\in I. \end{equation} \begin{cor}\label{cor:long_time_lambda} Recall the definition of $\ell$ in \eqref{eq:def_l_lim}. 
Under the same set of hypotheses of Theorem \ref{thm:long_time} and with the same notation, \begin{equation}\label{eq:long_time_pol_lambda} \mathbf{ P} \left( \sup_{ t\in \left[ t_{ \varepsilon}, (N \rho_{ N})^{ m} t_{ f}\right]} \left\Vert \lambda_{N}(t) - \ell\right\Vert_2\geq \varepsilon\right) \xrightarrow[ N\to\infty]{}0. \end{equation} \end{cor} \subsection{Examples and extensions} We give here some illustrative examples of our main results. \subsubsection{Mean-field framework} To the best of the knowledge of the author, already in the simple homogeneous case of mean-field interaction, there exists no long-term stability result such as Theorem~\ref{thm:long_time}. We stress that our result may have an interest of its own in this case. Let us be more specific. When $\rho_N=W_N=1$ and $\mu_t(x)=\mu\geq 0$, the process introduced in Definition \ref{def:H2} reduces to the usual mean-field framework \cite{delattre2016}: \begin{equation}\label{eq:def_ZiN_CM} Z_{N,i}(t) = \int_0^t \int_0^\infty \mathbf{1}_{\{z\leq \lambda_{N}(s)\}} \pi_i(ds,dz) \end{equation} with $\lambda_{N}(t)$ defined by \begin{equation}\label{eq:def_lambdaiN_intro_CM} \lambda_{N}(t)= F(X_{N}(t-), \eta), \end{equation} where \begin{equation}\label{eq:def_UiN_CM} X_{N}(t)=\sum_{j=1}^N \dfrac{1}{N}\int_0^{t} h(t-s) dZ_{N,j}(s). \end{equation} In this simple case, the spatial framework is no longer useful (in particular the spatial profile defined in \eqref{eq:def_UN} is constant in $x$ so that the $L^2$ framework is not relevant, one has only to work in $\mathbb{R}$). The macroscopic intensity and synaptic current (respectively \eqref{eq:def_lambdabarre} and \eqref{eq:def_utx}) become \begin{equation}\label{eq:def_macro_CM} X_t:=\int_0^t h(t-s)\lambda_sds,\quad \lambda_t:=F(X_t,\eta). 
\end{equation} The main results of the paper translate then into \begin{thm}\label{thm:CM} Under Hypothesis \ref{hyp_globales} and when $\left\Vert \partial_x F \right\Vert_\infty \Vert h \Vert _1 < 1$, there exists a unique $X_\infty\in \mathbb{R}_+$ solution to $X_\infty=\Vert h \Vert_1 F\left(X_\infty,\eta\right)$, and $\left(X_t\right)_{t\geq 0}$ converges when $t\to\infty$ towards $X_\infty$. Respectively, $(\lambda_t)_{t\geq 0}$ converges towards $\ell$, the unique solution to $\ell=F\left(\Vert h \Vert_1 \ell,\eta\right)$. Moreover, under the same hypotheses, in the exponential case \eqref{eq:def_exponential}, for any $t_{f}>0$ and $m\geq 1$, $ \mathbb{ P}$-a.s. for $ \varepsilon>0$ small enough, $\displaystyle \mathbf{ P} \left( \sup_{ t\in \left[ t_{ \varepsilon}, N ^{ m} t_{ f}\right]} \left\vert X_{ N}(t) - X_{ \infty}\right\vert\geq \varepsilon\right)$ and $\mathbf{ P} \left( \sup_{ t\in \left[ t_{ \varepsilon}, N ^{ m} t_{ f}\right]} \left\vert \lambda_{ N}(t) - \ell\right\vert\geq \varepsilon\right)$ tend to $0$ as $N\to\infty$ for some $ t_{\varepsilon}>0$ independent on $N$. \end{thm} \begin{remark}\label{rem:result_CM} The previous result applies in particular to the linear case where $\eta= \mu$ and $F(x,\eta)=\mu+x$. We have then that $\ell=\dfrac{\mu}{1-\Vert h \Vert_1}$ in this case, as in \cite{delattre2016}. \end{remark} \subsubsection{Erd\H{o}s-R\'enyi graphs} An immediate extension of the last mean-field case concerns the case of homogeneous Erd\H{o}s-R\'enyi graphs: choose $W_N(x,y)=\rho_N$ for all $x,y\in I$. The results of our paper are valid under the dilution Hypothesis \ref{hyp:scenarios}. It is however likely that these dilution conditions are not optimal (compare with the result of \cite{Coppini2022} with the condition $N\rho_N\to\infty$ in the diffusion case, but a difficulty here is that we deal with a multiplicative noise whereas it is essentially additive in \cite{Coppini2022}). 
\subsubsection{Examples in the inhomogeneous case} As already mentioned in Hypothesis \ref{hyp_globales}, the results are valid for any continuous $W$; interesting examples include $W(x,y)=1-\max(x,y)$, $W(x,y)= 1-xy$, see \cite{borgs2011,Borgs2018}. Note also that we do not suppose any symmetry on $W$. Another rich class of examples concerns the \emph{Expected Degree Distribution} model \cite{Chung2002,Ouadah2019} where $W(x,y)=f(x)g(y)$ for any continuous functions $f$ and $g$ on $I$. The specificity of such a class is that we have an explicit formulation of $r_\infty$, that is $r_\infty= \int_I f(x)g(x)dx$ when $\int_I g =1$. In the linear case, we obtain an explicit formula for $\lambda_t$ in \cite[Example 4.3]{agathenerine2021multivariate}. \subsubsection{Extensions}\label{S:extension} It is apparent from the proofs below that one can weaken the hypothesis of continuity of $W$. Under the hypothesis that $W$ is bounded, Proposition \ref{prop:exis_lambda_barre} remains true when $\mathcal{C}_b([0,T]\times I)$ is replaced by $\mathcal{C}\left( [0,T], L^\infty(I)\right)$ (continuity of $\lambda_t$ and $X_t$ in $x$ may not be satisfied). Supposing further that there exists a partition of $I$ into $p$ intervals $I=\sqcup_{k=1,\cdots,p} C_k$ such that for all $\epsilon>0$, there exists $\eta>0$ such that $\int_I \left\vert W(x,y)-W(x',y)\right\vert dy <\epsilon$ when $\vert x-x'\vert<\eta$ and $x,x'\in C_k$, then for every $k$, $\lambda_{\vert [0,T]\times\mathring{C_k}} $ and $X_{\vert [0,T]\times\mathring{C_k}}$ are both continuous. When $p=1$, both $\lambda$ and $X$ are continuous on $[0,T]\times I$. 
Concerning Theorem \ref{thm:large_time_cvg_u_t}, defining for $k\in \{1,2\}$: \begin{equation}\label{eq:def_Rnk} R^W_{N,k}:=\dfrac{1}{N} \sum_{i,j=1}^N \int_{B_{N,j}} \left\vert W(x_i,x_j)-W(x_i,y)\right\vert^k dy, \end{equation} and \begin{equation}\label{eq:def_Sn} S^W_{N}:=\sum_{i=1}^N \int_{B_{N,i}} \left(\int_I \left\vert W(x_i,y) - W(x,y)\right\vert^2 dy\right)dx, \end{equation} Theorem \ref{thm:long_time} remains true when $R^W_{N,1},R^W_{N,2}, S^W_{N} \xrightarrow[N\to\infty]{}0$, see Lemmas \ref{lem:drift_term_phi_N1}, \ref{lem:drift_term_phi_N2} and \ref{lem:drift_term_phi_N3}. These particular conditions are met in the following cases (details of the computation are left to the reader) \begin{itemize} \item P-nearest neighbor model \cite{Omelchenko2012}: $W(x,y)=\mathbf{1}_{d_{\mathcal{S}_1}(x,y)< r}$ for any $(x,y)\in I^2$ for some fixed $r\in (0,\frac{1}{2})$, with $d_{\mathcal{S}_1}(x,y)=\min(\vert x-y \vert,1-\vert x-y \vert)$. \item Stochastic block model \cite{holland1983,Ditlevsen2017}: it corresponds to considering $p$ communities $(C_k)_{1\leq k \leq p}$. An element of the community $C_l$ communicates with an element of the community $C_k$ with probability $p_{kl}$. This corresponds to the choice of interaction kernel $W(x,y)=\sum_{k,l}p_{kl}\mathbf{1}_{x\in C_k, y\in C_l}$. \end{itemize} \subsection{Link with the literature}\label{S:litterature} Several previous works have complemented the propagation of chaos result mentioned in \eqref{eq:chaos_generic} in various situations: Central Limit Theorems (CLT) have been obtained in \cite{delattre2016,Ditlevsen2017} for homogeneous mean-field Hawkes processes (when both time and $N$ go to infinity) or with age-dependence in \cite{Chevallier2017}. One should also mention the functional fluctuation result recently obtained in \cite{Heesen2021}, also in a pure mean-field setting. 
A result closer to our case with spatial extension is \cite{ChevallierOst2020}, where a functional CLT is obtained for the spatial profile $X_{ N}$ around its limit. Some insights of the necessity of considering stochastic versions of the NFE \eqref{eq:NFE} as second order approximations of the spatial profile are in particular given in \cite{ChevallierOst2020}. Note here that all of these works provide approximation results of quantities such that $ \lambda_{ N}$ or $X_{ N}$ that are either valid on a bounded time interval $[0, T]$ or under strict growth condition on $T$ (see in particular the condition $ \frac{ T}{ N} \to 0$ for the CLT in \cite{Ditlevsen2017}), whereas we are here concerned with time-scales that grow polynomially with $N$. The analysis of mean-field interacting processes on long time scales has a significant history in the case of interacting diffusions. The important issue of uniform propagation of chaos has been especially studied mostly in reversible situations (see e.g. the case of granular media equation \cite{Bolley:2013}) but also more recently in some irreversible situations (see \cite{Colombani2022}). There has been in particular a growing interest in the long-time analysis of phase oscillators (see \cite{giacomin_poquet2015} and references therein for a comprehensive review on the subject). We do not aim here to be exhaustive, but as the techniques used in this work present some formal similarities, let us nonetheless comment on the analysis of the simplest situation, i.e. the Kuramoto model. One is here interested in the longtime behavior of the empirical measure $ \mu_{ N, t}:= \frac{ 1}{ N} \sum_{ i=1}^{ N} \delta_{ \theta_{ i, t}}$ of the system of interacting diffusions $(\theta_{ 1}, \ldots, \theta_{ N})$ solving the system of coupled SDEs $ {\rm d} \theta_{ i,t}= - \frac{ K}{ N} \sum_{ j=1}^{ N} \sin( \theta_{ i,t}- \theta_{ j,t}){\rm d} t + {\rm d}B_{ i, t}$. 
Standard propagation of chaos techniques show that $ \mu_{ N}$ converges weakly on a bounded time interval $[0, T]$ to the solution $ \mu_{ t}$ to the nonlinear Fokker-Planck (NFP) equation $\partial_t \mu_t\, =\, \frac{1}{2} \partial_{ \theta}^{ 2} \mu_t+K\partial_\theta \Big( \mu_t(\sin * \mu_t)\Big)$. The simplicity of the Kuramoto model lies in the fact that one can easily prove the existence of a phase transition for this model: when $K\leq 1$, $ \mu\equiv \frac{ 1}{ 2\pi}$ is the only (stable) stationary point of the previous NFP (subcritical case), whereas it coexists with a stable circle of synchronised profiles when $K>1$ (supercritical case). A series of papers have analysed the longtime behavior of the empirical measure $\mu_N$ of the Kuramoto model (and extensions) in both the subcritical and supercritical cases (see in particular \cite{bertini14,lucon_poquet2017,giacomin2012,Coppini2022} and references therein). The main arguments of the mentioned papers lie in a careful analysis of two contradictory phenomena that arise on a long-time scale: the stability of the deterministic dynamics around stationary points (that forces $ \mu_{ N}$ to remain in a small neighborhood of these points) and the presence of noise in the microscopic system (which makes $ \mu_{ N}$ diffuse around these points). In particular, the work that is somehow formally closest to the present article is \cite{Coppini2022}, where the long-time stability of $ \mu_{ N}$ is analysed in both sub and supercritical cases for Kuramoto oscillators interacting on an Erd\H{o}s-R\'enyi graph. We are here (at least formally) in a similar situation to the subcritical case of \cite{Coppini2022}: the deterministic dynamics of the spatial profile $X_{ N}$ (given by \eqref{eq:def_UN}) has a unique stationary point which possesses sufficient stability properties. 
The point of the analysis relies then on a time discretization and some careful control on the diffusive influence of noise that competes with the deterministic dynamics. The main difference (and present difficulty in the analysis) with the diffusion case in \cite{Coppini2022} is that our noise (Poissonnian rather than Brownian) is multiplicative (whereas it is essentially additive in \cite{Coppini2022}). This explains in particular the stronger dilution conditions that we require in Hypothesis~\ref{hyp:scenarios} (compared to the optimal $N \rho_{ N}\to \infty$ in \cite{Coppini2022}) and also the fact that we only reach polynomial time scales (compared to the sub-exponential scale in \cite{Coppini2022}). There is however every reason to believe that the stability result of Theorem~\ref{thm:long_time} would remain valid up to this sub-exponential time scale. Note here that we deal directly with the control of the Poisson noise. Another possibility would have been to use some Brownian approximation of the dynamics of $X_{ N}$. Some results in this direction have been initiated in \cite{Ditlevsen2017} for spatially-extended Hawkes processes exhibiting oscillatory behaviors: some diffusive approximation of the dynamics of the (equivalent of) the spatial profile is provided (see \cite[Section~5]{Ditlevsen2017}). Note however that this approximation is based on the comparison of the corresponding semigroups and is not uniform in time. Hence, it is unclear how one could exploit these techniques for our case. Some stronger (pathwise) approximations between Hawkes and Brownian dynamics have been further proposed in \cite{chevallierMT2021}, based on Koml\'os, Major and Tusn\'ady (KMT) coupling techniques (\cite{ethier_kurtz1986}, see also \cite{Prodhomme_arxiv2020} for similar techniques applied to finite dimensional Markov chains). However, this approximation is again not uniform in time so that applying this coupling to our present case is unclear. 
Our proof is more direct and does not rely on such Brownian coupling. To the author’s knowledge, this is the first result on large time stability of Hawkes processes (not mentioning the issue of the random graph of interaction, we believe that our results remain also relevant in the pure mean-field case, see Theorem~\ref{thm:CM}). \subsection{Strategy of proof and organization of the paper} Section~\ref{S:proof_det_ut} is devoted to proving the convergence result as $t\to\infty$ of Theorem \ref{thm:large_time_cvg_u_t}. This in particular requires some spectral estimates on the operator $\mathcal{L}$ defined in Proposition~\ref{prop:operateur_L} that are gathered in Section~\ref{S:proofs_operator_L}. The main lines of proof for Theorem \ref{thm:long_time} are given in Section~\ref{S:proof_largetime}. The strategy of proof is sketched here: \begin{enumerate} \item The starting point of the analysis is a semimartingale decomposition of $Y_N:= X_N- X$, detailed in Section~\ref{S:mild_formulation}. The point is to decompose the dynamics of $Y_N$ in terms of, at first order, the linear dynamics \eqref{eq:sys_lin_Y_t} governing the behavior of the deterministic profile $X$, modulo some drift terms coming from the graph and its mean-field approximation, some noise term and finally some quadratic remaining error coming from the nonlinearity of $F$. \item A careful control on each of these terms in the semimartingale expansion on a bounded time interval is given in the remainder of Section~\ref{S:mild_formulation}. The proofs of these estimates are given respectively in Section~\ref{S:proof_NP} (for the noise term) and Section~\ref{S:proof_drift} (for the drift term). \item The rest of Section~\ref{S:proof_largetime} is devoted to the proof of Theorem~\ref{thm:long_time}, see Section~\ref{S:proof_mainTh}. 
The first point is that for any given $\varepsilon>0$, one has to wait a deterministic time $t_\varepsilon>0$, so that the deterministic profile $X_t$ reaches an $\varepsilon$-neighborhood of $X_\infty$. It is easy to see from the spectral gap estimate \eqref{eq:contraction_sg} that this $t_\varepsilon$ is actually of order $\frac{-\log(\varepsilon)}{\gamma}$. Then, using Proposition~\ref{prop:finite_time}, the microscopic process $X_N$ is itself $\varepsilon$-close to $X_\infty$ with high-probability. \item The previous argument is the starting point of an iterative procedure that works as follows: the point is to see that provided $X_N$ is initially close to $X_\infty$, it will remain close to $X_\infty$ on some $[0, T]$ for some sufficiently large deterministic $T>0$. The key argument is that on a bounded time interval, the deterministic linear dynamics dominates upon the contribution of the noise, so that one has only to wait some sufficiently large $T$ so that the deterministic dynamics prevails upon the other contributions. \item The rest of the proof consists in an iterative procedure from the previous argument, taking advantage of the Markovian structure of the dynamics of $X_N$. The time horizon at which one can pursue this recursion is controlled by moment estimates on the noise, proven in Section~\ref{S:proof_NP}. \end{enumerate} The rest of the paper is organised as follows: Section~\ref{S:finite} collects the proofs for the finite time behavior of Proposition~\ref{prop:finite_time} whereas some technical estimates are gathered in the appendix. \section{Asymptotic behavior of $(X_t)$}\label{S:proof_det_ut} This section is related to the proof of Theorem~\ref{thm:large_time_cvg_u_t}. \subsection{Estimates on the operator $\mathcal{L}$} \label{S:proofs_operator_L} \begin{proof}[Proof of Proposition \ref{prop:proprietes_TW}] The continuity and compactness of $T_W$ come from the boundedness of $W$. 
The structure of the spectrum of $T_W$ is a consequence of the spectral theorem for compact operators. The equality between the spectral radii is postponed to Lemma \ref{lem:op_radius} where a more general result is stated (see also Proposition 4.7 of \cite{agathenerine2021multivariate} for a similar result). \end{proof} \begin{proof}[Proof of Proposition \ref{prop:operateur_L}] Let us introduce the operator \begin{equation} \label{eq:def_operator_U} \begin{array}{rrcl} \mathcal{U}:& L^2 (I) & \longrightarrow &L^2 (I) \\ & g & \longmapsto & \mathcal{U}(g)=T_W( Gg), \end{array} \end{equation} we have then $\mathcal{L}=-\alpha Id + \mathcal{U}$. By Hypothesis \ref{hyp_globales}, $G$ is bounded. Then, for any $g\in L^2(I)$ using Cauchy-Schwarz inequality, $\Vert\mathcal{U}(g)\Vert_2^2\leq \Vert W \Vert_2^2 \Vert G \Vert_\infty \Vert g \Vert_2^2$. The operator $\mathcal{U}$ is then compact and thus has a discrete spectrum. Moreover, $r_2(\mathcal{U})=r_\infty(\mathcal{U})$, see Lemma \ref{lem:op_radius}, and $r_\infty(\mathcal{U}) \leq r_\infty(T_W) \Vert G \Vert_\infty$ as for any $g\in L^\infty$ and $x\in I$, $\vert\mathcal{U} g(x)\vert \leq \Vert T_W \Vert_\infty \Vert Gg \Vert_\infty \leq \Vert T_W \Vert_\infty \Vert G \Vert_\infty \Vert g \Vert_\infty$. Then $\mathcal{L}$ also has a discrete spectrum, which is the same as $\mathcal{U}$ but shifted by $\alpha$. Since $r_2(\mathcal{U})=r_\infty(\mathcal{U})$ (see Lemma \ref{lem:op_radius}), for any $\mu\in \sigma(\mathcal{L})\setminus\{0\}$, $ \vert \mu + \alpha \vert \leq r_\infty(\mathcal{U}) $ thus $Re(\mu)\leq -\alpha + r_\infty(\mathcal{U})\leq -\alpha + r_\infty \Vert \partial_uF\Vert_\infty<0$ by \eqref{eq:def_subcritical}. The estimate \eqref{eq:contraction_sg} follows then from functional analysis (see e.g. Theorem 3.1 of \cite{Pazy1974}). 
\end{proof} \subsection{About the large time behavior of $X_t$} \begin{proof}[Proof of Theorem \ref{thm:large_time_cvg_u_t}] We prove that \begin{itemize} \item there exists a unique function $\ell:I\mapsto \mathbb{R}^+$ solution of \eqref{eq:def_l_lim}, continuous and bounded on $I$, and that \item $\left(\lambda_t\right)_{t\geq 0}$ converges uniformly when $t\to\infty$ towards $\ell$. \end{itemize} It gives then that $X_\infty:=\Vert h\Vert_1 T_W\ell$ is the unique solution of \eqref{eq:def_u_infty}. Then, as $X_t(x)=\int_{I} W(x,y)\int_0^t h(t-s) \lambda_s(y)ds~ dy$, as $\left(\lambda_t\right)$ is uniformly bounded, and as $h$ is integrable and $\lambda_t\to \ell$ uniformly, we conclude by dominated convergence that uniformly on $y$, $\int_0^t h(t-s) \lambda_s(y)ds \xrightarrow[t\to\infty]{} \Vert h \Vert_1 \ell(y)$. As $T_W$ is continuous, the result follows: $X_t$ converges uniformly towards $X_\infty$. We show first that $(\lambda_t)$ is uniformly bounded. Let $\overline{\lambda}_t(x)=\sup_{s\in [0,t]} \lambda_s(x)$, we have then with \eqref{eq:def_lambdabarre}, for $s\in[0, t]$ \begin{align*} \lambda_s(x)&\leq F(0,0) + \Vert F \Vert_L \vert\eta_s(x)\vert+\Vert \partial_x F \Vert_\infty\int_{I} W(x,y)\int_0^s h(s-u) \lambda_u(y)dudy \\ &\leq F(0,0) + \Vert F \Vert_L \sup_{s,x}\vert\eta_s(x)\vert + \Vert \partial_x F \Vert_\infty \Vert h \Vert_1 T_W \overline{\lambda}_t(x), \end{align*} hence $\overline{\lambda}_t(x)\leq C_{F,\eta} + \Vert \partial_x F \Vert_\infty \Vert h \Vert_1 T_W \overline{\lambda}_t(x)$. 
An immediate iteration gives then $\overline{\lambda}_t(x) \leq C_{F,\eta,n_0,h} + \Vert \partial_x F \Vert_\infty^{n_0} \Vert h \Vert_1^{n_0} \left\vert T_W^{n_0} \overline{\lambda}_t(x) \right\vert$, so that, by \eqref{eq:def_subcritical} and choosing $n_0$ sufficiently large such that $\Vert \partial_x F \Vert_\infty^{n_0} \Vert h \Vert_1^{n_0} \Vert T_W \Vert^{n_0}<1$, we obtain that $ \Vert \overline{\lambda}_t \Vert_\infty <C$, where $C$ is independent of $t$. Passing to the limit as $t\to\infty$, this implies that $\left(\lambda_t\right)_{t>0}$ is then uniformly bounded, i.e. $\sup_{t\geq 0}\sup_{x\in I} \left\vert \lambda_t(x)\right\vert=:\Vert \lambda\Vert_\infty <\infty$. We show next that $\left(\lambda_t\right)$ converges pointwise. We start by studying the limit superior of $\lambda_t$, denoted by $\overline{\ell}(x):= \limsup_{t\to\infty} \lambda_t(x) = \inf_{r>0}\sup_{t>r} \lambda_t(x) =: \inf_{r>0} \Lambda(r,x)$. Then for any $r>0$ and $t>r$: \begin{align*} \lambda_t(x) &= F\left( \int_{I} W(x,y)\int_0^r h(t-s) \lambda_s(y)ds~ dy + \int_{I} W(x,y)\int_r^t h(t-s) \lambda_s(y)ds~ dy, \eta_t(x) \right) \\ &\leq F\left( \int_{I} W(x,y)\int_0^r h(t-s) \lambda_s(y)ds~ dy + \int_{I} W(x,y)\Lambda(r,y)\int_r^t h(t-s) ds~ dy, \eta_t(x) \right) \end{align*} by monotonicity of $F$ in the first variable and by positivity of $W$ and $h$. As $\int_r^t h(t-s)ds\leq \Vert h \Vert_1$, it gives $$\lambda_t(x) \leq F\left( \int_{I} W(x,y)\int_0^r h(t-s) \lambda_s(y)ds~ dy + \Vert h \Vert_1 \int_{I} W(x,y) \Lambda(r,y)dy, \eta_t(x) \right),$$ and as $h(t)\to 0$, by dominated convergence $\int_{I} W(x,y)\int_0^r h(t-s) \lambda_s(y)ds~ dy \xrightarrow[t\to\infty]{}0$ and by continuity and monotonicity of $F$, we obtain \begin{equation} \label{eq:limsup_l} \overline{\ell}(x) \leq F\left( \Vert h \Vert_1 \left(T_W\overline{\ell}\right)(x),\eta_\infty(x)\right). 
\end{equation} Note that $\Vert \overline{\ell}\Vert_\infty \leq \Vert \lambda \Vert_\infty<\infty$, by the first step of this proof. Denote in the same way $\underline{\ell}(x):= \liminf_{t\to\infty} \lambda_t(x) = \sup_{r>0}\inf_{t>r} \lambda_t(x) =: \sup_{r>0} v(r,x)$, for any $t>0$ we have by monotonicity of $F$ in the first variable: \begin{align*} \lambda_t(x) &= F\left( \int_0^{\frac{t}{2}}\int_{I} W(x,y)h(t-s) \lambda_s(y) dyds + \int_{\frac{t}{2}}^t\int_{I} W(x,y)h(t-s) \lambda_s(y) dyds, \eta_t(x) \right)\\ &\geq F\left( \int_{\frac{t}{2}}^t\int_{I} W(x,y)h(t-s) v\left(\frac{t}{2}, y\right) dyds, \eta_t(x) \right)\\ &= F\left(\int_{0}^{\frac{t}{2}}h(u)du \int_{I} W(x,y)v\left(\frac{t}{2}, y\right) dy, \eta_t(x) \right). \end{align*} Taking $\liminf_{t\to\infty}$ on both sides, by monotone convergence, we obtain \begin{equation} \label{eq:liminf_l} \underline{\ell}(x) \geq F\left( \Vert h \Vert_1 \left(T_W \underline{\ell}\right)(x), \eta_\infty(x) \right). \end{equation} Combining \eqref{eq:limsup_l} and \eqref{eq:liminf_l}, setting $H: l \in L^\infty \mapsto F\left( \Vert h \Vert_1 T_W l, \eta_\infty\right)\in L^{\infty}$, we have shown \begin{equation} \label{eq:control_Hl} H \underline{\ell} \leq \underline{\ell} \leq \overline{\ell} \leq H\overline{\ell}. \end{equation} For any $l$ and $l'$ in $L^\infty(I)$ and any $x\in I$, we have \begin{align*} \vert Hl(x) - Hl'(x) \vert &\leq \left| F\left( \Vert h \Vert_1 \left(T_W l\right)(x), \eta_\infty(x)\right) - F\left( \Vert h \Vert_1 \left(T_W l'\right)(x), \eta_\infty(x)\right) \right|\\ &\leq \left\Vert \partial_x F \right\Vert_\infty \Vert h \Vert_1 \left|\left( T_W ( l - l') \right) (x)\right|. 
\end{align*} By iteration we show that $\Vert H^{n_0}l - H^{n_0}l' \Vert_\infty \leq \left\Vert \partial_u F \right\Vert_\infty^{n_0}\Vert h \Vert _1^{n_0} \Vert T_W^{n_0} \Vert \Vert l - l' \Vert_\infty$, so that, choosing again $n_0$ sufficiently large, $H^{n_0}$ is a contraction mapping by \eqref{eq:def_subcritical}. Hence, by \eqref{eq:control_Hl}, one has necessarily that for all $x\in I$ $\underline{\ell}(x)=\overline{\ell}(x)<+\infty$ thus $(\lambda_t)$ converges pointwise towards $\ell=\underline{\ell}=\overline{\ell}$ the unique fixed point of $H$ which satisfies \eqref{eq:def_l_lim}. We show now that the family $\left(\lambda_t\right)_{t\geq 0}$ is equicontinuous so that the pointwise convergence will imply uniform convergence on the compact set $I$. For any $(x,y)\in I^2$ and $t\geq 0$, we have \begin{align*} \left\vert \lambda_t(x) - \lambda_t(y)\right\vert&=\left\vert F(X_t(x),\eta_t(x))-F(X_t(y),\eta_t(y))\right\vert\\ &\leq \Vert F \Vert_L \left( \left\vert X_t(x)-X_t(y)\right\vert + \left\vert \eta_t(x)-\eta_t(y)\right\vert\right). \end{align*} With \eqref{eq:def_delta_s}, we have \begin{align*} \left\vert \eta_t(x)-\eta_t(y)\right\vert&\leq \left\vert \eta_t(x)-\eta_\infty(x)\right\vert+\left\vert \eta_\infty(x)-\eta_\infty(y)\right\vert+\left\vert \eta_\infty(y)-\eta_t(y)\right\vert\\ &\leq 2\delta_t + \Vert \eta_\infty\Vert_L \vert x-y\vert, \end{align*} and as $\lambda$ is bounded, we have \begin{align}\label{eq:reg_Xt_aux} \left\vert X_t(x)-X_t(y)\right\vert &= \left\vert \int_I \left(W(x,z)-W(y,z)\right)\int_0^t h(t-s)\lambda_s(z)dsdz\right\vert\notag\\ &\leq \Vert \lambda \Vert_\infty \Vert h \Vert_1 \int_I \left\vert W(x,z)-W(y,z)\right\vert dz. \end{align} Then $\vert \lambda_t(x)-\lambda_t(y)\vert\leq C_{F,\lambda,h,W}\left(\delta_t+\vert x-y\vert+\int_I \left\vert W(x,z)-W(y,z)\right\vert dz\right)$. 
Fix $\varepsilon >0$, with \eqref{eq:def_delta_s}, one can find $T$ such that $ C_{F,\lambda,h,W}\delta_t\leq \dfrac{\varepsilon}{2}$ for any $t\geq T$, and as $W$ is uniformly continuous on $I^2$, one can find $\eta>0$ such that $C_{F,\lambda,h,W}\left(\vert x-y\vert+\int_I \left\vert W(x,z)-W(y,z)\right\vert dz\right)\leq \dfrac{\varepsilon}{2}$ when $\vert x-y\vert\leq \eta$. We can divide $[0,1]$ into intervals $[z_k,z_{k+1}]$ such that for any $k$, $ z_{k+1}-z_k \leq \eta$. Then, for any $x\in [0,1]$, one can find $z_k$ such that $\vert z_k-x\vert\leq \eta$, and $\vert \lambda_t(x)-\ell(x)\vert\leq \vert \lambda_t(x)-\lambda_t(z_k)\vert + \vert \lambda_t(z_k)-\ell(z_k)\vert+\vert\ell(z_k)-\ell(x)\vert$. By pointwise convergence, $\vert\lambda_t(z_k)-\ell(z_k)\vert\leq\varepsilon$ for $t$ large enough (but independent of the choice of $x$), and $\vert \ell(z_k)-\ell(x)\vert\leq \varepsilon$ by taking the limit when $t\to\infty$ in $\vert\lambda_t(z_k)-\lambda_t(x)\vert\leq \varepsilon$. It gives then $\vert \lambda_t(x)-\ell(x)\vert\leq 3\varepsilon$ hence $\sup_{x\in I}\vert \lambda_t(x)-\ell(x)\vert \xrightarrow[t\to\infty]{}0$, i.e. $\left(\lambda_t\right)$ converges uniformly towards $\ell$. Similarly to \eqref{eq:reg_Xt_aux}, for any $x,x'\in I$, $$\left\vert X_\infty(x) - X_\infty(x') \right\vert \leq \Vert h \Vert_1 \Vert \ell \Vert_\infty \int_I \left\vert W(x,y) - W(x',y) \right\vert dy$$ which gives, as $W$ is uniformly continuous, the continuity of $X_\infty$. \end{proof} \section{Large time behavior of $U_N(t)$}\label{S:proof_largetime} The aim of the present section is to prove Theorem~\ref{thm:long_time}. To study the behavior of $\left\Vert X_N(t) - X_\infty\right\Vert_2$, let \begin{equation}\label{eq:def_Y_N} Y_N:=X_N-X_\infty. \end{equation} The first step is to write the semimartingale decomposition of $Y_N$, written in a mild form (see Section~\ref{S:mild_formulation}). 
The proper control on the drift and noise terms are given in Propositions~\ref{prop:noise_perturbation} and~\ref{prop:drift_term}. In Section~\ref{S:proof_mainTh}, we give the proof of Theorem \ref{thm:long_time}, based in particular on the convergence on a bounded time interval in Proposition~\ref{prop:finite_time}. \subsection{Mild formulation} \label{S:mild_formulation} \begin{prop}\label{prop:termes_sys_micros} The process $\left(Y_N(t)\right)_{t\geq 0}$ satisfies the following semimartingale decomposition in $D([0,T],L^2(I))$, written in a mild form: for any $0\leq t_0\leq t$ \begin{equation}\label{eq:def_Y_N_termes} Y_N(t)=e^{(t-t_0)\mathcal{L}}Y_N(t_0) + \phi_N(t_0,t) + \zeta_N(t_0,t) \end{equation} where: \begin{equation}\label{eq:def_phi_N} \phi_N(t_0,t)=\int_{t_0}^t e^{(t-s)\mathcal{L}}r_N(s)ds \end{equation} with \begin{multline}\label{eq:def_r_N} r_N(t)(x)=T_W\left( g_N(t)\right)(x)+\\ \sum_{i=1}^N \left( \dfrac{1}{N\rho_N} \sum_{j=1}^N \xi_{ij}^{(N)}F(X_{N,j }(t),\eta_t(x_j)) - \int_I W(x,y) F(X_N(t,y),\eta_t(y))dy \right)\mathbf{1}_{B_{N,i}}(x), \end{multline} \begin{multline}\label{eq:def_gN(s)} g_N(t)(y):= \int_0^1 (1-r) \partial^2_x F\left( X_\infty(y)+rY_N(t)(y),(1-r)\eta_\infty(y)+r\eta_t(y)\right) Y_N(t)(y)^2 dr+\\ \int_0^1 (1-r)\left(\eta_t(y)-\eta_\infty(y)\right)\cdot\partial^2_\eta F\left( X_\infty(y)+r Y_N(t)(y),(1-r)\eta_\infty(y)+r\eta_t(y)\right) \left(\eta_t(y)-\eta_\infty(y)\right) dr\\ +\int_0^1 2(1-r) \partial^2_{x,\eta}F\left(X_\infty(y)+rY_N(t)(y),(1-r)\eta_\infty(y)+r\eta_t(y)\right)\cdot\left(\eta_t(y)-\eta_\infty(y)\right)Y_N(t)(y)dr\\ +\partial_\eta F\left(X_\infty(y),\eta_\infty(y)\right)\cdot \left(\eta_t(y) - \eta_\infty(y)\right), \end{multline} and \begin{equation}\label{eq:def_zeta_N} \zeta_N(t_0,t)=\int_{t_0}^t e^{(t-s)\mathcal{L}}dM_N(s) \end{equation} with \begin{equation}\label{eq:def_M_N} M_N(t)= \sum_{i=1}^N \sum_{j=1}^N \dfrac{w_{ij}}{N} \left( Z_{N,j}(t) - \int_0^t\lambda_{N,j}(s)ds\right) \mathbf{1}_{B_{N,i}}. 
\end{equation} \end{prop} $\phi_N$ is the drift term and $\zeta_N$ is the noise term coming from the jumps of the process $X_N$. \begin{proof}[Proof of Proposition~\ref{prop:termes_sys_micros}] From \eqref{eq:def_UiN} and \eqref{eq:def_UN}, we obtain that $X_N$ verifies \begin{equation}\label{eq:dUN} dX_N(t)=-\alpha X_N(t)dt + \sum_{i=1}^N \sum_{j=1}^N \dfrac{w_{ij}}{N} dZ_{N,j}(t)\mathbf{1}_{ B_{N,i}}. \end{equation} The centered noise $M_N$ defined in \eqref{eq:def_M_N} verifies $$\displaystyle dM_{N}(t):= \sum_{i=1}^N \sum_{j=1}^N \dfrac{w_{ij}}{N} \left( dZ_{N,j}(t) - F(X_{N,j}(t),\eta_t(x_j))dt\right) \mathbf{1}_{B_{N,i}},$$ and is a martingale in $L^2(I)$. Thus recalling the definition of $X_\infty$ in \eqref{eq:def_u_infty} and by inserting the term $\sum_{i=1}^N \sum_{j=1}^N \dfrac{w_{ij}}{N}F(X_{N,j}(t),\eta_t(x_j))dt\mathbf{1}_{ B_{N,i}}$ in \eqref{eq:dUN}, we obtain \begin{equation}\label{eq:partial_incomplet} d Y_N(t) = -\alpha Y_N(t) + d M_{N}(t) + \sum_{i=1}^N \left( \sum_{j=1}^N \dfrac{w_{ij}}{N} F(X_{N,j}(t),\eta_t(x_j))\right)\mathbf{1}_{B_{N,i}}dt - T_W F(X_\infty,\eta_\infty)dt. \end{equation} A Taylor's expansion gives $$F(X_N(t,y),\eta_t(y)) - F(X_\infty(y),\eta_\infty(y))=\partial_x F\left(X_\infty(y),\eta_\infty(y)\right) \left( X_N(t,y) - X_\infty(y)\right) + g_N(t)(y),$$ with $g_N$ given in \eqref{eq:def_gN(s)}. Hence, we have with $G$ defined in \eqref{eq:def_G} $$-T_W F(X_\infty,\eta_\infty)(x)=-\int_I W(x,y)F(X_N(t,y),\eta_t(y))dy + T_W(GY_N(t)) + T_Wg_N(t)(x),$$ hence coming back to \eqref{eq:partial_incomplet} and recognizing the operator $\mathcal{L}$ \eqref{eq:def_operator_L} \begin{multline*} d Y_N(t) = \mathcal{L} Y_N(t) + d M_{N}(t) + \sum_{i=1}^N \left( \sum_{j=1}^N \dfrac{w_{ij}}{N} F(X_{N,j}(t),\eta_t(x_j))\right)\mathbf{1}_{B_{N,i}}dt\\ - T_W F(X_N(t,\cdot),\eta_t(\cdot))dt+ T_Wg_N(t). 
\end{multline*} We recognize $r_N$ defined in \eqref{eq:def_r_N}, and obtain exactly \begin{equation}\label{eq:def_Y_N_diff} dY_N(t) = \mathcal{L}Y_N(t)dt + r_N(t)dt + dM_N(t). \end{equation} Then the mild formulation \eqref{eq:def_Y_N_termes} is a direct consequence of Lemma 3.2 of \cite{Zhu2017}: the unique strong solution to \eqref{eq:def_Y_N_diff} is indeed given by \eqref{eq:def_Y_N_termes}. \end{proof} \begin{prop}[Noise perturbation]\label{prop:noise_perturbation} Let $m\geq 1$ and $T> t_0\geq 0$. Under Hypotheses \ref{hyp_globales} and \ref{hyp:scenarios}, there exists a constant $C=C(T,m,F,\eta_0)>0$ such that $\mathbb{P}$-almost surely for $N$ large enough: $$\mathbf{E}\left[\sup_{s\leq T} \Vert \zeta_N(t_0,s) \Vert_2^{2m}\right] \leq \dfrac{C}{\left(N\rho_N\right)^m}.$$ \end{prop} The proof is postponed to Section \ref{S:proof_NP}. \begin{prop}[Drift term]\label{prop:drift_term} Under Hypothesis \ref{hyp_globales}, for any $t\geq t_0>0$, $\mathbb{P}$-almost surely if $N$ is large enough, \begin{align}\label{eq:control_drift} \Vert \phi_N(t_0,t)\Vert_2 &\leq C_\text{drift} \left( \int_{t_0}^t e^{-(t-s)\gamma} \Vert Y_N(s) \Vert_2^2ds + G_{N}+\int_{t_0}^t e^{-\gamma(t-s)} \left(\delta_s^2 +\delta_s\right) ds\right), \end{align} where $C_\text{drift}=C_{W,F,\alpha}$, $\gamma$ is defined in \eqref{eq:def_gamma}, $\delta_s$ is defined in \eqref{eq:def_delta_s} and $G_N=G_N(\xi)$ is an explicit quantity to be found in the proof that tends to 0 as $N\to \infty$. \end{prop} The proof is postponed to Section \ref{S:proof_drift}. \subsection{Proof of the large time behaviour} \label{S:proof_mainTh} We prove here Theorem~\ref{thm:long_time}, based on the results of Section~\ref{S:mild_formulation}. The approach followed is somehow formally similar to the strategy of proof developed in \cite{Coppini2022} for the diffusion case. \begin{proof}[Proof of Theorem \ref{thm:long_time}] Choose $m\geq 1$ and $t_f>0$. 
Let \begin{equation}\label{eq:def_varepsilon_0} \varepsilon_0 = \dfrac{\gamma}{6 C_{drift}}, \end{equation} where $\gamma$ is defined in \eqref{eq:def_gamma} and the constant $C_{drift}$ comes from Proposition \ref{prop:drift_term} above. We consider $\varepsilon$ small enough, such that $\varepsilon<\varepsilon_0$. As $(X_t)$ converges uniformly towards $X_\infty$ (Theorem \ref{thm:large_time_cvg_u_t}), there exists $t_\varepsilon^1<\infty$ such that \begin{equation}\label{eq:choice_t_varepsilon1} \left\Vert X_t-X_\infty \right\Vert_2\leq \dfrac{\varepsilon}{4},\quad t\geq t_\varepsilon^1. \end{equation}Moreover, with \eqref{eq:def_delta_s}, we also have that $\int_0^t e^{-\gamma(t-s)}\left(\delta_s^2+\delta_s\right)ds \xrightarrow[t\to\infty]{}0$, hence there exists $t_\varepsilon^2<\infty$ such that \begin{equation}\label{eq:choice_t_varepsilon2} C_\text{drift}\int_0^t e^{-\gamma(t-s)}\left(\delta_s^2+\delta_s\right)ds \leq \dfrac{\varepsilon}{18}, \quad t\geq t_\varepsilon^2. \end{equation} We set now $t_\varepsilon=\max(t_\varepsilon^1,t_\varepsilon^2)$. Let $T$ such that \begin{equation}\label{eq:def_T} e^{-\gamma T} <\frac{1}{3}, \quad T>t_f. \end{equation} The strategy of proof relies on the following time discretisation. The point is to control $\Vert X_N(t)-X_\infty\Vert_2$ on $[t_\varepsilon,T_N]$ with \begin{equation}\label{eq:def_TN} T_N:=a_N T+t_\varepsilon, \quad \text{with } a_N:=\lceil (N\rho_N)^m \rceil, \end{equation} which will imply the result \eqref{eq:long_time_pol} as $[t_\varepsilon,(N \rho_{ N})^{ m}t_f]\subset [t_\varepsilon,T_N]$ since $T>t_f$. We decompose below the interval $[t_\varepsilon,T_N]$ into $a_N$ intervals of length $T$. 
We define the following events, with $0\leq t_a\leq t_b$ (recall that $Y_N(t)=X_N(t)-X_\infty$) \begin{align}\label{eq:def_events} A_1^N(\varepsilon)&:=\left\{ \left\Vert X_N(t_\varepsilon)-X_\infty\right\Vert_2 \leq \dfrac{\varepsilon}{2}\right\},\\ A_2^N(\varepsilon)&:=\left\{ \sup_{t\in [t_\varepsilon,t_\varepsilon+T]} \left\Vert \zeta_N(t_\varepsilon, t) \right\Vert_2 \leq\dfrac{\varepsilon}{18}\right\},\\ E(t_a,t_b)&:= \left\{ \max \left( 2\left\Vert Y_N(t_a)\right\Vert_2, \sup_{t\in [t_a,t_b]} \left\Vert Y_N(t) \right\Vert_2, 2\left\Vert Y_N(t_b)\right\Vert_2 \right) \leq \varepsilon\right\}\label{eq:def_event_E}. \end{align} By \eqref{eq:choice_t_varepsilon1}, and as Proposition \ref{prop:finite_time} gives that $\mathbf{P}\left( \sup_{t\in [0,t_\varepsilon]} \left\Vert X_N(t) - X_t\right\Vert_2>\dfrac{\varepsilon}{4}\right) \xrightarrow[N\to\infty]{}0 $, we have by triangle inequality \begin{equation}\label{eq:A1P1} \mathbf{P}\left( A_1^N(\varepsilon) \right) \xrightarrow[N\to\infty]{}1. \end{equation} \paragraph{Step 1} We have from the definition \eqref{eq:def_event_E} of $E(t_a,t_b)$ that \begin{equation}\label{eq:PminE} \mathbf{P}\left( \sup_{t\in [t_\varepsilon,T_N]} \left\Vert X_N(t) - X_\infty\right\Vert_2\leq\varepsilon\right) \geq \mathbf{P}\left( E(t_\varepsilon,T_N)\right) = \mathbf{P}\left( E(t_\varepsilon, T_N)\vert A_1^N(\varepsilon) \right) \mathbf{P}\left(A_1^N(\varepsilon)\right). \end{equation} Moreover, \begin{align*} &\mathbf{P}\left( E(t_\varepsilon,T_N)\vert A_1^N(\varepsilon)\right)\\ &= \mathbf{P}\left( E(t_\varepsilon,t_\varepsilon+a_N T)\vert A_1^N(\varepsilon)\right)\\ &\geq \mathbf{P}\left( E(t_\varepsilon,t_\varepsilon+a_N T)\cap E(t_\varepsilon,t_\varepsilon+(a_N-1) T) \vert A_1^N(\varepsilon)\right)\\ &= \mathbf{P}\left( E(t_\varepsilon,t_\varepsilon+a_N T)\vert E(t_\varepsilon,t_\varepsilon+(a_N-1) T) \cap A_1^N(\varepsilon)\right)\mathbf{P}\left( E(t_\varepsilon,t_\varepsilon+(a_N-1) T) \vert A_1^N(\varepsilon)\right). 
\end{align*} Recall that we are in the exponential case \eqref{eq:def_exponential}, so that $\left( X_N(t)\right)_t$ is a Markov process. Thus by Markov property \begin{align*} &\mathbf{P}\left( E(t_\varepsilon,t_\varepsilon+a_N T)\vert E(t_\varepsilon,t_\varepsilon+(a_N-1) T) \cap A_1^N(\varepsilon)\right)\\ =& \mathbf{P}\left( E(t_\varepsilon+(a_N-1)T,t_\varepsilon+a_N T)\vert E(t_\varepsilon,t_\varepsilon+(a_N-1) T)\right)\\ =& \mathbf{P}\left( E(t_\varepsilon+(a_N-1)T,t_\varepsilon+a_N T)\left\vert \left\{ \left\Vert Y_N(t_\varepsilon+(a_N-1)T)\right\Vert_2 \leq \dfrac{\varepsilon}{2}\right\}\right.\right). \end{align*} $\mathbf{P}\left( E(t_\varepsilon+(a_N-1)T,t_\varepsilon+a_N T)\vert \left\{ \left\Vert Y_N(t_\varepsilon+(a_N-1)T)\right\Vert_2 \leq \dfrac{\varepsilon}{2}\right\}\right)$ means that, under an initial condition at $t_\varepsilon+(a_N-1)T$, we look at the probability that $Y_N$ stays under $\varepsilon$ on the interval $[t_\varepsilon+(a_N-1)T,t_\varepsilon+a_N T]$ of size $T$ and comes back under $\dfrac{\varepsilon}{2}$ at the final time $t_\varepsilon+a_N T$. By Markov's property, it is exactly $\mathbf{P}\left( E(t_\varepsilon,t_\varepsilon+T) \vert A_1^N(\varepsilon)\right)$. An immediate iteration gives then \begin{equation}\label{eq:Ean} \mathbf{P}\left( E(t_\varepsilon,T_N)\vert A_1^N(\varepsilon)\right) \geq \mathbf{P}\left( E(t_\varepsilon,t_\varepsilon+T) \vert A_1^N(\varepsilon)\right)^{a_N}. \end{equation} By \eqref{eq:A1P1}, from now on we consider that we are on this event $A_1^N(\varepsilon)$ and omit this notation for simplicity. \paragraph{Step 2} We show that \begin{equation}\label{eq:AN2incluE} A_2^N(\varepsilon)\subset E(t_\varepsilon, t_\varepsilon+T). \end{equation} Let us place ourselves in $A_2^N(\varepsilon)$. As we are also under $A_1^N(\varepsilon)$, we have indeed $\left\Vert Y_N(t_\varepsilon)\right\Vert_2\leq \dfrac{\varepsilon}{2}$ for the first condition of $E(t_\varepsilon, t_\varepsilon+T)$. 
As $Y_N$ verifies \eqref{eq:def_Y_N_termes} (Proposition~\ref{prop:termes_sys_micros}), it can be written for $t\geq t_\varepsilon$ \begin{equation}\label{eq:Y_N-MF_t_epsilon} Y_N(t)=e^{\mathcal{L}(t-t_\varepsilon)}Y_N(t_\varepsilon) + \phi_N(t_\varepsilon, t)+\zeta_N(t_\varepsilon, t). \end{equation} For any $t\in [t_\varepsilon,t_\varepsilon+T]$, \begin{align}\label{eq:maintheorem_aux2} \left\Vert \phi_N(t_\varepsilon, t) \right\Vert_2 &\leq C_\text{drift} \left( \int_{t_\varepsilon}^t e^{-(t-s)\gamma} \Vert Y_N(s) \Vert_2^2ds + G_{N}+\int_{t_\varepsilon}^t e^{-\gamma(t-s)} \left(\delta_s^2 +\delta_s\right) ds\right)\notag\\ &\leq C_\text{drift} \left( \int_{t_\varepsilon}^t e^{-(t-s)\gamma} \Vert Y_N(s) \Vert_2^2ds \right)+ \dfrac{\varepsilon}{9} \end{align} where the first inequality comes from Proposition \ref{prop:drift_term}, and the second is true for $N$ large enough using $G_N\to 0$ and \eqref{eq:choice_t_varepsilon2}. Coming back to \eqref{eq:Y_N-MF_t_epsilon}, using that by Proposition \ref{prop:operateur_L} \begin{equation}\label{eq:maintheorem_aux4} \left\Vert e^{\mathcal{L}(t-t_\varepsilon)}Y_N(t_\varepsilon) \right\Vert_2\leq e^{-\gamma(t-t_\varepsilon)}\left\Vert Y_N(t_\varepsilon)\right\Vert_2, \end{equation} and using \eqref{eq:maintheorem_aux2}, we have on $A_1^N(\varepsilon)\cap A_2^N(\varepsilon)$ $$\left\Vert Y_N(t) \right\Vert_2 \leq \dfrac{\varepsilon}{2} + C_\text{drift} \left( \int_{t_\varepsilon}^t e^{-(t-s)\gamma} \Vert Y_N(s) \Vert_2^2ds \right)+\dfrac{\varepsilon}{9}+\dfrac{\varepsilon}{18}.$$ Let $\delta>0$ such that $\delta\leq \min\left( \dfrac{\varepsilon}{6},\dfrac{\gamma}{9C_\text{drift}}\right)$. Recall that $\left\Vert Y_N(\cdot)\right\Vert_2$ is not a continuous function, it jumps whenever a spike of the process $\left(Z_{N,1},\cdots,Z_{N,N}\right)$ occurs, but the jump size never exceeds $\dfrac{1}{N}$, and for $N$ large enough $\dfrac{1}{N}\leq \dfrac{\delta}{2}$. 
Then, one can apply Lemma \ref{lem:gronwal_quadratic} and obtain that for all $N$ large enough, \begin{equation}\label{eq:maintheorem_aux3} \sup_{t\in [t_\varepsilon,t_\varepsilon+T]}\left\Vert Y_N(t)\right\Vert_2\leq \dfrac{\varepsilon}{2}+ 3\delta\leq\varepsilon. \end{equation} It remains to prove that $\left\Vert Y_N(t_\varepsilon+T)\right\Vert_2\leq\dfrac{\varepsilon}{2}$. We obtain from \eqref{eq:Y_N-MF_t_epsilon}, \eqref{eq:maintheorem_aux2} and \eqref{eq:maintheorem_aux4} for $t=t_\varepsilon + T$ on $A_1^N(\varepsilon)\cap A_2^N(\varepsilon)$ $$\left\Vert Y_N(t_\varepsilon + T)\right\Vert_2\leq e^{-\gamma T}\dfrac{\varepsilon}{2} + \dfrac{\varepsilon}{6} + C_\text{drift}\int_{t_\varepsilon}^{t_\varepsilon+T} e^{-(t_\varepsilon+T-s)\gamma} \Vert Y_N(s) \Vert_2^2ds.$$ Using the a priori bound \eqref{eq:maintheorem_aux3} \begin{align*} \left\Vert Y_N(t_\varepsilon + T)\right\Vert_2&\leq e^{-\gamma T}\dfrac{\varepsilon}{2} +\dfrac{\varepsilon}{6} + \varepsilon^2 \dfrac{C_\text{drift}}{\gamma}\leq e^{-\gamma T}\dfrac{\varepsilon}{2} +\dfrac{\varepsilon}{6} + \dfrac{\varepsilon}{6}\leq \dfrac{\varepsilon}{2}, \end{align*} where we recall the particular choices of $T$ and $\varepsilon<\varepsilon_0$ in \eqref{eq:def_T} and \eqref{eq:def_varepsilon_0}. This concludes the proof of \eqref{eq:AN2incluE}. \paragraph{Step 3} We obtain with \eqref{eq:Ean} and Markov's inequality, \begin{align*} \mathbf{P}\left( E(t_\varepsilon,T_N)\right)&\geq \mathbf{P}\left( E(t_\varepsilon,t_\varepsilon+T)\right)^{a_N}\geq \mathbf{P}(A_2^N(\varepsilon))^{a_N}\\ &=\left( 1 - \mathbf{P}\left( \sup_{t\in [t_\varepsilon,t_\varepsilon+T]} \left\Vert \zeta_N(t_\varepsilon, t) \right\Vert_2 >\dfrac{\varepsilon}{18}\right) \right)^{a_N}\\ &\geq \left( 1 - 18^{2m'}\dfrac{\mathbb{E}\left[ \sup_{t\in [t_\varepsilon,t_\varepsilon+T]} \left\Vert \zeta_N(t_\varepsilon, t) \right\Vert_2^{2m'}\right] }{\varepsilon^{2m'}}\right)^{a_N}, \end{align*} where we have taken $m'>m$. 
With Proposition \ref{prop:noise_perturbation}, it gives $$\mathbf{P}\left( E(t_\varepsilon,T_N)\right)\geq \left(1- \dfrac{C}{\left(\varepsilon^2N\rho_N\right)^{m'}}\right)^{a_N}=\exp\left( a_N \ln \left(1- \dfrac{C}{\left(\varepsilon^2N\rho_N\right)^{m'}}\right)\right).$$ By definition \eqref{eq:def_TN}, $a_N=o\left(\left(N\rho_N \right)^{m'}\right)$, so the right-hand side tends to 1 as $N$ goes to $\infty$ under Hypothesis \ref{hyp:scenarios}. By \eqref{eq:PminE}, we conclude that $$\mathbf{P}\left( \sup_{t\in [t_\varepsilon,T_N]} \left\Vert X_N(t) - X_\infty\right\Vert_2\leq\varepsilon\right) \xrightarrow[N\to\infty]{}1.$$ This concludes the proof of Theorem~\ref{thm:long_time}. \end{proof} \section{Proofs - Noise perturbation}\label{S:proof_NP} In this section, we prove Proposition~\ref{prop:noise_perturbation} concerning the control of the noise perturbation $\zeta_N(t_0,t)$ defined in \eqref{eq:def_zeta_N}. For simplicity of notation, we assume that $t_0=0$. Recall the expression of $\left(Z_{N,j}\right)_{1\leq j \leq N}$ in \eqref{eq:def_ZiN}. Introduce the compensated measure $\tilde{\pi}_j(ds,dz):=\pi_j(ds,dz)-\lambda_{N,j}dsdz$, so that with the linearity of $(e^{t\mathcal{L}})_{t\geq 0}$, we obtain that $\zeta_N$ can be written as \begin{equation}\label{eq:zeta_N_chi} \zeta_N(0,t) = \sum_{j=1}^N \int_0^t\int_0^\infty e^{(t-s)\mathcal{L}}\chi_j(s,z) \tilde{\pi}_j(ds,dz), \end{equation} with $\displaystyle \chi_j(s,z):=\left( \sum_{i=1}^N \mathbf{1}_{B_{N,i}} \dfrac{w_{ij}}{N} \mathbf{1}_{z\leq \lambda_{N,j}(s)}\right)$. The proof of Proposition \ref{prop:noise_perturbation} relies on an adaptation of an argument given in \cite{Zhu2017} (Theorem 4.3), where a similar quantity to \eqref{eq:zeta_N_chi} is considered for $N=1$. \subsection{Control of the moments of the process $Z_{N, i}$} \begin{prop}\label{prop:control_mean_Zj^m} Let $m\geq 1$ and $T>0$. 
Under Hypotheses \ref{hyp_globales} and \ref{hyp:scenarios}, $\mathbb{P}$-almost surely $$\sup_{N\geq 1} \mathbf{E}\left[\dfrac{1}{N} \sum_{j=1}^N Z_{N,j}(T)^m \right] <\infty.$$ \end{prop} \begin{proof} Let $N\geq 1$. We have for any $i\in \llbracket 1,N\rrbracket$ \begin{align} \mathbf{E} \left[ Z_{N,i}(T)^m \right] &\leq \mathbf{E} \left[ \left( \left( Z_{N,i}(T))-\int_0^T \lambda_{N,i}(t)dt\right) + \int_0^T \lambda_{N,i}(t)dt\right)^m \right] \notag\\ &\leq 2^{m-1}\mathbf{E} \left[ \left( Z_{N,i}(T)-\int_0^T \lambda_{N,i}(t)dt\right)^m \right] + 2^{m-1} \mathbf{E} \left[ \left(\int_0^T \lambda_{N,i}(t)dt\right)^m \right]\notag \\ &\leq 2^{m-1}C \mathbf{E} \left[ \left(\int_0^T \lambda_{N,i}(t)dt\right)^{\frac{m}{2}} \right] + (2T)^{m-1} \mathbf{E} \left[ \int_0^T {\lambda_{N,i}}(t)^mdt \right], \label{eq:Z_iNT_m} \end{align} where we used Jensen's inequality and Burkholder-Davis-Gundy Inequality on the martingale $\left( Z_{N,i}(T)-\int_0^T \lambda_{N,i}(t)dt\right)$. Similarly, we obtain $$ \mathbf{E} \left[ \left(\int_0^T \lambda_{N,i}(t)dt\right)^{\frac{m}{2}} \right] \leq T^{\frac{m}{2}-1} \mathbf{E} \left[ \int_0^T \left(\lambda_{N,i}(t)\right)^{\frac{m}{2}} dt \right].$$ We focus now on the term $ \mathbf{E} \left[ \int_0^T \lambda_{N,i}(t)^kdt \right]$ for $k\geq 1$. From the definition of $\lambda_{N,i}$ \eqref{eq:def_lambdaiN_intro}, by Lipschitz continuity of $F$ and with Jensen's inequality \begin{multline*} \mathbf{E} \left[ \int_0^T \lambda_{N,i}(t)^kdt \right] \leq 2^{k-1} T F(0,\eta_t(x_i))^k\\ + 2^{k-1} \Vert F \Vert_L^k \mathbf{E} \left[\int_0^T \left( \dfrac{1}{N} \sum_{j=1}^N \int_0^{t-}w_{ij} e^{-\alpha(t-s)} dZ_{N,j}(s)\right)^k dt\right]. \end{multline*} Let $S_i:=\sum_{j=1}^N \dfrac{w_{ij}}{N}$. By \eqref{eq:estimees_IC}, we have that $\mathbb{P}$-almost surely, $\limsup_{N\to\infty}\sup_{1\leq i \leq N} S_i \leq 2$. 
We obtain with discrete Jensen's inequality that for any $t\geq 0$ $$\left(\dfrac{1}{N} \sum_{j=1}^N \int_0^{t-}w_{ij} e^{-\alpha(t-s)} dZ_{N,j}(s)\right)^k \leq S_i^k\left( \sum_{j=1}^N \dfrac{w_{ij}}{NS_i}Z_{N,j}(t)\right)^k\leq S_i^{k-1} \sum_{j=1}^N \dfrac{w_{ij}}{N} Z_{N,j}(t)^k.$$ We obtain then $$\mathbf{E} \left[ \int_0^T \lambda_{N,i}(t)^kdt \right] \leq C_{T,F,\eta_0,k} + C_{k,F} \sum_{j=1}^N \dfrac{w_{ij}}{N} \mathbf{E}\left[\int_0^T Z_{N,j}(t)^{k}dt\right],$$ thus, going back to \eqref{eq:Z_iNT_m}, with $C=C_{T,F,\eta_0,m}$ \begin{align*} \mathbf{E}\left[\dfrac{1}{N} \sum_{j=1}^N Z_{N,j}(T)^m \right] &\leq \dfrac{C}{N} \sum_{i=1}^N \left( \mathbf{E} \left[ \int_0^T \lambda_{N,i}(t)^{\frac{m}{2}}dt \right] + C \mathbf{E} \left[ \int_0^T \lambda_{N,i}(t)^mdt \right]\right)\\ &\leq C\left(1 + \sum_{i,j=1}^N \dfrac{w_{ij}}{N^2} \mathbf{E}\left[\int_0^T Z_{N,j}(t)^{\frac{m}{2}}dt\right] + \sum_{i,j=1}^N \dfrac{w_{ij}}{N^2} \mathbf{E}\left[\int_0^T Z_{N,j}(t)^mdt\right]\right). \end{align*} With \eqref{eq:estimees_IC}, it gives that, $\mathbb{P}$-almost surely for $N$ large enough \begin{align*} \mathbf{E}\left[\dfrac{1}{N} \sum_{j=1}^N Z_{N,j}(T)^m \right] \leq C\left(1 + \int_0^T \mathbf{E}\left[\dfrac{1}{N}\sum_{j=1}^N Z_{N,j}(t)^{\frac{m}{2}}\right]dt + \int_0^T\mathbf{E}\left[ \dfrac{1}{N}\sum_{j=1}^N Z_{N,j}(t)^m\right]dt\right). \end{align*} As for any $t\geq 0$ $$\mathbf{E}\left[\dfrac{1}{N}\sum_{i=1}^N Z_{N,i}(t)\right] = \dfrac{1}{N}\sum_{i=1}^N \mathbf{E}\left[ \int_0^t \lambda_{N,i}(s)ds\right] \leq C_{T,\eta_0,F} + C_{T,\eta_0,F} \int_0^t\mathbf{E}\left[ \dfrac{1}{N}\sum_{j=1}^N Z_{N,j}(s)\right]ds,$$ Gr{\"o}nwall's lemma gives that $\displaystyle\sup_{t\leq T} \mathbf{E}\left[\dfrac{1}{N}\sum_{i=1}^N Z_{N,i}(t)\right] <\infty$ (independently of $N$) and similarly an immediate iteration gives that for any $k\geq 0$, $ \displaystyle\sup_{N\geq 1}\mathbf{E} \left[ \dfrac{1}{N} \sum_{j=1}^N Z_{N,j}(T)^{2^k} \right]<\infty$ which concludes the proof. 
\end{proof} \subsection{Proof of Proposition \ref{prop:noise_perturbation}} \begin{proof} We divide the proof into several steps. Fix $m\geq 1$. We prove Proposition \ref{prop:noise_perturbation} for the choice $t_0=0$, but it remains the same for a general initial time $t_0\geq 0$. \textit{Step 1 -} The functional $\phi:L^2(I)\to \mathbb{R}$ given by $\phi(v)=\Vert v \Vert_2^{2m}$ is of class $\mathcal{C}^2$ (recall that $\zeta_N\in L^2(I)$) so that by Itô formula on the expression \eqref{eq:zeta_N_chi} we obtain \begin{align}\label{eq:zeta_spatial_def} \phi\left(\zeta_N(t)\right)&= \int_0^t \phi'\left(\zeta_N(s)\right) \mathcal{L}\left(\zeta_N(s)\right)ds + \sum_{j=1}^N \int_0^t \int_0^\infty \phi'\left(\zeta_N(s-)\right)\chi_j(s,z)\tilde{\pi}_j(ds,dz) \notag\\+ &\sum_{j=1}^N\int_0^t\int_0^\infty \left[ \phi\left( \zeta_N(s-)+\chi_j(s,z)\right) - \phi\left(\zeta_N(s-)\right) - \phi'\left(\zeta_N(s-)\right)\chi_j(s,z)\right]\pi_j(ds,dz)\notag\\ &:= I_0(t) + I_1(t) + I_2(t). \end{align} We have then for any $v,h,k\in L^2(I)$, $\phi'(v)h=2m\Vert v \Vert_2^{2m-2}\text{Re}\left(\langle v,h\rangle\right)\in\mathbb{R}$ and $\phi''(v)(h,k)=2m(2m-1)\Vert v \Vert_2^{2m-4}\text{Re}\langle v,k \rangle \text{Re}\langle v,h \rangle +2m\Vert v \Vert_2^{2m-2} \text{Re}\langle h,k\rangle$. \textit{Step 2 -} We have $I_0(t)= \int_0^t 2m \Vert \zeta_N(s) \Vert_2^{2m-2}\text{Re}\left( \langle \zeta_N(s),\mathcal{L}(\zeta_N(s))\rangle \right)ds$. From Proposition \ref{prop:operateur_L}, $\mathcal{L}$ generates a contraction semi-group hence for any $s\geq 0$, $\text{Re}\left( \langle \zeta_N(s),\mathcal{L}(\zeta_N(s))\rangle\right)\leq 0$ by Lumer-Phillips Theorem (see Section 1.4 of \cite{Pazy1974}). Then for any $t\geq 0$ we have $I_0(t)\leq 0$. 
\textit{Step 3 -} About $I_1$ in \eqref{eq:zeta_spatial_def}, with $\alpha_j(s,z):=2m \Vert \zeta_N(s-)\Vert_2^{2m-2} \langle \zeta_N(s-),\chi_j(s,z)\rangle\in \mathbb{R}$, $$ I_1(t)= \sum_{j=1}^N \int_0^t\int_0^\infty \alpha_j(s,z) \tilde{\pi}_j(ds,dz).$$ $I_1$ is then a real martingale. By Burkholder-Davis-Gundy inequality, there exists a constant $C>0$ such that for any $t\geq 0$: $$\mathbf{E}\left[ \sup_{s\leq t} \left|I_1(s)\right|\right] \leq C \mathbf{E}\left[ \sqrt{[I_1]_t} \right],$$ where $[I_1]_t=\sum_{s\leq t} \left| \Delta I_1(s) \right|^2$ stands for the quadratic variation of $I_1$. It is computed as follows (as the $(\pi_j)_{1\leq j \leq N}$ are independent, there are almost surely no simultaneous jumps so that $[\tilde{\pi}_j,\tilde{\pi}_{j'}]=0$ if $j\neq j'$): \begin{align*} [I_1]_t &= \sum_{j=1}^N\int_{0}^t \int_0^\infty \alpha_j(s,z)^2\pi_{j}(ds,dz)\\ &= \sum_{j=1}^N\int_{0}^t \int_0^\infty \left(2m \Vert \zeta_N(s-)\Vert_2^{2m-2} \langle \zeta_N(s-),\chi_j(s,z)\rangle\right)^2\pi_{j}(ds,dz)\\ &\leq 4m^2 \sup_{0\leq s \leq t} \left( \Vert \zeta_N(s)\Vert_2^{4m-2}\right) \sum_{j=1}^N\int_0^t\int_0^\infty \Vert \chi_j(s,z)\Vert_2^2 \pi_j(ds,dz). 
\end{align*} We obtain then $$\mathbf{E}\left[ \sqrt{[I_1]_t} \right] \leq 2m \mathbf{E}\left[ \sup_{0\leq s \leq t} \left( \Vert \zeta_N(s)\Vert_2^{2m-1}\right) \left(\sum_{j=1}^N\int_0^t\int_0^\infty \Vert \chi_j(s,z)\Vert_2^2 \pi_j(ds,dz)\right)^{\frac{1}{2}}\right].$$ Applying H{\"o}lder inequality with parameter $\frac{2m-1}{2m}+\frac{1}{2m}=1$ for the random variables $\sup_{0\leq s \leq t} \left( \Vert \zeta_N(s)\Vert_2^{2m-1}\right)$ and $ \left(\sum_{j=1}^N\int_0^t\int_0^\infty \Vert \chi_j(s,z)\Vert_2^2 \pi_j(ds,dz)\right)^{\frac{1}{2}}$, we obtain that $\mathbf{E}\left[ \sqrt{[I_1]_t} \right]$ is upper bounded by $$ 2m \left( \mathbf{E}\left[ \sup_{0\leq s \leq t} \left( \Vert \zeta_N(s)\Vert_2^{2m}\right) \right]\right)^{\frac{2m-1}{2m}} \left(\mathbf{E}\left[\left(\sum_{j=1}^N\int_0^t\int_0^\infty \Vert \chi_j(s,z)\Vert_2^2 \pi_j(ds,dz)\right)^m\right]\right) ^{\frac{1}{2m}}.$$ Let $\varepsilon>0$ to be chosen later. From Young's inequality, for any $a,b \geq 0$, we can write $ab=\left( \varepsilon^{\frac{2m-1}{2m}}a\right) \left( \varepsilon^{\frac{-(2m-1)}{2m}}b\right)\leq \frac{2m-1}{2m}\left( \varepsilon^{\frac{2m-1}{2m}}a\right)^{\frac{2m}{2m-1}}+\frac{1}{2m}\left( \varepsilon^{\frac{-(2m-1)}{2m}}b\right)^{2m}=\frac{2m-1}{2m}\varepsilon a^{\frac{2m}{2m-1}}+\frac{1}{2m}\varepsilon^{-(2m-1)}b^{2m}$. This gives for the choice $a=\left( \mathbf{E}\left[ \sup_{0\leq s \leq t} \left( \Vert \zeta_N(s)\Vert_2^{2m}\right) \right]\right)^{\frac{2m-1}{2m}}$ and $b=\left(\mathbf{E}\left[\left(\sum_{j=1}^N\int_0^t\int_0^\infty \Vert \chi_j(s,z)\Vert_2^2 \pi_j(ds,dz)\right)^m\right]\right) ^{\frac{1}{2m}}$: \begin{multline*} \mathbf{E}\left[\sqrt{ [I_1]_t} \right] \leq (2m-1) \varepsilon \mathbf{E}\left[ \sup_{0\leq s \leq t} \left( \Vert \zeta_N(s)\Vert_2^{2m}\right) \right] \\+\varepsilon^{-(2m-1)} \mathbf{E}\left[\left(\sum_{j=1}^N\int_0^t\int_0^\infty \Vert \chi_j(s,z)\Vert_2^2 \pi_j(ds,dz)\right)^m\right]. 
\end{multline*} We have then shown that, for the constant $C$ given by Burkholder-Davis-Gundy Inequality, \begin{multline}\label{eq:spatial_I1} \mathbf{E}\left[ \sup_{s\leq T} \left|I_1(s)\right|\right] \leq C (2m-1) \varepsilon \mathbf{E}\left[ \sup_{0\leq s \leq T} \left( \Vert \zeta_N(s)\Vert_2^{2m}\right) \right]\\ + C \varepsilon^{-(2m-1)}\mathbf{E}\left[\left(\sum_{j=1}^N\int_0^T\int_0^\infty \Vert \chi_j(s,z)\Vert_2^2 \pi_j(ds,dz)\right)^m\right]. \end{multline} Let us focus now on $I_2$ in \eqref{eq:zeta_spatial_def}: $$I_2(t)=\sum_{j=1}^N\int_0^t\int_0^\infty \left[ \phi\left( \zeta_N(s-)+\chi_j(s,z)\right) - \phi\left(\zeta_N(s-)\right) - \phi'\left(\zeta_N(s-)\right)\chi_j(s,z)\right]\pi_j(ds,dz).$$ For any jump $(s,z)$ of the Poisson measure $\pi_j$, from Taylor's Lagrange formula there exists $\tau_s\in (0,1)$ such that \begin{multline*} \phi\left( \zeta_N(s-)+\chi_j(s,z)\right) - \phi\left(\zeta_N(s-)\right) - \phi'\left(\zeta_N(s-)\right)\chi_j(s,z)\\= \dfrac{1}{2} \phi''\left(\zeta_N(s-)+\tau_s \chi_j(s,z)\right) \left( \chi_j(s,z),\chi_j(s,z) \right). 
\end{multline*} As $\phi''(v)(h,k)=2m(2m-1)\Vert v \Vert_2^{2m-4}\text{Re}\langle v,k \rangle \text{Re}\langle v,h \rangle +2m\Vert v \Vert^{2m-2} \text{Re}\langle h,k\rangle$ for any $v,h,k \in L^2(I)$, one has with Cauchy–Schwarz inequality that $$\phi''\left(\zeta_N(s-)+\tau_s \chi_j(s,z)\right) \left( \chi_j(s,z) \right)^2 \leq 4m^2 \Vert \zeta_N(s-)+\tau_s \chi_j(s,z) \Vert_2^{2m-2} \Vert \chi_j(s,z)\Vert_2^2.$$ But as $\Vert x+\tau y\Vert_2^2 \leq \max \left( \Vert x\Vert_2^2 , \Vert x+y \Vert_2^2\right)$ for any $x,y \in L^2(I)$ and $\tau \in (0,1)$, we have here $$\Vert \zeta_N(s-)+\tau_s \chi_j(s,z) \Vert_2^{2m-2} \leq \max\left( \Vert \zeta_N(s-)\Vert_2^{2m-2} , \Vert \zeta_N(s-)+ \chi_j(s,z) \Vert_2^{2m-2} \right),$$ and as $\Vert \zeta_N(s-)\Vert_2^{2m-2} \leq \sup_{s\leq t} \Vert \zeta_N(s)\Vert_2^{2m-2}$ and $ \Vert \zeta_N(s-)+ \chi_j(s,z) \Vert_2^{2m-2} = \Vert \zeta_N(s) \Vert_2^{2m-2} \leq \sup_{s\leq t} \Vert \zeta_N(s)\Vert_2^{2m-2}$, thus $$\mathbf{E}\left[ \sup_{s\leq t}\vert I_2(s) \vert \right] \leq 2m^2 \mathbf{E}\left[ \sup_{s\leq t} \Vert \zeta_N(s)\Vert_2^{2m-2} \sum_{j=1}^N \int_0^t \int_0^\infty \Vert \chi_j(s,z)\Vert_2^2\pi_j(ds,dz)\right].$$ We proceed now similarly as for $I_1$. From H{\"o}lder inequality, as $\frac{2m-2}{2m}+\frac{1}{m}=1$ we know that for any $A,B$ random non-negative variables, $\mathbf{E}\left[AB\right] \leq \left( \mathbf{E}\left[A^{\frac{2m}{2m-2}}\right]\right)^{\frac{2m-2}{2m}} \left( \mathbf{E}\left[ B^{m} \right]\right)^{\frac{1}{m}}$. 
It leads, for the choice $A=\sup_{0\leq s \leq t} \left( \Vert \zeta_N(s)\Vert_2^{2m-2}\right)$ and $B = \sum_{j=1}^N\int_0^t\int_0^\infty \Vert \chi_j(s,z)\Vert_2^2 \pi_j(ds,dz)$, to the fact that $\mathbf{E}\left[ \sup_{s\leq t}\vert I_2(s) \vert \right]$ is upper bounded by
\end{align} Taking the expectation in \eqref{eq:zeta_spatial_def} and combining \eqref{eq:spatial_I1} and \eqref{eq:spatial_I2}, we obtain that \begin{multline}\label{eq:spatial_zeta_maj_eps} \mathbf{E}\left[\sup_{s\leq T} \Vert \zeta_N(s) \Vert_2^{2m}\right] \leq \varepsilon\left( C(2m-1)+m(2m-2) \right) \mathbf{E}\left[\sup_{0\leq s \leq T} \left( \Vert \zeta_N(s)\Vert_2^{2m}\right)\right] \\ \quad + \left( C \varepsilon^{-(2m-1)}+2m \varepsilon^{-(2m-2)}\right) \mathbf{E}\left[\left(\sum_{j=1}^N\int_0^T\int_0^\infty \Vert \chi_j(s,z)\Vert_2^2 \pi_j(ds,dz)\right)^m\right]. \end{multline} \textit{Step 4 -} We can now fix $\varepsilon$ such that $\varepsilon\left( C(2m-1)+m(2m-2) \right) \leq \frac{1}{2}$ so that \eqref{eq:spatial_zeta_maj_eps} leads to \begin{equation}\label{eq:spatial_zeta_maj} \mathbf{E}\left[\sup_{s\leq T} \Vert \zeta_N(s) \Vert_2^{2m}\right] \leq 2 C\mathbf{E}\left[\left(\sum_{j=1}^N\int_0^T\int_0^\infty \Vert \chi_j(s,z)\Vert_2^2 \pi_j(ds,dz)\right)^m\right], \end{equation} where $C>0$ depends only on $m$. \textit{Step 5 -} Let $A_N:=\mathbf{E}\left[\left(\sum_{j=1}^N\int_0^T\int_0^\infty \Vert \chi_j(s,z)\Vert_2^2 \pi_j(ds,dz)\right)^m\right]$. We have $$\Vert\chi_j(s,z)\Vert_2^2 = \int_I \left( \sum_{i=1}^N \mathbf{1}_{B_{N,i}}(x) \dfrac{w_{ij}}{N}\mathbf{1}_{z\leq \lambda_{N,j}(s)}\right)^2 dx = \mathbf{1}_{z\leq \lambda_{N,j}(s)} \sum_{i=1}^N\dfrac{\xi_{ij}}{N^3\rho_N^2}, $$ which leads to, with the definition of $Z_{N,j}$ in \eqref{eq:def_ZiN} \begin{align*} A_N &= \mathbf{E}\left[\left(\sum_{i,j=1}^N\int_0^T \int_0^\infty\mathbf{1}_{z\leq \lambda_{N,j}(s)} \dfrac{\xi_{ij}}{N^3\rho_N^2}\pi_j(ds,dz)\right)^m\right]\\ &\leq \left(\dfrac{1}{N\rho_N}\right)^m\mathbf{E}\left[ \left( \sum_{i,j=1}^N\dfrac{\xi_{ij}}{N^2\rho_N}Z_{N,j}(T)\right)^m\right]. 
\end{align*} With \eqref{eq:estimees_IC}, Jensen's discrete inequality and \eqref{eq:spatial_zeta_maj}, it leads to \begin{align*} A_N &\leq \left(\dfrac{1}{N\rho_N}\right)^m\mathbf{E}\left[ \left( \sum_{j=1}^N \dfrac{1}{N} \left( \sup_j \sum_{i=1}^N \dfrac{\xi_{ij}}{N\rho_N} \right) Z_{N,j}(T)\right)^m\right]\\ &\leq \dfrac{C}{\left(N\rho_N\right)^m } \mathbf{E}\left[\dfrac{1}{N} \sum_{j=1}^N Z_{N,j}(T)^m \right], \end{align*} hence the result with Proposition \ref{prop:control_mean_Zj^m}. \end{proof} \section{Proofs - Drift term}\label{S:proof_drift} In this section, we prove Proposition~\ref{prop:drift_term} concerning the control of the drift term perturbation $\phi_N(t_0,t)$ defined in \eqref{eq:def_phi_N}. \subsection{Notation} We introduce the following auxiliary functions in $L^2(I)$: \begin{align} \Theta_{t,i,1} &:= \dfrac{1}{N\rho_N} \sum_{j=1}^N \left(\xi_{ij}^{(N)}-\rho_NW(x_i,x_j)\right) F(X_{N,j }(t),\eta_t(x_j)),\label{eq:def_Theta1}\\ \Theta_{t,i,2} &:= \dfrac{1}{N} \sum_{j=1}^N W(x_i,x_j)F(X_{N,j}(t),\eta_t(x_j)) - \int_I W(x_i,y)F(X_N(t,y),\eta_t(y))dy, \label{eq:def_Theta2}\\ \Theta_{t,i,3}(x) &:= \int_I \left( W(x_i,y) - W(x,y) \right)F(X_N(t,y),\eta_t(y))dy, \label{eq:def_Theta3}. \end{align} From the expression of $r_N$ in \eqref{eq:def_r_N}, we have then \begin{equation}\label{eq:def_r_N_aux} r_N(t)=\sum_{i=1}^N \left( \sum_{k=1}^3 \Theta_{t,i,k} \right)\mathbf{1}_{B_{N,i}} \\+ T_W\left( g_N(t) \right), \end{equation} and we can divide $\phi_N$ defined in \eqref{eq:def_phi_N} in several terms $\displaystyle \phi_N(t)=\sum_{k=0}^3 \phi_{N,k}(t)$ with \begin{align} \phi_{N,0}(t)&:= \int_{t_0}^t e^{(t-s)\mathcal{L}}T_W\left(g_N(s)\right)ds,\label{eq:def_phi_n0}\\ \phi_{N,k}(t)&:= \int_{t_0}^t e^{(t-s)\mathcal{L}}\sum_{i=1}^N \dfrac{1}{N} \Theta_{s,i,k}\mathbf{1}_{B_{N,i}}ds\quad \text{for }k\in \llbracket 1,3 \rrbracket, \label{eq:def_phi_nk}. 
\end{align} \subsection{Preliminary results} \begin{lem}\label{lem:tilde_YN_control} Denoting by $\tilde{Y}_N(s)(v):= Y_N(s)\left(\dfrac{\lceil Nv \rceil}{N}\right)$, we have \begin{equation}\label{eq:tilde_YN_control} \sup_{s\geq 0} \Vert \tilde{Y}_N(s) - Y_N(s)\Vert_2 \xrightarrow[N\to\infty]{}0. \end{equation} \end{lem} \begin{proof} A direct computation gives, for any $s\geq 0$, \begin{align*} \Vert \tilde{Y}_N(s) - Y_N(s)\Vert_2^2 &=\sum_j \int_{B_{N,j}} \left( X_{N,j}(s)-X_\infty(x_j)-X_N(s)(y)+X_\infty(y)\right)^2dy. \end{align*} By definition of $X_N(s)$ in \eqref{eq:def_UN}, $X_N=X_{N,j}$ on $B_{N,j}$ hence using Theorem \ref{thm:large_time_cvg_u_t} $$\Vert \tilde{Y}_N(s) - Y_N(s)\Vert_2^2=\sum_j \int_{B_{N,j}} \left(X_\infty(y) -X_\infty(x_j)\right)^2dy.$$ Then \eqref{eq:tilde_YN_control} is a straightforward consequence of the uniform continuity of $X_\infty$ on the compact $I$ (see Theorem \ref{thm:large_time_cvg_u_t}). It still holds under the hypotheses of Section \ref{S:extension} by decomposing the sum on each interval $C_k$. \end{proof} We will often use \begin{equation}\label{eq:tilde_YN_maj} \dfrac{1}{N} \sum_{j=1}^N\left\vert Y_N(s)(x_j)\right\vert^2 =\Vert \tilde{Y}_N(s)\Vert_2^2\leq \dfrac{1}{2} \left(1+\Vert \tilde{Y}_N(s)\Vert_2^4\right)\leq \dfrac{1}{2} \left(2+\Vert Y_N(s)\Vert_2^4\right), \end{equation} the last inequality being true for $N$ large enough (independently of $s$) using Lemma \ref{lem:tilde_YN_control}. \begin{lem}\label{lem:control_Rnk} Under Hypothesis \ref{hyp_globales}, \begin{equation} R^W_{N,k}\xrightarrow[N\to\infty]{}0, \quad k\in \{1,2\}, \quad S^W_{N}\xrightarrow[N\to\infty]{}0, \end{equation} where $R_{N,k}^W$ and $S_N^W$ are respectively defined in \eqref{eq:def_Rnk} and \eqref{eq:def_Sn}. \end{lem} \begin{proof} Fix $\varepsilon>0$. As $W$ is uniformly continuous on $I$, there exists $\eta>0$ such that $\vert W(x,y) - W(x,z)\vert \leq \epsilon$ for any $(x,y,z)\in I^3$ with $\vert y-z\vert \leq \eta$. 
Then, for $N$ large enough (such that $\frac{1}{N}\leq \eta$, we have directly that $R_{N,1}^W\leq \epsilon$ and $R_{N,2}^W\leq \epsilon$ hence the result. We can do the same for $S_N^W$. \end{proof} \begin{lem}\label{lem:drift_term_quadratic} Under Hypothesis \ref{hyp_globales}, for any $t> t_0\geq 0$, \begin{equation}\label{eq:maj_phi_N0} \left\Vert \phi_{N,0}(t) \right\Vert_2\leq C_{F,W} \int_{t_0}^t e^{-\gamma(t-s)} \left( \Vert Y_N(s) \Vert_2^2 + \delta_s + \delta_s^2\right)ds. \end{equation} \end{lem} \begin{proof}[Proof of Lemma \ref{lem:drift_term_quadratic}] By Proposition \ref{prop:operateur_L} we have $\left\Vert \phi_{N,0}(t)\right\Vert_2\leq \int_{t_0}^t e^{-\gamma(t-s)} \left\Vert T_W g_N(s) \right\Vert_2ds.$ As for any $x\in I$, $\left\vert T_W g_N(s) (x) \right\vert \leq \int_I W(x,y) \left\vert g_N(s)(y)\right\vert dy, $ and as \begin{align*} \vert g_N(s)(y)\vert &\leq \left\Vert \partial_x^2 F \right\Vert_\infty Y_N(t)(y)^2 + \left\Vert \partial_\eta^2 F\right\Vert_\infty \left\vert\eta_t(y)-\eta_\infty(y)\right\vert^2\\ &\quad+2\left\Vert \partial_{x,\eta}^2 F \right\Vert_\infty \left\vert Y_N(t)(y) \right\vert \left\vert \eta_t(y)-\eta_\infty(y)\right\vert + \left\Vert \partial_\eta F\right\Vert_\infty \left\vert \eta_t(y)-\eta_\infty(y)\right\vert, \end{align*} with Hypothesis \ref{hyp_globales} it gives \begin{align*} \Vert T_W g_N(s) \Vert_2^2 &= \int_I \left( \int_I W(x,y) g_N(s)(y) dy\right)^2 dx\\ &\leq C_F \int_I \left( \int_I W(x,y) \left( Y_N(s)(y)^2 + \delta_s^2+Y_N(s)(y)\delta_s+\delta_s\right)dy\right)^2 dx\\ &\leq C_{F,W} \left( \Vert Y_N(s) \Vert_2^4 + \Vert Y_N(s) \Vert_2^2 \delta_s^2 + \delta_s^2 + \delta_s^4\right)\\ &\leq C_{F,W}\left( \dfrac{3}{2} \Vert Y_N(s) \Vert_2^4 + \dfrac{3}{2} \delta_s^2 + \delta_s^4\right) \end{align*} as $W$ is bounded. 
We obtain then, as $\sqrt{a+b}\leq \sqrt{a}+\sqrt{b}$, $$\left\Vert \phi_{N,0}(t) \right\Vert_2\leq C_{F,W} \int_{t_0}^t e^{-\gamma(t-s)} \left( \Vert Y_N(s) \Vert_2^2 + \Vert Y_N(s) \Vert_2 \delta_s + \delta_s + \delta_s^2\right)ds.$$ Then \eqref{eq:maj_phi_N0} follows as $\left\Vert Y_N(s) \right\Vert_2 \leq \dfrac{1}{2} \left(1+\left\Vert Y_N(s)\right\Vert_2^2\right)$ and $\sup_s \delta_s <\infty$. \end{proof} \begin{lem}\label{lem:drift_term_phi_N1} Under Hypotheses \ref{hyp_globales} and \ref{hyp:scenarios}, $\mathbb{P}$-almost surely for $N$ large enough and for any $t> t_0\geq 0$, \begin{equation}\label{eq:maj_phi_N1} \Vert \phi_{N,1}(t)\Vert_2\leq C_F \int_{t_0}^t e^{-(t-s)\gamma} \left\Vert Y_N(s)\right\Vert_2^2ds + G_{N,1}, \end{equation} where $G_{N,1}=G_{N,1}(\xi)$ is explicit in $N$ and tends to 0 as $N\to\infty $. Moreover, if we suppose $F$ bounded, we have a better bound \begin{equation}\label{eq:maj_phi_N1_B} \sup_{t>0}\left\Vert \phi_{N,1}(t) \right\Vert_2\leq \dfrac{C_{F}}{\sqrt{N\rho_N^2}}. \end{equation} \end{lem} \begin{proof}[Proof of Lemma \ref{lem:drift_term_phi_N1}] Proposition \ref{prop:operateur_L} gives that \begin{equation}\label{eq:lem_phi_N1_aux} \Vert \phi_{N,1}(t)\Vert_2\leq K\int_{t_0}^t e^{-(t-s)\gamma} \Vert \gamma_N(s) \Vert_2ds \end{equation} with \begin{equation} \label{eq:gammaN} \gamma_N(s):=\sum_{i=1}^N \Theta_{i,s,1}\mathbf{1}_{B_{N,i}}=\sum_{i,j=1}^N \dfrac{1}{N\rho_N} \overline{\xi_{ij}} F(X_{N,j}(s),\eta_s(x_j))\mathbf{1}_{B_{N,i}}. \end{equation} where we have used the notation \begin{equation} \label{eq:overline_xi} \overline{\xi_{ij}} =\xi_{ij}^{(N)}-W_N(x_i,x_j), \end{equation} Forgetting about the term $F(X_{N,j}(s),\eta_s(x_j))$ in \eqref{eq:gammaN}, $\gamma_N$ is essentially an empirical mean of the independent centered variables $ \overline{\xi_{ij}}$ and thus should be small as $N\to\infty$. One difficulty here is that concentration bounds (e.g. 
Bernstein inequality) for weighted sums such as $\sum_j \overline{\xi_{ij}} u_{i,j}$ (for some deterministic fixed weight $u_{i,j}$) are not directly applicable, as $u_{i,j}=F(X_{N,j}(s),\eta_s(x_j))\mathbf{1}_{B_{N,i}}$ depends in a highly nontrivial way on the variables $\xi_{i,j}^{(N)}$ themselves. A strategy would be to use Grothendieck inequality (see Theorem~\ref{thm:grothendieck}). We refer here to \cite{Coppini2022,Coppini_Lucon_Poquet2022} where the use of such Grothendieck inequality (and extensions) has been implemented in a similar context of interacting diffusions on random graphs. However here, a supplementary difficulty lies in the fact that $F$ need not be bounded (recall that a particular example considered here concerns the linear case where $F(x, \eta)=x + \mu$). Hence the application of Grothendieck inequality is not straightforward when $F$ is unbounded. For this reason, we give below two different controls on $ \gamma_N$: a general one, without assuming that $F$ and a second (sharper) one, when $F$ is bounded (using Grothendieck inequality). In the first case, we get around the difficulty of unboundedness of $F$ by introducing $F(X_\infty(x_j),\eta_\infty(x_j))$ which is bounded, since $X_{\infty}$ is. First begin with the general control on $\gamma_N$: we can write \begin{align}\label{eq:lem_phiN1_aux} \gamma_N(s)&= \sum_{i,j=1}^N \dfrac{1}{N\rho_N} \overline{\xi_{ij}} \left(F(X_{N,j}(s),\eta_s(x_j))-F(X_\infty(x_j),\eta_\infty(x_j))\right)\mathbf{1}_{B_{N,i}}\notag\\&\quad+ \sum_{i,j=1}^N \dfrac{1}{N\rho_N} \overline{\xi_{ij}} F(X_\infty(x_j),\eta_\infty(x_j))\mathbf{1}_{B_{N,i}}=:\gamma_{N,1}(s) + \gamma_{N,2}(s). 
\end{align} Denoting by $\Delta F_j:=F(X_{N,j}(s),\eta_s(x_j))-F(X_\infty(x_j),\eta_\infty(x_j))$, we have, as $\langle \mathbf{1}_{B_{N,i}}, \mathbf{1}_{B_{N,i'}}\rangle= \dfrac{\mathbf{1}_{i=i'}}{N}$ and with $\displaystyle S_{jj'}:= \dfrac{1}{N}\sum_{i=1}^N \overline{\xi_{ij}}~ \overline{\xi_{ij'}}$, $\displaystyle \left\Vert \gamma_{N,1}(s)\right\Vert_2^2 = \dfrac{1}{N^2\rho_N^2} \sum_{j,j'=1}^N \Delta F_j \Delta F_{j'} \dfrac{1}{N}S_{jj'}$. Define the following quantity $S_{N}^\text{max}:= \sup_{1\leq j\neq j'\leq N} \left\vert S_{jj'} \right\vert$. The purpose of Lemma \ref{lem:maj_S} is exactly to control $S_{N}^\text{max}$, see in particular \eqref{eq:maj_SNMAX}. We have \begin{align*} \left\Vert \gamma_{N,1}(s)\right\Vert_2^2 &= \left(\dfrac{1}{N^2\rho_N^2} \sum_{j\neq j'=1}^N \Delta F_j \Delta F_{j'} \dfrac{S_{jj'}}{S_{N}^\text{max}}\right)S_{N}^\text{max} + \dfrac{1}{N^3\rho_N^2} \sum_{i,j=1}^N \Delta F_j^2 \overline{\xi_{ij}}^2\\ &\leq S_{N}^\text{max} \left( \dfrac{1}{N\rho_N^2} \sum_{j=1}^N \left\vert \Delta F_j\right\vert^2\right) + \dfrac{1}{N^2\rho_N^2} \sum_{j=1}^N \Delta F_j^2. \end{align*} As $\left\vert \Delta F_j \right\Vert \leq \Vert F \Vert_L \left( \left\vert Y_N(s)(x_j)\right\vert + \delta_s\right)$, we obtain as $s\mapsto\delta_s$ is bounded $$\left\Vert \gamma_{N,1}(s)\right\Vert_2^2 \leq C_F \left( \left\Vert \tilde{Y}_N(s)\right\Vert_2^2 +1\right)\left(\dfrac{S_{N}^\text{max}}{\rho_N^2} +\dfrac{1}{N\rho_N^2}\right),$$ hence as $\left\Vert Y_N(s) \right\Vert_2 \leq \dfrac{1}{2} \left(1+\left\Vert Y_N(s)\right\Vert_2^2\right)$ and using \eqref{eq:tilde_YN_maj}, \begin{equation}\label{eq:lem_phiN1_gamma1} \left\Vert \gamma_{N,1}(s)\right\Vert_2^2 \leq C_F \left( \left\Vert Y_N(s)\right\Vert_2^4 +1\right)\left(\dfrac{S_{N}^\text{max}}{\rho_N^2} +\dfrac{1}{N\rho_N^2}\right). 
\end{equation} For the second term of \eqref{eq:lem_phiN1_aux}, we have \begin{align*} \Vert \gamma_{N,2}(s)\Vert_2^2 &= \dfrac{1}{N} \sum_{i=1}^N \left( \dfrac{1}{N\rho_N} \sum_{j=1}^N \overline{\xi_{ij}} F(X_\infty(x_j),\eta_\infty(x_j)) \right)^2\\ &= \dfrac{1}{N^3\rho_N^2} \sum_{i=1}^N \sum_{j,j'=1}^N \overline{\xi_{ij}}~ \overline{\xi_{ij'}} F(X_\infty(x_j),\eta(x_j)) F(X_\infty(x_{j'}),\eta_\infty(x_{j'})). \end{align*} Let $\displaystyle \alpha_{i,j,j'}:=\dfrac{F(X_\infty(x_j),\eta_\infty(x_j)) F(X_\infty(x_{j'}),\eta_\infty(x_{j'}))}{\Vert F(X_\infty, \eta_\infty)\Vert_\infty^2}\in [0,1]$, $\displaystyle R_k:= \sum_{ \substack{i,j,j'=1\\j\neq j'}}^k \alpha_{i,j,j'} \overline{\xi_{ij}} ~\overline{\xi_{ij'}}$, and $\mathcal{F}_k=\sigma\left( \xi_{ij}, 1 \leq i,j\leq k\right)$ (with respect to $\mathbb{P}$, i.e. the realisation of the graphs). We have then $\displaystyle\Vert \gamma_{N,2}(s)\Vert_2^2 = \dfrac{C_{F,X_\infty}}{N^3\rho_N^2} \sum_{i,j=1}^N \alpha_{i,j,j} \overline{\xi_{ij}}^2 +\dfrac{C_{F,X_\infty}}{N^3\rho_N^2} R_N \leq \dfrac{C_{F,X_\infty}}{N\rho_N^2} +\dfrac{C_{F,X_\infty}}{N^3\rho_N^2} R_N$. 
We show next that $(R_N)$ is a martingale: for any $k\geq 1$ (note that $R_1=0$): \begin{multline*} \mathbb{E}\left[ R_{k+1} \vert \mathcal{F}_k\right] =\mathbb{E}\left[ \left.\sum_{ \substack{i,j,j'=1\\j\neq j'}}^{k+1} \alpha_{i,j,j'} \overline{\xi_{ij}}~ \overline{\xi_{ij'}} \right\vert \mathcal{F}_k\right] = R_k + \mathbb{E}\left[ \left.\sum_{\substack{j,j'=1\\j\neq j'}}^{k} \alpha_{k+1,j,j'} \overline{\xi_{k+1,j}} ~\overline{\xi_{k+1,j'}} \right\vert \mathcal{F}_k\right]\\+ \mathbb{E}\left[ \left.\sum_{i,j'=1}^{k} \alpha_{i,k+1,j'} \overline{\xi_{i,k+1}}~ \overline{\xi_{ij'}} \right\vert \mathcal{F}_k\right]+ \mathbb{E}\left[ \left.\sum_{i,j=1}^{k} \alpha_{i,j,k+1} \overline{\xi_{i,k+1}} ~\overline{\xi_{ij}} \right\vert \mathcal{F}_k\right] \\+ \mathbb{E}\left[ \left.\sum_{j=1}^{k} \left(\alpha_{k+1,k+1,j}+\alpha_{k+1,j,k+1}\right) \overline{\xi_{k+1,k+1}} ~\overline{\xi_{k+1,j}} \right\vert \mathcal{F}_k\right]=R_k, \end{multline*} as $\mathbb{E}\left[\overline{\xi_{ij}} ~\overline{\xi_{ij'}} \vert \mathcal{F}_k\right]=0$ if $j\neq j'$ and at least one of the indexes $i,j,j'$ is equal to $k+1$ by independence of the family of random variables $\left(\xi_{ij}\right)_{i,j}$. Similarly, we have \begin{align*} \Delta R_k &= R_{k+1}-R_k=\sum_{\substack{j,j'=1\\j\neq j'}}^{k} \alpha_{k+1,j,j'} \overline{\xi_{k+1,j}} ~\overline{\xi_{k+1,j'}} +\sum_{\substack{1\leq i\leq k+1\\ 1\leq j \leq k}} \left(\alpha_{i,j,k+1}+\alpha_{i,k+1,j'} \right) \overline{\xi_{i,k+1}}~ \overline{\xi_{ij}} . \end{align*} As each $\vert\overline{\xi_{i,j}}\vert\leq 1$ and $\vert\alpha_{i,j,k}\vert\leq 1$, it gives $\vert\Delta R_k \vert\leq 3k^2+k$. 
Theorem \ref{thm:ineg_AZ-HO} gives then that \begin{align*} \mathbb{P}\left( \left\vert \dfrac{C_{F,X_\infty}}{N^3\rho_N^2} R_N \right\vert \geq x \right) &= \mathbb{P}\left( \vert R_N \vert \geq \dfrac{xN^3\rho_N^2}{C_{F,X_\infty}} \right)\\ &\leq 2 \exp \left( -\dfrac{\left( \dfrac{xN^3\rho_N^2}{C_{F,X_\infty}}\right)^2}{2\sum_{k=1}^N \left(3k^2+k\right)^2} \right)\\ &= 2 \exp \left(- \dfrac{x^2 N^6\rho_N^4}{C_{F,X_\infty}^2P(N)}\right), \end{align*} with $\displaystyle P(N)= 2 N (N+1) \left( \dfrac{9}{5}\left(N+\dfrac{1}{2}\right)\left(N^2+N-\dfrac{1}{3}\right)+\dfrac{3N(N+1)}{2}+\dfrac{2N+1}{6}\right) \sim_{N\to\infty} \dfrac{18}{5} N^5$. For the choice $x^2= \dfrac{C_{F,X_\infty}^2P(N)}{N^{6-2\tau}\rho_N^4}$ with $\tau$ in \eqref{eq:dilution}, ($x^2\propto \dfrac{1}{N^{1-2\tau}\rho_N^4}$) it gives $$ \mathbb{P}\left( \left\vert \dfrac{C_{F,X_\infty}}{N^3} R_N \right\vert \geq \sqrt{\dfrac{C_{F,X_\infty}^2P(N)}{N^{6-2\tau}\rho_N^4}} \right) \leq 2 \exp \left(- N^{2\tau}\right),$$ which is summable hence by Borel-Cantelli Lemma, there exists $\mathcal{O}\in\mathcal{F}$ such that $\mathbb{P}(\mathcal{O})=1$ and on $\mathcal{O}$, there exists $\widetilde{N}<\infty$ such that if $N\geq \widetilde{N}$, $\left\vert \dfrac{C_{F,X_\infty}}{N^3} R_N \right\vert \leq \sqrt{\dfrac{C_{F,X_\infty}^2P(N)}{N^{6-2\tau}\rho_N^4}} \propto \dfrac{1}{N^{1/2-\tau}\rho_N^2}$, hence $\mathbb{P}$-a.s. for $N$ large enough \begin{equation}\label{eq:lem_phiN1_gamma2} \left\Vert \gamma_{N,2}(s)\right\Vert_2^2 \leq C\left( \dfrac{1}{N\rho_N^2} + \dfrac{1}{N^{1-2\tau}\rho_N^4}\right) \end{equation} Coming back to \eqref{eq:lem_phiN1_aux}, combining \eqref{eq:lem_phiN1_gamma1} and \eqref{eq:lem_phiN1_gamma2} and a control of $S_N^{\text{max}}$ from Lemma \ref{lem:maj_S}, we have $\mathbb{P}$-a.s. 
for $N$ large enough $$ \left\Vert \gamma_{N}(s)\right\Vert_2^2 \leq C_F \left( \left\Vert Y_N(s)\right\Vert_2^4 +1\right)\left(\dfrac{1}{N^{1/2-\tau}\rho_N^2} +\dfrac{1}{N\rho_N^2}\right) + C_F\left( \dfrac{1}{N\rho_N^2} + \dfrac{1}{N^{1-2\tau}\rho_N^4}\right),$$ hence taking the square root and using \eqref{eq:lem_phi_N1_aux}, $$\Vert \phi_{N,1}(t)\Vert_2\leq C_F \int_{t_0}^t e^{-(t-s)\gamma} \left\Vert Y_N(s)\right\Vert_2^2ds + G_{N,1},$$ where $G_{N,1}=C_F\left( \dfrac{1}{N\rho_N^2} + \dfrac{1}{N^{1-2\tau}\rho_N^4}+\dfrac{1}{N^{1/2-\tau}\rho_N^2}\right)\to 0$ under Hypothesis \ref{hyp:scenarios}. Let us now turn to the sharper control on $\gamma_N$ defined in \eqref{eq:gammaN} when $F$ is bounded. Coming back to \eqref{eq:lem_phiN1_aux}, we have \begin{align*} \Vert \gamma_N(s)\Vert_2^2 &= \int \left( \sum_{i,j=1}^N \dfrac{1}{N\rho_N}\overline{\xi_{ij}} F\left(X_{N,j}(s),\eta_s(x_j)\right)\mathbf{1}_{B_{N,i}}(x)\right)^2 dx\\ &= \dfrac{1}{N} \sum_{i=1}^N \left( \sum_{j=1}^N \dfrac{1}{N\rho_N} \overline{\xi_{ij}} F\left(X_{N,j}(s),\eta_s(x_j)\right) \right)^2\\ &= \dfrac{1}{N^3\rho_N^2} \sum_{i,j,k=1}^N \overline{\xi_{ij}}~\overline{\xi_{ik}} F\left(X_{N,j}(s),\eta_s(x_j)\right)F\left(X_{N,k}(s),\eta_s(x_k)\right)\\ &= \left(\dfrac{ \Vert F \Vert_\infty}{N\rho_N}\right)^2\dfrac{1}{N} \sum_{j,k=1}^N \alpha_{jk} F_jF_k \end{align*} with $\alpha_{jk}:=\sum_{i=1}^N\overline{\xi_{ij}}~\overline{\xi_{ik}}$ and $F_j:=\dfrac{F\left(X_{N,j}(s),\eta_s(x_j)\right)}{\Vert F \Vert_\infty}$. Grothendieck inequality (see Theorem \ref{thm:grothendieck}) gives then that there exists $K>0$ such that \begin{align*} \Vert \gamma_N(s)\Vert_2^2 &\leq K \dfrac{1}{N} \left(\dfrac{ \Vert F \Vert_\infty}{N\rho_N}\right)^2 \sup_{s_j,t_k = \pm 1}\sum_{j,k} \alpha_{jk} s_j t_k \\ &\leq \dfrac{C_F}{N^3\rho_N^2} \sup_{s_j,t_k = \pm 1}\sum_{i,j,k=1}^N \overline{\xi_{ij}}~\overline{\xi_{ik}}s_j t_k. 
\end{align*} Fix some vectors of signs $s=(s_i)_{1\leq i \leq N}$ and $t=(t_j)_{1\leq j \leq N}$. Let $A=\left( \overline{\xi_{ij}} \right)_{1\leq i,j \leq N}$, then $\displaystyle\sum_{i,j,k=1}^N \overline{\xi_{ij}}~\overline{\xi_{ik}}s_j t_k = \langle t, A^*As\rangle$ where $\langle,\rangle$ denotes the scalar product in $\mathbb{R}^N$ and $A^*$ the transpose of $A$. As for any sign vector $t$, $\Vert t \Vert^2= \sum_{k=1}^N t_k^2 = N$, and $\Vert A^* A \Vert = \Vert A \Vert_{\text{op}}^2$, we obtain as $\vert \langle t, A^*As\rangle \vert \leq \Vert t \Vert \Vert A^* A s \Vert \leq N \Vert A \Vert_{\text{op}}^2$: $$ \Vert \gamma_N(s)\Vert_2^2 \leq \dfrac{C_F}{N^3\rho_N^2} N \Vert A \Vert_{\text{op}}^2 =\dfrac{C_F}{N^2\rho_N^2} \Vert A \Vert_{\text{op}}^2.$$ From Theorem \ref{thm:tao2012_upper_tail}, there exist $C_a$ and $C_b$ positive constants such that for any $x\geq C_a$, $$\mathbb{P} \left( \Vert A \Vert_{\text{op}} > x\sqrt{N} \right) \leq C_a \exp \left( -C_bxN \right).$$ We apply it for $x=C_a$, hence, by Borel-Cantelli Lemma as $\exp(-CN)$ is summable, there exists $\widetilde{\mathcal{O}}\in\mathcal{F}$ such that $\mathbb{P}(\widetilde{\mathcal{O}})=1$ and on $\widetilde{\mathcal{O}}$, there exists $\widetilde{N}<\infty$ such that if $N\geq \widetilde{N}$, $\Vert A \Vert_{\text{op}} \leq C_a\sqrt{N}$. We obtain then that $$\Vert \gamma_N(s)\Vert_2^2 \leq \dfrac{C_F}{N\rho_N^2}$$ $\mathbb{P}$-a.s. for $N$ large enough, which concludes the proof in the bounded case with \eqref{eq:lem_phi_N1_aux} as $\int_{t_0}^t e^{-(t-s)\gamma}ds\leq \dfrac{1}{\gamma}$. \end{proof} \begin{lem}\label{lem:drift_term_phi_N2} Under Hypothesis \ref{hyp_globales}, for any $t>t_0\geq0$, \begin{equation}\label{eq:maj_phi_N2} \Vert \phi_{N,2}(t)\Vert_2 \leq C_{F,X_\infty,\eta,W} \int_{t_0}^t e^{-(t-s)\gamma} \left(\left\Vert Y_N(s)\right\Vert_2^2+\delta_s\right)ds + G_{N,2}, \end{equation} where $G_{N,2}$ is explicit in $N$ and tends to 0 as $N\to\infty $. 
Moreover, if we suppose $F$ bounded, we have \begin{equation}\label{eq:maj_phi_N2_B} \Vert \phi_{N,2}(t)\Vert_2 \leq C \left(\int_{t_0}^t e^{-(t-s)\gamma} \delta_sds + \sqrt{R_{N,2}^W} +\dfrac{1}{N}\right),. \end{equation} with $R_{N,2}^W$ defined in \eqref{eq:def_Rnk}. \end{lem} \begin{proof}[Proof of Lemma \ref{lem:drift_term_phi_N2}] We have, with $\Theta_{s,i,2}$ defined in \eqref{eq:def_Theta2}, $\Theta_{s,i,2}\leq e_{s,i,1}+e_{s,i,2}+e_{s,i,3}$ with \begin{align*} e_{s,i,1}&:= \sum_{j=1}^N \int_{B_{N,j}} \left( W(x_i,x_j)-W(x_i,y)\right) \left( F \left( X_N(s,x_j),\eta_s(x_j)\right) - F \left( X_\infty(x_j),\eta_s(x_j)\right)\right)dy\\ e_{s,i,2}&:= \sum_{j=1}^N \int_{B_{N,j}}\left( W(x_i,x_j)-W(x_i,y)\right)F\left(X_\infty(x_j),\eta_s(x_j)\right)dy\\ e_{s,i,3}&:= \sum_{j=1}^N \int_{B_{N,j}} W(x_i,y)\left( F \left( X_N(s,x_j),\eta_s(x_j)\right) - F \left( X_N(s,x_j),\eta_s(y)\right)\right)dy. \end{align*} We upper-bound each term. We have as $F$ is Lipschitz continuous \begin{align*} e_{s,i,1}&\leq \sum_{j=1}^N \left\vert F \left( X_N(s,x_j),\eta_s(x_j)\right) - F \left( X_\infty(x_j),\eta_s(x_j)\right)\right\vert \left\vert \int_{B_{N,j}} \left( W(x_i,x_j)-W(x_i,y)\right) dy\right\vert\\ &\leq \sum_{j=1}^N \Vert F \Vert_L \left\vert Y_N(s)(x_j)\right\vert \left\vert \int_{B_{N,j}} \left( W(x_i,x_j)-W(x_i,y)\right) dy\right\vert, \end{align*} which is upper-bounded by $$ C_F \left( \sum_{j=1}^N \left\vert \int_{B_{N,j}} \left( W(x_i,x_j)-W(x_i,y)\right) dy\right\vert\right)^\frac{1}{2}\left( \sum_{j=1}^N \left\vert Y_N(s)(x_j)\right\vert^2 \left\vert \int_{B_{N,j}} \left( W(x_i,x_j)-W(x_i,y)\right) dy\right\vert \right)^\frac{1}{2}$$ by discrete Jensen's inequality. 
We have $N\left\vert \int_{B_{N,j}} \left( W(x_i,x_j)-W(x_i,y)\right) dy\right\vert \leq C$ as $W$ is bounded, hence \begin{align*} e_{s,i,1}&\leq C_{F,W}\left( \sum_{j=1}^N \left\vert \int_{B_{N,j}} \left( W(x_i,x_j)-W(x_i,y)\right) dy\right\vert\right)^\frac{1}{2}\left(\dfrac{1}{N} \sum_{j=1}^N \left\vert Y_N(s)(x_j)\right\vert^2 \right)^\frac{1}{2}\\ &\leq C_F \left( \sum_{j=1}^N \left\vert \int_{B_{N,j}} \left( W(x_i,x_j)-W(x_i,y)\right) dy\right\vert\right)^\frac{1}{2}\left\Vert \tilde{Y}_N(s)\right\Vert_2. \end{align*} We have then \begin{align*} \dfrac{1}{N} \sum_{i=1}^N e_{s,i,1}^2 &\leq \dfrac{C_F}{N} \sum_{i=1}^N \left( \sum_{j=1}^N \left\vert \int_{B_{N,j}} \left( W(x_i,x_j)-W(x_i,y)\right) dy\right\vert\right)\left\Vert \tilde{Y}_N(s)\right\Vert_2^2\\ &\leq C_F R_{N,1}^W\left\Vert \tilde{Y}_N(s)\right\Vert_2^2, \end{align*} where $R_{N,1}^W$ is defined in \eqref{eq:def_Rnk}. For the second term, we have as $x\mapsto \sup_{s}F(X_\infty(x),\eta_s(x))$ is bounded \begin{align*} \dfrac{1}{N}\sum_{i=1}^N e_{s,i,2}^2 &= \dfrac{1}{N} \sum_{i=1}^N \left( \sum_{j=1}^N \int_{B_{N,j}} \left( W(x_i,x_j)-W(x_i,y)\right) F\left( X_\infty(x_j),\eta_s(x_j)\right)dy \right)^2\\ &\leq \dfrac{C_F}{N} \sum_{i=1}^N \sum_{j=1}^N \int_{B_{N,j}} \left\vert W(x_i,x_j)-W(x_i,y)\right\vert^2 dy \leq C_F R_{N,2}^W, \end{align*} where $R_{N,2}^W$ is defined in \eqref{eq:def_Rnk}. For the third term, as $F$ is Lipschitz continuous \begin{align*} e_{s,i,3}&\leq \sum_{j=1}^N \int_{B_{N,j}} W(x_i,y) \Vert F \Vert_L \vert \eta_s(x_j)-\eta_s(y)\vert dy\\ &\leq \sum_{j=1}^N \int_{B_{N,j}} W(x_i,y) \Vert F \Vert_L \left( \vert \eta_s(x_j)-\eta_\infty(x_j)\vert + \vert \eta_\infty(x_j)-\eta_\infty(y)\vert\right) dy\\ &\leq C_{F,X,W} \left( \delta_s + \dfrac{1}{N}\right) . 
\end{align*} We obtain then with \eqref{eq:tilde_YN_maj} \begin{align*} \dfrac{1}{N}\sum_{i=1}^N \Theta_{s,i,2}^2 &\leq \dfrac{3}{N} \sum_{i=1}^N \left( e_{s,i,1}^2+e_{s,i,2}^2+e_{s,i,3}^2\right)\\ &\leq C_{F,X_\infty,X,W} \left(R_{N,1}^W\left( 1 + \left\Vert Y_N(s)\right\Vert_2^4\right)+ R_{N,2}^W + \delta_s^2 +\dfrac{1}{N^2} \right). \end{align*} With \eqref{eq:def_phi_nk} and Proposition \ref{prop:operateur_L}, $\Vert \phi_{N,2}(t)\Vert_2 \leq \int_{t_0}^t e^{-(t-s)\gamma}\Vert \sum_{i=1}^N\Theta_{s,i,2}\mathbf{1}_{B_{N,i}}\Vert_2ds,$ and as $\Vert \sum_{i=1}^N\Theta_{s,i,2}\mathbf{1}_{B_{N,i}}\Vert_2^{2} = \dfrac{1}{N} \sum_{i=1}^N \Theta_{s,i,2}^2$, the result follows with $$G_{N,2}=\sqrt{ R_{N,1}^W + R_{N,2}^W} +\dfrac{1}{N},$$ and Lemma \ref{lem:control_Rnk}. When $F$ is bounded, similarly we show that $$\dfrac{1}{N}\sum_{i=1}^N \Theta_{s,i,2}^2 \leq C_{F,X_\infty,\eta,W} \left( R_{N,2}^W+ \delta_s^2 +\dfrac{1}{N^2} \right),$$ hence the result. \end{proof} \begin{lem}\label{lem:drift_term_phi_N3} Under Hypothesis \ref{hyp_globales}, for any $t> t_0\geq 0$, \begin{equation}\label{eq:maj_phi_N3} \Vert \phi_{N,3}(t)\Vert_2 \leq C_{F,X_\infty,W}\int_{t_0}^t e^{-(t-s)\gamma} \left(\left\Vert Y_N(s)\right\Vert_2^2+\delta_s\right)ds + G_{N,3}, \end{equation} where $G_{N,3}$ is explicit in $N$ and tends to 0 as $N\to\infty $. Moreover, if we suppose $F$ bounded, we have \begin{equation}\label{eq:maj_phi_N3_B} \sup_{t\geq 0} \Vert \phi_{N,3}(t)\Vert_2 \leq \sqrt{S_N^W}, \end{equation} where $S_N^W$ is defined in \eqref{eq:def_Sn}. 
\end{lem} \begin{proof}[Proof of Lemma \ref{lem:drift_term_phi_N3}] We have \begin{align*} \Vert \sum_{i=1}^N\Theta_{s,i,3}\mathbf{1}_{B_{N,i}}\Vert_2^{2} &= \int_I \left( \sum_{i=1}^N\Theta_{s,i,3}(x)\mathbf{1}_{B_{N,i}}(x)\right)^2 dx\\ &= \sum_{i=1}^N \int_{B_{N,i}} \left( \int_I \left(W(x_i,y)-W(x,y)\right)F\left(X_N(s,y),\eta_s(y)\right)dy\right)^2 dx\\ &\leq \sum_{i=1}^N \int_{B_{N,i}} \left( \int_I \left(W(x_i,y)-W(x,y)\right)^2dy\right)\left(\int_I\left(F\left(X_N(s,y),\eta_s(y)\right) \right)^2dy\right) dx, \end{align*} with the Cauchy--Schwarz inequality. We can recognize $S_N^W$ defined in \eqref{eq:def_Sn}, and we have that, as $F$ is Lipschitz continuous and $y\mapsto F\left(X_\infty(y),\eta_\infty(y) \right)$ is bounded, \begin{align*} &\int_IF\left(X_N(s,y),\eta_s(y)\right)^2dy\\ &\leq \int_I\left(F\left(X_N(s,y),\eta_s(y)\right)- F\left(X_\infty(y),\eta_s(y)\right) \right)^2dy + \int_I F\left(X_\infty(y),\eta_s(y) \right)^2dy\\ &\leq \Vert F \Vert_L^2 \int_I Y_N(s)(y)^2 dy+\Vert F(X_\infty,\eta_\infty)\Vert_\infty^2\leq C_{F,W}\left( \Vert Y_N(s)\Vert_2^2 +1\right)\leq C_{F,W}\left( \Vert Y_N(s)\Vert_2^4 +1\right). \end{align*} As before, \eqref{eq:def_phi_nk} and Proposition \ref{prop:operateur_L} give that $\Vert \phi_{N,3}(t)\Vert_2 \leq \int_{t_0}^t e^{-(t-s)\gamma}\Vert \sum_{i=1}^N\Theta_{s,i,3}\mathbf{1}_{B_{N,i}}\Vert_2ds$ and $\Vert \sum_{i=1}^N\Theta_{s,i,3}\mathbf{1}_{B_{N,i}}\Vert_2^{2} \leq C_{F,W}S_N^W\left( \Vert Y_N(s)\Vert_2^4 +1\right)$ hence the result with $G_{N,3}= C_{F,W} \sqrt{R_{N,3}}$ and Lemma \ref{lem:control_Rnk}. 
When $F$ is bounded, we directly have $\Vert \sum_{i=1}^N\Theta_{s,i,3}\mathbf{1}_{B_{N,i}}\Vert_2^{2} \leq S_N^W$ hence \eqref{eq:maj_phi_N3_B} as $\int_{t_0}^t e^{-(t-s)\gamma}ds\leq \dfrac{1}{\gamma}.$ \end{proof} \subsection{Proof of Proposition \ref{prop:drift_term}} Proposition \ref{prop:drift_term} is then a direct consequence of \eqref{eq:def_phi_n0} and \eqref{eq:def_phi_nk}, of the controls given by Lemmas \ref{lem:drift_term_quadratic}, \ref{lem:drift_term_phi_N1}, \ref{lem:drift_term_phi_N2} and \ref{lem:drift_term_phi_N3}, with $G_N=G_{N,1}+G_{N,2}+G_{N,3}$, and of Lemma \ref{lem:control_Rnk} to have $G_N\to 0$. \section{About the finite time behavior}\label{S:finite} In this section, we prove Proposition \ref{prop:finite_time}. \subsection{Main technical results} In the following, we denote by $\widehat{Y}_N(t):=X_N(t)-X_t$. \begin{proof}[Proof of Proposition \ref{prop:finite_time}] Let $t\leq T$. Recall the definition of $X_N(t)$ in \eqref{eq:def_UN} and $X_t$ in \eqref{eq:def_utx}. Proceeding exactly as in the proof of Proposition \ref{prop:termes_sys_micros}, and recalling the definition of $M_N(t)$ in \eqref{eq:def_M_N}, we have \begin{multline*} d\widehat{Y}_N(t)=-\alpha \widehat{Y}_N(t)dt + dM_N(t) + \sum_{i,j=1}^N \mathbf{1}_{B_{N,i}}\dfrac{w_{ij}}{N} F\left(X_{N,j}(t),\eta_t(x_j)\right)dt - T_W F\left(X_t,\eta_t\right)dt\\ =-\alpha \widehat{Y}_N(t)dt + dM_N(t) + \sum_{k=1}^3 \sum_{i=1}^N \Theta_{t,i,k}\mathbf{1}_{B_{N,i}}dt + T_W\left( F\left(X_{N}(t),\eta_t\right)-F\left(X_t,\eta_t\right) \right)dt \end{multline*} with the notations introduced in \eqref{eq:def_Theta1}--\eqref{eq:def_Theta3}. 
It gives then, as $\widehat{Y}_N(0)=0$, $$\widehat{Y}_N(t)=\int_0^t e^{-\alpha(t-s)}\widehat{r}_N(s)ds + \int_0^t e^{-\alpha(t-s)}dM_N(s)=:\widehat{\phi}_N(t) + \widehat{\zeta}_N(t)$$ with $$\widehat{r}_N(t)=\sum_{k=1}^3 \sum_{i=1}^N \Theta_{t,i,k}\mathbf{1}_{B_{N,i}} + T_W\left( F\left(X_{N}(t),\eta_t\right)-F\left(X_t,\eta_t\right) \right).$$ Note that we obtain a similar expression as for $Y_N$ in Proposition \ref{prop:termes_sys_micros}, but with $e^{-\alpha t}$ instead of the semi-group $e^{t\mathcal{L}}$. We then use the two following results, similar to Propositions \ref{prop:noise_perturbation} and \ref{prop:drift_term}. \begin{prop}\label{prop:noise_perturbation_finite} Let $T>0$. Under Hypothesis \ref{hyp_globales}, there exists a constant $C=C(T,F,\Vert \eta\Vert_\infty)>0$ such that $\mathbb{P}$-almost surely for $N$ large enough: $$\mathbf{E}\left[\sup_{s\leq T} \Vert \widehat{\zeta}_N(s) \Vert_2\right] \leq \dfrac{C}{\sqrt{N\rho_N}}.$$ \end{prop} \begin{prop}\label{prop:drift_term_finite} Under Hypotheses \ref{hyp_globales} and \ref{hyp:scenarios}, for any $t>0$, \begin{align}\label{eq:control_drift_finite} \Vert \widehat{\phi}_N(t)\Vert_2 &\leq C \left( \int_0^t e^{-\alpha(t-s)} \Vert \widehat{Y}_N(s) \Vert_2ds + \widehat{G}_{N}\right), \end{align} where $\widehat{G}_{N}$ is an explicit quantity to be found in the proof that tends to 0 as $N\to \infty$. \end{prop} Their proofs are postponed to the following subsection. 
Hence we obtain $$\left\Vert \widehat{Y}_N(t)\right\Vert_2 \leq C\left( \widehat{G}_{N} + \left\Vert \widehat{\zeta}_N(t)\right\Vert_2+\int_0^t e^{-\alpha(t-s)}\left\Vert \widehat{Y}_N(s)\right\Vert_2ds\right),$$ which gives with Grönwall's lemma $$\sup_{t\leq T} \left\Vert \widehat{Y}_N(t)\right\Vert_2 \leq C\left( \widehat{G}_{N} + \sup_{t\leq T} \left\Vert \widehat{\zeta}_N(t)\right\Vert_2\right).$$ With Proposition \ref{prop:noise_perturbation_finite}, it leads to $$\mathbf{E}\left[\sup_{t\leq T}\left\Vert\widehat{Y}_N(t)\right\Vert_2\right]\leq C\left( \widehat{G}_N + \dfrac{1}{\sqrt{N\rho_N}}\right),$$ hence the result \eqref{eq:finite_time} as \eqref{eq:dilution} implies $\dfrac{1}{\sqrt{N\rho_N}}\to 0$ and $\widehat{G}_N\to 0$. \end{proof} \subsection{Proofs of Propositions \ref{prop:noise_perturbation_finite} and \ref{prop:drift_term_finite}} \begin{proof}[Proof of Proposition \ref{prop:noise_perturbation_finite}] We do as for Proposition \ref{prop:noise_perturbation}, and apply Itô's formula on $$\widehat{\zeta}_N(t)=\sum_{j=1}^N \int_0^t \int_0^\infty e^{-\alpha(t-s)}\chi_j(s,z)\tilde{\pi}_j(ds,dz).$$ The term $I_0(t)$ in \eqref{eq:zeta_spatial_def} becomes $-\alpha \int_0^t \left\Vert \widehat{\zeta}_N(s)\right\Vert_2ds$ which is still non-positive. About $I_1(t)$ and $I_2(t)$, the proof remains the same aside from the fact that we now consider $\widehat{\zeta}_N$ instead of $\zeta_N$. \end{proof} To prove Proposition \ref{prop:drift_term_finite}, we introduce an auxiliary quantity as in Lemma \ref{lem:tilde_YN_control}. \begin{lem}\label{lem:control_hat_YN} Let $\overline{Y}_N(s)(v):=\widehat{Y}_N(s)\left( \dfrac{\lceil Nv\rceil}{N}\right)$. Then for any $T\geq 0$ \begin{equation}\label{eq:barYN_control} \sup_{0\leq s\leq T} \Vert\overline{Y}_N(s) - \widehat{Y}_N(s)\Vert_2 \xrightarrow[N\to\infty]{}0. \end{equation} \end{lem} \begin{proof} It plays the role of $\tilde{Y}_N(s)$ introduced in Lemma \ref{lem:tilde_YN_control}. 
Similarly at what has been done before, we have $$\left\Vert \widehat{Y}_N(s)-\overline{Y}_N(s)\right\Vert_2^2 = \sum_{j=1}^N \int_{B_{N,j}} \left( \widehat{Y}_N(s)(y)-\overline{Y}_N(s)(y)\right)^2dy = \sum_{j=1}^N \int_{B_{N,j}} \left(X_s(x_j)-X_s(y)\right)^2dy$$ which tends to 0 by uniform continuity of $X$ on $ [0,T]\times I$. It still holds under the hypotheses of Section \ref{S:extension} by decomposing the sum on each interval $C_k$. \end{proof} \begin{proof}[Proof of Proposition \ref{prop:drift_term_finite}] We divide $\widehat{\phi}$ as in \eqref{eq:def_phi_nk} and study each contribution. About $\widehat{\phi}_{N,0}(t):=\int_0^te^{-\alpha(t-s)} T_W\left( F(X_N(s),\eta_s)-F(X_s,\eta_s)\right)ds$, we have \begin{align*} \left\Vert T_W\left( F(X_N(s),\eta_s)-F(X_s,\eta_s)\right)\right\Vert_2^2 &\leq C_{W,F} \left( \int_I \Vert F \Vert_L \left\vert X_N(s)(y)-X_s(y)\right\vert dy\right)^2 \\ &\leq C_{W,F} \left\Vert \widehat{Y}_N(s)\right\Vert_2^2, \end{align*} which gives $$\left\Vert \widehat{\phi}_{N,0}(t)\right\Vert_2\leq C_{W,F} \int_0^te^{-\alpha(t-s)} \left\Vert \widehat{Y}_N(s)\right\Vert_2ds.$$ About $\widehat{\phi}_{N,1}(t):=\int_0^te^{-\alpha(t-s)} \sum_{i=1}^N \frac{\Theta_{s,i,1}}{N} \mathbf{1}_{B_{N,i}} ds$, we do as in Lemma \ref{lem:drift_term_phi_N1}. Instead of inserting the terms $F(X_\infty(x_j),\eta_\infty(x_j))$ in \eqref{eq:lem_phiN1_aux} we insert the terms $F(X_s(x_j),\eta_s(x_j))$, that is \begin{multline*} \gamma_N(s)\leq \sum_{i,j=1}^N \dfrac{1}{N} \kappa_{N,i} \overline{\xi_{ij}} \left(F(X_{N,j}(s),\eta_s(x_j))-F(X_s(x_j),\eta_s(x_j))\right)\mathbf{1}_{B_{N,i}}\\+ \sum_{i,j=1}^N \dfrac{1}{N} \kappa_{N,i} \overline{\xi_{ij}} F(X_s(x_j),\eta_s(x_j))\mathbf{1}_{B_{N,i}}=:\widehat{\gamma}_{N,1}(s) + \widehat{\gamma}_{N,2}(s). 
\end{multline*} The treatment of $\widehat{\gamma}_{N,1}$ is similar to that of $\gamma_{N,1}$: we make $\overline{Y}_N(s)$ appear instead of $\tilde{Y}_N$ and obtain $\Vert\widehat{\gamma}_{N,1}(s)\Vert_2^2\leq C_F \left( \left\Vert \widehat{Y}_N(s)\right\Vert_2^2 +1\right)\left(\dfrac{S_{N}^\text{max}}{\rho_N^2} +\dfrac{1}{N\rho_N^2}\right)$ with \eqref{eq:barYN_control}. About $\widehat{\gamma}_{N,2}$, we do as for $\gamma_{N,2}$, as $\sup_{t\in [0,T],x\in I} F(X_t(x),\eta_t(x))<\infty$, and obtain that $\mathbb{P}$-almost surely if $N$ is large enough, $\Vert\widehat{\gamma}_{N,2}\Vert_2^2\leq C\left( \dfrac{1}{N\rho_N^2} + \dfrac{1}{N^{1-2\tau}\rho_N^4}\right)$. We have then that, $\mathbb{P}$-almost surely if $N$ is large enough, $$\left\Vert \widehat{\phi}_{N,1}(t)\right\Vert_2\leq C_F \int_{0}^t e^{-\alpha(t-s)} \left\Vert \widehat{Y}_N(s)\right\Vert_2ds + G_{N,1},$$ where $G_{N,1}\to 0$. About $\widehat{\phi}_{N,k}(t):=\int_0^te^{-\alpha(t-s)} \sum_{i=1}^N \frac{\Theta_{s,i,k}}{N} \mathbf{1}_{B_{N,i}} ds$ for $k\in \{2,3\}$, we proceed similarly, doing as in Lemmas \ref{lem:drift_term_phi_N2} and \ref{lem:drift_term_phi_N3} but instead of inserting the terms $F(X_\infty(x_j),\eta_\infty(x_j))$ we insert the terms $F(X_s(x_j),\eta_s(x_j))$: then there are no $\delta_s$ terms. We obtain then $$\Vert \widehat{\phi}_{N,2}(t)\Vert_2 \leq C \int_{0}^t e^{-\alpha(t-s)}\left\Vert \widehat{Y}_N(s)\right\Vert_2ds + G_{N,2},$$ and $$\Vert \widehat{\phi}_{N,3}(t)\Vert_2 \leq C\int_{0}^t e^{-\alpha(t-s)} \left\Vert \widehat{Y}_N(s)\right\Vert_2ds + G_{N,3},$$ where both $G_{N,2}$ and $G_{N,3}$ tend to 0. Note that we can obtain better bounds when $F$ is bounded. By putting all the terms $\widehat{\phi}_{N,k}$ together, we get \eqref{eq:control_drift_finite}. 
\end{proof} \appendix \section{Auxiliary results} \label{S:appendix} \subsection{Concentration results} \begin{thm}[Grothendieck's inequality as in \cite{Coppini2022}]\label{thm:grothendieck} Let $\{a_{ij}\}_{i,j=1,\cdots,n}$ be a $n\times n$ real matrix such that for all $s_i$, $t_j\in\{-1,1\}$ $$\sum_{i,j=1}^na_{ij}s_it_j\leq 1.$$ Then, there exists a constant $K_R>0$, such that for every Hilbert space $\left( H, \langle \cdot,\cdot\rangle_H\right)$ and for all $S_i$ and $T_j$ in the unit ball of $H$ $$ \sum_{i,j=1}^n a_{ij}\langle S_i,T_j\rangle_H \leq K_R.$$ \end{thm} \begin{thm}[Azuma–Hoeffding inequality]\label{thm:ineg_AZ-HO} Let $(M_n)$ be a martingale with $M_0=0$. Assume that for all $1\leq k \leq n$, $\vert \Delta M_k \vert \leq c_k$ a.s. for some constants $(c_k)$. Then for all $x\geq 0$ \begin{equation}\label{eq:ineg_AZ-HO} \mathbb{P}\left( \vert M_n \vert \geq x \right) \leq 2 \exp \left( -\dfrac{x^2}{2\sum_{k=1}^n c_k^2} \right). \end{equation} \end{thm} \begin{thm}[Upper tail estimate for iid ensembles, Corollary 2.3.5 of \cite{tao2012}]\label{thm:tao2012_upper_tail} Suppose that $M=(m_{ij})_{1\leq i,j \leq n}$, where $n$ is a (large) integer and the $m_{ij}$ are independent centered random variables uniformly bounded in magnitude by 1. Then there exist absolute constants $C, c > 0$ such that $$\mathbb{P} \left( \Vert M \Vert_{op} > x\sqrt{n} \right) \leq C \exp \left( -cxn \right)$$ for any $x\geq C$. \end{thm} \begin{lem} \label{prop:estimees_IC} Under Hypothesis \ref{hyp:scenarios}, we have $\mathbb{P}$-almost surely if $N$ is large enough: \begin{equation}\label{eq:estimees_IC} \sup_{1 \leq j \leq N} \left( \sum_{i=1}^N \dfrac{\xi_{ij}^{(N)}}{N\rho_N}\right) \leq 2, \quad \sup_{1 \leq i \leq N} \left( \sum_{j=1}^N \dfrac{\xi_{ij}^{(N)}}{N\rho_N}\right) \leq 2. 
\end{equation} \end{lem} \begin{proof} It is a direct consequence of Corollary 8.2 of a previous work \cite{agathenerine2021multivariate}, in the case $w_N=\rho_N$, $\kappa_N=\frac{1}{\rho_N}$, $W_N(x_i,x_j)=\rho_NW(x_i,x_j)$ with $W$ bounded. \end{proof} \begin{lem}\label{lem:maj_S} Let $N\geq 1$, for $j\neq j'$ in $\llbracket 1, N \rrbracket$, let $\displaystyle S_{jj'}:= \dfrac{1}{N}\sum_{i=1}^N \overline{\xi_{ij}}~ \overline{\xi_{ij'}}$ with $\xi$ defined in Definition \ref{def:espace_proba_bb}, and $S_{N}^\text{max}:= \sup_{1\leq j\neq j'\leq N} \left\vert S_{jj'} \right\vert$. Then, under Hypothesis \ref{hyp:scenarios}, $\mathbb{P}$-a.s. \begin{equation}\label{eq:maj_SNMAX} \limsup_{N\to\infty} S_{N}^\text{max}\leq N^{\tau-\frac{1}{2}} \end{equation} where $\tau\in(0,\frac{1}{2})$ comes from Hypothesis \ref{hyp:scenarios}. \end{lem} \begin{proof} When $j$ and $j'$ are fixed and $j\neq j'$, $\left(X_i:= \overline{\xi_{ij}}~ \overline{\xi_{ij'}}\right)_{1\leq i \leq N}$ is a family of independent random variables with $\vert X_i\vert\leq 1$, $\mathbf{E}[X_i]=0$ and $\mathbf{E}[X_i^2]\leq 1$. Bernstein's inequality gives then for any $t>0$ $$\mathbf{P}\left( \left\vert \sum_{i=1}^N \overline{\xi_{ij}}~ \overline{\xi_{ij'}} \right\vert>t\right)\leq 2\exp\left( -\dfrac{1}{2} \dfrac{t^2}{N+\frac{t}{3}}\right)$$ hence for the choice $t=N^{\frac{1}{2}+\tau}$ with $\tau\in (0,\frac{1}{2})$, $$\mathbf{P}\left( \left\vert \sum_{i=1}^N \overline{\xi_{ij}}~ \overline{\xi_{ij'}} \right\vert>N^{\frac{1}{2}+\tau} \right)\leq 2\exp\left( -\dfrac{1}{2} \dfrac{N^{2\tau}}{1+\frac{1}{3}N^{-\frac{1}{2}+\tau}}\right) \leq 2\exp\left( -\dfrac{1}{4}N^{2\tau}\right)$$ as $1+\frac{1}{3}N^{-\frac{1}{2}+\tau}\leq 2$. With an union bound $$\mathbf{P}\left( \sup_{j\neq j'} \left\vert S_{jj'} \right\vert > \dfrac{1}{N^{\frac{1}{2}-\tau}}\right) \leq 2N^2 \exp\left( -\dfrac{1}{4} N^{2\tau}\right).$$ We apply then Borel Cantelli's lemma and obtain \eqref{eq:maj_SNMAX}. 
\end{proof} \begin{lem}\label{lem:inegalit_concentration_Y} Fix $n > 1$ and let $\left(Y_l\right)_{l=1,\ldots,n}$ be real-valued random variables defined on a probability space $\left(\Omega, \mathcal{F}, \mathbb{P}\right)$. Suppose that there exists $\nu>0$ such that, almost surely, for all $l = 1,\ldots, n-1$, $Y_l\leq 1$, $\mathbb{E}\left[Y_{l+1} \left| Y_l \right.\right] = 0$ and $\mathbb{E}\left[Y_{l+1}^2 \left|Y_l\right.\right]\leq \nu$. Then $$\mathbb{ P} \left(n^{ -1} (Y_{ 1}+ \ldots+ Y_{ n}) \geq x\right) \leq \exp \left( -n \frac{ x^{ 2}}{ 2\nu} B \left( \frac{ x}{\nu}\right)\right)$$ for all $x \geq 0$, where \begin{equation}\label{eq:def_B(u)} B(u):= u^{-2}\left( \left( 1+u \right) \log \left( 1+u \right) - u \right). \end{equation} \end{lem} \begin{proof} A direct application of \cite[Corollary 2.4.7]{zeitouni1998large} gives that $$ \mathbb{ P}\left(n^{ -1} (Y_{ 1}+ \ldots+ Y_{ n}) \geq x\right) \leq \exp \left( -n H \left( \frac{ x+\nu}{ 1+\nu} \vert \frac{ \nu}{ 1+\nu}\right)\right),$$ where $H(p\vert q):= p \log(p/q) +(1-p) \log((1-p)/(1-q))$ for $p,q\in [0, 1]$. Then, the inequality $ H \left( \frac{ x+\nu}{ 1+\nu} \vert \frac{ \nu}{ 1+\nu}\right)\geq \frac{ x^{ 2}}{ 2\nu} B \left( \frac{ x}{ \nu}\right)$ (see \cite[Exercise 2.4.21]{zeitouni1998large}) gives the result. \end{proof} \begin{cor}\label{cor:ineg_concentration_xi_carre} Let $\left(Z_{ij}\right)_{i,j}$ be a family of independent Bernoulli variables, with $\mathbb{E}[Z_{ij}]=m_{ij}$. Let $(\beta_{ij})_{i,j}$ be a sequence such that for any $i,j$, $ \beta_{ij}\in (0,1]$. Then, for all $x\geq 0$ $$\mathbb{P}\left( \dfrac{1}{N^2} \sum_{i,j=1}^{N} \beta_{ij} \left( \left(Z_{ij}-m_{ij}\right)^2 - \mathbb{E}\left(Z_{ij}-m_{ij}\right)^2\right) \geq x\right) \leq \exp\left( -\dfrac{N^2x^2}{2}B(x)\right).$$ \end{cor} \begin{proof} Fix a bijection $\phi_N:\llbracket 1 , N^2 \rrbracket \to \llbracket 1 , N \rrbracket \times \llbracket 1 , N \rrbracket$. 
For any $k\in \llbracket 1 , N^2 \rrbracket$ and $(i,j)=\phi_N(k)$, let $R_k=\beta_{ij} \left( \left(Z_{ij}-m_{ij}\right)^2 - \mathbb{E}\left(Z_{ij}-m_{ij}\right)^2\right)$. As the $\left(Z_{ij}\right)_{i,j}$ are independent, the family of random variables $\left(R_k\right)_{1\leq k \leq N^2}$ is also independent. As $R_k\leq 1$ a.s., $\mathbb{E}\left[R_{k+1}\vert R_k\right]=0$ and $\mathbb{E}\left[R_{k+1}^2\vert R_k\right]\leq 1$, Lemma \ref{lem:inegalit_concentration_Y} implies that for any $x\geq 0$, $$\mathbb{P}\left( \dfrac{1}{N^2} \sum_{k=1}^{N^2} R_k \geq x\right) \leq \exp\left( -\dfrac{N^2x^2}{2}B(x)\right)$$ where $B$ is defined in \eqref{eq:def_B(u)}. \end{proof} \subsection{Other technical results} \begin{lem}\label{lem:op_radius}Let $K$ be a kernel from $I^2 \to \mathbb{R}_+$ such that $\sup_{x\in I}\int_I K(x,y)^2dy <\infty$. Let $T_K:g\mapsto T_Kg:=\left(x\mapsto\int_I K(x,y)g(y)dy\right)$ be the operator associated with $K$, that can be defined from $L^2(I)\to L^2(I)$ and from $L^\infty(I)\to L^\infty(I)$. We assume that $T_K^2:L^2(I)\to L^2(I)$ is compact. Then $$r_{ 2}(T_K)= r_{ \infty}(T_K).$$ \end{lem} \begin{proof} First note that for all $p\geq1$, $ r(T_K^{ p})^{ \frac{ 1}{ p}}= \left( \lim_{ n\to\infty} \left\Vert T_K^{ pn} \right\Vert^{ \frac{ 1}{ n}}\right)^{ \frac{ 1}{ p}}= \lim_{ n\to\infty} \left\Vert T_K^{ pn} \right\Vert^{ \frac{ 1}{ pn}}= r(T_K)$, so that $r(T_K^{ p})= r(T_K)^{ p}$. Hence $r_{ 2}(T_K^{ 2})= r_{ \infty}(T_K^{ 2})$ gives $r_{ 2}(T_{ K})= r_{ \infty}(T_{K})$. Let us prove that $r_{ 2}(T_K^{ 2})= r_{ \infty}(T_K^{ 2})$ by proving that they have the same spectrum. To do so, first note that $T_K^{ 2}: L^{ \infty}(I) \to L^{ \infty}(I)$ is compact: consider $\left(f_n\right)_n$ a bounded sequence of $L^\infty(I)$. It is then also bounded in $L^2(I)$, and as $T_K:L^2(I)\to L^2(I)$ is compact, there exists a subsequence $\left(f_{\phi(n)}\right)$ such that $T_Kf_{\phi(n)}$ converges in $L^2(I)$ to a certain $g$. 
Then for any $x\in I$, $$ \vert T_K^2 f_{\phi(n)} - T_Kg \vert (x) \leq \int_I K(x,y) \left| T_Kf_{\phi(n)}(y) - g(y) \right| dy \leq C_K \Vert T_Kf_{\phi(n)}-g\Vert_2 \xrightarrow[n\to\infty]{} 0,$$ thus $T_K^2:L^\infty(I)\to L^\infty(I)$ is compact. Hence, if one denotes by $ \sigma_{ \infty}(T_{ K}^{ 2})$ and $ \sigma_{ 2}(T_{K}^{ 2})$ the corresponding spectrum of $T_{K}^{ 2}$ (in $L^{ \infty}(I)$ and $L^{ 2}(I)$ respectively), we have that each nonzero element of $ \sigma_{ \infty}(T_{K}^{ 2})$ and $ \sigma_{ 2}(T_{ K}^{ 2})$ is an eigenvalue of $T_{K}^{ 2}$: let $\mu \in \sigma_2(T_K^2)\setminus\{0\}$, there exists $g\in L^2(I)$ such that $\mu g = T_K^2g$. As $$\left| T_K^2g(x) \right| = \left| \int_I K(x,y) \int_I K(y,z) g(z) ~ \nu(dz)\nu(dy)\right| \leq C_K\Vert g \Vert_2 <\infty,$$ $g = \frac{1}{\mu}T_K^2g \in L^\infty(I)$ and $\mu \in \sigma_\infty(T_K^2)$. Conversely, let $\mu \in \sigma_\infty(T_K^2)\setminus\{0\}$, there exists $g \in L^\infty(I)$ such that $\mu g = T_K^2g$. As $L^\infty(I)\subset L^2(I)$, $\mu \in \sigma_2(T_K^2)$. Hence $r_{ 2}(T_{K}^{ 2})= r_{ \infty}(T_{K}^{ 2})$ and the result follows. \end{proof} \begin{lem}[Quadratic Gr\"{o}nwall's lemma]\label{lem:gronwal_quadratic} Let $f$ be a non-negative function piecewise continuous with a finite number of distinct jumps of size smaller than $\theta$ on $[t_0,T]$, let $g$ be a non-negative continuous function and $h \in L^{1}$. For any $t\in[t_0,T]$, assume $f$ satisfies $$f(t)\leq f(t_0)+g(t) + \int_{t_0}^t h(t-s) f(s)^2 ds.$$ Then, for $\delta<\dfrac{1}{9\Vert h\Vert_1}$, if $\theta\leq \dfrac{\delta}{2}$ and if $\sup_{t\in [t_0,T]}g(t) \leq \delta$, we have $$\sup_{t\in [t_0,T]} f(t) \leq f(t_0)+3\delta.$$ \end{lem} \begin{proof} Let $A=\{t\in [t_0,T], f(t)>f(t_0)+3\delta\}$, suppose $A\neq \emptyset$. Let $t^*=\inf\{t\in [t_0,T], f(t)>f(t_0)+3\delta\}$. 
If there is no jump at $t_0$, by the initial conditions $t^*>t_0$, and if there is a jump, $f(t_0^+)\leq f(t_0)+\dfrac{\delta}{2}$ hence we also have $t^*>t_0$. Moreover, for all $t\in [t_0,t^{*})$, $f(t)\leq f(t_0)+ \delta+9\delta^2 \int_{t_0}^t h(t-s)ds\leq f(t_0)+2\delta$. If there is a jump at $t^*$, it is of amplitude $\theta\leq\dfrac{\delta}{2}$ hence $f(t^*)\leq f(t_0)+ \dfrac{5\delta}{2}<f(t_0)+3\delta$ which is a contradiction. If there is no jump at $t^*$, by local continuity we have $f(t^*)\leq f(t_0)+ \delta+9\delta^2 \int_{t_0}^{t^*} h(t^*-s)ds\leq f(t_0)+2\delta$ which is also a contradiction. We conclude then that $\sup_{t\in [t_0,T]} f(t) \leq f(t_0)+3\delta$. \end{proof} \end{document}
\begin{document} \title {Reflected BSDEs with monotone generator} \author {Tomasz Klimsiak} \date{} \maketitle \begin{abstract} We give necessary and sufficient condition for existence and uniqueness of $\mathbb{L}^{p}$-solutions of reflected BSDEs with continuous barrier, generator monotone with respect to $y$ and Lipschitz continuous with respect to $z$, and with data in $\mathbb{L}^{p}$, $p\ge 1$. We also prove that the solutions may be approximated by the penalization method. \end{abstract} \footnotetext{{\em Mathematics Subject Classifications (2010):} Primary 60H20; Secondary 60F25.} \footnotetext{{\em Key words or phrases:} Reflected backward stochastic differential equation, monotone generator, $\mathbb{L}^{p}$-solutions.} \footnotetext{Research supported by the Polish Minister of Science and Higher Education under Grant N N201 372 436.} \nsubsection{Introduction} Let $B$ be a standard $d$-dimensional Brownian motion defined on some probability space $(\Omega,{\mathcal{F}},P)$ and let $\{{\mathcal{F}}_t\}$ denote the augmentation of the natural filtration generated by $B$. In the present paper we study the problem of existence, uniqueness and approximation of ${\mathbb L}^p$-solutions of reflected backward stochastic differential equations (RBSDEs for short) with monotone generator of the form \begin{equation} \label{eq1.1} \left\{ \begin{array}{l} Y_{t}=\xi+{\int_{t}^{T}} f(s,Y_{s},Z_{s})\,ds -{\int_{t}^{T}} dK_{s} -{\int_{t}^{T}} Z_{s}\,dB_{s},\quad t\in [0,T],\\ Y_{t}\ge L_{t},\quad t\in [0,T], \\ K\mbox{ is continuous, increasing, }K_{0}=0,\,\int_{0}^{T} (Y_{t}-L_{t})\,dK_{t}=0. \end{array} \right. \end{equation} Here $\xi$ is an ${\mathcal{F}}_T$-measurable random variable called the terminal condition, $f:[0,T]\times\Omega\times{\mathbb R}\times{\mathbb R}^d\rightarrow{\mathbb R}$ is the generator (or coefficient) of the equation and an $\{{\mathcal{F}}_t\}$-adapted continuous proces $L=\{L_t,t\in[0,T]\}$ such that $L_T\le\xi$ $P$-a.s. 
is called the obstacle (or barrier). A solution of (\ref{eq1.1}) is a triple $(Y,Z,K)$ of $\{{\mathcal{F}}_t\}$-progressively measurable processes having some integrability properties depending on assumptions imposed on the data $\xi,f,L$ and satisfying (\ref{eq1.1}) $P$-a.s. Equations of the form (\ref{eq1.1}) were introduced in El Karoui et al. \cite{EKPPQ}. At present it is widely recognized that they provide a useful and efficient tool for studying problems in different mathematical fields, such as mathematical finance, stochastic control and game theory, partial differential equations and others (see, e.g., \cite{CK,EKPPQ,EPQ,H,Kl2}). In \cite{EKPPQ} existence and uniqueness of square-integrable solutions of (\ref{eq1.1}) are proved under the assumption that $\xi$, $\int^T_0|f(t,0,0)|\,dt$ and $L^*_T=\sup_{t\le T}|L_t|$ are square-integrable, $f$ satisfies the linear growth condition and is Lipschitz continuous with respect to both variables $y$ and $z$. These assumptions are too strong for many interesting applications. Therefore many attempts have been made to prove existence and uniqueness of solutions of RBSDEs under less restrictive assumptions on the data. Roughly speaking one can distinguish here two types of results: for RBSDEs with less regular barriers (see, e.g., \cite{PengXu}) and for equations with continuous barriers whose generators or terminal conditions satisfy weaker assumptions than in \cite{EKPPQ}. We are interested in the second direction of investigation of (\ref{eq1.1}). In the paper we consider $\mathbb{L}^{p}$-integrable data with $p\ge 1$ and we assume that the generator is continuous and monotone in $y$ and Lipschitz continuous with respect to $z$. 
Assumptions of that type were considered in \cite{A,HP,LMX,RS} but it is worth mentioning that the case where the generator is monotone and at the same time the data are $\mathbb{L}^{p}$-integrable for some $p\in [1,2)$ was considered previously only in \cite{A,RS} (to be exact, in \cite{A} the author considers the case $p\in (1,2)$ but for generalized RBSDEs). Let us also mention that in the case $p=2$ existence and uniqueness results are known for equations with generators satisfying even weaker regularity conditions. For instance, in \cite{C} continuous generators satisfying the linear growth conditions are considered, in \cite{ZZ} it is assumed that the generator is left-Lipschitz continuous and possibly discontinuous in $y$, and in \cite{K} equations with generators satisfying the superlinear growth condition with respect to $y$, the quadratic growth condition with respect to $z$ and with data ensuring boundedness of the first component $Y$ are considered. In all these papers except for \cite{RS} the authors consider the so-called general growth condition which says that \begin{align}\label{i4} |f(t,y,0)|\le |f(t,0,0)|+\varphi(|y|),\quad t\in[0,T],y\in\mathbb{R}, \end{align} where $\varphi:\mathbb{R}^{+}\rightarrow \mathbb{R}^{+}$ is a continuous increasing function or continuous function which is bounded on bounded subsets of $\mathbb{R}$. In \cite{RS} a condition weaker than (\ref{i4}), of the form \begin{align}\label{i5} \forall_{r>0}\quad \sup_{|y|\le r}|f(\cdot,y,0)-f(\cdot,0,0)|\in \mathbb{L}^{1}(0,T), \end{align} is assumed. Condition (\ref{i5}) seems to be the best possible growth condition on $f$ with respect to $y$. It was used earlier in the paper \cite{BDHPS} devoted to ${\mathbb L}^p$-solutions of usual (non-reflected) BSDEs with monotone generators. A similar condition is widely used in the theory of partial differential equations (see \cite{Betal.} and the references given there). 
Let us point out, however, that in contrast to the case of usual BSDEs with monotone generators, in general assumption (\ref{i4}) (or (\ref{i5})) together with $\mathbb{L}^{p}$-integrability of the data (integrability of $\xi$, $L^*_T$, $\int^T_0|f(t,0,0)|\,dt$ in our case) do not guarantee existence of $\mathbb{L}^{p}$-integrable solutions of (\ref{eq1.1}). For existence some additional assumptions relating the growth of $f$ with that of the barrier are required. In \cite{A,LMX} existence of solutions is proved under the assumption that $E|\varphi(\sup_{t\le T}e^{\mu t} L^{+}_{t})|^2<+\infty$, where $\varphi$ is the function of condition (\ref{i4}) and $\mu$ is the monotonicity coefficient of $f$. In \cite{RS} it is shown that it suffices to assume that \begin{align}\label{i7} E(\int_{0}^{T}|f(t,\sup_{s\le t} L^{+}_{s},0)|\,dt)^{p}<+\infty. \end{align} Condition (\ref{i7}) is still not the best possible. In our main result of the paper we give a necessary and sufficient condition for existence and uniqueness of $\mathbb{L}^{p}$-integrable solutions of RBSDE (\ref{eq1.1}) under the assumptions that the data are ${\mathbb L}^p$-integrable, $f$ is monotone in $y$ and Lipschitz continuous in $z$ and (\ref{i5}) is satisfied. Moreover, our condition is not only weaker than (\ref{i7}) but at the same time much easier to check than (\ref{i7}) in the case of Markov type RBSDEs, which are very important in applications, with obstacles of the form $L=h(\cdot,X)$, where $h:[0,T]\times{\mathbb R}^d\rightarrow{\mathbb R}$ is a measurable function and $X$ is a Hunt process associated with some Markov semigroup. In the case of Markov RBSDEs which appear for instance in applications to variational problems for PDEs (see, e.g., \cite{EKPPQ,Kl2}) our condition can be formulated in terms of $f,h$ only. We prove the main result for $p\ge1$. Moreover, we show that for $p\ge1$ a unique solution of RBSDE (\ref{eq1.1}) can be approximated via penalization. 
The last result strengthens the corresponding result in \cite{RS} proved in case $p>1$ for general generators and in case $p=1$ for generators not depending on $z$. In the last part of the paper we study (\ref{eq1.1}) in the case where $\xi$, $L^{+,*}$, $\int^T_0|f(t,0,0)|\,dt$ are ${\mathbb L}^p$-integrable for some $p\ge 1$ but our weaker form of (\ref{i7}) is not satisfied. We have already mentioned that then there are no ${\mathbb L}^p$-integrable solutions of (\ref{eq1.1}). We show that still there exist solutions of (\ref{eq1.1}) having weaker regularity properties. The paper is organized as follows. Section \ref{sec2} contains notation and main hypotheses used in the paper. In Section \ref{sec3} we show basic a priori estimates for solutions of BSDEs. In Section \ref{sec4} we prove comparison results as well as some useful results on c\`adl\`ag regularity of monotone limits of semimartingales and uniform estimates of monotone sequences. In Section \ref{sec5} we prove our main existence and uniqueness result for $p>1$, and in Section \ref{sec6} for $p=1$. Finally, in Section \ref{sec7} we deal with nonintegrable solutions. \nsubsection{Notation and hypotheses} \label{sec2} Let $B=\{B_{t}, t\ge 0\}$ be a standard $d$-dimensional Brownian motion defined on some complete probability space $(\Omega,{\mathcal{F}},P)$ and let $\{{\mathcal{F}}_{t}, t\ge 0\}$ be the augmented filtration generated by $B$. In the whole paper all notions whose definitions are related to some filtration are understood with respect to the filtration $\{{\mathcal{F}}_{t}\}$. Given a stochastic process $X$ on $[0,T]$ with values in $\mathbb{R}^{n}$ we set $X^{*}_{t}=\sup_{0\le s\le t}|X_{s}|$, $t\in[0,T]$, where $|\cdot|$ denotes the Euclidean norm on $\mathbb{R}^{n}$. By $\mathcal{S}$ we denote the set of all progressively measurable continuous processes. 
For $p>0$ we denote by $\mathcal{S}^{p}$ the set of all processes $X\in\mathcal{S}$ such that \[ \|X\|_{\mathcal{S}^{p}} =(E\sup_{t\in [0,T]} |X_{t}|^{p})^{1\wedge1/p}<+\infty. \] $M$ is the set of all progressively measurable processes $X$ such that \[ P(\int_{0}^{T}|X_{t}|^{2}\,dt<+\infty)=1 \] and for $p>0$, $M^{p}$ is the set of all processes $X\in M$ such that \[ (E(\int_{0}^{T}|X_{t}|^{2}\,dt)^{p/2})^{1\wedge 1/p} <+\infty. \] For $p,q>0$, $\mathbb{L}^{p,q}({\mathcal{F}})$ (resp. $\mathbb{L}^{p}({\mathcal{F}}_{T})$) denotes the set of all progressively measurable processes (${\mathcal{F}}_T$ measurable random variables) $X$ such that \[ (E(\int_{0}^{T}|X_{t}|^{p}\,dt)^{q/(1\wedge 1/p)})^{1\wedge 1/q}<+\infty \quad \left(\mbox{resp. }(E|X|^{p})^{1/p}<+\infty \right). \] For brevity we denote $\mathbb{L}^{p,p}({\mathcal{F}})$ by $\mathbb{L}^{p}({\mathcal{F}})$. By $\mathbb{L}^{1}(0,T)$ we denote the space of Lebesgue integrable real valued functions on $[0,T]$. ${\mathcal{M}}_{c}$ is the set of all continuous martingales (resp. local martingales) and ${\mathcal{M}}^{p}_{c}$, $p\ge1$, is the set of all martingales $M\in{\mathcal{M}}_{c}$ such that $E(\langle M \rangle_{T})^{p/2}<+\infty$. $\mathcal{V}_{c}$ (resp. $\mathcal{V}^{+}_{c}$) is the set of all continuous progressively measurable processes of finite variation (resp. increasing processes) and $\mathcal{V}^{p}_{c}$ (resp. $\mathcal{V}^{+,p}_{c}$) is the set of all processes $V\in\mathcal{V}_{c}$ (resp. $V\in\mathcal{V}^{+}_{c}$) such that $E|V|^{p}_{T}<+\infty$. We put ${\mathcal{H}}^{p}_{c}={\mathcal{M}}_{c}^{p}+\mathcal{V}_{c}^{p}$. For a given measurable process $Y$ of class (D) we denote \[ \|Y\|_{1}=\sup\{E|Y_{\tau}|,\tau\in\mathcal{T}\}. 
\] In what follows $f:[0,T]\times\Omega\times\mathbb{R}\times\mathbb{R}^{d}\rightarrow \mathbb{R}$ is a measurable function with respect to $Prog\times\mathcal{B}(\mathbb{R})\times\mathcal{B}(\mathbb{R}^{d})$, where $Prog$ denotes the $\sigma$-field of progressive subsets of $[0,T]\times\Omega$. In the whole paper all equalities and inequalities between random elements are understood to hold $P$-a.s. Let $p\ge 1$. In the paper we consider the following hypotheses. \begin{enumerate} \item[(H1)] $E|\xi|^{p}+E(\int_{0}^{T}|f(t,0,0)|\,dt)^{p}<\infty$. \item[(H2)] There exists $\lambda>0$ such that $|f(t,y,z)-f(t,y,z')| \le \lambda |z-z'|$ for every $t\in [0,T], y\in\mathbb{R}, z,z'\in\mathbb{R}^{d}$. \item[(H3)] There exists $\mu\in\mathbb{R}$ such that $(f(t,y,z)-f(t,y',z))(y-y')\le \mu(y-y')^{2}$ for every $t\in [0,T], y, y'\in\mathbb{R}, z\in\mathbb{R}^{d}$. \item [(H4)] For every $(t,z)\in[0,T]\times\mathbb{R}^{d}$ the mapping $\mathbb{R}\ni y\rightarrow f(t,y,z)$ is continuous. \item[(H5)] For every $r>0$ the mapping $[0,T]\ni t\rightarrow\sup_{|y|\le r}|f(t,y,0)-f(t,0,0)|$ belongs to $\mathbb{L}^{1}(0,T)$. \item[(H6)]$L$ is a continuous, progressively measurable process such that $L_T\le\xi$. \item [(H7)]There exists a semimartingale $X$ such that $X\in {\mathcal{H}}^{p}_{c}$ for some $p>1$, $X_{t}\ge L_{t}$, $t\in [0,T]$ and $E(\int_{0}^{T}f^{-}(s,X_{s},0)\,ds)^{p}<\infty$. \item [(H7*)] There exists a semimartingale $X$ of class (D) such that $X\in\mathcal{V}^{1}_{c}+{\mathcal{M}}^{q}_{c}$ for every $q\in (0,1)$, $X_{t}\ge L_{t}$, $t\in [0,T]$ and $E\int_{0}^{T}f^{-}(s,X_{s},0)\,ds<+\infty$. \item[(A)]There exist $\mu\in\mathbb{R}$ and $\lambda \ge 0$ such that \[ \hat{y}f(t,y,z)\le f_{t}+\mu|y|+\lambda |z| \] for every $t\in[0,T]$, $y\in\mathbb{R}$, $z\in\mathbb{R}^d$, where $\hat{y}=\mathbf{1}_{\{y\neq 0\}}\frac{y}{|y|}$ and $\{f_{t};t\in[0,T]\}$ is a nonnegative progressively measurable process. 
\item[(Z)]There exist $\alpha\in(0,1)$, $\gamma\ge 0$ and a nonnegative process $g\in\mathbb{L}^{1}({\mathcal{F}})$ such that \[ |f(t,y,z)-f(t,y,0)|\le \gamma (g_{t}+|y|+|z|)^{\alpha} \] for every $t\in[0,T]$, $y\in\mathbb{R}$, $z\in\mathbb{R}^d$. \end{enumerate} \nsubsection{A priori estimates} \label{sec3} In this section $K$ denotes an arbitrary but fixed process of the class $\mathcal{V}^{+}_{c}$ such that $K_{0}=0$. The following version of It\^o's formula will be frequently used in the paper. \begin{stw} \label{prop.ito} Let $p\ge 1$ and let $X$ be a progressively measurable process of the form \begin{align*} X_{t}=X_{0}+\int_{0}^{t} dK_{s}+\int_{0}^{t} Z_{s}\,dB_{s},\quad t\in [0,T], \end{align*} where $Z\in M$. Then there is $L\in \mathcal{V}^{+}_{c}$ such that \begin{align*} |X_{t}|^{p}-|X_{0}|^{p}&=p\int_{0}^{t} |X_{s}|^{p-1}\widehat{X}_{s}\,dK_{s} +p\int_{0}^{t}|X_{s}|^{p-1}\widehat{X}_{s}Z_{s}\,dB_{s}\nonumber\\ &\quad+c(p)\int^t_0\mathbf{1}_{\{X_{s}\neq 0\}}|X_{s}|^{p-2}|Z_{s}|^{2}\,ds +L_{t}\mathbf{1}_{\{p=1\}} \end{align*} with $c(p)=p(p-1)/2$. \end{stw} \begin{dow} \!\!The proof is a matter of slight modification of the proof of \cite[\!Lemma 2.2]{BDHPS}. \end{dow} \begin{df} We say that a pair $(Y,Z)$ of progressively measurable processes is a solution of BSDE$(\xi,f+dK)$ iff $Z\in M$, the mapping $[0,T]\ni t\mapsto f(t,Y_{t},Z_{t})$ belongs to $\mathbb{L}^{1}(0,T)$, $P$-a.s. and \begin{equation} \label{eq3.02} Y_{t}=\xi+\int_{t}^{T}f(s,Y_{s},Z_{s})\,ds +\int_{t}^{T}dK_{s}-\int_{t}^{T} Z_{s}\,dB_{s},\quad t\in[0,T]. \end{equation} \end{df} \begin{lm}\label{lm1} Let $(Y,Z)$ be a solution of \mbox{\rm BSDE}$(\xi,f+dK)$. Assume that \mbox{\rm(H3)} is satisfied and there exists a progressively measurable process $X$ such that $X_{t}\ge Y_{t}$, $t\in [0,T]$ and the mappings $[0,T]\ni t\mapsto X^{+}_t$, $[0,T]\ni t\mapsto f^{-}(t,X_{t},0)$ belong to $\mathbb{L}^{1}(0,T)$, $P$-a.s.
\begin{enumerate} \item[\rm(i)]If \mbox{\rm(H2)} is satisfied then for every stopping time $\tau\le T$ and $a\ge \mu$, \begin{align*} \int_{0}^{\tau} e^{at}dK_{t}&\le |e^{a\tau}Y_{\tau}| +|Y_{0}|+\int_{0}^{\tau}e^{as}Z_{s}\,dB_{s} +\lambda\int_{0}^{\tau}e^{as}|Z_{s}|\,ds\\&\quad +\int_{0}^{\tau}e^{as}f^{-}(s,X_{s},0)\,ds +\int_{0}^{\tau}a^{+}e^{as}X_{s}^{+}\,ds. \end{align*} \item[\rm(ii)] If \mbox{\rm(Z)} is satisfied then for every stopping time $\tau\le T$ and $a\ge\mu$, \begin{align*} \int_{0}^{\tau} e^{at}dK_{t}&\le |e^{a\tau}Y_{\tau}|+|Y_{0}| +\int_{0}^{\tau}e^{as}Z_{s}\,dB_{s}+\gamma\int_{0}^{\tau}e^{as}(g_{s} +|Y_{s}|+|Z_{s}|)^{\alpha}\,ds\\ &\quad+\int_{0}^{\tau}e^{as}f^{-}(s,X_{s},0)\,ds +\int_{0}^{\tau}a^{+}e^{as}X_{s}^{+}\,ds. \end{align*} \end{enumerate} \end{lm} \begin{dow} Assume that $\mu\le 0$. Then $f^{-}(s,Y_{s},0)\le f^{-}(s,X_{s},0)$, $s\in [0,T]$ and from (\ref{eq3.02}) and (H2) it follows that \begin{align*} K_{\tau}\le -Y_{\tau}+Y_{0}+\int_{0}^{\tau}Z_{s}\,dB_{s} +\lambda\int_{0}^{\tau}|Z_{s}|\,ds-\int_{0}^{\tau} f(s,Y_{s},0)\,ds, \end{align*} which implies (i) with $a=0$. Now, let $a\ge\mu$ and let $\tilde{Y}_{t}=e^{at}Y_{t}$, $\tilde{Z}_{t}=e^{at}Z_{t}$ and $\tilde{\xi}=e^{aT}\xi$, $\tilde{f}(t,y,z) =e^{at}f(t,e^{-at}y,e^{-at}z)-ay$, $d\tilde{K}_{t}=e^{at}\,dK_{t}$. Then $\tilde{f}$ satisfies (H3) with $\mu=0$ and by It\^o's formula, \[ \tilde{Y}_{t}=\tilde{\xi} +\int_{t}^{T}\tilde{f}(s,\tilde{Y}_{s},\tilde{Z}_{s})\,ds +\int_{t}^{T}d\tilde{K}_{s}-\int_{t}^{T}\tilde{Z}_{s}\,dB_{s}, \quad t\in [0,T], \] from which in the same manner as before we obtain (i) for $a\ge\mu$. To prove (ii) let us observe that from (\ref{eq3.02}) and (Z) it follows immediately that \begin{align*} K_{\tau}\le -Y_{\tau}+Y_{0}+\int_{0}^{\tau}Z_{s}\,dB_{s} +\gamma\int_{0}^{\tau}(g_{s}+|Y_{s}|+|Z_{s}|)^{\alpha}\,ds -\int_{0}^{\tau} f(s,Y_{s},0)\,ds. \end{align*} Therefore repeating arguments from the proof of (i) we get (ii). 
\end{dow} \begin{lm}\label{lm2} Assume \mbox{\rm(A)} and let $(Y,Z)$ be a solution of \mbox{\rm BSDE}$(\xi,f+dK)$. If $Y\in\mathcal{S}^{p}$ for some $p>0$ and \[ E(\int_{0}^{T}X^{+}_{s}\,ds)^{p} +E(\int_{0}^{T}f^{-}(s,X_{s},0)\,ds)^{p} +E(\int_{0}^{T}|f(s,0,0)|\,ds)^{p}<+\infty \] for some progressively measurable process $X$ such that $X_{t}\ge Y_{t}$, $t\in [0,T]$, then $Z\in M^{p}$ and there exists $C$ depending only on $\lambda,p,T$ such that for every $a\ge \mu+\lambda^{2}$, \begin{align*} &E\bigg((\int_{0}^{T} e^{2as}|Z_{s}|^{2}\,ds)^{p/2}+(\int_{0}^{T} e^{as}\,dK_{s})^{p}\bigg) \le CE\bigg(\sup_{t\le T} e^{apt}|Y_{t}|^{p}\\ &\qquad+(\int_{0}^{T}e^{as}|f(s,0,0)|\,ds)^{p} +(\int_{0}^{T}e^{as}f^{-}(s,X_{s},0)\,ds)^{p} + (\int_{0}^{T}a^{+}e^{as}X_{s}^{+}\,ds)^{p}\bigg). \end{align*} \end{lm} \begin{dow} By standard arguments we may assume that $\mu+\lambda^{2}\le 0$ and take $a=0$. For each $k\in\mathbb{N}$ let us consider the stopping time \begin{equation} \label{eq3.01} \tau_k=\inf\{t\in [0,T];\int_0^t|Z_{s}|^{2}\,ds\ge k\}\wedge T. \end{equation} Then as in the proof of Eq. (5) in \cite{BDHPS} we get \begin{align*} (\int_{0}^{\tau_{k}}|Z_{s}|^{2}\,ds)^{p/2}\le c_{p}\bigg(|Y^{*}_{T}|^{p} +(\int_{0}^{T} f_{s}\,ds)^{p} +|\int_{0}^{\tau_{k}}Y_{s}Z_{s}\,dB_{s}|^{p/2} +(\int_{0}^{\tau_{k}}|Y_{s}|\,dK_{s})^{p/2}\bigg), \end{align*} and hence, repeating arguments following Eq. (5) in \cite{BDHPS} we show that \begin{align}\label{eq2.1} E(\int_{0}^{\tau_{k}}|Z_{s}|^{2}\,ds)^{p/2}\le c_{p}E\bigg(|Y^{*}_{T}|^{p} +(\int_{0}^{T} f_{s}\,ds)^{p} +(\int_{0}^{\tau_{k}}|Y_{s}|\,dK_{s})^{p/2}\bigg). \end{align} By Lemma \ref{lm1} and the Burkholder-Davis-Gundy inequality, \begin{align}\label{eq2.2} EK^{p}_{\tau_{k}}\le c'(p,\lambda,T)E\{|Y^{*}_{T}|^{p} +(\int_{0}^{\tau_{k}}|Z_{s}|^{2}\,ds)^{p/2} +(\int_{0}^{T}f^{-}(s,X_{s},0)\,ds)^{p}\}. 
\end{align} Moreover, applying Young's inequality we conclude from (\ref{eq2.1}) that for every $\alpha>0$, \begin{align}\label{eq2.3} &E(\int_{0}^{\tau_{k}}|Z_{s}|^{2}\,ds)^{p/2} \nonumber\\ &\quad\le c''(p,\alpha)E\{|Y^{*}_{T}|^{p}+(\int_{0}^{T} f_{s}\,ds)^{p}+(\int_{0}^{T}f^{-}(s,X_{s},0)\,ds)^{p}\}+\alpha EK_{\tau_{k}}^{p}. \end{align} Taking $\alpha=(2c'(p,\lambda,T))^{-1}$ and combining (\ref{eq2.2}) with (\ref{eq2.3}) we obtain \[ E(\int_{0}^{\tau_{k}}|Z_{s}|^{2}\,ds)^{p/2} \le C(p,\lambda,T)E\{|Y^{*}_{T}|^{p}+(\int_{0}^{T} f_{s}\,ds)^{p} +(\int_{0}^{T}f^{-}(s,X_{s},0)\,ds)^{p}\}. \] Applying Fatou's lemma we conclude from the above inequality and (\ref{eq2.2}) that \begin{align*} E(\int_{0}^{T}|Z_{s}|^{2}\,ds)^{p/2}+EK_{T}^{p} \le CE\{|Y^{*}_{T}|^{p}+(\int_{0}^{T} f_{s}\,ds)^{p} +(\int_{0}^{T}f^{-}(s,X_{s},0)\,ds)^{p}\}, \end{align*} which is the desired estimate. \end{dow} \begin{uw}\label{uw1} Observe that if $f$ does not depend on $z$ then the constant $C$ of Lemma \ref{lm2} depends only on $p$. This follows from the fact that in this case $c'$ in the key inequality (\ref{eq2.2}) depends only on $p$. \end{uw} \begin{stw}\label{stw1} Assume that \mbox{\rm(A)} is satisfied and \[ E(\int_{0}^{T}f^{-}(s,X_{s},0)\,ds)^{p} +E(\int_{0}^{T}|f(s,0,0)|\,ds)^{p}<+\infty \] for some $p>1$ and $X^{+}\in \mathcal{S}^{p}$ such that $X_{t}\ge Y_{t}$, $t\in [0,T]$. 
Then if $(Y,Z)$ is a solution of \mbox{\rm BSDE}$(\xi,f+dK)$ such that $Y\in\mathcal{S}^{p}$, then there exists $C$ depending only on $\lambda,p,T$ such that for every $a\ge\mu+\lambda^{2}/[1\wedge(p-1)]$ and every stopping time $\tau\le T$, \begin{align*} &E\sup_{t\le \tau}e^{apt}|Y_{t}|^{p}+E(\int_{0}^{\tau} e^{2as}|Z_{s}|^{2}\,ds)^{p/2} +E(\int_{0}^{\tau}e^{as}\,dK_{s})^{p}\\ &\qquad\le CE\bigg(e^{ap\tau}|Y_{\tau}|^{p} +(\int_{0}^{\tau}e^{as}|f(s,0,0)|\,ds)^{p}+\sup_{t\le \tau} |e^{at}X^{+}_{t}|^{p}\\ &\qquad\quad+(\int_{0}^{\tau}e^{as}f^{-}(s,X_{s},0)\,ds)^{p} +(\int_{0}^{\tau}a^{+}e^{as}X^{+}_{s}\,ds)^{p}\bigg). \end{align*} Assume additionally that $f$ does not depend on $z$. If $p=1$ and $X^{+},Y$ are of class \mbox{\rm(D)} then for every $a\ge\mu$, \begin{align*} \|e^{a\cdot} Y\|_{1} +E\int_{0}^{T}e^{as}\,dK_{s}&\le E\bigg(e^{aT}|\xi| +\int_{0}^{T}e^{as}|f(s,0)|\,ds\\ &\quad+ \int_{0}^{T}e^{as}f^{-}(s,X_{s})\,ds +\int_{0}^{T}a^{+}e^{as}X^{+}_{s}\,ds\bigg)+\|e^{a\cdot}X^{+}\|_{1}\,. \end{align*} \end{stw} \begin{dow} To shorten notation we prove the proposition in the case where $\tau=T$. The proof of the general case requires only minor technical changes. Moreover, by the change of variables used at the beginning of the proof of Lemma \ref{lm1} we can reduce the proof to the case where $a=0$ and $\mu+\lambda^{2}/[1\wedge(p-1)]\le 0$. Therefore we will assume that $a,\mu,\lambda$ satisfy the last two conditions. By It\^o's formula (see Proposition \ref{prop.ito}), \begin{align*} |Y_{t}|^{p}&+c(p)\int_{t}^{T}|Y_{s}|^{p-2}\mathbf{1}_{\{Y_{s} \neq 0\}}|Z_{s}|^{2}\,ds= |\xi|^{p} +p{\int_{t}^{T}}|Y_{s}|^{p-1}\widehat{Y_{s}}f(s,Y_{s},Z_{s})\,ds\\ &+p{\int_{t}^{T}}|Y_{s}|^{p-1}\widehat{Y}_{s}\,dK_{s} -p{\int_{t}^{T}}|Y_{s}|^{p-1}\widehat{Y_{s}}Z_{s}\,dB_{s},\quad t\in [0,T]. \end{align*} By the same method as in the proof of Eq.
(6) in \cite{BDHPS} we deduce from the above inequality that \begin{align}\label{eq2.4} \nonumber|Y_{t}|^{p}+\frac{c(p)}{2} \int_{t}^{T}|Y_{s}|^{p-2}\mathbf{1}_{\{Y_{s}\neq 0\}}|Z_{s}|^{2}\,ds &\le H-p{\int_{t}^{T}}|Y_{s}|^{p-1}\widehat{Y_{s}}Z_{s}\,dB_{s}\\ &\quad+p{\int_{t}^{T}}|Y_{s}|^{p-1}\widehat{Y}_{s}\,dK_{s},\quad t\in [0,T], \end{align} where $H=|\xi|^{p}+\int_{0}^{T}|Y_{s}|^{p-1}f_{s}\,ds$. Since the mapping $\mathbb{R}\ni y\mapsto|y|^{p-1}\widehat{y}$ is increasing, \[ {\int_{t}^{T}}|Y_{s}|^{p-1}\widehat{Y}_{s}\,dK_{s} \le {\int_{t}^{T}}|X^{+}_{s}|^{p-1}\widehat{X}^{+}_{s}\,dK_{s},\quad t\in [0,T]. \] From this and (\ref{eq2.4}), \begin{equation} \label{eq3.06} |Y_{t}|^{p}+\frac{c(p)}{2}\int_{t}^{T}|Y_{s}|^{p-2}\mathbf{1}_{\{Y_{s} \neq 0\}}|Z_{s}|^{2}\,ds\le H' -p{\int_{t}^{T}}|Y_{s}|^{p-1}\widehat{Y_{s}}Z_{s}\,dB_{s}, \end{equation} where $H'=|\xi|^{p}+p\int_{0}^{T}|Y_{s}|^{p-1}f_{s}\,ds +p\int_{0}^{T}|X^{+}_{s}|^{p-1}\,dK_{s}$. As in the proof of \cite[Proposition 3.2]{BDHPS} (see (7) and the second inequality following (8) in \cite{BDHPS}), using the Burkholder-Davis-Gundy inequality we conclude from (\ref{eq3.06}) that \begin{equation}\label{eq2.5} E|Y^{*}_{T}|^{p}\le d_{p} EH'. \end{equation} Applying Young's inequality we get \begin{align}\label{eq2.6} pd_{p}E\int_{0}^{T}|Y_{t}|^{p-1}f_{t}\,dt \le pd_{p}E(|Y^{*}_{T}|^{p-1}\int_{0}^{T}f_{t}\,dt) \le \frac14E|Y^{*}_{T}|^{p}+d'_{p}E(\int_{0}^{T}f_{t}\,dt)^{p} \end{align} and \begin{align}\label{eq2.7} pd_{p}E\int_{0}^{T}|X^{+}_{t}|^{p-1}\,dK_{t} \le d'(p,\alpha)E|X^{+,*}_{T}|^{p}+\alpha EK^{p}_{T}\,. \end{align} By Lemma \ref{lm1}, there exists $d(p,\lambda,T)>0$ such that \[ EK_{T}^{p}\le d(p,\lambda,T)E\{|Y^{*}_{T}|^{p} +(\int_{0}^{T}|Z_{s}|^{2}\,ds)^{p/2} +(\int_{0}^{T}f^{-}(s,X_{s},0)\,ds)^{p}\}.
\] From this and Lemma \ref{lm2} we see that there exists $c(p,\lambda,T)>0$ such that \begin{align} \label{eq2.8} EK^{p}_{T}\le c(p,\lambda,T)E\{|Y^{*}_{T}|^{p} +(\int_{0}^{T}f_{s}\,ds)^{p} +(\int_{0}^{T}f^{-}(s,X_{s},0)\,ds)^{p}\}. \end{align} Put $\alpha=(4c(p,\lambda,T))^{-1}$. Then from (\ref{eq2.5})--(\ref{eq2.8}) it follows that there is $C(p,\lambda,T)$ such that \begin{align*} E|Y_{T}^{*}|^{p}&\le C(p,\lambda,T)E\{|\xi|^{p} +(\int_{0}^{T}|f(s,0,0)|\,ds)^{p} \\ &\quad+ (\int_{0}^{T}f^{-}(s,X_{s},0)\,ds)^{p}+\sup_{t\le T} |X^{+}_{t}|^{p}\}. \end{align*} Hence, by (\ref{eq2.8}) and Lemma \ref{lm2}, \begin{align*} E|Y^{*}_{\tau}|^{p}+E(\int_{0}^{\tau}|Z_{s}|^{2}\,ds)^{p/2} +EK_{T}^{p}&\le CE\bigg(|Y_{\tau}|^{p} +(\int_{0}^{\tau}|f(s,0,0)|\,ds)^{p} +|X^{+,*}_{\tau}|^{p}\\ &\quad+(\int_{0}^{\tau}f^{-}(s,X_{s},0)\,ds)^{p}\bigg). \end{align*} From this the first assertion follows. Now suppose that $f$ does not depend on $z$. As in the first part of the proof we may assume that $\mu\le 0$ and $a=0$. Applying It\^o's formula (see Proposition \ref{prop.ito}) we conclude that for any stopping times $\sigma\le\tau\le T$, \begin{align}\label{eq2.9} |Y_{\sigma}|\le|Y_{\tau}|+\int_{\sigma}^{\tau}f(s,Y_{s})\widehat{Y}_{s}\,ds +\int_{\sigma}^{\tau}\widehat{Y}_{s}\,dK_{s} -\int_{\sigma}^{\tau}Z_{s}\widehat{Y}_{s}\,dB_{s}. \end{align} Let us define $\tau_k$ by (\ref{eq3.01}). Then $\int_{0}^{\tau_{k}\wedge\cdot}Z_{s}\widehat{Y}_{s}\,dB_{s}$ is a uniformly integrable martingale. Using this, the fact that $Y$ is of class (D) and monotonicity of $f$ with respect to $y$ we deduce from (\ref{eq2.9}) that $|Y_{\sigma}|\le E(|\xi|+\int_{0}^{T}|f(s,0)|\,ds +K_{T}|\mathcal{F}_{\sigma})$, hence that \begin{equation} \label{eq2.10} \|Y\|_{1}\le E(|\xi|+\int_{0}^{T}|f(s,0)|\,ds+K_{T}). \end{equation} On the other hand, $-f(t,Y_{t})\le -f(t,X_{t})$ for $t\in [0,T]$ since $Y_{t}\le X_{t}$, $t\in [0,T]$.
Therefore \begin{align*} K_{\tau}&=Y_{0}-Y_{\tau}-\int_{0}^{\tau}f(s,Y_{s})\,ds +\int_{0}^{\tau}Z_{s}\,dB_{s}\\ &\le X_{0}-Y_{\tau} -\int_{0}^{\tau}f(s,X_{s})\,ds +\int_{0}^{\tau}Z_{s}\,dB_{s}. \end{align*} Taking $\tau=\tau_{k}$ and using the fact that $Y$ is of class (D) we deduce from the above inequality that \[ EK_{T}\le EX^{+}_{0}+E|\xi|+E\int_{0}^{T}f^{-}(s,X_{s})\,ds. \] Combining this with (\ref{eq2.10}) we get the desired result. \end{dow} \begin{uw} If $f$ does not depend on $z$ then the constant $C$ of the first assertion of Proposition \ref{stw1} depends only on $p$. To see this it suffices to observe that if $f$ does not depend on $z$ then the constant $c$ in the key inequality (\ref{eq2.8}) depends only on $p$ (see Remark \ref{uw1}). \end{uw} \nsubsection{Some useful tools} \label{sec4} We begin with a useful comparison result for solutions of (\ref{eq3.02}) with $K\equiv0$. \begin{stw}\label{stw2} Let $(Y^{1},Z^{1}), (Y^{2},Z^{2})$ be solutions of \mbox{\rm BSDE}$(\xi^{1},f^{1})$, \mbox{\rm BSDE}$(\xi^{2},f^{2})$, respectively. Assume that $(Y^{1}-Y^{2})^{+}\in\mathcal{S}^{q}$ for some $q>1$. If $\xi^{1}\le \xi^{2}$ and for a.e. $t\in [0,T]$ either \begin{equation} \label{eq4.01} \mathbf{1}_{\{Y^{1}_{t}>Y^{2}_{t}\}} (f^{1}(t,Y^{1}_{t},Z^{1}_{t})-f^{2}(t,Y^{1}_{t},Z^{1}_{t}))\le 0,\quad f^{2}\mbox{ satisfies }\mbox{\rm(H2)}, \mbox{\rm(H3)} \end{equation} or \begin{equation} \label{eq4.02} \mathbf{1}_{\{Y^{1}_{t}>Y^{2}_{t}\}} (f^{1}(t,Y^{2}_{t},Z^{2}_{t})-f^{2}(t,Y^{2}_{t},Z^{2}_{t}))\le 0, \quad f^{1}\mbox{ satisfies }\mbox{\rm(H2)}, \mbox{\rm(H3)} \end{equation} is satisfied then $Y^{1}_{t}\le Y^{2}_{t}$, $t\in [0,T]$. \end{stw} \begin{dow} We show the proposition in case (\ref{eq4.01}) is satisfied. If (\ref{eq4.02}) is satisfied, the proof is analogous. Without loss of generality we may assume that $\mu\le 0$. 
By the It\^o-Tanaka formula, for every $p\in(1,q)$ and every stopping time $\tau\le T$, \begin{align} \label{eq3.1} &|(Y^{1}_{t\wedge\tau}-Y^{2}_{t\wedge\tau})^{+}|^{p} +\frac{p(p-1)}{2}\int_{t\wedge\tau}^{\tau}\mathbf{1}_{\{Y^{1}_{s} \neq Y^{2}_{s}\}}|(Y^{1}_{s}-Y^{2}_{s})^{+}|^{p-2} |Z^{1}_{s}-Z^{2}_{s}|^{2}\,ds\nonumber\\ &\qquad=|(Y^{1}_{\tau}-Y^{2}_{\tau})^{+}|^{p} +p\int_{t\wedge\tau}^{\tau}|(Y^{1}_{s}-Y^{2}_{s})^{+}|^{p-1} (f^{1}(s,Y^{1}_{s},Z^{1}_{s})-f^{2}(s,Y^{2}_{s},Z^{2}_{s}))\,ds \nonumber\\ &\qquad\quad-p\int_{t\wedge\tau}^{\tau}|(Y^{1}_{s}-Y^{2}_{s})^{+}|^{p-1} (Z^{1}_{s}-Z^{2}_{s})\,dB_{s}. \end{align} By (\ref{eq4.01}), \begin{align*} &\mathbf{1}_{\{Y^{1}_{t}>Y^{2}_{t}\}} (f^{1}(t,Y^{1}_{t},Z^{1}_{t})-f^{2}(t,Y^{2}_{t},Z^{2}_{t}))\\ &\qquad=\mathbf{1}_{\{Y^{1}_{t}>Y^{2}_{t}\}} (f^{1}(t,Y^{1}_{t},Z^{1}_{t})-f^{2}(t,Y^{1}_{t},Z^{1}_{t}))\\ &\qquad\quad+\mathbf{1}_{\{Y^{1}_{t}>Y^{2}_{t}\}} (f^{2}(t,Y^{1}_{t},Z^{1}_{t}) -f^{2}(t,Y^{2}_{t},Z^{2}_{t}))\\ &\qquad\le\mathbf{1}_{\{Y^{1}_{t}>Y^{2}_{t}\}} (f^{2}(t,Y^{1}_{t},Z^{1}_{t})-f^{2}(t,Y^{2}_{t},Z^{1}_{t}))\\ &\qquad\quad+\mathbf{1}_{\{Y^{1}_{t}>Y^{2}_{t}\}} (f^{2}(t,Y^{2}_{t},Z^{1}_{t})-f^{2}(t,Y^{2}_{t},Z^{2}_{t}))\\ &\qquad\le\lambda\mathbf{1}_{\{Y^{1}_{t}>Y^{2}_{t}\}} |Z^{1}_{t}-Z^{2}_{t}|. 
\end{align*} From this, (\ref{eq3.1}) and Young's inequality, \begin{align*} &|(Y^{1}_{t\wedge\tau}-Y^{2}_{t\wedge\tau})^{+}|^{p}+\frac{p(p-1)}{2} \int_{t\wedge\tau}^{\tau}\mathbf{1}_{\{Y^{1}_{s}\neq Y^{2}_{s}\}}|(Y^{1}_{s}-Y^{2}_{s})^{+}|^{p-2} |Z^{1}_{s}-Z^{2}_{s}|^{2}\,ds\\ &\qquad\le |(Y^{1}_{\tau}-Y^{2}_{\tau})^{+}|^{p} +p\lambda\int_{t\wedge\tau}^{\tau}|(Y^{1}_{s} -Y^{2}_{s})^{+}|^{p-1}|Z^{1}_{s}-Z^{2}_{s}|\,ds\\ &\qquad\quad-p\int_{t\wedge\tau}^{\tau}|(Y^{1}_{s}-Y^{2}_{s})^{+}|^{p-1} (Z^{1}_{s}-Z^{2}_{s})\,dB_{s}\\ &\qquad\le|(Y^{1}_{\tau}-Y^{2}_{\tau})^{+}|^{p} +\frac{p\lambda^{2}}{p-1}\int_{t\wedge\tau}^{\tau} |(Y^{1}_{s}-Y^{2}_{s})^{+}|^{p}\,ds\\ &\qquad\quad+\frac{p(p-1)}{4}\int_{t\wedge\tau}^{\tau}\mathbf{1}_{\{Y^{1}_{s} \neq Y^{2}_{s}\}} |(Y^{1}_{s}-Y^{2}_{s})^{+}|^{p-2}|Z^{1}_{s}-Z^{2}_{s}|^{2}\,ds \\&\qquad\quad-p\int_{t\wedge\tau}^{\tau}|(Y^{1}_{s}-Y^{2}_{s})^{+}|^{p-1} (Z^{1}_{s}-Z^{2}_{s})\,dB_{s}. \end{align*} Let $\tau_{k}=\inf\{t\in [0,T];\int_{0}^{t} |(Y^{1}_{s}-Y^{2}_{s})^{+}|^{2(p-1)} |Z^{1}_{s}-Z^{2}_{s}|^{2}\,ds\ge k\}\wedge T$. From the above estimate it follows that \[ E|(Y^{1}_{t\wedge\tau_{k}}-Y^{2}_{t\wedge\tau_{k}})^{+}|^{p} \le E|(Y^{1}_{\tau_{k}}-Y^{2}_{\tau_{k}})^{+}|^{p} +\frac{p\lambda^{2}}{p-1}E\int_{t\wedge\tau_{k}}^{\tau_{k}} |(Y^{1}_{s}-Y^{2}_{s})^{+}|^{p}\,ds. \] Since$(Y^{1}-Y^{2})^{+}\in \mathcal{S}^{q}$, letting $k\rightarrow\infty$ and using the assumptions we get \[ E|(Y^{1}_{t}-Y^{2}_{t})^{+}|^{p} \le \frac{p\lambda^{2}}{p-1}E\int_{t}^{T} |(Y^{1}_{s}-Y^{2}_{s})^{+}|^{p}\,ds,\quad t\in [0,T]. \] By Gronwall's lemma, $E|(Y^{1}_{t}-Y^{2}_{t})^{+}|^{p}=0,\, t\in [0,T]$, from which the desired result follows. 
\end{dow} \begin{lm}\label{lm3} Assume that $\{(X^{n},Y^{n},K^{n})\}$ is a sequence of real valued c\`adl\`ag progressively measurable processes such that \begin{enumerate} \item [\rm (a)]$Y^{n}_{t}=-K^{n}_{t} +X_{t}^{n},\, t\in [0,T],\,K^{n}$-increasing, $K^{n}_{0}=0,$ \item [\rm (b)] $Y^{n}_{t}\uparrow Y_{t}$, $t\in [0,T]$, $Y^{1},Y$ are of class (D), \item [\rm (c)]There exists a c\`adl\`ag process $X$ such that for some subsequence $\{n'\}$, $X^{n'}_{\tau}\rightarrow X_{\tau}$ weakly in $\mathbb{L}^{1}({\mathcal{F}}_{T})$ for every stopping time $\tau\le T$. \end{enumerate} Then $Y$ is c\`adl\`ag and there exists a c\`adl\`ag increasing process $K$ such that $K^{n'}_{\tau}\rightarrow K_{\tau}$ weakly in $\mathbb{L}^{1}({\mathcal{F}}_{T})$ for every stopping time $\tau\le T$ and \[ Y_{t}=-K_{t}+X_{t},\quad t\in [0,T]. \] \end{lm} \begin{dow} From (b) it follows that $Y^{n'}_{\tau}\rightarrow Y_{\tau}$ weakly in $\mathbb{L}^{1}({\mathcal{F}}_{T})$ for every stopping time $\tau\le T$. Set $K_{t}=X_{t}-Y_{t}$. By the above and (c), $K^{n'}_{\tau}\rightarrow K_{\tau}$ weakly in $\mathbb{L}^{1}(\Omega)$ for every stopping time $\tau\le T$. If $\sigma,\tau$ are stopping times such that $\sigma\le\tau\le T$ then $K_{\sigma}\le K_{\tau}$ since $K^{n}_{\sigma}\le K^{n}_{\tau}$, $n\in{\mathbb N}$. Therefore $K$ is increasing. The fact that $Y,K$ are c\`adl\`ag processes follows easily from \cite[Lemma 2.2]{Peng}. \end{dow} In what follows we say that a sequence $\{\tau_{k}\}$ of stopping times is stationary if \[ P(\liminf_{k\rightarrow+\infty} \{\tau_{k}=T\})=1. \] \begin{lm}\label{lm4} Assume that $\{Y^{n}\}$ is a nondecreasing sequence of continuous processes such that $\sup_{n\ge1}E|Y^{n,*}_{T}|^{q}<\infty$ for some $q>0$. Then there exists a stationary sequence $\{\tau_{k}\}$ of stopping times such that $Y^{n,*}_{\tau_{k}}\le k\vee|Y^{n}_{0}|$, $P$-a.s. for every $k\in\mathbb{N}$. \end{lm} \begin{dow} Set $V^{n}_{t}=\sup_{0\le s\le t}(Y^{n}_{s}-Y^{1}_{s})$. 
Then $V^{n}$ is nonnegative and $V^{n}\in\mathcal{V}^{+}_{c}$. Since $\{Y^{n}\}$ is nondecreasing, there exists an increasing process $V$ such that $V^{n}_{t}\uparrow V_{t}$, $t\in [0,T]$. By Fatou's lemma, \[ EV^{q}_{T}\le \liminf_{n\rightarrow+\infty} E|V^{n}_{T}|^{q}\le c(q)\sup_{n\ge1}E|Y^{n,*}_{T}|^{q}<\infty. \] Now, set $V'_{t}=\inf_{t<t'\le T}V_{t'}$, $t\in [0,T]$ and then $\tau_{k}=\inf\{t\in [0,T]; Y^{1,*}_{t}+V'_{t}>k\}\wedge T$. It is known that $V'$ is a progressively measurable c\`adl\`ag process. Since $V_{T}$ is integrable, the sequence $\{\tau_{k}\}$ is stationary. From the above it follows that if $\tau_{k}>0$ then \[ Y^{n,*}_{\tau_{k}}=Y^{n,*}_{\tau_{k}-} \le V'_{\tau_{k}-}+Y^{1,*}_{\tau_{k}-}\le k,\quad k\in\mathbb{N}, \] and the proof is complete. \end{dow} \begin{lm}\label{lm5} If $\{Z^{n}\}$ is a sequence of progressively measurable processes such that $\sup_{n\ge1}E(\int_{0}^{T}|Z^{n}_{t}|^{2}\,dt)^{p/2}<\infty$ for some $p>1$, then there exists $Z\in M^{p}$ and a subsequence $\{n'\}$ such that for every stopping time $\tau\le T$, $\int_{0}^{\tau}Z^{n'}_{t}\,dB_{t}\rightarrow \int_{0}^{\tau}Z_{t}\,dB_{t}$ weakly in $\mathbb{L}^{p}({\mathcal{F}}_{T})$. \end{lm} \begin{dow} Since $\{Z^{n}\}$ is bounded in $\mathbb{L}^{2,p}({\mathcal{F}})$ and the space $\mathbb{L}^{2,p}({\mathcal{F}})$ is reflexive, there exists a subsequence (still denoted by $\{n\}$) and $Z\in\mathbb{L}^{2,p}({\mathcal{F}})$ such that $Z^{n}\rightarrow Z$ weakly in $\mathbb{L}^{2,p}({\mathcal{F}})$. It is known that if $\xi\in\mathbb{L}^{p'}({\mathcal{F}}_{T})$, where $p'=p/(p-1)$, then there exists $\eta\in \mathbb{L}^{2,p'}({\mathcal{F}})=(\mathbb{L}^{2,p}({\mathcal{F}}))^{*}$ such that \begin{equation} \label{eq3.2} \xi=E\xi+\int_{0}^{T}\eta_{t}\,dB_{t}. \end{equation} Let $f\in (\mathbb{L}^{p}({\mathcal{F}}_{T}))^{*}$. Then there exists $\xi\in\mathbb{L}^{p'}({\mathcal{F}}_{T})$ such that $f(\zeta)=E\zeta\xi$ for every $\zeta\in\mathbb{L}^{p}({\mathcal{F}}_{T})$. 
Let $\eta\in\mathbb{L}^{2,p'}({\mathcal{F}})$ be such that (\ref{eq3.2}) is satisfied. Without loss of generality we may assume that $E\xi=0$. Then by It\^o's isometry, \begin{align*} f(\int_{0}^{T}Z^{n}_{t}\,dB_{t}) &=E\xi\int_{0}^{T} Z^{n}_{t}\,dB_{t} =E\int_{0}^{T}\eta_{t}\,dB_{t}\int_{0}^{T}Z^{n}_{t}\,dB_{t}\\ &=E\int_{0}^{T}\eta_{t}Z^{n}_{t}\,dt\rightarrow E\int_{0}^{T}\eta_{t}Z_{t}\,dt=f(\int_{0}^{T}Z_{t}\,dB_{t}). \end{align*} Since the same reasoning applies to the sequence $\{\mathbf{1}_{\{\cdot\le\tau\}}Z^{n}\}$ in place of $\{Z^n\}$, the lemma follows. \end{dow} \nsubsection{Existence and uniqueness results for $p>1$} \label{sec5} First we recall the definition of a solution $(Y,Z,K)$ of (\ref{eq1.1}). Note that a priori we do not impose any integrability conditions on the processes $Y,Z,K$. \begin{df} We say that a triple $(Y,Z,K)$ of progressively measurable processes is a solution of RBSDE$(\xi,f,L)$ iff \begin{enumerate} \item [\rm(a)]$K$ is an increasing continuous process, $K_{0}=0$, \item [\rm(b)]$Z\in M$ and the mapping $[0,T]\ni t\mapsto f(t,Y_{t},Z_{t})$ belongs to $\mathbb{L}^{1}(0,T),\, P$-a.s., \item [\rm(c)]$Y_{t}=\xi+\int_{t}^{T}f(s,Y_{s},Z_{s})\,ds +\int_{t}^{T}dK_{s}-\int_{t}^{T}Z_{s}\,dB_{s},\quad t\in [0,T],$ \item [\rm(d)]$Y_{t}\ge L_{t},\, t\in [0,T]$, $\int_{0}^{T}(Y_{t}-L_{t})\,dK_{t}=0.$ \end{enumerate} \end{df} \begin{stw}\label{stw2.5} Let $(Y^{1},Z^{1},K^{1}), (Y^{2},Z^{2},K^{2})$ be solutions of \mbox{\rm RBSDE}$(\xi^{1},f^{1},L^{1})$, \mbox{\rm RBSDE}$(\xi^{2},f^{2},L^{2})$, respectively. Assume that $(Y^{1}-Y^{2})^{+}\in\mathcal{S}^{q}$ for some $q>1$. If $\xi^{1}\le\xi^{2}$, $L^{1}_{t}\le L^{2}_{t}$, $t\in [0,T]$, and either \mbox{\rm (\ref{eq4.01})} or \mbox{\rm(\ref{eq4.02})} is satisfied then $Y^{1}_{t}\le Y^{2}_{t}$, $t\in [0,T]$. \end{stw} \begin{dow} Assume that (\ref{eq4.01}) is satisfied. Let $q>1$ be such that $(Y^{1}-Y^{2})^{+}\in\mathcal{S}^{q}$.
Without loss of generality we may assume that $\mu\le 0$. By the It\^o-Tanaka formula, for $p\in(1,q)$ and every stopping time $\tau\le T$, \begin{align}\label{eqt} \nonumber &|(Y^{1}_{t\wedge\tau}-Y^{2}_{t\wedge\tau})^{+}|^{p} +\frac{p(p-1)}{2}\int_{t\wedge\tau}^{\tau}\mathbf{1}_{\{Y^{1}_{s} \neq Y^{2}_{s}\}}|(Y^{1}_{s}-Y^{2}_{s})^{+}|^{p-2} |Z^{1}_{s}-Z^{2}_{s}|^{2}\,ds\nonumber\\ &\quad=|(Y^{1}_{\tau}-Y^{2}_{\tau})^{+}|^{p} +p\int_{t\wedge\tau}^{\tau}|(Y^{1}_{s}-Y^{2}_{s})^{+}|^{p-1} (f^{1}(s,Y^{1}_{s},Z^{1}_{s})-f^{2}(s,Y^{2}_{s},Z^{2}_{s}))\,ds\nonumber\\ &\qquad+p\int_{t\wedge\tau}^{\tau} |(Y^{1}_{s}-Y^{2}_{s})^{+}|^{p-1 }\, (dK^{1}_{s}-dK^{2}_{s})\nonumber\\ &\qquad-p\int_{t\wedge\tau}^{\tau}|(Y^{1}_{s}-Y^{2}_{s})^{+}|^{p-1} (Z^{1}_{s}-Z^{2}_{s})\,dB_{s}. \end{align} By monotonicity of the function $x\mapsto \widehat{x}|x|^{p-1}$, condition (d) of the definition of a solution of reflected BSDE and the fact that $L^1_t\le L^2_t$ for $t\in[0,T]$, \begin{align*} \int_{t\wedge\tau}^{\tau}|(Y^{1}_{s}-Y^{2}_{s})^{+}|^{p-1}\, (dK^{1}_{s}-dK^{2}_{s})&\le \int_{t\wedge\tau}^{\tau} |(Y^{1}_{s}-Y^{2}_{s})^{+}|^{p-1}\,dK^{1}_{s}\\& \le \int_{t\wedge\tau}^{\tau} |(Y^{1}_{s}-L^{1}_{s})^{+}|^{p-1}\,dK^{1}_{s}=0. \end{align*} Combining this with (\ref{eqt}) we get estimate (\ref{eq3.1}) in Proposition \ref{stw2}. Therefore repeating arguments following (\ref{eq3.1}) in the proof of that proposition we obtain the desired result. The proof in case (\ref{eq4.02}) is satisfied is analogous and therefore left to the reader. \end{dow} \begin{stw}\label{stw3} If $f$ satisfies \mbox{\rm(H2)}, \mbox{\rm(H3)} then there exists at most one solution $(Y,Z,K)$ of \mbox{\rm RBSDE}$(\xi,f,L)$ such that $Y\in \mathcal{S}^{p}$ for some $p>1$. \end{stw} \begin{dow} Follows immediately from Proposition \ref{stw2.5} and uniqueness of the Doob-Meyer decomposition of semimartingales. \end{dow} \begin{tw}\label{tw1} Let $p>1$.
\begin{enumerate} \item[\rm(i)] Assume \mbox{\rm(H1)}--\mbox{\rm(H6)}. Then there exists a solution $(Y,Z,K)$ of \mbox{\rm RBSDE}$(\xi,f,L)$ such that $(Y,Z,K)\in \mathcal{S}^{p}\otimes M^{p}\otimes\mathcal{V}^{+,p}_{c}$ iff \mbox{\rm(H7)} is satisfied. \item[\rm(ii)]Assume \mbox{\rm(H1)}--\mbox{\rm(H7)}. For $n\in\mathbb{N}$ let $(Y^{n},Z^{n})$ be a solution of the BSDE \begin{equation} \label{eq5.01} Y^{n}_{t}=\xi+\int_{t}^{T}f(s,Y^{n}_{s},Z^{n}_{s})\,ds +\int_{t}^{T}dK^n_s-\int_{t}^{T}Z^{n}_{s}\,dB_{s},\, t\in [0,T] \end{equation} with \begin{equation} \label{eq5.02} K_{t}^{n}=\int_{0}^{t}n(Y^{n}_{s}-L_{s})^{-}\,ds \end{equation} such that $(Y^{n},Z^{n})\in\mathcal{S}^{p}\otimes M^{p}$. Then \begin{equation} \label{eq5.03} E\sup_{t\le T}|Y^{n}_{t}-Y_{t}|^{p} +E\sup_{t\le T}|K^{n}_{t}-K_{t}|^{p} +E(\int_{0}^{T}|Z^{n}_{t}-Z_{t}|^{2}dt)^{p/2}\rightarrow 0 \end{equation} as $n\rightarrow +\infty$. \end{enumerate} \end{tw} \begin{dow} Without loss of generality we may assume that $\mu\le0$. Assume that there is a solution $(Y,Z,K)\in\mathcal{S}^{p}\otimes M^{p}\otimes\mathcal{V}^{+,p}_{c} $ of RBSDE$(\xi,f,L)$. Then by \cite[Remark 4.3]{BDHPS}, \begin{equation}\label{eq4.1} E(\int_{0}^{T}|f(s,Y_{s},Z_{s})|\,ds)^{p}\le cE\{|\xi|^{p} +(\int_{0}^{T}f_{s}\,ds)^{p}+K^{p}_{T}\} \end{equation} which in view of (H2) and the fact that $Y_{t}\ge L_{t}$, $t\in [0,T]$ shows (H7). Conversely, let us assume that (H1)--(H7) are satisfied. Let $(Y^{n},Z^{n})$ be a solution of (\ref{eq5.01}) such that $(Y^{n},Z^{n})\in \mathcal{S}^{p}\otimes M^{p}$. We will show that there exists a process $\overline{X}\in\mathcal{H}^{p}_{c}$ such that $\overline{X}_{t}\ge Y^{n}_{t}$, $t\in [0,T]$ for every $n\in\mathbb{N}$. Since $X\in{\mathcal{H}}^{p}_{c}$, there exist $M\in {\mathcal{M}}^{p}_{c}$ and $V\in\mathcal{V}^{p}_{c}$ such that $X=V+M$. 
By the representation property of the Brownian filtration, there exists $Z'\in M^{p}$ such that \[ X_{t}=X_{T}-{\int_{t}^{T}} dV_{s}-{\int_{t}^{T}} Z'_{s}\,dB_{s},\quad t\in [0,T]. \] The above identity can be rewritten in the form \begin{align*} X_{t}&=X_{T}+{\int_{t}^{T}} f(s,X_{s},Z'_{s})\,ds -{\int_{t}^{T}} (f^{+}(s,X_{s},Z'_{s})\,ds+dV^{+}_{s})\\&\quad +{\int_{t}^{T}} (f^{-}(s,X_{s},Z'_{s})\,ds+dV^{-}_{s}) -{\int_{t}^{T}} Z'_{s}\,dB_{s},\quad t\in [0,T]. \end{align*} By \cite[Theorem 4.2]{BDHPS}, there exists a unique solution $(\overline{X},\overline{Z})\in\mathcal{S}^{p}\otimes M^{p}$ of the BSDE \begin{align*} \overline{X}_{t}=\xi\vee X_T +{\int_{t}^{T}} f(s,\overline{X}_{s},\overline{Z}_{s})\,ds +{\int_{t}^{T}} (f^{-}(s,X_{s},Z'_{s})\,ds+dV^{-}_{s})-{\int_{t}^{T}} \overline{Z}_{s}\,dB_{s}. \end{align*} By Proposition \ref{stw2}, $\overline{X}_{t}\ge X_{t}\ge L_{t},\, t\in [0,T]$. Hence \begin{align*} \overline{X}_{t}&=\xi\vee X_T +{\int_{t}^{T}} f(s,\overline{X}_{s},\overline{Z}_{s})\,ds +{\int_{t}^{T}} n(\overline{X}_{s}-L_{s})^{-}\,ds \\&\quad+{\int_{t}^{T}} (f^{-}(s,X_{s},Z'_{s})\,ds +dV^{-}_{s})-{\int_{t}^{T}} \overline{Z}_{s}\,dB_{s},\quad t\in [0,T], \end{align*} so using once again Proposition \ref{stw2} we see that $\overline{X}_{t}\ge Y^{n}_{t}$, $t\in [0,T]$. By \cite[Remark 4.3]{BDHPS}, $E(\int_{0}^{T}|f(s,\overline{X}_{s},0)|\,ds)^{p}<\infty$. Hence, by Lemma \ref{lm1} and Proposition \ref{stw1}, \begin{align} \label{eq4.2} &E|Y^{n,*}_{T}|^{p}+E(\int_{0}^{T}|Z^{n}_{s}|^{2}\,ds)^{p/2} +E|K^{n}_{T}|^{p}\nonumber\\ &\qquad\le C(p,\lambda,T)E\{|\xi|^{p} +(\int_{0}^{T}f_{s}\,ds)^{p} +(\int_{0}^{T}|f(s,\overline{X}_{s},0)|\,ds)^{p}\}. \end{align} From this and \cite[Remark 4.3]{BDHPS}, \begin{equation} \label{T1} E(\int_{0}^{T}|f(s,Y^{n}_{s},Z_{s}^{n})|\,ds)^{p}\le C^{'}(p,\lambda,T). \end{equation} By Proposition \ref{stw2} there exists a progressively measurable process $Y$ such that $Y^{n}_{t}\uparrow Y_{t}$, $t\in [0,T]$.
Using the monotone convergence of $Y^{n}$, (H3)--(H5), (\ref{eq4.2}), (\ref{T1}) and the Lebesgue dominated convergence theorem we conclude that \begin{equation} \label{T2} E(\int_{0}^{T}|f(s,Y^{n}_{s},0)-f(s,Y_{s},0)|\,ds)^{p}\rightarrow 0 \end{equation} Moreover, by (H2) and (\ref{eq4.2}), \[ \sup_{n\ge1}E\int_{0}^{T} |f(s,Y^{n}_{s},Z^{n}_{s})-f(s,Y^{n}_{s},0)|^{p}\,ds<\infty. \] It follows in particular that there exists a process $\eta\in\mathbb{L}^{p}({\mathcal{F}})$ such that \[ \int_{0}^{\tau}(f(s,Y^{n}_{s},Z^{n}_{s})-f(s,Y^{n}_{s},0))\,ds \rightarrow \int_{0}^{\tau}\eta_{s}\,ds \] weakly in $\mathbb{L}^{1}({\mathcal{F}}_{T})$ for every stopping time $\tau\le T$. Consequently, by Lemmas \ref{lm3} and \ref{lm5}, $Y$ is a c\`adl\`ag process and there exist $Z\in M^{p}$ and a c\`adl\`ag increasing process $K$ such that $K_{0}=0$ and \begin{equation} \label{eq4.3} Y_{t}=\xi+{\int_{t}^{T}} f(s,Y_{s},0)\,ds+{\int_{t}^{T}}\eta_{s}\,ds+{\int_{t}^{T}} dK_{s} -{\int_{t}^{T}} Z_{s}\,dB_{s},\quad t\in [0,T]. \end{equation} From (\ref{eq5.01}), (\ref{eq4.2}), (\ref{T1}) and pointwise convergence of the sequence $\{Y^{n}\}$ one can deduce that $E\int_{0}^{T}(Y_{s}-L_{s})^{-}\,ds=0$, which when combined with (H6) and the fact that $Y$ is c\`adl\`ag implies that $Y_{t}\ge L_{t}$, $t\in [0,T]$. From this, the monotone character of the convergence of the sequence $\{Y^{n}\}$ and Dini's theorem we conclude that \begin{align}\label{eq4.4} E|(Y^{n}-L)^{-,*}_{T}|^{p}\rightarrow 0. 
\end{align} By Proposition \ref{prop.ito}, for $n,m\in\mathbb{N}$ we have \begin{align}\label{eq4.5} \nonumber &|Y^{n}_{t}-Y^{m}_{t}|^{p}+c(p){\int_{t}^{T}} |Y^{n}_{s}-Y^{m}_{s}|^{p-2} \mathbf{1}_{\{Y^{n}_{s}-Y^{m}_{s}\neq 0\}} |Z^{n}_{s}-Z^{m}_{s}|^{2}\,ds\\ \nonumber &\qquad = p{\int_{t}^{T}} |Y^{n}_{s}-Y^{m}_{s}|^{p-1} \widehat{Y^{n}_{s}-Y^{m}_{s}} (f(s,Y^{n}_{s},Z^{n}_{s})-f(s,Y^{m}_{s},Z^{m}_{s}))\,ds\\ \nonumber &\qquad\quad+p{\int_{t}^{T}}|Y^{n}_{s}-Y^{m}_{s}|^{p-1} \widehat{Y^{n}_{s}-Y^{m}_{s}}(dK^{n}_{s}-dK^{m}_{s})\\ &\qquad\quad -p{\int_{t}^{T}} |Y^{n}_{s}-Y^{m}_{s}|^{p-1} \widehat{Y^{n}_{s}-Y^{m}_{s}}(Z^{n}_{s}-Z^{m}_{s})\,dB_{s},\quad t\in [0,T]. \end{align} By monotonicity of the function $\mathbb{R}\ni x\mapsto|x|^{p-1}\widehat{x}$, \begin{align} \label{eq4.6} {\int_{t}^{T}}|Y^{n}_{s}-Y^{m}_{s}|^{p-1}\widehat{Y^{n}_{s}-Y^{m}_{s}}\,dK^{n}_{s} \le {\int_{t}^{T}}|(Y^{m}_{s}-L_{s})^{-}|^{p-1} \widehat{(Y^{m}_{s}-L_{s})^{-}}\,dK^{n}_{s} \end{align} and \begin{align} \label{eq4.7} -{\int_{t}^{T}}|Y^{n}_{s}-Y^{m}_{s}|^{p-1} \widehat{Y^{n}_{s}-Y^{m}_{s}}\,dK^{m}_{s} \le {\int_{t}^{T}}|(Y^{n}_{s}-L_{s})^{-}|^{p-1} \widehat{(Y^{n}_{s}-L_{s})^{-}}\,dK^{m}_{s}. \end{align} By (H2), (H3), (\ref{eq4.5})--(\ref{eq4.7}) and H\"older's inequality, \begin{align} \label{eq.tem2012} \nonumber & E|Y^{n}_{t}-Y^{m}_{t}|^{p} +c(p)E{\int_{t}^{T}}|Y^{n}_{s}-Y^{m}_{s}|^{p-2} \mathbf{1}_{\{Y^{n}_{s}-Y^{m}_{s}\neq0\}} |Z^{n}_{s}-Z^{m}_{s}|^{2}\,ds\\ &\nonumber\quad\le p\lambda E\int_{0}^{T} |Y^{n}_{s}-Y^{m}_{s}|^{p-1}|Z^{n}_{s}-Z^{m}_{s}|\,ds +(E|(Y^{n}-L)^{-,*}_{T}|^{p})^{(p-1)/p}(E|K^{m}_{T}|^{p})^{1/p} \\&\qquad +(E|(Y^{m}-L)^{-,*}_{T}|^{p})^{(p-1)/p}(E|K^{n}_{T}|^{p})^{1/p}.
\end{align} Since \begin{align*} p\lambda |Y^{n}_{s}-Y^{m}_{s}|^{p-1}|Z^{n}_{s}-Z^{m}_{s}| &\le\frac{p\lambda^{2}}{1\wedge(p-1)}|Y^{n}_{s}-Y^{m}_{s}|^{p}\\& \quad+\frac{c(p)}{2}\mathbf{1}_{\{Y^{n}_{s}-Y^{m}_{s} \neq 0\}}|Y^{n}_{s}-Y^{m}_{s}|^{p-2}|Z^{n}_{s}-Z^{m}_{s}|^{2}, \end{align*} from (\ref{eq.tem2012}) we get \begin{align*} & E|Y^{n}_{t}-Y^{m}_{t}|^{p} +\frac{c(p)}{2}E{\int_{t}^{T}}|Y^{n}_{s}-Y^{m}_{s}|^{p-2} \mathbf{1}_{\{Y^{n}_{s}-Y^{m}_{s}\neq0\}} |Z^{n}_{s}-Z^{m}_{s}|^{2}\,ds\\ &\quad\le c(p,\lambda) E\int_{0}^{T}|Y^{n}_{s}-Y^{m}_{s}|^{p}\,ds +(E|(Y^{n}-L)^{-,*}_{T}|^{p})^{(p-1)/p}(E|K^{m}_{T}|^{p})^{1/p} \\&\qquad +(E|(Y^{m}-L)^{-,*}_{T}|^{p})^{(p-1)/p}(E|K^{n}_{T}|^{p})^{1/p}\equiv I^{n,m}. \end{align*} From the above, (\ref{eq4.2}), (\ref{eq4.4}) and the monotone convergence of $\{Y^{n}\}$ we get \begin{equation} \label{T3} \lim_{n,m\rightarrow +\infty} I^{n,m}=0 \end{equation} which implies that \begin{equation}\label{eq4.9} \lim_{n,m\rightarrow+\infty}E\int_{0}^{T}|Y^{n}_{s}-Y^{m}_{s}|^{p-2} \mathbf{1}_{\{Y^{n}_{s}\neq Y^{m}_{s}\}} |Z^{n}_{s}-Z^{m}_{s}|^{2}\,ds=0. \end{equation} From (\ref{eq4.5}) one can also conclude that \begin{align*} &E\sup_{0\le t\le T}|Y^{n}_{t}-Y^{m}_{t}|^{p}\\ &\qquad\le c'(p,\lambda) \{I^{n,m}+E\sup_{0\le t\le T}|{\int_{t}^{T}} |Y^{n}_{s}-Y^{m}_{s}|^{p-1} \widehat{Y^{n}_{s}-Y^{m}_{s}}(Z^{n}_{s}-Z^{m}_{s})\,dB_{s}|\}. \end{align*} Using the Burkholder-Davis-Gundy inequality and then Young's inequality we deduce from the above that \[ E\sup_{0\le t\le T}|Y^{n}_{t}-Y^{m}_{t}|^{p}\le c''(p,\lambda) \{I^{n,m}+E\int_{0}^{T}\mathbf{1}_{\{Y^{n}_{s} \neq Y^{m}_{s}\}} |Y^{n}_{s}-Y^{m}_{s}|^{p-2} |Z^{n}_{s}-Z^{m}_{s}|^{2}\,ds\}. \] Hence, by (\ref{T3}) and (\ref{eq4.9}), \begin{align}\label{eq4.10} \lim_{n,m\rightarrow+\infty}E\sup_{0\le t\le T}|Y^{n}_{t}-Y^{m}_{t}|^{p}=0, \end{align} which implies that $Y\in\mathcal{S}^{p}$.
Our next goal is to show that \begin{equation}\label{eq4.11} \lim_{n,m\rightarrow+\infty} E(\int_{0}^{T}|Z^{n}_{t}-Z^{m}_{t}|^{2}\,dt)^{p/2}=0. \end{equation} By It\^o's formula applied to $|Y^{n}-Y^{m}|^{2}$, (H2) and (H3), \begin{align*} \int_{0}^{T}|Z^{n}_{t}-Z^{m}_{t}|^{2}\,dt &\le 2\lambda\int_{0}^{T}|Y^{n}_{t}-Y^{m}_{t}||Z^{n}_{t}-Z^{m}_{t}|\,dt +2\int_{0}^{T}|Y^{n}_{t}-Y^{m}_{t}|\,dK^{n}_{t}\\&\quad +2\int_{0}^{T}|Y^{n}_{t}-Y^{m}_{t}|\,dK^{m}_{t} +\sup_{0\le t\le T}|{\int_{t}^{T}} (Z^{n}_{s}-Z^{m}_{s}) (Y^{n}_{s}-Y^{m}_{s})\,dB_{s}|. \end{align*} Hence, by the Burkholder-Davis-Gundy inequality and Young's inequality, \begin{align*} &E(\int_{0}^{T}|Z^{n}_{t}-Z^{m}_{t}|^{2}\,dt)^{p/2}\le C(p,\lambda)\{E|(Y^{n}-Y^{m})^{*}_{T}|^{p}\\ &\qquad+(E|(Y^{n}-Y^{m})^{*}_{T}|^{p})^{1/2}(E|K^{n}_{T}|^{p})^{1/2} +(E|(Y^{n}-Y^{m})^{*}_{T}|^{p})^{1/2}(E|K^{m}_{T}|^{p})^{1/2}\}. \end{align*} From the above inequality, (\ref{eq4.2}) and (\ref{eq4.10}) we get (\ref{eq4.11}). From (\ref{eq4.11}) and (\ref{eq4.3}) it follows immediately that \[ Y_{t}=\xi+\int_{t}^{T}f(s,Y_{s},Z_{s})\,ds+\int_{t}^{T}dK_{s} -\int_{t}^{T}Z_{s}\,dB_{s},\quad t\in [0,T], \] which implies that $K$ is continuous. In fact, by (\ref{eq4.2}), $K\in \mathcal{V}^{+,p}_{c}$. Moreover, from (\ref{eq5.01}), (\ref{T1}), (\ref{T2}) (\ref{eq4.10}), (\ref{eq4.11}) and (H2) we deduce that \begin{align}\label{eq4.12} \lim_{n,m\rightarrow+\infty}E\sup_{0\le t\le T} |K^{n}_{t}-K^{m}_{t}|^{p}=0. \end{align} Since $\int_{0}^{T}(Y^{n}_{t}-L_{t})\,dK^{n}_{t}\le 0$, it follows from (\ref{eq4.10}), (\ref{eq4.12}) that $\int_{0}^{T}(Y_{t}-L_{t})\,dK_{t}\le0$, which when combined with the fact that $Y_{t}\ge L_{t}$, $t\in [0,T]$ shows that \[ \int_{0}^{T}(Y_{t}-L_{t})\,dK_{t}=0. \] Thus the triple $(Y,Z,K)$ is a solution of RBSDE$(\xi,f,L)$, which completes the proof of (i). Assertion (ii) follows from (\ref{eq4.10})--(\ref{eq4.12}). \end{dow} \begin{uw} Let $p>1$ and let assumptions (H1)--(H3) hold. 
If $(Y,Z,K)$ is a solution of RBSDE$(\xi,f,L)$ such that $(Y,Z)\in \mathcal{S}^{p}\otimes M^{p}$ then from \cite[Remark 4.3]{BDHPS} it follows immediately that \[ E(\int_{0}^{T}|f(s,Y_{s},Z_{s})|\,ds)^{p}<+\infty \mbox{ iff } EK^{p}_{T}<+\infty. \] Moreover, if there exists $X\in{\mathcal{H}}^{p}_{c}$ such that $E(\int_{0}^{T}f^{-}(s,X_{s},0)\,ds)^{p}<+\infty$ then \begin{equation} \label{eq5.14} E(\int_{0}^{T}\mathbf{1}_{\{Y_{s}\le X_{s}\}}\, dK_{s})^{p}<+\infty. \end{equation} Indeed, since $X\in{\mathcal{H}}^{p}_{c} $, there exist $M\in{\mathcal{M}}^{p}_{c}$ and $V\in\mathcal{V}^{p}_{c}$ such that $X_{t}=X_{0}+M_{t}+V_{t}$, $t\in [0,T]$. Let $L^{0}(Y-X)$ denote the local time of $Y-X$ at 0. By (H2), (H3) and the It\^o-Tanaka formula applied to $(Y-X)^{-}$, \begin{align*} \int_{0}^{T}\mathbf{1}_{\{Y_{s}\le X_{s}\}}\,dK_{s} &=(Y_{T}-X_{T})^{-}-(Y_{0}-X_{0})^{-} -\int_{0}^{T}\mathbf{1}_{\{Y_{s}\le X_{s}\}}f(s,Y_{s},Z_{s})\,ds \\&\quad -\int_{0}^{T}\mathbf{1}_{\{Y_{s}\le X_{s}\}} dV_{s} -\frac12\int_{0}^{T}dL^{0}_{s}(Y-X) -\int_{0}^{T}\mathbf{1}_{\{Y_{s}\le X_{s}\}} Z_{s}\,dB_{s}\\ &\quad +\int_{0}^{T}\mathbf{1}_{\{Y_{s}\le X_{s}\}}\,dM_{s}\\ &\le2Y^{*}_{T}+2X^{*}_{T}-\int_{0}^{T}\mathbf{1}_{\{Y_{s} \le X_{s}\}}f(s,X_{s},0)\,ds+\lambda\int_{0}^{T}|Z_{s}|\,ds\\ &\quad+\int_{0}^{T}d|V|_{s}-\int_{0}^{T}\mathbf{1}_{\{Y_{s}\le X_{s}\}} Z_{s}\,dB_{s}+\int_{0}^{T}\mathbf{1}_{\{Y_{s}\le X_{s}\}} \,dM_{s}, \end{align*} from which one can easily get (\ref{eq5.14}). \end{uw} We close this section with an example which shows that assumption (\ref{i7}) is not necessary for existence of $p$-integrable solutions of reflected BSDEs. \begin{prz} Let $V_{t}=\exp(|B_{t}|^{4})$, $t\in [0,T]$. Observe that \[ P(\int_{0}^{T}V_{t}\,dt<+\infty)=1,\quad E\int_{a}^{T}V_{t}\,dt =+\infty,\quad a\in(0,T). \] Now, set $\xi\equiv 0$, $f(t,y)=-(y-(T-t))^{+}V_{t}$, $L_{t}=T-t$, $t\in [0,T]$. Then $\xi,f,L$ satisfy (H1)--(H7) with $p=2$. 
On the other hand, \begin{align*} E\int_{0}^{T}f^{-}(t,L^{*}_{t})\,dt=E\int_{0}^{T}f^{-}(t,T)\,dt =E\int_{0}^{T}tV_{t}\,dt\ge aE\int_{a}^{T}V_{t}\,dt=+\infty. \end{align*} \end{prz} \nsubsection{Existence and uniqueness results for $p=1$} \label{sec6} We first prove uniqueness. \begin{stw}\label{stw4} If $f$ satisfies \mbox{\rm(H2)}, \mbox{\rm(H3)} and \mbox{\rm(Z)} then there exists at most one solution $(Y,Z,K)$ of \mbox{\rm RBSDE}$(\xi,f,L)$ such that $Y$ is of class \mbox{\rm(D)} and $Z\in\bigcup_{\beta>\alpha} M^{\beta}$. \end{stw} \begin{dow} Without loss of generality we may assume that $\mu\le 0$. Let $(Y^{1},Z^{1},K^{1})$, $(Y^{2},Z^{2},K^{2})$ be two solutions to RBSDE$(\xi,f,L)$. By Proposition \ref{stw2.5} it suffices to prove that $|Y^{1}-Y^{2}|\in \mathcal{S}^{p}$ for some $p>1$. Write $Y=Y^{1}-Y^{2}$, $Z=Z^{1}-Z^{2}$, $K=K^{1}-K^{2}$ and $\tau_{k}=\inf\{t\in[0,T]; \int_{0}^{t}(|Z^{1}_{s}|^{2}+|Z^{2}_{s}|^{2})\,ds>k\}\wedge T$. Then by the It\^o formula (see \cite[Corollary 2.3]{BDHPS}), \begin{align*} |Y_{t\wedge\tau_{k}}|&\le |Y_{\tau_{k}}| +\int_{t\wedge \tau_{k}}^{\tau_{k}}\widehat{Y}_{s} (f(s,Y^{1}_{s},Z^{1}_{s})-f(s,Y^{2}_{s},Z^{2}_{s}))\,ds\\&\quad +\int_{t\wedge \tau_{k}}^{\tau_{k}}\widehat{Y}_{s}\,dK_{s} -\int_{t\wedge \tau_{k}}^{\tau_{k}}\widehat{Y}_{s}Z_{s}\,dB_{s}, \quad t\in [0,T]. \end{align*} By the minimality property (d) of the reaction measures $K^{1}, K^{2}$ in the definition of a solution of RBSDE$(\xi,f,L)$, $\int_{0}^{T}\widehat{Y}_{s}\,dK_{s}\le 0$.
Hence \begin{align*} |Y_{t\wedge\tau_{k}}|&\le |Y_{\tau_{k}}| +\int_{t\wedge \tau_{k}}^{\tau_{k}}\widehat{Y}_{s} (f(s,Y^{1}_{s},Z^{1}_{s})-f(s,Y^{2}_{s},Z^{2}_{s}))\,ds -\int_{t\wedge \tau_{k}}^{\tau_{k}} \widehat{Y}_{s}Z_{s}\,dB_{s}\\ &\le |Y_{\tau_{k}}| +\int_{0}^{T}|f(s,Y^{1}_{s},Z^{1}_{s})-f(s,Y^{1}_{s},Z^{2}_{s})|\,ds -\int_{t\wedge \tau_{k}}^{\tau_{k}}\widehat{Y}_{s}Z_{s}\,dB_{s} \end{align*} for $t\in[0,T]$, the last inequality being a consequence of (H3). Consequently, \begin{align*} |Y_{t\wedge\tau_{k}}|\le E^{{\mathcal{F}}_{t}}(|Y_{\tau_{k}}| +\int_{0}^{T}|f(s,Y^{1}_{s},Z^{1}_{s})-f(s,Y^{1}_{s},Z^{2}_{s})|\,ds), \quad t\in [0,T]. \end{align*} Since $Y$ is of class (D), letting $k\rightarrow +\infty$ we conclude from the above that \begin{align*} |Y_{t}|\le E^{{\mathcal{F}}_{t}}(\int_{0}^{T} |f(s,Y^{1}_{s},Z^{1}_{s})-f(s,Y^{1}_{s},Z^{2}_{s})|\,ds),\quad t\in [0,T]. \end{align*} By (Z), \[ |Y_{t}|\le 2\gamma E^{{\mathcal{F}}_{t}}(\int_{0}^{T} (g_{s}+|Y^{1}_{s}|+|Z^{1}_{s}|+|Z^{2}_{s}|)^{\alpha}\,ds). \] From this it follows that $|Y|\in \mathcal{S}^{p}$ for some $p>1$, which proves the proposition. \end{dow} \begin{uw}\label{uw.dic} A brief inspection of the proof of Proposition \ref{stw4} reveals that if $f$ does not depend on $z$ and satisfies (H2) then there exists at most one solution $(Y,Z,K)$ of RBSDE$(\xi,f,L)$ such that $Y$ is of class (D). \end{uw} \begin{uw}\label{uw2} If (H1), (H3), (Z) are satisfied and $(Y,Z)$ is a unique solution of BSDE$(\xi,f)$ such that $Y$ is of class (D) and $Z\in\mathbb{L}^{\alpha}({\mathcal{F}})$ then \[ E\int_{0}^{T}|f(s,Y_{s},Z_{s})|\,ds<+\infty. \] Indeed, by Proposition \ref{prop.ito}, for every stopping time $\tau\le T$, \begin{align*} |Y_{t\wedge\tau}|\le |Y_{\tau}| +\int_{t\wedge\tau}^{\tau}\widehat{Y}_{s}f(s,Y_{s},Z_{s})\,ds -\int_{t\wedge\tau}^{\tau}\widehat{Y}_{s}Z_{s}\,dB_{s},\quad t\in [0,T].
\end{align*} Hence \begin{align*} -\int_{t\wedge\tau}^{\tau}\widehat{Y}_{s} (f(s,Y_{s},Z_{s})-f(s,0,Z_{s}))\,ds &\le |Y_{\tau}|-|Y_{t\wedge\tau}| +\int_{t\wedge\tau}^{\tau}|f(s,0,Z_{s})|\,ds\\&\quad -\int_{t\wedge\tau}^{\tau}\widehat{Y}_{s}Z_{s}\,dB_{s}. \end{align*} By the above inequality, (H3) (without loss of generality we may assume that $\mu\le 0$) and (Z), for $t\in[0,T]$ we have \begin{align*} &E\int_{t\wedge\tau_k}^{\tau_k}|f(s,Y_{s},Z_{s})-f(s,0,Z_{s})|\,ds\\ &\qquad\le E|Y_{\tau_k}| +E\int_{t\wedge\tau_k}^{\tau_k}(g_{s}+|Z_{s}|+|Y_{s}|)^{\alpha}\,ds +\int_{t\wedge\tau_k}^{\tau_k}f_{s}\,ds, \end{align*} where $\tau_{k}$ is defined by (\ref{eq3.01}). Since $Y$ is of class (D), letting $k\rightarrow +\infty$ we obtain \begin{align*} E\int_{0}^{T}|f(s,Y_{s},Z_{s})-f(s,0,Z_{s})|\,ds \le E|\xi|+\gamma E\int_{0}^{T}(g_{s}+|Z_{s}|+|Y_{s}|)^{\alpha}\,ds +\int_{0}^{T}f_{s}\,ds. \end{align*} Using once again (Z) we conclude from the above that \begin{align*} E\int_{0}^{T}|f(s,Y_{s},Z_{s})|\,ds \le E|\xi|+2\gamma E\int_{0}^{T}(g_{s}+|Z_{s}|+|Y_{s}|)^{\alpha}\,ds+ 2\int_{0}^{T} f_{s}\,ds<+\infty. \end{align*} \end{uw} \begin{tw}\label{tw2} Let $p=1$. \begin{enumerate} \item[\rm(i)]Assume \mbox{\rm(H1)}--\mbox{\rm(H6)}, \mbox{\rm(Z)}. Then there exists a solution $(Y,Z,K)$ of \mbox{\rm RBSDE}$(\xi,f,L)$ such that $Y$ is of class \mbox{\rm(D)}, $K\in \mathcal{V}^{+,1}_{c}$ and $Z\in\bigcap_{q<1}M^{q}$ iff \mbox{\rm(H7*)} is satisfied. \item[\rm(ii)]Assume \mbox{\rm(H1)}--\mbox{\rm(H6)}, \mbox{\rm(H7*)} and for $n\in{\mathbb N}$ let $(Y^{n},Z^{n})$ be a solution of \mbox{\rm(\ref{eq5.01})} such that $(Y^{n},Z^{n})\in \mathcal{S}^{q}\otimes M^{q}$, $q\in(0,1)$, and $Y^{n}$ is of class \mbox{\rm(D)}. Let $K^{n}$ be defined by \mbox{\rm(\ref{eq5.02})}. Then for every $q\in(0,1)$, \[ E\sup_{t\le T}|Y^{n}_{t}-Y_{t}|^{q} +E\sup_{t\le T}|K^{n}_{t}-K_{t}|^{q} +E(\int_{0}^{T}|Z^{n}_{t}-Z_{t}|^{2}\,dt)^{q/2}\rightarrow 0 \] as $n\rightarrow +\infty$.
\end{enumerate} \end{tw} \begin{dow} (i) Necessity. By Remark \ref{uw2}, if there is a solution $(Y,Z,K)$ of BSDE$(\xi,f,L)$ such that $(Y,Z)\in \mathcal{S}^{q}\otimes M^{q}$, $q\in (0,1)$, $K\in\mathcal{V}^{+,1}_{c}$ and $Y$ is of class (D) then (H7*) is satisfied with $X=Y$. \\ Sufficiency. We first show that the sequence $\{Y^{n}\}$ is nondecreasing. To this end, let us put $f_{n}(t,y,z)=f(t,y,z)+n(y-L_{t})^{-}$. Since the exponential change of variable described at the beginning of the proof of Lemma \ref{lm1} does not change the monotonicity of the sequence $\{Y^{n}\}$, we may and will assume that the mapping $\mathbb{R}\ni y\mapsto f_{n}(t,y,0)$ is nonincreasing. By the It\^o-Tanaka formula, for every stopping time $\tau\le T$, \begin{align*} &(Y^{n}_{t\wedge\tau}-Y^{n+1}_{t\wedge\tau})^{+} +\frac12\int_{\tau\wedge t}^{\tau}dL^{0}_{s}(Y^{n}-Y^{n+1})\\ &\qquad=(Y^{n}_{\tau}-Y^{n+1}_{\tau})^{+} +\int_{t\wedge\tau}^{\tau} \mathbf{1}_{\{Y^{n}_{s} > Y^{n+1}_{s}\}}(f_{n}(s,Y^{n}_{s},Z^{n}_{s}) -f_{n+1}(s,Y^{n+1}_{s},Z^{n+1}_{s}))\,ds\\ &\qquad\quad-\int_{t\wedge\tau}^{\tau} \mathbf{1}_{\{Y^{n}_{s} > Y^{n+1}_{s}\}}(Z^{n}_{s}-Z^{n+1}_{s})\,dB_{s}. \end{align*} Taking the conditional expectation with respect to ${\mathcal{F}}_t$ on both sides of the above equality with $\tau$ replaced by $\tau_k=\inf\{t\in [0,T];\int_{0}^{t} |Z^{n}_{s}-Z^{n+1}_{s}|^{2}\,ds\ge k\}\wedge T$, letting $k\rightarrow +\infty$ and using the fact that $Y$ is of class (D) we obtain \begin{align} \label{T4} (Y^{n}_{t}-Y^{n+1}_{t})^{+}&\le E^{\mathcal{F}_{t}}\int_{t}^{T} \mathbf{1}_{\{Y^{n}_{s}>Y^{n+1}_{s}\}}(f_{n}(s,Y^{n}_{s},Z^{n}_{s}) -f_{n+1}(s,Y^{n+1}_{s},Z^{n+1}_{s}))\,ds. 
\end{align} From the above inequality and the fact that $f_{n}\le f_{n+1}$ we get \begin{align*} &\int_{t}^{T} \mathbf{1}_{\{Y^{n}_{s} > Y^{n+1}_{s}\}}(f_{n}(s,Y^{n}_{s},Z^{n}_{s}) -f_{n+1}(s,Y^{n}_{s},Z^{n}_{s}))\,ds\\&\quad +\int_{t}^{T} \mathbf{1}_{\{Y^{n}_{s} > Y^{n+1}_{s}\}}(f_{n+1}(s,Y^{n}_{s},Z^{n}_{s}) -f_{n+1}(s,Y^{n+1}_{s},Z^{n+1}_{s}))\,ds \\&\le \int_{t}^{T} \mathbf{1}_{\{Y^{n}_{s} >Y^{n+1}_{s}\}}(f_{n+1}(s,Y^{n}_{s},Z^{n}_{s}) -f_{n+1}(s,Y^{n+1}_{s},Z^{n+1}_{s}))\,ds\\&= \int_{t}^{T}\mathbf{1}_{\{Y^{n}_{s} > Y^{n+1}_{s}\}}(f_{n+1}(s,Y^{n}_{s},Z^{n}_{s}) -f_{n+1}(s,Y^{n}_{s},0))\,ds \\&\quad+\int_{t}^{T}\mathbf{1}_{\{Y^{n}_{s} > Y^{n+1}_{s}\}}(f_{n+1}(s,Y^{n}_{s},0) -f_{n+1}(s,Y^{n+1}_{s},0))\,ds \\&\quad+\int_{t}^{T}\mathbf{1}_{\{Y^{n}_{s} > Y^{n+1}_{s}\}}(f_{n+1}(s,Y^{n+1}_{s},0) -f_{n+1}(s,Y^{n+1}_{s},Z^{n+1}_{s}))\,ds. \end{align*} Since $f_{n}(t,y,z)-f_{n}(t,y,z')=f(t,y,z)-f(t,y,z')$ for every $t\in[0,T]$, $y\in\mathbb{R}$, $z,z'\in\mathbb{R}^{d}$, using the monotonicity of $f_{n+1}$ and assumption (Z) we conclude from the above and (\ref{T4}) that for $t\in[0,T]$, \[ (Y^{n}_{t}-Y^{n+1}_{t})^{+} \le 2\gamma E^{\mathcal{F}_{t}}\int_{0}^{T} (g_{s}+|Y^{n}_{s}|+|Z_{s}^{n}|+|Y^{n+1}_s|+|Z^{n+1}_s|)^{\alpha}\,ds. \] Since $(Y^{n},Z^{n})\in\mathcal{S}^{q}\otimes M^{q}$ for every $q\in (0,1)$, $n\in\mathbb{N}$, it follows from the above estimate that $(Y^{n}-Y^{n+1})^{+}\in \mathcal{S}^{p}$ for some $p>1$. Hence, by Proposition \ref{stw2}, $Y^{n}_{t}\le Y^{n+1}_{t}$, $t\in [0,T]$. Write \[ Y_{t}=\lim_{n\rightarrow +\infty} Y^{n}_{t},\quad t\in [0,T]. \] We are going to show that there is a process $\overline{X}$ of class (D) such that $\overline{X}\in \mathcal{V}^{1}_{c}+{\mathcal{M}}^{q}_{c}$ for $q\in(0,1)$ and $\overline{X}_{t}\ge Y_{t}$, $t\in [0,T]$. Indeed, since $X$ from assumption (H7*) belongs to $\mathcal{V}^{1}_{c}+{\mathcal{M}}^{q}_{c}$ for $q\in (0,1)$, there exist $M\in{\mathcal{M}}^{q}_{c}$ and $V\in\mathcal{V}^{1}_{c}$ such that $X=V+M$. 
By the representation property of the Brownian filtration there exists $Z'\in M^{q}$ such that \[ X_{t}=X_{T}-{\int_{t}^{T}} dV_{s}-{\int_{t}^{T}} Z'_{s}\,dB_{s},\quad t\in [0,T], \] which we can write in the form \begin{align*} X_{t}&=X_{T}+{\int_{t}^{T}} f(s,X_{s},Z'_{s})\,ds -{\int_{t}^{T}} (f^{+}(s,X_{s},Z'_{s})\,ds+dV^{+}_{s})\\&\quad +{\int_{t}^{T}} (f^{-}(s,X_{s},Z'_{s})\,ds+dV^{-}_{s}) -{\int_{t}^{T}} Z'_{s}\,dB_{s},\quad t\in [0,T]. \end{align*} By \cite[Theorem 6.3]{BDHPS} and Remark \ref{uw2} there exists a unique solution $(\overline{X},\overline{Z})$ of the BSDE \begin{align*} \overline{X}_{t}=\xi\vee X_T+{\int_{t}^{T}} f(s,\overline{X}_{s},\overline{Z}_{s})\,ds +{\int_{t}^{T}} (f^{-}(s,X_{s},Z'_{s})\,ds+dV^{-}_{s}) -{\int_{t}^{T}} \overline{Z}_{s}\,dB_{s} \end{align*} such that $(\overline{X},\overline{Z})\in\bigcap_{q<1}\mathcal{S}^{q} \otimes M^{q}$, $\overline{X}$ is of class (D) and \begin{equation} \label{T5} E\int_{0}^{T}|f(t,\bar{X}_{t},\bar{Z}_{t})|\,dt<+\infty. \end{equation} As in the proof of the fact that $(Y^{n}-Y^{n+1})^{+}\in \mathcal{S}^{p}$ one can show that for every stopping time $\tau\le T$, \begin{align*} (X_{t\wedge\tau}-\overline{X}_{t\wedge\tau})^{+} &\le(X_{\tau}-\overline{X}_{\tau})^{+} +\int_{t\wedge\tau}^{\tau}\mathbf{1}_{\{X_{s}>\overline{X}_{s}\}} (f(s,X_{s},Z_{s}')-f(s,\overline{X}_{s},\overline{Z}_{s}))\,ds\\ &\quad-2\int_{t\wedge\tau}^{\tau}\mathbf{1}_{\{X_{s}>\overline{X}_{s}\}} (Z'_{s}-\overline{Z}_{s})\,dB_{s}\\ &\le (X_{\tau}-\overline{X}_{\tau})^{+} +2\gamma\int_{t\wedge\tau}^{\tau}(g_{s}+|X_{s}|+|\bar{X}_{s}|+|Z'_{s}|+|Z_{s}|)^{\alpha}\,ds\\ &\quad-2\int_{t\wedge\tau}^{\tau}\mathbf{1}_{\{X_{s} >\overline{X}_{s}\}}(Z'_{s}-\overline{Z}_{s})\,dB_{s}. \end{align*} Let $\tau_{k}=\inf\{t\in [0,T];\int_{0}^{t} (|Z^{'}_{s}|^{2}+|\overline{Z}_{s}|^{2})\,ds\ge k\}\wedge T$. 
Then \[ (X_{t\wedge\tau_{k}}-\overline{X}_{t\wedge\tau_{k}})^{+} \le E^{\mathcal{F}_{t}}(X_{\tau_{k}}-\overline{X}_{\tau_{k}})^{+} +2\gamma E^{{\mathcal{F}}_{t}}\int_{0}^{T}(g_{s}+|X_{s}|+|\bar{X}_{s}|+|Z'_{s}|+|Z_{s}|)^{\alpha}\,ds. \] Since $X,\overline{X}$ are of class (D), letting $k\rightarrow+\infty$ we get \[ (X_{t}-\overline{X}_{t})^{+}\le 2\gamma E^{{\mathcal{F}}_{t}}\int_{0}^{T} (g_{s}+|X_{s}|+|\bar{X}_{s}|+|Z'_{s}|+|Z_{s}|)^{\alpha}\,ds. \] Therefore $(X-\overline{X})^{+}\in \mathcal{S}^{p}$ for some $p>1$ since $Z',\overline{Z}\in M^{q}$, $X,\bar{X}\in\mathcal{S}^{q}$, $q\in (0,1)$. Consequently, by Proposition \ref{stw2}, $X_{t}\le \overline{X}_{t}$, $t\in [0,T]$. Thus, \begin{align*} \overline{X}_{t}&=\xi\vee X_{T} +{\int_{t}^{T}} f(s,\overline{X}_{s},\overline{Z}_{s})\,ds +{\int_{t}^{T}} n(\overline{X}_{s}-L_{s})^{-}\,ds \\&\quad+{\int_{t}^{T}} (f^{-}(s,X_{s},Z'_{s})\,ds +dV^{-}_{s})-{\int_{t}^{T}} \overline{Z}_{s}\,dB_{s},\quad t\in [0,T]. \end{align*} As in the case of the process $(X-\overline{X})^{+}$ one can show that $(Y^{n}-\overline{X})^{+}\in\mathcal{S}^{p}$ for some $p>1$. Hence, by Proposition \ref{stw2}, $Y^{n}_{t}\le \overline{X}_{t}$, $t\in [0,T]$ for every $n\in\mathbb{N}$. Furthermore, since $Y^{1},\overline{X}\in \mathcal{S}^{q}$, $q\in (0,1)$, we have \begin{equation} \label{eq5.1} \sup_{n\ge1}E|Y^{n,*}_{T}|^{q}<+\infty. \end{equation} It follows in particular that $\sup_{n\ge1}|Y^{n}_{0}|<\infty$ since $Y^{n}_{0}$ are deterministic. Moreover, by Lemma \ref{lm4}, there exists a stationary sequence $\{\sigma^{1}_{k}\}$ of stopping times such that for every $k\in\mathbb{N}$, \begin{equation} \label{eq5.2} \sup_{n\ge1}|Y^{n,*}_{\sigma^{1}_{k}}| \le k\vee(\sup_{n\ge1}|Y^{n}_{0}|)<+\infty. \end{equation} Set \[ \sigma^{2}_{k}=\inf\{t\in [0,T], \min\{Y^{1,*}_{t},\overline{X}^{+,*}_{t}, \int_{0}^{t}f^{-}(s,\overline{X}_{s},0)\,ds, \int_{0}^{t}|f(s,0,0)|\,ds\}>k\}\wedge T \] and $\tau_{k}=\sigma^{1}_{k}\wedge\sigma^{2}_{k}$. 
It is easy to see that the sequence $\{\tau_{k}\}$ is stationary. Using this and the fact that $Y^n_{\tau_k}$, $f$, $L$ satisfy the assumptions of Theorem \ref{tw1} on the interval $[0,\tau_k]$ one can show that there exist $Y,K\in\mathcal{S}, Z\in M$ such that $K$ is increasing, $K_{0}=0$ and \begin{equation} \label{eq5.3} \sup_{0\le t \le T}|Y^{n}_{t}-Y_{t}| +\sup_{0\le t\le T} |K^{n}_{t}-K_{t}|+\int_{0}^{T}|Z^{n}_{s}-Z_{s}|^{2}\,ds\rightarrow 0\mbox{ in probability }P \end{equation} as $n\rightarrow +\infty$. Moreover, one can show that $Y_{t}\ge L_{t}$, $t\in [0,T]$, \begin{align}\label{eq5.4} Y_{t}=\xi+\int_{t}^{T}f(s,Y_{s},Z_{s})\,ds +\int_{t}^{T}dK_{s}-\int_{t}^{T} Z_{s}\,dB_{s},\quad t\in[0,T] \end{align} and \begin{equation} \label{eq5.5} \int_{0}^{T}(Y_{s}-L_{s})\,dK_{s}=0. \end{equation} Accordingly, the triple $(Y,Z,K)$ is a solution of RBSDE$(\xi,f,L)$. The proof of (\ref{eq5.3})--(\ref{eq5.5}) runs as the proof of Theorem \ref{tw1} (see the reasoning following (\ref{eq4.2}) with $p=2$), the only difference being in the fact that now we consider equations on $[0,\tau_{k}]$ with terminal values depending on $n$. However, using (\ref{eq5.2}) and the pointwise convergence of $\{Y^{n}\}$ allows us to overcome this difficulty. Since $Y^{1}_{t}\le Y_{t}\le \overline{X}_{t}$, $t\in [0,T]$, and $Y^{1}, X^{+}$ are of class (D), it follows that $Y$ is of class (D). By Lemma \ref{lm2} for every $q\in(0,1)$, \begin{equation} \label{eq6.8} \sup_{n\ge1}E\big((\int_{0}^{T}|Z^{n}_{t}|^{2}\,dt)^{q/2} +|K^{n}_{T}|^{q}\big)<+\infty. \end{equation} From this and (\ref{eq5.3}) we conclude that $Z\in\bigcap_{q<1}M^{q}$ and $E|K_{T}|^{q}<\infty$ for $q\in(0,1)$. To see that $EK_{T}<\infty$ let us define $\tau_{k}$ by (\ref{eq3.01}). Then by (\ref{eq5.4}), \begin{equation} \label{eq6.9} K_{\tau_{k}}=Y_{0}-Y_{\tau_{k}} -\int_{0}^{\tau_{k}}f(s,Y_{s},Z_{s})\,ds +\int_{0}^{\tau_{k}} Z_{s}\,dB_{s}.
\end{equation} Since $Y$ is of class (D), using Fatou's lemma, (H2), (Z) and the fact that $Y_{t}\le\overline{X}_{t}$, $t\in [0,T]$ we conclude from (\ref{eq6.9}) that \begin{align*} EK_{T}\le EY^{+}_{0}+E\xi^{-} +E\int_{0}^{T}f^{-}(s,\overline{X}_{s},0)\,ds+\gamma E\int_{0}^{T}(g_{s}+|Y_{s}|+|Z_{s}|)^{\alpha}\,ds. \end{align*} Hence $EK_{T}<\infty$, because by (\ref{T5}) and (H2), $E\int_{0}^{T}|f(s,\overline{X}_{s},0)|\,ds<+\infty$. \\ (ii) Convergence of $\{Y^{n}\}$ in $\mathcal{S}^{q}$ for $q\in (0,1)$ follows from (\ref{eq5.1}) and (\ref{eq5.3}). The desired convergence of $\{Z^{n}\}$ and $\{K^{n}\}$ follows from (\ref{eq5.3}) and (\ref{eq6.8}). \end{dow} \begin{uw} An important class of generators satisfying (H1)--(H5) together with (Z) are generators satisfying (H1)--(H5) which are bounded or not depending on $z$. Another class which share these properties are generators of the form \[ f(t,y,z)=g(t,y)+c(1+|z|)^{q}, \] where $q\in [0,\alpha]$ and $g$ is a progressively measurable function satisfying (H1)--(H5). \end{uw} \begin{uw} Let assumptions (H1)--(H3), (Z) hold and let $(Y,Z,K)$ be a solution of RBSDE$(\xi,f,L)$ such that $Y$ is of class (D) and $Z\in\bigcup_{\beta>\alpha} M^{\beta}$. Then from Remark \ref{uw1} it follows immediately that \[ E(\int_{0}^{T}|f(s,Y_{s},Z_{s})|\,ds)<+\infty\mbox{ iff } EK_{T}<+\infty. \] If, in addition, there exists a continuous semimartingale $X$ such that (H7*) is satisfied then \[ E\int_{0}^{T}\mathbf{1}_{\{Y_{s}\le X_{s}\}}dK_{s}<+\infty. \] To prove the last estimate let us put $\tau_{k}=\inf\{t\in [0,T]; \langle M\rangle_{t} +\int_{0}^{t}|Z_{s}|^{2}\,ds>k\}\wedge T$. 
By the It\^o-Tanaka formula and (H2), (H3), \begin{align*} \int_{0}^{\tau_{k}}\mathbf{1}_{\{Y_{s}\le X_{s}\}}\,dK_{s} &=(Y_{\tau_{k}}-X_{\tau_{k}})^{-}-(Y_{0}-X_{0})^{-} -\int_{0}^{\tau_{k}}\mathbf{1}_{\{Y_{s}\le X_{s}\}} f(s,Y_{s},Z_{s})\,ds\\ &\quad-\int_{0}^{\tau_{k}}\mathbf{1}_{\{Y_{s}\le X_{s}\}}dV_{s} -\frac12\int_{0}^{\tau_{k}}dL^{0}_{s}(Y-X)\\ &\quad-\int_{0}^{\tau_{k}}\mathbf{1}_{\{Y_{s}\le X_{s}\}} Z_{s}\,dB_{s}+\int_{0}^{\tau_{k}}\mathbf{1}_{\{Y_{s}\le X_{s}\}}\,dM_{s}. \end{align*} Hence \begin{align*} E\int_{0}^{\tau_{k}}\mathbf{1}_{\{Y_{s}\le X_{s}\}}\,dK_{s} &\le E|Y_{\tau_{k}}|+ EX^{+}_{\tau_{k}} +E\int_{0}^{T}\mathbf{1}_{\{Y_{s}\le X_{s}\}} f^{-}(s,X_{s},0)\,ds\\&\quad +\gamma E\int_{0}^{T}(g_{s}+|Z_{s}|+|Y_{s}|)^{\alpha}\,ds +E\int_{0}^{T}d|V|_{s}. \end{align*} Since $(Y-X)^{-}$ is of class (D), letting $k\rightarrow+\infty$ in the above inequality we get the desired result. \end{uw} \nsubsection{Nonintegrable solutions of reflected BSDEs} \label{sec7} In this section we examine existence and uniqueness of solutions of reflected BSDEs in the case where the data satisfy (H1)--(H6) (resp. (H1)--(H6), (Z) for $p=1$) but (H7) (resp. (H7*) in case $p=1$) is not satisfied. In view of Theorems \ref{tw1} and \ref{tw2} in that case there is neither a solution $(Y,Z,K)$ in the space $\mathcal{S}^{p}\otimes M^{p}\otimes \mathcal{V}^{p,+}_{c}$ if $p>1$ nor a solution in the space $\mathcal{S}^{q}\otimes M^{q}\otimes \mathcal{V}^{1,+}_{c},\, q\in (0,1)$ with $Y$ of class (D) if $p=1$. We will show that nevertheless there exists a solution with weaker integrability properties. Before proving our main result let us note that in \cite{EKPPQ,HP,C} reflected BSDEs with generator $f$ such that $|f(t,y,z)|\le M(|f(t,0,0)|+|y|+|z|)$ for some $M\ge0$ are considered. 
In case $p=2$ it is proved there that if we assume that $\xi, \int_{0}^{T}|f(s,0,0)|\,ds\in \mathbb{L}^{2}({\mathcal{F}}_{T})$, $L$ is continuous and $L^{+}\in \mathcal{S}^{2}$ then there exists a solution $(Y,Z,K)\in \mathcal{S}^{2}\otimes M^{2}\otimes \mathcal{V}^{+,2}_{c}$ of (\ref{eq1.1}) (see \cite{EKPPQ} for the case of Lipschitz continuous generator and \cite{HP,C} for continuous generator). We would like to stress that although in \cite{EKPPQ,HP,C} condition (H7) is not explicitly stated, it is satisfied, because if $f$ satisfies the linear growth condition and $L^{+}\in\mathcal{S}^{2}$ then \[ E(\int_{0}^{T}f^{-}(t,L^{+,*}_{t},0)\,dt)^{2} \le 2M^{2}T^{2}+2T^{2}E|L^{+,*}_{T}|^{2}<+\infty \] and $L_{t}\le L^{+,*}_{t}$, $t\in[0,T]$, $L^{+,*}\in\mathcal{V}^{+,2}_{c}$. \begin{tw} \label{tw7.1} Let \mbox{\rm (H1)}--\mbox{\rm (H6)} (resp. \mbox{\rm (H1)}--\mbox{\rm(H6)}, \mbox{\rm (Z)}) be satisfied and $L^{+}\in\mathcal{S}^{p}$ for some $p>1$ (resp. $L^{+}$ is of class \mbox{\rm(D)}). Then there exists a solution $(Y,Z,K)\in \mathcal{S}^{p}\otimes M\otimes \mathcal{V}^{+}_{c}$ (resp. $(Y,Z,K)\in \mathcal{S}^{q}\otimes M\otimes \mathcal{V}^{+}_{c}$, $q\in (0,1)$ such that $Y$ is of class \mbox{\rm(D)}) of the \mbox{\rm RBSDE}$(\xi,f,L)$. \end{tw} \begin{dow} We first assume that $p=1$. By \cite[Theorem 6.3]{BDHPS} there exists a unique solution $(Y^{n},Z^{n})\in \bigcap_{q<1}\mathcal{S}^{q}\otimes M^{q}$ of (\ref{eq5.01}) such that $Y^n$ is of class (D). 
By Proposition \ref{tw2} (see also the reasoning used at the beginning of the proof of Theorem \ref{tw2}), for every $n\in\mathbb{N}$, $Y^{n}_{t}\le Y^{n+1}_{t}$ and $Y^{n}_{t}\le\bar{Y}^{n}_{t}$, $t\in [0,T]$, where $(\bar{Y}^{n},\bar{Z}^{n})\in \bigcap_{q<1}\mathcal{S}^{q}\otimes M^{q}$ is a solution of the BSDE \begin{align*} \bar{Y}^{n}_{t}=\xi+{\int_{t}^{T}} f^{+}(s,\bar{Y}^{n}_{s}, \bar{Z}^{n}_{s})\, ds+{\int_{t}^{T}} n(\bar{Y}^{n}_{s}-L_{s})^{-}\,ds-{\int_{t}^{T}} \bar{Z}^{n}_{s}\,dB_{s},\quad t\in [0,T] \end{align*} such that $\bar{Y}^{n}$ is of class (D). Hence \begin{align} \label{apx1} |Y^{n}_{t}|\le |Y^{1}_{t}|+|\bar{Y}^{n}_{t}|,\quad t\in [0,T]. \end{align} Put \[ R_{t}(L)=\mathop{\mathrm{ess\,sup}}_{t\le\tau\le T} E(L_{\tau}|{\mathcal{F}}_{t}). \] It is known (see \cite{CK,E}) that $R(L)$ has a continuous version (still denoted by $R(L)$) such that $R(L)$ is a supermartingale of class (D) majorizing the process $L$. Moreover, by the Doob-Meyer decomposition theorem there exist a uniformly integrable continuous martingale $M$ and a process $V\in\mathcal{V}^{+,1}_{c}$ such that $R(L)=M+V$. In particular, by \cite[Lemma 6.1]{BDHPS}, $R(L)\in {\mathcal{M}}^{q}_{c}+\mathcal{V}^{+,1}_{c}$ for every $q\in (0,1)$. Therefore the data $\xi,f^{+},L$ satisfy assumptions (H1)--(H6), (Z) and (H7*) with $X=R(L)$. Hence, by Theorem \ref{tw2}, there exists a unique solution $(\bar{Y},\bar{Z},\bar{K})\in \mathcal{S}^{q}\otimes M^{q}\otimes \mathcal{V}^{+,1}_{c}$, $q\in (0,1)$, of the RBSDE$(\xi,f^{+},L)$ such that $\bar Y$ is of class (D) and \begin{align*} \bar{Y}^{n}_{t}\nearrow \bar{Y}_{t},\quad t\in [0,T]. \end{align*} By the above and (\ref{apx1}), \begin{align} \label{apx3} |Y^{n}_{t}|\le |Y^{1}_{t}|+|\bar{Y}_{t}|,\quad t\in [0,T]. \end{align} Put $Y_{t}=\sup_{n\ge1} Y^{n}_{t}$, $t\in [0,T]$ and \[ \tau_{k}=\inf\{t\in [0,T]; \int_{0}^{t}f^{-}(s,R_{s}(L),0)\,ds >k\}\wedge T. 
\] Then $f,L$ satisfy assumptions (H1)--(H6), (Z) and (H7*) with $X=R(L)$ on each interval $[0,\tau_{k}]$. Therefore analysis similar to that in the proof of (\ref{eq5.03}), but applied to the equation \begin{equation} \label{apx4} Y^{n}_{t\wedge\tau_{k}}=Y^{n}_{\tau_{k}} +\int_{t\wedge\tau_{k}}^{\tau_{k}}f(s,Y^{n}_{s},Z^{n}_{s})\,ds +\int^{\tau_{k}}_{t\wedge \tau_{k}}n(Y^{n}_{s}-L_{s})^{-}\,ds-\int_{t\wedge \tau_{k}}^{\tau_{k}} Z^{n}_{s}\, dB_{s} \end{equation} instead of (\ref{eq5.01}), shows that for every $k\in\mathbb{N}$, \begin{align} \label{apx5} E\sup_{0\le t\le \tau_{k}}|Y^{n}_{t}-Y^{m}_{t}|^{q} +E(\int_{0}^{\tau_{k}}|Z^{n}_{s}-Z^{m}_{s}|^{2}\,ds)^{q/2} +E\sup_{0\le t\le\tau_{k}}|K^{n}_{t}-K^{m}_{t}|^{q}\rightarrow0 \end{align} as $n,m\rightarrow +\infty$, where $K^{n}_{t}=\int_{0}^{t}n(Y^{n}_{s}-L_{s})^{-}\,ds$. (The only difference between the proof of (\ref{apx5}) and (\ref{eq5.03}) is caused by the fact that in (\ref{apx4}) the terminal condition $Y^{n}_{\tau_{k}}$ depends on $n$. But in view of (\ref{apx3}), monotonicity of the sequence $\{Y^{n}\}$ and integrability of $Y^{1},\bar{Y}$ the dependence of $Y^{n}_{\tau_{k}}$ on $n$ presents no difficulty). Since the sequence $\{\tau_{k}\}$ is stationary, from (\ref{apx4}), (\ref{apx5}) we conclude that there exist $K\in \mathcal{V}^{+}_{c}$ and $Z\in M$ such that \[ Y_{t}=\xi+{\int_{t}^{T}} f(s,Y_{s},Z_{s})\,ds+{\int_{t}^{T}} dK_{s} -{\int_{t}^{T}} Z_{s}\,dB_{s},\quad t\in [0,T] \] and (\ref{apx5}) holds with $(Y,Z,K)$ in place of $(Y^{n},Z^{n},K^{n})$. From the properties of the sequence $\{(Y^{n},Z^{n},K^{n})\}$ on $[0,\tau_{k}]$ proved in Theorem \ref{tw2} it follows that \[ Y_{t}\ge L_{t},\quad t\in [0,\tau_{k}], \quad \int_{0}^{\tau_{k}}(Y_{s}-L_{s})\,dK_{s}=0 \] for $k\in \mathbb{N}.$ Due to stationarity of the sequence $\{\tau_{k}\}$ this implies that \[ Y_{t}\ge L_{t},\quad t\in [0,T],\quad \int_{0}^{T}(Y_{s}-L_{s})\,dK_{s}=0. \] Accordingly, the triple $(Y,Z,K)$ is a solution of RBSDE$(\xi,f,L)$.
In case $p>1$ the proof is similar. As a matter of fact it is simpler because instead of considering the Snell envelope $R(L)$ of the process $L$ it suffices to consider the process $L^{+,*}$. \end{dow} \begin{uw} From Proposition \ref{stw4} it follows that the solution obtained in Theorem \ref{tw7.1} is unique in its class for $p>1$. In case $p=1$ it is unique in its class if $f$ does not depend on $z$ (see Remark \ref{uw.dic}). \end{uw} The next example shows that in general the process $K$ of Theorem \ref{tw7.1} may be nonintegrable for any $q>0$. \begin{prz} Let $f(t,y)=-y^{+}\exp(|B_{t}|^{4})$, $L_{t}\equiv 1$, $\xi\equiv 1$. Then $\xi,f,L$ satisfy (H1)--(H6) and $L\in\mathcal{S}^{p}$ for every $p\ge 1$. So by Theorem \ref{tw7.1} and Proposition \ref{stw2.5} there exists a unique solution $(Y,Z,K)\in \mathcal{S}^{2}\otimes M\otimes \mathcal{V}^{+}_{c}$ of the RBSDE$(\xi,f,L)$. Observe that $EK_{T}^{q}=+\infty$ for any $q>0$. Of course, to check this it suffices to consider the case $q\in(0,1]$. Aiming for a contradiction, suppose that $q\in (0,1]$ and $EK^{q}_{T}<+\infty$. Then by \cite[Lemma 3.1]{BDHPS}, $Z\in M^{q}$, which implies that $E(\int_{0}^{T} f^{-}(t,Y_{t})\,dt)^{q}<+\infty$. On the other hand, since $Y_{t}\ge 1$ for $t\in [0,T]$, it follows that \[ E(\int_{0}^{T} f^{-}(t,Y_{t})\,dt)^{q}\ge E\int_{0}^{T}(f^{-}(t,1))^{q}\,dt =E\int_{0}^{T}\exp(q|B_{t}|^{4})\,dt=+\infty. \] \end{prz} \end{document}
\begin{document} \title[On the generators of the polynomial algebra] {} \author{\DJ\d{\u a}ng V\~o Ph\'uc$^{\dag}$ and Nguy\~\ecircumflex n Sum$^{\dag,1}$} \footnotetext[1]{Corresponding author.} \footnotetext[2]{2000 {\it Mathematics Subject Classification}. Primary 55S10; 55S05, 55T15.} \footnotetext[3]{{\it Keywords and phrases:} Steenrod squares, Peterson hit problem, polynomial algebra.} \centerline{\textbf{ON THE GENERATORS OF THE POLYNOMIAL ALGEBRA}} \centerline{\textbf{AS A MODULE OVER THE STEENROD ALGEBRA}} \centerline{\textbf{SUR LES G\'EN\'ERATEURS DE L'ALG\`EBRE POLYNOMIALE}} \centerline{\textbf{COMME MODULE SUR L'ALG\`EBRE DE STEENROD}} \maketitle \noindent{\bf Abstract.} Let $P_k:= \mathbb F_2[x_1,x_2,\ldots ,x_k]$ be the polynomial algebra over the prime field of two elements, $\mathbb F_2$, in $k$ variables $x_1, x_2, \ldots , x_k$, each of degree 1. We are interested in the {\it Peterson hit problem} of finding a minimal set of generators for $P_k$ as a module over the mod-2 Steenrod algebra, $\mathcal{A}$. In this paper, we study the hit problem in degree $(k-1)(2^d-1)$ with $d$ a positive integer. Our result implies the one of Mothebe \cite{mo,mo1}. \noindent{\bf R\'esum\'e.} Soient $\mathcal A$ l'alg\`ebre de Steenrod mod-2 et $P_k:= \mathbb F_2[x_1,x_2,\ldots ,x_k]$ l'alg\`ebre polynomiale gradu\'ee \`a $k$ g\'en\'erateurs sur le corps \`a deux \'el\'ements $\mathbb F_2$, chaque g\'en\'erateur \'etant de degr\'e 1. Nous \'etudions le probl\`eme suivant soulev\'e par F. Peterson: d\'eterminer un syst\`eme minimal de g\'en\'erateurs comme module sur l'alg\`ebre de Steenrod pour $P_k$, probl\`eme appel\'e {\it hit problem} en anglais. Dans ce but, nous \'etudions le {\it hit problem} en degr\'e $(k-1)(2^d-1)$ avec $d > 0$. Cette solution implique un r\'esultat de Mothebe~\cite{mo,mo1}.
\section{Introduction}\label{s1} \setcounter{equation}{0} Let $P_k$ be the graded polynomial algebra $\mathbb F_2[x_1,x_2,\ldots ,x_k]$, with the degree of each $x_i$ being 1. This algebra arises as the cohomology with coefficients in $\mathbb F_2$ of an elementary abelian 2-group of rank $k$. Then, $P_k$ is a module over the mod-2 Steenrod algebra, $\mathcal A$. The action of $\mathcal A$ on $P_k$ is determined by the elementary properties of the Steenrod squares $Sq^i$ and subject to the Cartan formula (see Steenrod and Epstein~\cite{st}). An element $g$ in $P_k$ is called {\it hit} if it belongs to $\mathcal{A}^+P_k$, where $\mathcal{A}^+$ is the augmentation ideal of $\mathcal A$. That means $g$ can be written as a finite sum $g = \sum_{u\geqslant 0}Sq^{2^u}(g_u)$ for suitable polynomials $g_u \in P_k$. We are interested in the {\it hit problem}, set up by F. Peterson, of finding a minimal set of generators for the polynomial algebra $P_k$ as a module over the Steenrod algebra. In other words, we want to find a basis of the $\mathbb F_2$-vector space $QP_k := P_k/\mathcal A^+P_k = \mathbb F_2 \otimes_{\mathcal A} P_k$. The hit problem was first studied by Peterson~\cite{pe}, Wood~\cite{wo}, Singer~\cite {si1}, and Priddy~\cite{pr}, who showed its relation to several classical problems respectively in cobordism theory, modular representation theory, the Adams spectral sequence for the stable homotopy of spheres, and stable homotopy type of classifying spaces of finite groups. The vector space $QP_k$ was explicitly calculated by Peterson~\cite{pe} for $k=1, 2,$ by Kameko~\cite{ka} for $k=3$, and recently by the second author ~\cite{su1,su3} for $k=4$. From the results of Wood \cite{wo} and Kameko \cite{ka}, the hit problem is reduced to the case of degree $n$ of the form \begin{equation} \label{ct1.1}n = s(2^d-1) + 2^dm, \end{equation} where $s, d, m$ are non-negative integers and $1 \leqslant s <k$, (see \cite{su3}.) 
For $s=k-1$ and $m > 0$, the problem was studied by Crabb and Hubbuck~\cite{ch}, Nam~\cite{na}, Repka and Selick~\cite{res} and the second author ~\cite{su1,su3}. In the present paper, we study the hit problem in degree $n$ of the form (\ref{ct1.1}) with $s=k-1$, $m=0$ and $d$ an arbitrary positive integer. Denote by $(QP_k)_n$ the subspace of $QP_k$ consisting of the classes represented by the homogeneous polynomials of degree $n$ in $P_k$. From the result of Carlisle and Wood \cite{cw} on the boundedness conjecture, one can see that for $d$ big enough, the dimension of $(QP_k)_n$ does not depend on $d$; it depends only on $k$. In this paper, we prove the following. \noindent {\bf Main Theorem.} {\it Let $n=(k-1)(2^d-1)$ with $d$ a positive integer and let $p = \min\{k,d\}$, $q = \min\{k,d-1\}$. If $k \geqslant 3$, then \begin{equation*}\label{ctc}\dim (QP_k)_n \geqslant c(k,d) := \sum_{t=1}^p\binom kt + (k-3)\binom{k}2\sum_{u=1}^{q}\binom ku , \end{equation*} with equality if and only if either $k=3$ or $k=4,\ d\geqslant 5$ or $k =5,\ d \geqslant 6$. } Note that $c(k,1) = \binom k1=k$. If $d > k$, then $c(k,d) = \big((k-3)\binom{k}2 + 1\big)(2^k-1)$. At the end of Section \ref{s3}, we show that our result implies Mothebe's result in \cite{mo,mo1}. In Section \ref{s2}, we recall the definition of an admissible monomial in $P_k$ and Singer's criterion on the hit monomials. Our results will be presented in Section \ref{s3}. \section{Preliminaries}\label{s2} \setcounter{equation}{0} In this section, we recall some needed information from Kameko~\cite{ka} and Singer~\cite{si2}, which will be used in the next section. 
\begin{nota} We denote $\mathbb N_k = \{1,2, \ldots , k\}$ and \begin{align*} X_{\mathbb J} = X_{\{j_1,j_2,\ldots , j_s\}} = \prod_{j\in \mathbb N_k\setminus \mathbb J}x_j , \ \ \mathbb J = \{j_1,j_2,\ldots , j_s\}\subset \mathbb N_k, \end{align*} In particular, $X_{\mathbb N_k} =1,\ X_\emptyset = x_1x_2\ldots x_k,$ $X_j = x_1\ldots \hat x_j \ldots x_k, \ 1 \leqslant j \leqslant k,$ and $X:=X_k \in P_{k-1}.$ Let $\alpha_i(a)$ denote the $i$-th coefficient in dyadic expansion of a non-negative integer $a$. That means $a= \alpha_0(a)2^0+\alpha_1(a)2^1+\alpha_2(a)2^2+ \ldots ,$ for $ \alpha_i(a) =0$ or 1 with $i\geqslant 0$. Set $\alpha(a) = \sum_{i\geqslant 0}\alpha_i(a).$ Let $x=x_1^{a_1}x_2^{a_2}\ldots x_k^{a_k} \in P_k$. Denote $\nu_j(x) = a_j, 1 \leqslant j \leqslant k$. Set $\mathbb J_t(x) = \{j \in \mathbb N_k :\alpha_t(\nu_j(x)) =0\},$ for $t\geqslant 0$. Then, we have $x = \prod_{t\geqslant 0}X_{\mathbb J_t(x)}^{2^t}.$ \end{nota} \begin{defn} For a monomial $x$ in $P_k$, define two sequences associated with $x$ by \begin{align*} \omega(x)=(\omega_1(x),\omega_2(x),\ldots , \omega_i(x), \ldots),\ \ \sigma(x) = (\nu_1(x),\nu_2(x),\ldots ,\nu_k(x)), \end{align*} where $\omega_i(x) = \sum_{1\leqslant j \leqslant k} \alpha_{i-1}(\nu_j(x))= \deg X_{\mathbb J_{i-1}(x)},\ i \geqslant 1.$ The sequence $\omega(x)$ is called the weight vector of $x$. Let $\omega=(\omega_1,\omega_2,\ldots , \omega_i, \ldots)$ be a sequence of non-negative integers. The sequence $\omega$ is called the weight vector if $\omega_i = 0$ for $i \gg 0$. \end{defn} The sets of the weight vectors and the sigma vectors are given the left lexicographical order. For a weight vector $\omega$, we define $\deg \omega = \sum_{i > 0}2^{i-1}\omega_i$. 
If there are $i_0=0, i_1, i_2, \ldots , i_r > 0$ such that $i_1 + i_2 + \ldots + i_r = m$, $\omega_{i_1+\ldots +i_{s-1} + t} = b_s, 1 \leqslant t \leqslant i_s, 1 \leqslant s \leqslant r$, and $\omega_i=0$ for all $i > m$, then we write $\omega = (b_1^{(i_1)},b _2^{(i_2)},\ldots , b_r^{(i_r)})$. Denote $b_u^{(1)} = b_u$. For example, $\omega = (3,3,2,1,1,1,0,\ldots) = (3^{(2)},2,1^{(3)})$. Denote by $P_k(\omega)$ the subspace of $P_k$ spanned by monomials $y$ such that $\deg y = \deg \omega$, $\omega(y) \leqslant \omega$, and by $P_k^-(\omega)$ the subspace of $P_k$ spanned by monomials $y \in P_k(\omega)$ such that $\omega(y) < \omega$. \begin{defn}\label{dfn2} Let $\omega$ be a weight vector and $f, g$ two polynomials of the same degree in $P_k$. i) $f \equiv g$ if and only if $f - g \in \mathcal A^+P_k$. If $f \equiv 0$ then $f$ is called hit. ii) $f \equiv_{\omega} g$ if and only if $f - g \in \mathcal A^+P_k+P_k^-(\omega)$. \end{defn} Obviously, the relations $\equiv$ and $\equiv_{\omega}$ are equivalence ones. Denote by $QP_k(\omega)$ the quotient of $P_k(\omega)$ by the equivalence relation $\equiv_\omega$. Then, we have $QP_k(\omega)= P_k(\omega)/ ((\mathcal A^+P_k\cap P_k(\omega))+P_k^-(\omega))$ and $(QP_k)_n \cong \bigoplus_{\deg \omega = n}QP_k(\omega)$ (see Walker and Wood \cite{wa1}). We note that the weight vector of a monomial is invariant under the permutation of the generators $x_i$, hence $QP_k(\omega)$ has an action of the symmetric group $\Sigma_k$. For a polynomial $f \in P_k(\omega)$, we denote by $[f]_\omega$ the class in $QP_k(\omega)$ represented by $f$. Denote by $|S|$ the cardinal of a set $S$. \begin{defn}\label{defn3} Let $x, y$ be monomials of the same degree in $P_k$. 
We say that $x <y$ if and only if one of the following holds: i) $\omega (x) < \omega(y)$; ii) $\omega (x) = \omega(y)$ and $\sigma(x) < \sigma(y).$ \end{defn} \begin{defn} A monomial $x$ is said to be inadmissible if there exist monomials $y_1,y_2,\ldots, y_m$ such that $y_t<x$ for $t=1,2,\ldots , m$ and $x - \sum_{t=1}^my_t \in \mathcal A^+P_k.$ A monomial $x$ is said to be admissible if it is not inadmissible. \end{defn} Obviously, the set of the admissible monomials of degree $n$ in $P_k$ is a minimal set of $\mathcal{A}$-generators for $P_k$ in degree $n$. Now, we recall a result of Singer \cite{si2} on the hit monomials in $P_k$. \begin{defn}\label{spi} A monomial $z$ in $P_k$ is called a spike if $\nu_j(z)=2^{d_j}-1$ for $d_j$ a non-negative integer and $j=1,2, \ldots , k$. If $z$ is a spike with $d_1>d_2>\ldots >d_{r-1}\geqslant d_r>0$ and $d_j=0$ for $j>r,$ then it is called the minimal spike. \end{defn} In \cite{si2}, Singer showed that if $\alpha(n+k) \leqslant k$, then there exists uniquely a minimal spike of degree $n$ in $P_k$. \begin{lem}\label{bdbs}\ {\rm i)} All the spikes in $P_k$ are admissible and their weight vectors are weakly decreasing. {\rm ii)} If a weight vector $\omega$ is weakly decreasing and $\omega_1 \leqslant k$, then there is a spike $z$ in $P_k$ such that $\omega (z) = \omega$. \end{lem} The proof of the this lemma is elementary. The following is a criterion for the hit monomials in $P_k$. \begin{thm}[See Singer~\cite{si2}]\label{dlsig} Suppose $x \in P_k$ is a monomial of degree $n$, where $\alpha(n + k) \leqslant k$. Let $z$ be the minimal spike of degree $n$. If $\omega(x) < \omega(z)$, then $x$ is hit. \end{thm} The following theorem will be used in the next section. \begin{thm}[See \cite{su1,su3}]\label{dl1} Let $n =\sum_{i=1}^{k-1}(2^{d_i}-1)$ with $d_i$ positive integers such that $d_1 > d_2 > \ldots >d_{k-2} \geqslant d_{k-1},$ and let $m = \sum_{i=1}^{ k-2}(2^{d_i-d_{k-1}}-1)$. 
If $d_{k-1} \geqslant k-1 \geqslant 3$, then $$\dim (QP_k)_n = (2^k-1)\dim (QP_{k-1})_m.$$ \end{thm} Note that we correct Theorem 3 in \cite{su1} by replacing the condition $d_{k-1} \geqslant k-1 \geqslant 1$ with $d_{k-1} \geqslant k-1 \geqslant 3$. \section{Proof of Main Theorem}\label{s3} Denote $\mathcal N_k =\big\{(i;I) ; I=(i_1,i_2,\ldots,i_r),1 \leqslant i < i_1 < \ldots < i_r\leqslant k,\ 0\leqslant r <k\big\}.$ \begin{defn} Let $(i;I) \in \mathcal N_k$, let $r = \ell(I)$ be the length of $I$, and let $u$ be an integer with $1 \leqslant u \leqslant r$. A monomial $x \in P_{k-1}$ is said to be $u$-compatible with $(i;I)$ if all of the following hold: i) $\nu_{i_1-1}(x)= \nu_{i_2-1}(x)= \ldots = \nu_{i_{(u-1)}-1}(x)=2^{r} - 1$, ii) $\nu_{i_u-1}(x) > 2^{r} - 1$, iii) $\alpha_{r-t}(\nu_{i_u-1}(x)) = 1,\ \forall t,\ 1 \leqslant t \leqslant u$, iv) $\alpha_{r-t}(\nu_{i_t-1} (x)) = 1,\ \forall t,\ u < t \leqslant r$. \end{defn} Clearly, a monomial $x$ can be $u$-compatible with a given $(i;I) \in \mathcal N_k $ for at most one value of $u$. By convention, $x$ is $1$-compatible with $(i;\emptyset)$. For $1 \leqslant i \leqslant k$, define the homomorphism $f_i: P_{k-1} \to P_k$ of algebras by substituting $$f_i(x_j) = \begin{cases} x_j, &\text{ if } 1 \leqslant j <i,\\ x_{j+1}, &\text{ if } i \leqslant j <k. \end{cases}$$ \begin{defn}\label{dfn1} Let $(i;I) \in \mathcal N_k$, $x_{(I,u)} = x_{i_u}^{2^{r-1}+\ldots + 2^{r-u}}\prod_{u< t \leqslant r}x_{i_t}^{2^{r-t}}$ for $r = \ell(I)>0$, $x_{(\emptyset,1)} = 1$. For a monomial $x$ in $P_{k-1}$, we define the monomial $\phi_{(i;I)}(x)$ in $P_k$ by setting $$ \phi_{(i;I)}(x) = \begin{cases} (x_i^{2^r-1}f_i(x))/x_{(I,u)}, &\text{if there exists $u$ such that}\\ &\text{$x$ is $u$-compatible with $(i, I)$,}\\ 0, &\text{otherwise.} \end{cases}$$ Then we have an $\mathbb F_2$-linear map $\phi_{(i;I)}:P_{k-1}\to P_k$. In particular, $\phi_{(i;\emptyset)} = f_i$. 
\end{defn} For a positive integer $b$, denote $\omega_{(k,b)} =((k-1)^{(b)})$ and $\bar \omega_{(k,b)}= ((k-1)^{(b-1)},k-3,1)$. \begin{lem}[See \cite{su3}]\label{hq0} Let $b$ be a positive integer and let $j_0, j_1, \ldots , j_{b-1} \in \mathbb N_k$. We set $i = \min\{j_0,\ldots , j_{b-1}\}$, $I = (i_1, \ldots, i_r)$ with $\{i_1, \ldots, i_r\} = \{j_0,\ldots , j_{b-1}\}\setminus \{i\}$. Then, we have $$\prod_{0 \leqslant t <b}X_{j_t}^{2^t} \equiv_{\omega_{(k,b)}} \phi_{(i;I)}(X^{2^b-1}).$$ \end{lem} \begin{defn} For any $(i;I) \in \mathcal N_k$, we define the homomorphism $p_{(i;I)}: P_k \to P_{k-1}$ of algebras by substituting $$p_{(i;I)}(x_j) =\begin{cases} x_j, &\text{ if } 1 \leqslant j < i,\\ \sum_{s\in I}x_{s-1}, &\text{ if } j = i,\\ x_{j-1},&\text{ if } i< j \leqslant k. \end{cases}$$ Then, $p_{(i;I)}$ is a homomorphism of $\mathcal A$-modules. In particular, for $I =\emptyset$, $p_{(i;\emptyset)}(x_i)= 0$ and $p_{(i;I)}(f_i(y)) = y$ for any $y \in P_{k-1}$. \end{defn} \begin{lem}\label{bdm} If $x$ is a monomial in $P_k$, then $p_{(i;I)}(x) \in P_{k-1}(\omega(x))$. \end{lem} \begin{proof} Set $y = p_{(i;I)}\left(x/x_i^{\nu_i(x)}\right)$. Then, $y$ is a monomial in $P_{k-1}$. If $\nu_i(x) = 0$, then $y = p_{(i;I)}(x)$ and $\omega(y) = \omega(x).$ Suppose $\nu_i(x) >0$ and $\nu_i(x) = 2^{t_1} + \ldots + 2^{t_c}$, where $0 \leqslant t_1 < \ldots < t_c,\ c\geqslant 1$. If $I = \emptyset$, then $p_{(i;I)}(x) = 0$. If $I \ne \emptyset$, then $p_{(i;I)}(x)$ is a sum of monomials of the form $\bar y := \big(\prod_{u=1}^cx_{s_u-1}^{2^{t_u}}\big)y$, where $s_u \in I$, $1 \leqslant u \leqslant c$. If $\alpha_{t_u}(\nu_{s_u-1}(y)) =0$ for all $u$, then $\omega(\bar y) = \omega(x)$. Suppose there is an index $u$ such that $\alpha_{t_u}(\nu_{s_u-1}(y)) =1$. Let $u_0$ be the smallest index such that $\alpha_{t_{u_0}}(\nu_{s_{u_0}-1}(y)) =1$. 
Then, we have $$ \omega_i(\bar y) = \begin{cases}\omega _i(x), &\text{if } i \leqslant t_{u_0},\\ \omega _i(x)-2, &\text{if } i = t_{u_0}+1.\end{cases}$$ Hence, $\omega (\bar y) < \omega(x)$ and $\bar y \in P_{k-1}(\omega(x))$. The lemma is proved. \end{proof} Lemma \ref{bdm} implies that if $\omega$ is a weight vector and $x \in P_k(\omega)$, then $p_{(i;I)}(x) \in P_{k-1}(\omega)$. Moreover, $p_{(i;I)}$ passes to a homomorphism from $QP_k(\omega)$ to $QP_{k-1}(\omega)$. In particular, we have \begin{lem}[See \cite{su3}]\label{bddl} Let $b$ be a positive integer and let $(j;J), (i;I) \in \mathcal N_k$ with $\ell(I) < b$. {\rm i)} If $(i;I)\subset (j;J)$, then $p_{(j;J)}\phi_{(i;I)}(X^{2^b-1}) = X^{2^b-1}\ \ \text{\rm mod}(P_{k-1}^-(\omega_{(k,b)})).$ {\rm ii)} If $(i;I)\not\subset (j;J)$, then $p_{(j;J)}\phi_{(i;I)}(X^{2^b-1}) \in P_{k-1}^-(\omega_{(k,b)}).$ \end{lem} For $0<h\leqslant k$, set $\mathcal N_{k,h} = \{(i;I) \in \mathcal N_k: \ell(I)<h\}$. Then, $|\mathcal N_{k,h}| = \sum_{t=1}^h\binom kt$. \begin{prop}\label{mdcm1} Let $d$ be a positive integer and let $p=\min\{k,d\}$. Then, the set $$B(d) :=\big\{\big[\phi_{(i;I)}(X^{2^d-1})\big]_{\omega_{(k,d)}} : (i;I) \in \mathcal N_{k,p}\big\}$$ is a basis of the $\mathbb F_2$-vector space $QP_k(\omega_{(k,d)})$. Consequently $\dim QP_k(\omega_{(k,d)}) = \sum_{t=1}^p\binom kt.$ \end{prop} \begin{proof} Let $x$ be a monomial in $P_k(\omega_{(k,d)})$ and $[x]_{\omega_{(k,d)}} \ne 0$. Then, we have $\omega(x) = \omega_{(k,d)}$. So, there exist $j_0,j_1,\ldots, j_{d-1} \in \mathbb N_k$ such that $x = \prod_{0 \leqslant t < d}X_{j_t}^{2^t}$. According to Lemma \ref{hq0}, there is $(i;I) \in \mathcal N_k$ such that $x = \prod_{0 \leqslant t < d}X_{j_t}^{2^t}\equiv_{\omega_{(k,d)}} \phi_{(i;I)}(X^{2^d-1}),$ where $r = \ell(I) < p = \min\{k,d\}$. Hence, $QP_k(\omega_{(k,d)})$ is spanned by the set $B(d)$. Now, we prove that the set $B(d)$ is linearly independent in $QP_k(\omega_{(k,d)})$. 
Suppose that there is a linear relation \begin{equation*}\label{ctmdo1}\sum_{(i;I) \in \mathcal N_{k,p}}\gamma_{(i;I)} \phi_{(i;I)}(X^{2^d-1}) \equiv_{\omega_{(k,d)}} 0,\end{equation*} where $\gamma_{(i;I)} \in \mathbb F_2$. By induction on $\ell(I)$, using Lemma \ref{bdm} and Lemma \ref{bddl} with $b=d$, we can easily show that $\gamma_{(i;I)} =0$ for all $(i;I) \in \mathcal N_{k,p}$. The proposition is proved. \end{proof} Set $C_k = \{x_{j_1}x_{j_2}\ldots x_{j_{k-3}}x_j^2: 1\leqslant j_1 < j_2 < \ldots < j_{k-3}<k, \ j_1 \leqslant j <k\} \subset P_{k-1}$. It is easy to see that $|C_k| = (k-3)\binom{k}2$. \begin{lem}\label{bdbbe} $C_k$ is the set of the admissible monomials in $P_{k-1}$ such that their weight vectors are $\bar\omega_{(k,1)}=(k-3,1)$. Consequently, $\dim QP_{k-1}(\bar\omega_{(k,1)}) = (k-3)\binom{k}2$. \end{lem} \begin{proof} Let $z$ be a monomial in $P_{k-1}$ such that $\omega(z) = (k-3,1)$. Then, $z = x_{j_1}x_{j_2}\ldots x_{j_{k-3}}x_j^2$ with $1\leqslant j_1 < j_2 < \ldots < j_{k-3}<k$ and $1 \leqslant j <k$. If $z \not\in C_k$, then $j < j_1$. Then, we have $$ z = \sum_{s=1}^{k-3}x_{j_s}^2x_{j_1}x_{j_2}\ldots \hat x_{j_s}\ldots x_{j_{k-3}}x_j +Sq^1(x_{j_1}x_{j_2}\ldots x_{j_{k-3}}x_j).$$ Since $x_{j_s}^2x_{j_1}x_{j_2}\ldots \hat x_{j_s}\ldots x_{j_{k-3}}x_j <z$ for $1 \leqslant s \leqslant k-3$, $z$ is inadmissible. Suppose that $z \in C_k$. If there is an index $s$ such that $j = j_s$, then $z$ is a spike. Hence, by Lemma \ref{bdbs}, it is admissible. Assume that $j \ne j_s$ for all $s$. If $z$ is inadmissible, then there exist monomials $y_1,\ldots, y_m$ in $P_{k-1}$ such that $y_t < z$ for all $t$ and $z =\sum_{t=1}^m y_t + \sum_{u\geqslant 0} Sq^{2^u}(g_u)$, where $g_u$ are suitable polynomials in $P_{k-1}$. 
Since $y_t < z$ for all $t$, $z$ is a term of $\sum_{u\geqslant 0} Sq^{2^u}(g_u)$, (recall that a monomial $x$ in $P_k$ is called {\it a term} of a polynomial $f$ if it appears in the expression of $f$ in terms of the monomial basis of $P_k$.) Based on the Cartan formula, we see that $z$ is not a term of $Sq^{2^u}(g_u)$ for all $u>0$. If $z$ is a term of $Sq^{1}(y)$ with $y$ a monomial in $P_{k-1}$, then $y = x_{j_1}x_{j_2}\ldots x_{j_{k-3}}x_j := \tilde y$. So, $\tilde y$ is a term of $g_0$. Then, we have \begin{align*}\bar y := x_{j_1}^2x_{j_2}\ldots x_{j_{k-3}}x_j = \sum_{s=2}^{k-3}&x_{j_s}^2x_{j_1}x_{j_2}\ldots \hat x_{j_s}\ldots x_{j_{k-3}}x_j\\ &+\sum_{t=1}^m y_t + Sq^1(g_0+\tilde y) + \sum_{u\geqslant 1} Sq^{2^u}(g_u). \end{align*} Since $j_1 < j$, we have $y_t < z < \bar y$ for all $t$. Hence, $\bar y$ is a term of $Sq^1(g_0+\tilde y) + \sum_{u\geqslant 1} Sq^{2^u}(g_u)$. By an argument analogous to the previous one, we see that $\tilde y$ is a term of $g_0+\tilde y$. This contradicts the fact that $\tilde y$ is a term of $g_0$. The lemma is proved. \end{proof} \begin{prop}\label{mdcm2} Let $d$ be a positive integer and let $q =\min\{k,d-1\}$. Then, the set $$\bar B(d):=\bigcup_{z \in C_k} \big\{\big[\phi_{(i;I)}(X^{2^{d-1}-1}z^{2^{d-1}})\big]_{\bar\omega_{(k,d)}} : \ (i;I) \in \mathcal N_{k,q}\big\}$$ is linearly independent in $QP_k(\bar\omega_{(k,d)})$. If $d >k$, then $\bar B(d)$ is a basis of $QP_k(\bar\omega_{(k,d)})$. Consequently $\dim QP_k(\bar\omega_{(k,d)}) \geqslant (k-3)\binom k2\sum_{u=1}^q\binom ku$ with equality if $d>k$. \end{prop} \begin{proof} We prove the first part of the proposition. Suppose there is a linear relation \begin{equation*}\label{ctmdo2}\mathcal S:= \sum_{((i;I),z) \in \mathcal N_{k,q}\times C_k}\gamma_{(i;I),z} \phi_{(i;I)}(X^{2^{d-1}-1}z^{2^{d-1}}) \equiv_{\bar\omega_{(k,d)}} 0,\end{equation*} where $\gamma_{(i;I),z} \in \mathbb F_2$. We prove $\gamma_{(j;J),z} = 0$ for all $(j;J) \in \mathcal N_{k,q}$ and $z \in C_k$. 
The proof proceeds by induction on $m=\ell(J)$. Let $(i;I) \in \mathcal N_{k,q}$. Since $r=\ell (I) < q =\min\{k,d-1\}$, $X^{2^{d-1}-1}z^{2^{d-1}}$ is 1-compatible with $(i;I)$ and $x_i^{2^r-1}f_i(X^{2^{d-1}-1})$ is divisible by $x_{(I,1)}$. Hence, using Definition \ref{dfn1}, we easily obtain \begin{align*}\phi_{(i;I)}(X^{2^{d-1}-1}z^{2^{d-1}}) =\phi_{(i;I)}(X^{2^{d-1}-1})f_i(z^{2^{d-1}}). \end{align*} A simple computation show that if $g \in P_{k-1}^-(\omega_{(k,d-1)})$, then $gz^{2^{d-1}} \in P_{k-1}^-(\bar\omega_{(k,d)})$; if $(i;I) \subset (j;\emptyset)$, then $(i;I) = (j;\emptyset)$; by Lemma \ref{bdm}, $p_{(j;\emptyset)}(\mathcal S) \equiv_{\bar\omega_{(k,d)}} 0$. Hence, applying Lemma \ref{bddl} with $b = d-1$, we get $$p_{(j,\emptyset)}(\mathcal S) \equiv_{\bar\omega_{(k,d)}} \sum_{z\in C_k}\gamma_{(j;\emptyset),z}X^{2^{d-1}-1}z^{2^{d-1}}\equiv_{\bar\omega_{(k,d)}} 0.$$ Since $z$ is admissible in $P_{k-1}$, $X^{2^{d-1}-1}z^{2^{d-1}}$ is also admissible in $P_{k-1}$. Hence, the last relation implies $\gamma_{(j;\emptyset),z} = 0$ for all $z \in C_k$. Suppose $0 < m < q$ and $\gamma_{(i;I),z} = 0$ for all $z\in C_k$ and $(i;I) \in \mathcal N_{k,q}$ with $\ell(I) < m$. Let $(j;J) \in \mathcal N_{k,q}$ with $\ell(J) =m$. Note that by Lemma \ref{bdm}, $p_{(j;J)}(\mathcal S) \equiv_{\bar\omega_{(k,d)}} 0$; if $(i;I) \in \mathcal N_{k,q}$, $\ell(I) \geqslant m$ and $(i;I)\subset (j;J)$, then $(i;I) = (j;J)$. So, using Lemma \ref{bddl} with $b=d-1$ and the inductive hypothesis, we obtain $$p_{(j,J)}(\mathcal S) \equiv_{\bar\omega_{(k,d)}} \sum_{z\in C_k}\gamma_{(j;J),z}X^{2^{d-1}-1}z^{2^{d-1}}\equiv_{\bar\omega_{(k,d)}} 0.$$ From this equality, one gets $\gamma_{(j;J),z} = 0$ for all $z \in C_k$. The first part of the proposition follows. The proof of the second part is similar to the one of Proposition 3.3 in \cite{su3}. However, the relation $\equiv_{\bar\omega_{(k,d)}}$ is used in the proof instead of $\equiv$. 
\end{proof} For $k=5$, we have the following result. \begin{thm}\label{dl12a} Let $n= 4(2^d-1)$ with $d$ a positive integer. The dimension of the $\mathbb F_2$-vector space $(QP_5)_{n}$ is determined by the following table: \centerline{\begin{tabular}{c|ccccc} $n = 4(2^d-1)$&$d=1$ & $d=2$ & $d=3$&$d=4$ & $d\geqslant 5$\cr \hline \ $\dim(QP_5)_n$ & $45$ & $190$ & $480$ &$650$& $651$ \cr \end{tabular}} \end{thm} Since $n= 4(2^d-1) = 2^{d+1} + 2^d + 2^{d-1} + 2^{d-1} -4$, for $d\geqslant 5$, the theorem follows from Theorem ~ \ref{dl1} and a result in \cite{su3}. For $1 \leqslant d \leqslant 4$, the proof of this theorem is based on Theorem ~ \ref{dlsig} and some results of Kameko \cite{ka}. It is long and very technical. The detailed proof of it will be published elsewhere. \begin{proof}[Proof of Main Theorem] For $k=3$, the theorem follows from the results of Kameko \cite{ka}. For $k=4$, it follows from the results in \cite{su1,su3}. Theorem \ref{dl12a} implies immediately this theorem for $k=5$. Suppose $k \geqslant 6$. Lemma \ref{bdbbe} implies that $QP_k(\bar\omega_{(k,1)}) \ne 0$. Hence, \begin{align*}\dim (QP_k)_{k-1} &\geqslant \dim QP_k(\omega_{(k,1)}) +\dim QP_k(\bar\omega_{(k,1)})\\ &> \dim QP_k(\omega_{(k,1)})= k= c(k,1). \end{align*} So, the theorem holds for $d=1$. Now, let $d>1$ and $\widetilde\omega_{(k,d)} = ((k-1)^{(d-2)}, k-3,k-4,2)$. Since $\widetilde\omega_{(k,d)}$ is weakly decreasing, by Lemma \ref{bdbs}, $QP_k(\widetilde\omega_{(k,d)}) \ne 0$. We have $\deg(\omega_{(k,d)}) = \deg(\bar\omega_{(k,d)}) = \deg (\widetilde\omega_{(k,d)}) = (k-1)(2^d-1) = n$ and $(QP_k)_n \cong \bigoplus_{\deg \omega = n}QP_k(\omega).$ Hence, using Propositions \ref{mdcm1} and \ref{mdcm2}, we get \begin{align*}\dim(QP_k)_n &= \sum_{\deg \omega = n}\dim QP_k(\omega) \\ &\geqslant \dim QP_k(\omega_{(k,d)}) + \dim QP_k(\bar\omega_{(k,d)}) + \dim QP_k(\widetilde\omega_{(k,d)})\\ &> \dim QP_k(\omega_{(k,d)}) + \dim QP_k(\bar\omega_{(k,d)}) \geqslant c(k,d). 
\end{align*} The theorem is proved. \end{proof} Denote by $N(k,n)$ the number of spikes of degree $n$ in $P_k$. Note that if $(i;I) \in \mathcal N_k$ and $I \ne \emptyset$, then $\phi_{(i;I)}(x)$ is not a spike for any monomial $x$. Hence, using Propositions \ref{mdcm1} and \ref{mdcm2}, we easily obtain the following. \begin{corl} Under the hypotheses of Main Theorem, \begin{align*}\dim(QP_k)_n \geqslant N(k,n) +\sum_{t=2}^p\binom kt + (k-3)\binom{k}2\sum_{u=2}^{q}\binom ku. \end{align*} \end{corl} This corollary implies Mothebe's result. \begin{corl}[See Mothebe \cite{mo,mo1}]\label{hqmo} Under the above hypotheses, $$\dim (QP_k)_n \geqslant N(k,n) + \sum_{t=2}^p\binom kt .$$ \end{corl} \noindent {\bf Acknowledgment.} We would like to express our warmest thanks to the referee for many helpful suggestions and detailed comments, which have led to improvement of the paper's exposition. The second author was supported in part by the Research Project Grant No. B2013.28.129 and by the Viet Nam Institute for Advanced Study in Mathematics. { } \noindent $^{\dag}$\ Department of Mathematics, Quy Nh\ohorn n University, \noindent \ \ 170 An D\uhorn \ohorn ng V\uhorn \ohorn ng, Quy Nh\ohorn n, B\`inh \DJ\d inh, Viet Nam. \noindent \ \ E-mail: [email protected] and [email protected] \end{document}
\begin{document} \title{Relativistic Trace Formula for Bound States in Terms of Classical Periodic Orbits} \author{H. Kleinert\thanks{[email protected] , ~http://www.physik.fu-berlin.de/\~{}kleinert \hfil } \\ Institut f\"ur Theoretische Physik\\ Freie Universit\"at Berlin, Arnimallee 14, 1000 Berlin 33, Germany \and D. H. Lin\thanks{ e-mail: [email protected]} \\ Department of Physics, National Tsing Hua University \\ Hsinchu 30043, Taiwan, Republic of China\\ } \maketitle \setlength{\baselineskip}{1cm} \centerline{\bf Abstract} {We set up a trace formula for the relativistic density of states in terms of a topological sum of classical periodic orbits. The result is applicable to any relativistic integrable system. } \thispagestyle{empty} \renewcommand {\thesection}{\arabic{section}} \tolerance=10000 \section{Introduction} Gutzwiller's trace formula of 1971 expresses the density of states $g(E)$ of a quantum mechanical system approximately as a sum over all periodic classical orbits \cite{1}. Later, Balian and Bloch \cite{2} presented a formula which also applies to nonintegrable systems. It arose from a study of sound spectra in cavities with reflecting walls of arbitrary shape in two and more dimensions. Gutzwiller's formula applies only to systems with isolated orbits. It fails if there exist degenerate families of periodic orbits connected by continuous symmetries \cite{3}. The problem arise in the derivation of Gutzwiller's formula from a stationary phase approximation to the trace integral over the semiclassical Green function at fixed energy. It contains an oscillating exponential of the eikonal function $S({\bf x},{\bf x} ;E)=\oint {\bf p}\cdot d{\bf x}$ of the periodic orbits passing through the point $ {\bf x}$. A continuous symmetry makes this independent of ${\bf x}$ over an entire spatial region swept out by the symmetry operations. 
Then the second derivatives of the eikonal function vanishes in that region, resulting in a divergence of the stationary-phase integral. Strutinsky and coworkers \cite {4} removed these divergences by going back the convolution integral in the time-dependent propagator and performing exactly as many integrations in that integral and in the trace integral over the Green function, as there are independent parameters describing the degeneracy. Later, Creagh and Littlejohn \cite{5} pursued the same idea in a generalized phase space which also contains room for the continuous symmetry of the system. For integrable systems, their procedure is similar to that of Berry and Tabor \cite{6} who derived a trace formula for integrable systems employing the action-angle variables. All this development has so far been restricted to the {\em nonrelativistic\/ } regime where the particle solves the Schr\"odinger equation in some external time-independent potential. The purpose of this paper is to begin adapting the methods to {\em relativistic\/} particles described by the Klein-Gordon equation in the external potential. Our final result will be a relativistic generalization of Gutzwiller's trace formula, expressing the density of states as a topological sum over the relativistic closed classical orbits. The formula is applicable to integrable relativistic classical systems. Relativistic quantum mechanics is of course not really a consistent theory. At relativistic velocities, particles will be created and absorbed, and the particle number is no longer conserved, thus violating the current conservation law of the Klein-Gordon equation. Quantum field theory is certainly the appropriate tool to describe relativistic particles. 
In the classical regime, however, the particle number is fixed and these problems are absent, so that a semiclassical expression for the density of states in terms of relativistic classical periodic orbits is a consistent approximation expected to render a reliable results for those systems in which particle creation and annihilation play only a minor role. \section{Relativistic Quantum-Mechanical Trace Formula} Consider a relativistic particle of mass $m$ in an external time-independent potential $V({\bf x})$, whose quantum mechanics is governed by the Klein-Gordon equation \begin{equation} \left\{ \lbrack i\hbar \partial _{t}+mc^{2}-V({\bf x})]^{2}-c^{2}\hbar ^{2}\left( \partial _{\mbox{{\scriptsize$\bf{x}$}}}-i\frac{e}{c\hbar }{\bf A} \right) ^{2}-m^{2}c^{4}\right\} \phi ({\bf x},t)=0. \label{1.1} \end{equation} where $c$ and $\hbar $ are speed of light and Planck's constant, and ${\bf A} ({\bf x})$ is a magnetic vector potential. We have shifted the energy origin to the rest energy $mc^{2}$ in order to have a smooth limit to nonrelativistic bound-state energies. Since the potentials are time-independent, the wave functions can be factorized as $\phi ({\bf x} ,t)=e^{-iEt/\hbar }\Psi ({\bf x})$, and (\ref{1.1}) takes the Schr\"odinger-like form \begin{equation} \hat{{\cal H}}_{E}\Psi ({\bf x})={\varepsilon }\Psi ({\bf x}), \label{1.2} \end{equation} where \begin{equation} {\varepsilon }\equiv \frac{E^{2}-m^{2}c^{4}}{2mc^{2}}, \label{1.3} \end{equation} and $\hat{{\cal H}}_{E}$ is the Hamilton operator \begin{equation} \hat{{\cal H}}_{E}=\hat{{\bf p}}^{2}/2m+[2EV({\bf x})-V^{2}({\bf x} )]/2mc^{2}, \label{1.4} \end{equation} with ${\bf p}=-i\hbar \partial _{\mbox{{\scriptsize$\bf{x}$}}}$. 
It is useful to view (\ref{1.2}) as a special case of a more general eigenvalue equation \begin{equation} \hat{{\cal H}}_{E}\Psi ({\bf x})={\cal E}\Psi ({\bf x}), \label{1.5} \end{equation} which arises from a Schr\"odinger-like equation \begin{equation} \hat{{\cal H}}_{E}\Psi ({\bf x},\tau )=i\hbar \partial _{\tau }\Psi ({\bf x} ,\tau ) \label{1.6} \end{equation} by a factorized ansatz $\Psi ({\bf x},\tau )=e^{-i{\cal E}\tau /\hbar }\Psi ( {\bf x})$. Then the variable $\tau $ plays the role of a pseudotime, and the Hamilton operator $\hat{{\cal H}}_{E}$ is the pseudotime-evolution operator governing the $\tau $-dependence of the system. Let $\Psi _{\mbox{{\scriptsize$\bf{n}$}}}({\bf x})$ be the eigenfunctions of Eq.~(\ref{1.5}) with eigenvalues ${\cal E}_{E}({\bf n})$. Then the physical energies $E_{\sbf n}$ of the particle are given by those values of $E$ at which the pseudoenergy is equal to $\varepsilon$: \begin{equation} {\cal E}_{E_{\sbf n}}({\bf n})=\varepsilon. \label{@12}\end{equation} As an example, consider the Coulomb potential $V(r)=-e^{2}/r$ of the relativistic hydrogen atom. Equation (\ref{1.5}) leads to the radial eigenvalue equation \begin{equation} \frac{d^{2}R(r)}{dr^{2}}+\frac{2}{r}\frac{dR(r)}{dr}+\left[ \frac{2m}{\hbar ^{2}}\left( {\cal E}_{E}+\frac{Ee^{2}/mc^{2}}{r}\right) -\frac{l(l+1)-\alpha ^{2}}{r^{2}}\right] R(r)=0. \label{1.10} \end{equation} Its solutions yield the bound state pseudoenergies depending on principal quantum number $n$ and angular momentum $l$, but degenerate in the azimuthal quantum number $m$: \begin{equation} {\cal E}_{E}(n,l,m)=-\frac{E^{2}/mc^{2}}{2}\frac{\alpha ^{2}}{\left[ \left( n-l-1/2\right) +\sqrt{\left( l+1/2\right) ^{2}-\alpha ^{2}}\right] ^{2}} ,\quad \left\{ \begin{array}{l} n=1,2,3,\cdots \\ l=0,1,2,\cdots \end{array} \right. . \label{1.11} \end{equation} Inserting these into Eq. 
(\ref{@12}), we obtain the well-known relativistic bound energies of the Coulomb system: \begin{equation} E_{n,l}=\pm mc^{2}\left[ 1+\frac{\alpha ^{2}}{\left[ \left( n-l-1/2\right) + \sqrt{\left( l+1/2\right) ^{2}-\alpha ^{2}}\right] ^{2}}\right] ^{-1/2}. \label{1.12} \end{equation} The complete information on the spectrum of eigenvalues of the Klein-Gordon equation (\ref{1.1}) is contained in the pole terms of the trace of the resolvent $\hat{R}(E)\equiv i({{\varepsilon }-\hat{ {\cal H}}_{E}({\bf n})+i\eta })^{-1}$: \begin{equation} r(E)\equiv {\rm Tr}\,\hat{R}(E)=\,i\,{\rm Tr}\,[{{\varepsilon }-\hat{{\cal H} }_{E}({\bf n})}+i\eta ]^{-1}, \label{1.7} \end{equation} where the infinitesimal positive quantity $\eta $ guarantees the causality of the time dependence of the Fourier transform of $r(E)$. The imaginary part of $r(E)$ defines the {\em density of states\/}: \begin{equation} g(E)=\frac{1}{\pi }{\rm Im}\,r(E)={\rm Tr}\,\delta ({\varepsilon }-\hat{ {\cal H}}_{E}). \label{1.8} \end{equation} In terms of the eigenvalues ${\cal E}_{E}({\bf n})$, the density (\ref{1.8}) has the spectral representation \begin{equation} g(E)=\sum_{\mbox{{\scriptsize$\bf{n}$}}}\delta ({{\varepsilon }-{\cal E}_{E}( {\bf n})}), \label{1.9} \end{equation} where the sum over ${\bf n}$ covers all quantum numbers. This sum will now be performed in a semiclassical approximation as a sum over periodic classical orbits. For the sake of generality, we assume that the particle moves in $D$ -dimensions, and assume that the motion has been transformed to $D$ cyclic coordinates whose motion can easily be quantized ({\em torus quantization\/} ). The labels ${\bf n}$ will then be integer-valued vectors ${\bf n}=(n_{1},n_{2},\cdots ,n_{D})$ with non-negative components $n_{i}$. For the purpose of deriving a semiclassical approximation to (\ref{1.9}), we convert each sum over $ n_{i}=0,1,2,\dots $ in Eq. 
(\ref{1.9}) into an integral with the help of the Poisson summation formula \cite{PI,7} \begin{equation} \sum_{n=0}^{\infty }f(n)=\sum_{k=-\infty }^{\infty }\int_{0^{-}}^{\infty }f(n)e^{2\pi ikn}dn . \label{1.13} \end{equation} Here we have assumed that the function $f(n)$ and its derivatives with respect to $n$ vanish at infinity, and the lower limit $ 0^{-}$ on the integral sign indicates that the integration starts on the left-hand side of the origin to include the entire $\delta $-function generated by the sum over $k$. The superscript will be omitted in the sequel. Thus we obtain \begin{equation} g(E)=\sum_{{\mbox{{\scriptsize$\bf{k}$}}}}\int d^{D}n\,\delta \left( { \varepsilon -}{\cal E}_{E}({\bf n})\right) e^{2\pi i{\bf k}{\bf n}}, \label{1.14} \end{equation} where each component of the integer-valued vector ${\bf k} =(k_{1},k_{2},\cdots ,k_{D})$ runs from minus to plus infinity, while the now continuous variables $n_{i}$ are integrated from $0^{-}$ to infinity. For integrable systems, the integration variables $n_{i}$ in Eq. (\ref{1.14} ) can be replaced by the values of the action integrals appearing in the relativistic quantum conditions \cite{8} \begin{equation} I_{i}=\frac{1}{2\pi }\oint_{C_{i}}{\bf p}\cdot d{\bf x}=\left( n_{i}+\frac{ \mu _{i}}{4}\right) \hbar , \label{1.15} \end{equation} where ${\bf p}$ is the relativistic momentum of the point particle along closed loops $C_{i}$ on an invariant torus. The quantum numbers $n_{i}$ are the same nonnegative integers as above, while $\mu _{i}$ are the numbers of conjugate points along the orbit $C_{i}$. 
Thus we can rewrite Eq.~(\ref{1.14} ) as \begin{equation} g(E)=\frac{1}{\hbar ^{D}}\sum_{{\bf k}}e^{-i{\bf k\cdot {{ \mbox{\egtmbf\symbol{'026}}}}}\,\pi /2}\int_{\hbar {\mbox{\egtmbf \symbol{'026}}}_{1}/4}^{\infty }dI_{1}\int_{\hbar {\mbox{\egtmbf \symbol{'026}}}_{2}/4}^{\infty }dI_{2}\cdots \int_{\hbar { \mbox{\egtmbf\symbol{'026}}}_{D}/4}^{\infty }dI_{D}\,\delta \left( { \varepsilon -}{\cal E}_{E}({\bf I})\right) e^{2\pi i{\bf k\cdot I}}, \label{1.16} \end{equation} where we have changed the argument of ${\cal E}_{E}({\bf n})$ to ${\cal E} _{E}({\bf I})$, and introduced vectors ${\mbox{\twlmbf\symbol{'026}}} =(\mu _{1},\mu _{2},\dots ,\mu _{D})$. Consider now the lowest term with ${\bf k}=0$, for which the oscillating exponentials in Eq. (\ref{1.16}) are absent. It contributes a smooth density of states \begin{equation} \bar{g}(E)=\frac{1}{\hbar ^{D}}\int_{0}^{\infty }dI_{1}\int_{0}^{\infty }dI_{2}\cdots \int_{0}^{\infty }dI_{D}\,\delta \left( {\varepsilon -}{\cal E} _{E}({\bf I})\right) , \label{1.17} \end{equation} where the lower bounds of the integral has been moved to zero, since the classical orbits for ${\bf k}=0$ have zero length, making ${ \mbox{\twlmbf\symbol{'026}}}$ equal to zero. The multiple integral (\ref {1.17}) is just the classical density of states \begin{equation} \bar{g}_{{\rm cl}}(E)=\frac{1}{(2\pi \hbar )^{D}}\int \int d^{D}{p}\,d^{D}{q} \,\delta \left( {\varepsilon -}{\cal E}_{E}({\bf p},{\bf q})\right) , \label{1.18} \end{equation} which in cyclic coordinates reads \begin{equation} \bar{g}_{{\rm cl}}(E)=\frac{1}{(2\pi \hbar )^{D}}\int_{0}^{\infty }dI_{1}\int_{0}^{2\pi }d\varphi _{1}\int_{0}^{\infty }dI_{2}\int_{0}^{2\pi }d\varphi _{2}\cdots \int_{0}^{\infty }dI_{D}\int_{0}^{2\pi }d\varphi _{D}\,\delta \left( {\varepsilon -}{\cal E}_{E}({\bf I})\right) , \label{1.19} \end{equation} reducing to (\ref{1.17}) after integrating out the angular variables. 
The classical density of states is also referred to as the {\em Thomas-Fermi density\/} \cite{PI2}, We now turn to the oscillating ${\bf k}\neq {\bf 0}$ parts of $g(E)$. With the help of the integral representation for the $\delta $-function \begin{equation} \delta \left( {\varepsilon -}{\cal E}_{E}({\bf I})\right) =\frac{1}{2\pi \hbar }\int_{-\infty }^{\infty }d\tau \,e^{i\tau \left[ {\varepsilon -}{\cal E}_{E}({\mbox{{\scriptsize$\bf{I}$}}})\right] /\hbar }, \label{1.20} \end{equation} we rewrite this as \begin{equation} \delta g(E)=\frac{1}{2\pi \hbar }\int_{-\infty }^{\infty }d\tau \frac{1}{ \hbar ^{D}}\sum_{{\mbox{{\scriptsize$\bf{k}$}}}}{}^{\!\prime }e^{-i{\bf k\cdot {\mbox{\egtmbf\symbol{'026}}}}\,\pi /2}\int_{\hbar { \mbox{\egtmbf\symbol{'026}}}_{1}/4}^{\infty }dI_{1}\int_{\hbar { \mbox{\egtmbf\symbol{'026}}}_{2}/4}^{\infty }dI_{2}\cdots \int_{\hbar { \mbox{\egtmbf\symbol{'026}}}_{D}/4}^{\infty }dI_{D}e^{{i}\left\{ 2\pi {\bf k\cdot I+}\tau [{\varepsilon -}{\cal E}_{E}({\bf I})]\right\} /\hbar }, \label{1.21} \end{equation} where the primes on the summation symbols indicate the omission of ${\bf k}= {\bf 0}$. The integrals over $I_{i}$ and $\tau $ are now evaluated in the stationary phase approximation. Let us abbreviate the action in the exponent by \begin{equation} A_{{\bf k}}({\bf I},\tau )=2\pi {\bf k\cdot I+}\tau [{\varepsilon -}{\cal E} _{E}({\bf I})]. \label{1.22} \end{equation} Its extrema lie at some ${\bf I}=\bar{{\bf I}}$, $\tau =\bar{\tau}$, where \begin{equation} \left. \frac{\partial A_{{\bf k}}}{\partial I_{i}}\right| _{{\bf I}=\bar{ {\bf I}},\tau =\bar{\tau}}=0,~~~~~\left. 
\frac{\partial A_{{\bf k}}}{ \partial \tau }\right| _{{\bf I}=\bar{{\bf I}},\tau =\bar{\tau}}=0.~~~~~ \label{1.23} \end{equation} The first set of equations yields the semiclassical quantization condition \begin{equation} 2\pi k_{i}=\bar{\tau}\omega _{i}(\bar{{\bf I}}),\quad i=1,2,\cdots ,D, \label{1.24} \end{equation} where \begin{equation} \omega _{i}({\bf I})\equiv \frac{\partial {\cal E}_{E}({\bf I})}{\partial I_{i}} \label{@}\end{equation} at $\bar{{\bf I}}$ are the angular velocities for the pseudoenergy ${ \varepsilon }$. The solutions of Eq. (\ref{1.24}) yield actions $\bar{{\bf I} }$ as nonlinear functions of ${\bf k}$ and $\bar{\tau}$: \begin{equation} \bar{{\bf I}}=\bar{{\bf I}}({\bf k},\bar{\tau}). \label{1.25} \end{equation} From Eq. (\ref{1.24}) we obtain the important relation for the resonant tori \begin{equation} \frac{k_{i}}{k_{j}}=\frac{\omega _{i}}{\omega _{j}},\quad i,j=1,2,\cdots ,D. \label{1.26} \end{equation} Since ${ k_i}$ are integers, the orbits on the torus must have commensurate frequencies, so that only closed periodic orbits contribute to the density of states in the saddle point approximation. This establishes the connection between $\delta g(E)$ and the relativistic periodic orbits of the classical system. If the frequencies are not commensurate, the orbits do not close although the motion is still confined to the torus. Such orbits are called multiply periodic or quasi-periodic. Each relativistic periodic orbit is specified by ${\bf k}$; it closes after $ k_{1}$ turns by $2\pi $ of the angle $\varphi _{1},$ $k_{2}$ turns of $ \varphi _{2}$, \dots ~. Thus ${\bf k}$ plays the role of an {\em index vector\/} characterizing the topology of the periodic orbits. For this reason, the sum in Eq. (\ref{1.16}) is also called a topological sum. Note that Eq. (\ref{1.26}) admits only $k_{i}$-values of the same sign. 
The second equation in (\ref{1.23}) specifies $\bar{\tau}$ via \begin{equation} {\varepsilon }-{\cal E}_{E}\left( {\bf \bar{I}(}{\bar{\tau}}(E))\right) =0. \label{1.27} \end{equation} Having determined the saddle points, the semiclassical approximation requires the calculation of the effect of the quadratic fluctuations around these. For this we expand Eq. (\ref{1.22}) up to the quadratic terms, and shift the integration variables from ${\bf I}$ to ${\bf I}^{\prime }\equiv {\bf I}-\bar{{\bf I}}$. The lower bound of the integrals is then transformed into $\hbar {\mbox{\twlmbf\symbol{'026}}}/4-\bar{{\bf I}}$. For sufficiently large actions $\bar{I}_{i}$, the sharpness of the extrema at small $\hbar $ allows us to move the lower bounds to minus-infinity. This approximation is excellent for highly excited states. We now perform the Gaussian integrals and obtain the oscillating part of the relativistic density of states \begin{equation} \delta g^{(2)}(E)=\frac{1}{2\pi }\sqrt{2\pi /\hbar }^{D+1}\sum_{ \mbox{{\scriptsize$\bf{k}$}}}{}^{\!\prime }e^{-i{\bf k\cdot { \mbox{\egtmbf\symbol{'026}}}}\,\pi /2}e^{-i{\pi }\nu /4}\frac{1}{\bar{\tau} ^{(D-1)/2}}\left| \det M\right| _{\bar{\mbox{{\scriptsize$\bf{I}$}}} }^{-1/2}e^{{i}2\pi {\bf k}\cdot {\bar{{\bf I}}}/\hbar }, \label{1.28} \end{equation} where $M$ is the stability matrix \begin{equation} M=\left( \begin{array}{cc} \bar{\tau}\displaystyle\frac{\partial ^{2}{\cal E}_{E}}{\partial I_{i}\partial I_{j}} & \displaystyle \frac{\partial {\cal E}_{E}}{\partial I_{i}} \\[2mm] \displaystyle \frac{\partial {\cal E}_{E}}{\partial I_{j}} & 0 \end{array} \right) . \label{1.29} \end{equation} whose determinant is, according to formula \begin{equation} \det \left( \begin{array}{ll} A & B \\ C & D \end{array} \right) =\det A\,\det (D-C^{T}A^{-1}B) \label{1.30} \end{equation} given by \begin{equation} \det M=\det H~\, {\mbox{\twlmbf\symbol{'041}}}^{T}\!H^{-1}{\mbox{\twlmbf \symbol{'041}}}. 
\label{1.31} \end{equation} where \begin{equation} H_{ij}\equiv \frac{\partial ^{2}{\cal E}_{E}}{\partial I_{i}\partial I_{j}}. \label{1.32} \end{equation} The Maslov index $\nu $ is equal to $N^{+}-N^{-}-N^{0}$, where $N^{\pm }$ denote the numbers of positive and negative eigenvalues of matrix $H_{ij}$, and $ N^{0}$ is unity (zero) if the sign of ${\mbox{\twlmbf\symbol{'041}}} ^{T}H^{-1}{\mbox{\twlmbf\symbol{'041}}}$ is positive (negative). The second factor in (\ref{1.30}) has been simplified using the equation of motion for the cyclic variables ${\mbox{\twlmbf\symbol{'047}}}$: \begin{equation} \frac{d{{\mbox{\twlmbf\symbol{'047}}}}}{d\tau }=\nabla _{{\bf I}}{\cal E} _{E}({\bf I})={{\mbox{\twlmbf\symbol{'041}}}(I)}, \label{1.33} \end{equation} the right-hand side being also equal to \begin{equation} {\mbox{\twlmbf\symbol{'041}}}=\frac{2\pi {\bf k}}{\bar{\tau}}. \label{1.34} \end{equation} Since for every ${\bf k}$ there is an equal contribution from $-{\bf k}$, we may replace the exponential by a cosine and obtain \begin{equation} \delta g^{(2)}(E)=\frac{1}{2\pi }\sqrt{2\pi /\hbar } ^{D+1}\sum_{\mbox{{\scriptsize$\bf{k}$}}}{}^{\!\prime }\frac{1}{\bar{\tau} ^{(D-1)/2}}\left| \det H~{\mbox{\twlmbf\symbol{'041}}}^{T}H^{-1}{ \mbox{\twlmbf\symbol{'041}}}\right| _{\bar{\mbox{{\scriptsize$\bf{I}$}}} }^{-1/2}\cos \left[ {{}2\pi {\bf k}\cdot \left( {\bar{{\bf I}}/\hbar }-{ \mbox{\twlmbf\symbol{'026}}}/4\right) -\pi \nu /4}\right] . \label{1.35} \end{equation} The relativistic trace formula (\ref{1.35}) gives us a basis for understanding quantum phenomena at the relativistic level in terms of classical orbits. In general, we just need to evaluate the classical ${\cal E }_{E}\left( {\bf \bar{I}}\right) $ for integrable systems, and consider some shortest orbits. As in nonrelativistic systems, we expect astonishingly accurate energy spectra from Eq. (\ref{1.35}). 
\section{Three-Dimensional Relativistic Rectangular Billiard} As a first application, consider the motion of a relativistic particle in a three-dimensional rectangular billiard with sides of length $a_{1},a_{2},$ and $a_{3}$ along $q_{1},q_{2},$ and $q_{3}$ axes. The quantum spectrum of Eq. (\ref{1.5}) with Dirichlet boundary condition is given by the pseudoenergies \begin{equation} {\cal E}_{E}({n_{1},n_{2},n_{3}})=\frac{\hbar ^{2}\pi ^{2}}{2m}\left( \frac{ n_{1}^{2}}{a_{1}^{2}}+\frac{n_{2}^{2}}{a_{2}^{2}}+\frac{n_{3}^{2}}{a_{3}^{2}} \right) ,\quad n_{i}(i=1,2,3)=1,2,3,\cdots . \label{2.1} \end{equation} The physical relativistic energy spectrum is obtained from Eq. (\ref{@12}): \begin{equation} E_{n_{1},n_{2},n_{3}}=\pm \sqrt{\pi ^{2}\hbar ^{2}c^{2}\left( \frac{n_{1}^{2} }{a_{1}^{2}}+\frac{n_{2}^{2}}{a_{2}^{2}}+\frac{n_{3}^{2}}{a_{3}^{2}}\right) +m^{2}c^{4}}. \label{2.2} \end{equation} As in the nonrelativistic case, this result is {\em exactly\/} reproduced by the relativistic quantization according to Eq. (\ref{1.15}). The numbers $\mu _{i}$ are all equal to $4$, since the wave functions have Dirichlet boundary condition. At every encounter with the wall, the action picks up a phase $\pi $. The relativistic action variables are therefore \begin{equation} I_{i}=\frac{1}{2\pi }\oint p_{i}dq_{i}=n_{i}\hbar ,\quad i=1,2,3;\quad n_{i}=1,2,3,\cdots . \label{2.3} \end{equation} The classical Hamiltonian may be expressed as \begin{equation} {\cal E}_{E}({\bf I)=}\frac{\pi ^{2}}{2m}\left( \frac{I_{1}^{2}}{a_{1}^{2}}+ \frac{I_{2}^{2}}{a_{2}^{2}}+\frac{I_{3}^{2}}{a_{3}^{2}}\right), \label{2.4} \end{equation} and the corresponding angular frequencies are \begin{equation} \omega _{i}=\frac{\pi ^{2}}{ma_{i}^{2}}I_{i},\quad i=1,2,3. \label{2.5} \end{equation} We now determine the saddle points $\bar{{\bf I}}.$ According to Eq. 
(\ref{1.24}), these are given by \begin{equation} \left( \bar{I}_{1},\bar{I}_{2},\bar{I}_{3}\right) \left( \tau \right) =\left( \frac{2ma_{1}^{2}k_{1}}{\tau \pi },\frac{2ma_{2}^{2}k_{2}}{\tau \pi } ,\frac{2ma_{3}^{2}k_{3}}{\tau \pi }\right), \label{2.6} \end{equation} leading to the pseudoenergies at the saddle point \begin{equation} {\cal E}_{E}({\bf \bar{I}(}\tau {\bf ))=}\frac{2m}{\tau ^{2}} \sum_{i=1}^{3}\left( a_{i}k_{i}\right) ^{2}. \label{2.7} \end{equation} The saddle-point value of ${\tau }$ is determined by (\ref{1.27}), yielding \begin{equation} \bar{\tau}=\sqrt{\frac{2m}{{\varepsilon }}}\sqrt{\sum_{i=1}^{3}\left( a_{i}k_{i}\right) ^{2}}. \label{2.8} \end{equation} From these saddle point values, we obtain \begin{equation} \left. \sum_{ij}\omega _{i}H_{ij}^{-1}\omega _{j}\right| _{\bar{ \mbox{{\scriptsize$\bf{I}$}}}(\bar{\tau})}=2{\varepsilon } \label{2.9} ,\end{equation} so that the sign of $\omega _{i}H_{ij}^{-1}\omega _{j}$ is positive and the number $N^0$ in the Maslov index $ \nu =N^{+}-N^{-}-N^{0}$ vanishes. The determinant of the second-derivative matrix is \begin{equation} \det \frac{\partial ^{2}{\cal E}_{E}}{\partial I_{i}\partial I_{j}}=\frac{ \pi ^{6}}{m^{3}\,a_{1}^{2}a_{2}^{2}a_{3}^{2}}. \label{2.10} \end{equation} All eigenvalues of the matrix $\lfrac{\partial ^{2}{\cal E}_{E}}{\partial I_{i}\partial I_{j}}$ are positive. Thus we identify the indices $N^{+}=3,~N^{-}=0$. Inserted into Eq.~(\ref{1.35}), we finally obtain for the oscillating part of the relativistic density of states \begin{equation} \delta g^{(2)}(E)=\frac{ \pi }{4E_{0}}\sqrt{\frac{{\varepsilon }}{E_{0}}} \frac{a_{1}a_{2}a_{3}} {L^{3}} \sum_{k_{1},k_{2},k_{3}=-\infty }^{\infty}{}^{\!\!\!\!\!\!\!\!\!\!\!\!\!\! 
\prime} ~~~~~j_{0}\left( \frac{S\left( {\bf k}\right) }{\hbar }\right) , \label{2.11} \end{equation} where $j_{0}(x)$ is the spherical Bessel function of order zero, $j_{0}(x)=\sin (x)/x.$ The symbol $L$ denotes some length scale which may be any average of the three length scales $ a_1,\,a_{2}$, or $a_{3}$, while \begin{equation} E_{0}\equiv \frac{\pi ^{2}\hbar ^{2}}{2mL^{2}} \label{2.12} \end{equation} denotes the energy associated with $L$. The quantity $S\left( {\bf k}\right) $ is \begin{equation} S\left( {\bf k}\right) =\frac{1}{c}\sqrt{ E^{2}-m^{2}c^{4}}~\,2\sqrt{ k_{1}^{2}a_{1}^{2}+k_{2}^{2}a_{2}^{2}+k_{3}^{2}a_{3}^{2}}=p~L_{\sbf k}. \label{2.13} \end{equation} It is precisely the relativistic eikonal $p\,L_{\sbf k}$ of the classical periodic orbits of momentum $p$ and total length $L_{\sbf k}$. In general, the inclusion of only a few shortest orbits in Eq. (\ref{2.11}) yields the correct positions of the quantum energy levels. The three-dimensional relativistic rectangular billiard may serve as a prototype of the relativistic semiclassical treatment for arbitrary billiard systems. Let us compare the calculation of (\ref{2.11}) from our relativistic trace formula (\ref{1.35}) with a direct calculation from an inverse Laplace transformation of the partition function $Z(\beta )$, i.e., \begin{equation} g(E)=\frac{1}{2\pi i}\int_{\epsilon -i\infty }^{\epsilon +i\infty }d\beta \, e^{\beta {\varepsilon }}Z(\beta ), \label{2.14} \end{equation} where the partition function is given by \begin{equation} Z(\beta )=\sum_{n_{1}=1}^{\infty }\sum_{n_{2}=1}^{\infty }\sum_{n_{3}=1}^{\infty }\exp \left\{ -\beta {\cal E}_{E}(n_{1},n_{2},n_{3}) \right\} , \label{2.15} \end{equation} with the pseudoenergies ${\cal E}_{E}({n_{1},n_{2},n_{3}})$ of Eq.~(\ref{2.1}). The problem is the same as in the calculation of the Casimir energy for the box. 
Since (\ref{2.15}) is a product of three independent sums \begin{equation} Z_{i}(\beta )=\sum_{n_{i}=1}^{\infty }\exp \left\{ -\beta {\cal E}_{E}({n_{i} })\right\} ,\quad i=1,2,3, \label{2.16} \end{equation} we may process each sum separately. Applying the Poisson formula (\ref{1.13}) to the sum over $n_i$ we find \[ Z_{i}(\beta )=\sum_{k_{i}=-\infty }^{\infty }\int_{-\infty }^{\infty }dn_ie^{-\beta E_{0}n_i^{2}L^{2}/a_{i}^{2}}e^{2\pi ik_{i}n_i}-\frac{1}{2} ~~~~~~~~~~~~\] \begin{equation} \!\!\!\!\!\!\!\!\!\!\!=\frac{1}{2}\frac{a_{i}}{L}\sqrt{\frac{\pi }{\beta E_{0}}} \sum_{k_{i}=-\infty }^{\infty }e^{-\left( \pi k_{i}a_{i}\right) ^{2}/\beta E_{0}L^{2}}-\frac{1}{2}. \label{2.17} \end{equation} Inserting this into (\ref{2.15}), and using the integral formula \begin{equation} \frac{1}{2\pi i}\int_{\epsilon -i\infty }^{\epsilon +i\infty }\frac{d\beta }{ \beta ^{\mu +1}}e^{\beta {\varepsilon }}e^{-\kappa /\beta }=\left( \frac{{ \varepsilon }}{\kappa }\right) ^{\mu /2}J_{\mu }\left( 2\sqrt{\kappa { \varepsilon }}\right) , \label{2.18} \end{equation} we obtain the exact level density of the relativistic three-dimensional rectangular box \begin{equation} g(E)=g^{(3)}(E)-\frac{1}{2}\left[ g_{12}^{(2)}(E)+g_{23}^{(2)}(E)+g_{31}^{(2)}(E)\right] +\left[ g_{1}^{(1)}(E)+g_{2}^{(1)}(E)+g_{3}^{(1)}(E)\right] -\frac{1}{8}\delta \left( {\varepsilon }\right) . \label{2.19} \end{equation} The leading term comes from a proper three-fold sum, and is given by \begin{equation} \delta g^{(2)}(E)=\sum_{k_{1},k_{2},k_{3}=-\infty }^{\infty }\frac{\pi }{ 4E_{0}}\sqrt{\frac{{\varepsilon }}{E_{0}}}\left( \frac{a_{1}a_{2}a_{3}}{L^{3} }\right) j_{0}\left( \frac{S\left( {\bf k}\right) }{\hbar }\right) \label{2.20} \end{equation} with $S\left( {\bf k}\right) $ of Eq.~(\ref{2.13}). This agrees with the semiclassical result (\ref{2.11}). 
The second set of terms gives corrections from the faces of the box: \begin{equation} g_{ij}^{(2)}(E)=\frac{\pi }{4}\frac{1}{E_{0}}\frac{a_{i}a_{j}}{L^{2}} \sum_{k_{1},k_{2}=-\infty }^{\infty }J_{0}\left( \frac{S_2\left( k_{1},k_{2}\right) }{\hbar }\right) , \label{2.21} \end{equation} where \begin{equation} S_2\left( k_{1},k_{2}\right)=\frac{1}{c}\sqrt{ E^{2}-m^{2}c^{4}}~~2\sqrt{k_{1}^{2}a_{i}^{2}+k_{2}^{2}a_{j}^{2}} =p\,L_{k_{1},k_{2}} \label{2.22} \end{equation} are the eikonals of the orbits on the faces. The functions $g_{ij}^{(2)}(E)$ are the level densities of the planar facial ``boxes''. The third set of terms in (\ref{2.19}) stems from the edges of the box, being the level density for these one-dimensional ``boxes'' of length $a_{i}$: \begin{equation} g_{i}^{(1)}(E)=\frac{a_{i}}{2L\sqrt{E_{0}{\varepsilon }}}\sum_{k_i=-\infty }^{\infty }\cos \left( \frac{S_1\left( k_i\right) }{\hbar }\right) , \label{2.23} \end{equation} where \begin{equation} S_1\left( k\right) =\frac{1}{c}\sqrt{E^{2}-m^{2}c^{4}}~2ka_{i}=p\,L_{k} . \label{2.24} \end{equation} These boundary terms can be obtained also from the general trace formula ( \ref{1.13}) by calculating higher-order corrections to the semiclassical approximation (\ref{1.35}). The last term in (\ref{2.19}) is a delta function at ${\varepsilon}=0$ which does not contribute to the level density at ${\varepsilon}>0$. The classical (Thomas-Fermi) contribution to the density of states is \begin{equation} \bar{g}(E)=\frac{1}{E_{0}}\left( \frac{\pi }{4}\sqrt{\frac{{\varepsilon}}{ E_{0}}}\frac{{V}_3}{L^{3}}-\frac{\pi }{8}\frac{{V_2}}{2L^{2}}+\frac{1}{8} \sqrt{\frac{E_{0}}{{\varepsilon}}}\frac{{V_1}}{L}\right) . \label{2.25} \end{equation} Here ${V}_3=a_{1}a_{2}a_{3}$ is the volume of the box, ${V_2=}2\left( a_{1}a_{2}+a_{2}a_{3}+a_{1}a_{3}\right) $ the total surface, and $ V_1=4\left( a_{1}+a_{2}+a_{3}\right) $ the sum of the edge lengths. 
\section{Concluding remark} For relativistic integrable systems, we have derived a semiclassical trace formula by transforming the relativistic quantization conditions into the topological sum involving all closed relativistic classical orbits. Certainly, our final result (\ref{1.35}) can also be obtained by an {\sf ab initio} procedure, starting out from the relativistic path integral for the relativistic fixed-energy amplitude representation \cite{PI,9,10} \begin{equation} G({\bf {x}}_{b},{\bf {x}}_{a};E)=\frac{\hbar }{2Mc}\int_{0}^{\infty }dL\int D\rho \Phi \left[ \rho \right] \int D^{D}xe^{iA_{E}/\hbar }, \label{3.1} \end{equation} with the action \begin{equation} A_{E}\left[ x,\dot{x}\right] =\int_{\tau _{a}}^{\tau _{b}}d\tau \left[ \frac{ M}{2\rho \left( \tau \right) }{\bf \dot{x}}^{2}\left( \tau \right) +{\frac{e }{c}{\bf A\cdot \dot{x}}(\tau })+\frac{\rho (\tau )}{2Mc^{2}}\left( E-V({\bf x})\right) ^{2}-\rho \left( \tau \right) \frac{Mc^{2}}{2}\right], \label{3.2} \end{equation} where $L$ is defined by \begin{equation} L=\int_{\tau _{a}}^{\tau _{b}}d\tau \rho (\tau ), \label{3.3} \end{equation} with $\rho (\tau )$ being an arbitrary dimensionless fluctuating scale variable, and $\Phi [\rho ]$ is some convenient gauge-fixing functional, such as $\Phi \left[ \rho \right] =\delta \left[ \rho -1\right] $. The prefactor $\hbar /Mc$ in (\ref{3.1}) is the Compton wave length of a particle of mass $M$, the field ${\bf A(x)}$ is the vector potential, $V({\bf x})$ the scalar potential, $E$ the system energy, and ${\bf x}$ the spatial part of the $D+1$ -dimensional vector $x=({\bf x},i\tau )$. This path integral forms the basis for studying relativistic potential problems. 
Choosing $\rho (\tau )$ to be equal to unity, the amplitude (\ref{3.1}) becomes \begin{equation} G({\bf {x}}_{b},{\bf {x}}_{a};E)=\frac{\hbar }{2Mc}\int_{0}^{\infty }dL\exp \left[ \frac{i}{\hbar }{\varepsilon }L\right] \int {\cal D}^{D}x\exp \left[ \frac{i}{\hbar }A_{E}\right] , \label{3.4} \end{equation} where the fixed-energy action $A_{E}$ is given by \begin{equation} A_{E}=\int_{0}^{L}d\tau \left\{ \frac{M}{2}{\bf \dot{x}}^{2}\left( \tau \right) +{\frac{e}{c}{\bf A\cdot \dot{x}}(\tau })+\frac{1}{2Mc^{2}}\left[ V^2( {\bf x})-2EV({\bf x})\right] \right\} . \label{3.5} \end{equation} The semiclassical approximation to the relativistic fixed-energy amplitude (\ref{3.4}) is \cite{11} \[ \!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!G_{{\rm sc}}({\bf {x}}_{b},{\bf {x}} _{a};E)=\frac{\hbar }{2Mc}\frac{1}{(2\pi \hbar i)^{D/2}}\sum_{{\rm class.traj.}}\int_{0}^{\infty }dL \,e^{i\varepsilon L/\hbar } \] \begin{equation} \times \det \left[ -\partial _{x_{b}^{i}}\partial _{x_{a}^{j}}A_{E}({\bf {x}} _{b},{\bf {x}}_{a};L)\right] ^{1/2}e^{ {i}A_{E}({\bf {x}} _{b},{\bf {x}}_{a};L)/\hbar -i\pi \nu /2 }. \label{3.6} \end{equation} The associated density of states is obtained from the trace of (\ref{3.6} ): \[ \!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\! \int d^{D}x\,G_{{\rm sc}}({\bf {x}}_{b},{\bf {x}}_{a};E)=\frac{\hbar }{2Mc} \frac{1}{(2\pi \hbar i)^{D/2}}\sum_{{\rm class.traj.}}\int_{0}^{\infty }dL\,e^{{i}{\varepsilon }L/\hbar }~~~~~~~~~~~~~ \] \begin{equation} \times \int d^{D}x\det \left[ -\partial _{x_{b}^{i}}\partial _{x_{a}^{j}}A_{E}({\bf {x}}_{b},{\bf {x}}_{a};L)\right] ^{1/2}e^{ {i}A_{E}({\bf {x}}_{b},{\bf {x}}_{a};L)/\hbar -i\pi \nu /2 }. \label{3.7} \end{equation} The trace operation in Eq. (\ref{3.6}) is integration over all periodic orbits in the pseudotime ``$L"$. 
If the relativistic system is integrable, it can be expressed in terms of action-angle variables as \[ \int d^{D}x\det \left[ -\partial _{x_{b}^{i}}\partial _{x_{a}^{j}}A_{E}({\bf {x}}_{b},{\bf {x}}_{a};L)\right] ^{1/2}e^{iA_{E}({\bf { x}}_{b},{\bf {x}}_{a};L)/\hbar -i\pi \nu /2}~~~~~~~~~~~~~~~~~~~~~~~~ \] \begin{equation} =\sum_{{\bf k}}\int_{0}^{2\pi }d^{D}\varphi L^{-D/2}\det \left[ \frac{ \partial ^{2}{\cal E}_{E}\left( {\bf I}\right) }{\partial I_{i}\partial I_{j} }\right] ^{-1/2}e^{i\left[ 2\pi {\bf I\cdot k}-{\cal E}_{E}({\bf I})L\right] /\hbar -i\pi \nu /2} \label{3.8} \end{equation} with \begin{equation} A_{E}({\bf {x}}_{b},{\bf {x}}_{a};L)={\bf I}\cdot \left( {\ { \mbox{\twlmbf\symbol{'047}}}}_{b}-{\ {{\mbox{\twlmbf\symbol{'047}}}}} _{a}\right) -{\cal E}_{E}({\bf I})L=2\pi {\bf I\cdot k}-{\cal E}_{E}({\bf I} )L, \label{3.9} \end{equation} thus establishing contact with the earlier treatment in which $\tau $ plays the role of $L$. ~\newline \centerline{ACKNOWLEDGMENTS} \newline This work was supported by the National Youth Council of the ROC under contract number NYC300375. \end{document}
\begin{document} \title{Some Complete and Intermediate Polynomials in Algebraic Complexity Theory} \begin{abstract} We provide a list of new natural $\mathsf{VNP}$-intermediate polynomial families, based on basic (combinatorial) $\mathsf{NP}$-complete problems that are complete under \emph{parsimonious} reductions. Over finite fields, these families are in $\mathsf{VNP}$, and under the plausible hypothesis $\mathsf{Mod}_p\mathsf{P} \not\subseteq \mathsf{P/poly}$, are neither $\mathsf{VNP}$-hard (even under oracle-circuit reductions) nor in $\mathsf{VP}$. Prior to this, only the Cut Enumerator polynomial was known to be $\mathsf{VNP}$-intermediate, as shown by B\"{u}rgisser in 2000. We next show that over rationals and reals, two of our intermediate polynomials, based on satisfiability and Hamiltonian cycle, are not monotone affine polynomial-size projections of the permanent. This augments recent results along this line due to Grochow. Finally, we describe a (somewhat natural) polynomial defined independently of a computation model, and show that it is $\mathsf{VP}$-complete under polynomial-size projections. This complements a recent result of Durand et al.\ (2014) which established $\mathsf{VP}$-completeness of a related polynomial but under constant-depth oracle circuit reductions. Both polynomials are based on graph homomorphisms. A simple restriction yields a family similarly complete for $\mathsf{VBP}$. \end{abstract} \section{Introduction} \label{sec:intro} The algebraic analogue of the $\mathsf{P}$ versus $\mathsf{NP}$ problem, famously referred to as the $\mathsf{VP}$ versus $\mathsf{VNP}$ question, is one of the most significant problems in algebraic complexity theory. Valiant~\cite{Valiant-stoc79a} showed that the $\textsc{Permanent}$ polynomial is $\mathsf{VNP}$-complete (over fields of char $\neq$ 2). A striking aspect of this polynomial is that the underlying decision problem, in fact even the search problem, is in $\mathsf{P}$. 
Given a graph, we can decide in polynomial time whether it has a perfect matching, and if so find a maximum matching in polynomial time~\cite{Edmonds65}. Since the underlying problem is an easier problem, it helped in establishing $\mathsf{VNP}$-completeness of a host of other polynomials by a reduction from the $\textsc{Permanent}$ polynomial (cf.~\cite{Burgisser-book00}). Inspired by classical results in structural complexity theory, in particular~\cite{Ladner75}, B\"{u}rgisser~\cite{Burgisser99-j} proved that if Valiant's hypothesis (i.e. $\mathsf{VP} \neq \mathsf{VNP}$) is true, then, over any field there is a $p$-family in $\mathsf{VNP}$ which is neither in $\mathsf{VP}$ nor $\mathsf{VNP}$-complete with respect to $c$-reductions. Let us call such polynomial families $\mathsf{VNP}$-intermediate (i.e. in $\mathsf{VNP}$, not $\mathsf{VNP}$-complete, not in $\mathsf{VP}$). Further, B\"{u}rgisser~\cite{Burgisser99-j} showed that over finite fields, a \emph{specific} family of polynomials is $\mathsf{VNP}$-intermediate, provided the polynomial hierarchy $\mathsf{PH}$ does not collapse to the second level. On an intuitive level these polynomials enumerate \emph{cuts} in a graph. This is a remarkable result, when compared with the classical $\mathsf{P}$-$\mathsf{NP}$ setting or the BSS-model. Though the existence of problems with intermediate complexity has been established in the latter settings, due to the involved ``diagonalization'' arguments used to construct them, these problems seem highly unnatural. That is, their definitions are not motivated by an underlying combinatorial problem but guided by the needs of the proof and, hence, seem artificial. The question of whether there are other naturally-defined $\mathsf{VNP}$-intermediate polynomials was left open by B\"{u}rgisser~\cite{Burgisser-book00}. 
We remark that to date the \emph{cut enumerator} polynomial from \cite{Burgisser99-j} is the only known example of a natural polynomial family that is $\mathsf{VNP}$-intermediate. The question of whether the classes $\mathsf{VP}$ and $\mathsf{VNP}$ are distinct is often phrased as whether $\mathsf{Perm}_n$ is \emph{not} a quasi-polynomial-size projection of $\mathsf{Det}_n$. The importance of this reformulation stems from the fact that it is a purely algebraic statement, devoid of any dependence on circuits. While we have made very little progress on this question of determinantal complexity of the permanent, the progress in restricted settings has been considerable. One of the success stories in theoretical computer science is the unconditional lower bound against monotone computations~\cite{Razborov85-c,Razborov85-p,AB87}. In particular, Razborov~\cite{Razborov85-p} proved that computing the permanent over the Boolean semiring requires monotone circuits of size at least $n^{\Omega(\log n)}$. Jukna~\cite{Jukna14} observed that if the Hamilton cycle polynomial is a monotone $p$-projection of the permanent, then, since the clique polynomial is a monotone projection of the Hamiltonian cycle~\cite{Valiant-stoc79a} and the clique requires monotone circuits of exponential size~\cite{AB87}, one would get a lower bound of $2^{n^{\Omega(1)}}$ for monotone circuits computing the permanent, thus improving on~\cite{Razborov85-p}. The importance of this observation is also highlighted by the fact that such a monotone $p$-projection, over the reals, would give an alternate proof of the result of Jerrum and Snir \cite{JS82} that computing the permanent by monotone circuits over $\mathbb{R}$ requires size at least $2^{n^{\Omega(1)}}$. (Jerrum and Snir~\cite{JS82} proved that the permanent requires monotone circuits of size $2^{\Omega(n)}$ over $\mathbb{R}$ and the tropical semiring.) 
The first progress on this question raised in~\cite{Jukna14} was made recently by Grochow~\cite{Grochow15}. He showed that the Hamiltonian cycle polynomial is not a monotone sub-exponential-size projection of the permanent. This already answered Jukna's question in its entirety, but Grochow~\cite{Grochow15} used his techniques to further establish that polynomials like the perfect matching polynomial, and even the $\mathsf{VNP}$-intermediate cut enumerator polynomial of B\"{u}rgisser~\cite{Burgisser99-j}, are not monotone polynomial-size projections of the permanent. This raises an intriguing question of whether there are other such non-negative polynomials which share this property. While the $\mathsf{Perm}$ vs $\mathsf{Det}$ problem has become synonymous with the $\mathsf{VP}$ vs $\mathsf{VNP}$ question, there is a somewhat unsatisfactory feeling about it. This arises from two facts: one, that the $\mathsf{VP}$-hardness of the determinant is known only under the more powerful quasi-polynomial-size projections, and, second, the lack of natural $\mathsf{VP}$-complete polynomials (with respect to polynomial-size projections) in the literature. (In fact, with respect to $p$-projections, the determinant is complete for the possibly smaller class $\mathsf{VBP}$ of polynomial-sized algebraic branching programs.) To remedy this situation, it seems crucial to understand the computation in $\mathsf{VP}$. B\"{u}rgisser~\cite{Burgisser-book00} showed that a generic polynomial family constructed using a topological sort of a generic $\mathsf{VP}$ circuit, while controlling the degree, is complete for $\mathsf{VP}$. Raz~\cite{Raz10}, using the depth reduction of~\cite{VSBR83}, showed that a family of ``universal circuits'' is $\mathsf{VP}$-complete. Thus both families directly depend on the circuit definition or characterization of $\mathsf{VP}$. 
Last year, Durand et al.~\cite{DMMRS14} made significant progress and provided a natural, first of its kind, $\mathsf{VP}$-complete polynomial. However, the natural polynomials studied by Durand et al.\ lacked a bit of punch because their completeness was established under polynomial-size \emph{constant depth c-reductions} rather than projections. In this paper, we make progress on all three fronts. First, we provide a list of new natural polynomial families, based on basic (combinatorial) $\mathsf{NP}$-complete problems~\cite{GJ79} whose completeness is via \emph{parsimonious} reductions~\cite{Simon77}, that are $\mathsf{VNP}$-intermediate over finite fields (Theorem~\ref{thm:intermediate}). Then, we show that over reals, some of our intermediate polynomials are not monotone affine polynomial-size projections of the permanent (Theorem~\ref{thm:mon-proj-lb}). As in~\cite{Grochow15}, the lower bound results about monotone affine projections are unconditional. Finally, we improve upon \cite{DMMRS14} by characterizing $\mathsf{VP}$ and establishing a natural $\mathsf{VP}$-complete polynomial under polynomial-size projections (Theorem~\ref{thm:lb-vp}). A modification yields a family similarly complete for $\mathsf{VBP}$ (Theorems~\ref{thm:lb-vbp}, \ref{thm:lb-vbp-char-not-2}). \paragraph{Organization of the paper.} We give basic definitions in Section~\ref{sec:prelim}. Section~\ref{sec:intermediate} contains our discussion on intermediate polynomials. In Section~\ref{sec:monotone} we establish lower bounds under monotone affine projections. The discussion on completeness results appears in Section~\ref{sec:completeness}. We end in Section~\ref{sec:concl} with some interesting questions for further exploration.
\section{Preliminaries} \label{sec:prelim} \subsubsection*{Algebraic complexity:} We say that a polynomial $f$ is a \emph{projection} of $g$ if $f$ can be obtained from $g$ by setting the variables of $g$ to either constants in the field, or to the variables of $f$. A sequence $(f_n)$ is a $p$-\emph{projection} of $(g_m)$, if each $f_n$ is a projection of $g_t$ for some $t=t(n)$ polynomially bounded in $n$. There are other notions of reductions between families of polynomials, like \emph{c-reductions} (polynomial-size oracle circuit reductions), \emph{constant-depth c-reductions}, and \emph{linear p-projections}. For more on these reductions, see~\cite{Burgisser-book00}. An arithmetic circuit is a directed acyclic graph with leaves labeled by variables or constants from an underlying field, internal nodes labeled by field operations $+$ and $\times$, and a designated output gate. Each node computes a polynomial in a natural way. The polynomial computed by a circuit is the polynomial computed at its output gate. A \emph{parse tree} of a circuit captures monomial generation within the circuit. Duplicating gates as needed, unwind the circuit into a formula (fan-out one); a parse tree is a minimal sub-tree (of this unwound formula) that contains the output gate, that contains all children of each included $\times$ gate, and that contains exactly one child of each included $+$ gate. For a complete definition see~\cite{MP08}. A circuit is said to be \emph{skew} if at every $\times$ gate, at most one incoming edge is the output of another gate. A family of polynomials $(f_n(x_1,\ldots ,x_{m(n)}))$ is called a $p$-family if both the degree $d(n)$ of $f_n$ and the number of variables $m(n)$ are bounded by a polynomial in $n$. A $p$-family is in $\mathsf{VP}$ (resp.\ $\mathsf{VBP}$) if a circuit family (skew circuit family, resp.) $(C_n)$ of size polynomially bounded in $n$ computes it. 
A sequence of polynomials $(f_n)$ is in $\mathsf{VNP}$ if there exist a sequence $(g_n)$ in $\mathsf{VP}$, and polynomials $m$ and $t$ such that for all $n$, \(f_n(\bar{x}) = \sum_{\bar{y}\in\{0,1\}^{t(n)}} g_n(x_1,\ldots,x_{m(n)},y_1,\ldots ,y_{t(n)}).\) ($\mathsf{VBP}$ denotes the algebraic analogue of branching programs. Since these are equivalent to skew circuits, we directly use a skew circuit definition of $\mathsf{VBP}$.) \subsubsection*{Boolean complexity:} We need some basics from Boolean complexity theory. Let $\mathsf{P/poly}$ denote the class of languages decidable by polynomial-sized Boolean circuit families. A function $\Phi:\{0,1\}^\ast \to \mathbb{N}$ is in \#$\mathsf{P}$ if there exists a polynomial $p$ and a polynomial time deterministic Turing machine $M$ such that for all $x \in \{0,1\}^\ast$, $\Phi(x) = |\{y \in \{0,1\}^{p(|x|)} \mid M(x,y)=1\}|$. For a prime $p$, define \begin{align*} \#_p\mathsf{P} &= \{\Psi:\{0,1\}^\ast \to \mathbb{F}_p \mid \Psi(x) = \Phi(x) \bmod p \textrm{ for some $\Phi \in \#\mathsf{P}$} \}, \\ \mathsf{Mod}_p\mathsf{P} &= \{L \subseteq \{0,1\}^{\ast} \mid \textrm{ for some $\Phi \in \#\mathsf{P}$, } x \in L \iff \Phi(x) \equiv 1 \bmod p\} \end{align*} It is easy to see that if $\Phi:\{0,1\}^\ast \to \mathbb{N}$ is \#$\mathsf{P}$-complete with respect to parsimonious reductions (that is, for every $\Psi \in \#\mathsf{P}$, there is a polynomial-time computable function $f:\{0,1\}^* \rightarrow \{0,1\}^*$ such that for all $x\in \{0,1\}^*$, $\Psi(x) = \Phi(f(x))$), then the language $L=\{x\mid \Phi(x)\equiv 1 \bmod p\}$ is $\mathsf{Mod}_p\mathsf{P}$-complete with respect to many-one reductions. \subsubsection*{Graph Theory:} We consider the treewidth and pathwidth parameters for an undirected graph. We will work with a ``canonical'' form of decompositions which is generally useful in dynamic-programming algorithms.
A \emph{(nice) tree decomposition} of a graph $G$ is a pair $\mathcal{T} = (T, \{X_t\}_{t\in V(T)})$, where $T$ is a tree, rooted at $X_r$, whose every node $t$ is assigned a vertex subset $X_t \subseteq V(G)$, called a bag, such that the following conditions hold: \begin{enumerate} \item $X_r = \emptyset$, $|X_\ell| = 1$ for every leaf $\ell$ of $T$, and $\cup_{t\in V(T)} X_t = V(G)$. \\ That is, the root contains the empty bag, the leaves contain singleton sets, and every vertex of $G$ is in at least one bag. \item For every $(u,v) \in E(G)$, there exists a node $t$ of $T$ such that $\{u,v\} \subseteq X_t$. \item For every $u \in V(G)$, the set $T_u = \{t \in V(T) \mid u \in X_t\}$ induces a connected subtree of $T$. \item Every non-leaf node $t$ of $T$ is of one of the following three types: \begin{itemize} \item \textbf{Introduce node:} $t$ has exactly one child $t'$, and $X_t = X_{t'} \cup \{v\}$ for some vertex $v \notin X_{t'}$. We say that $v$ is \emph{introduced} at $t$. \item \textbf{Forget node:} $t$ has exactly one child $t'$, and $X_t = X_{t'} \setminus \{w\}$ for some vertex $w \in X_{t'}$. We say that $w$ is \emph{forgotten} at $t$. \item \textbf{Join node:} $t$ has two children $t_1,t_2$, and \(X_t = X_{t_1} = X_{t_2}.\) \end{itemize} \end{enumerate} The \emph{width} of a tree decomposition $\mathcal{T}$ is one less than the size of the largest bag; that is, $\max_{t\in V(T)}|X_t| - 1$. The \emph{tree-width} of a graph $G$ is the minimum possible width of a tree decomposition of $G$. In a similar way we can also define a \emph{nice path decomposition} of a graph. For a complete definition we refer to~\cite{fpt-book15}. A sequence $(G_n)$ of graphs is called a $p$-family if the number of vertices in $G_n$ is polynomially bounded in $n$. It is further said to have \emph{bounded} tree(path)-width if for some absolute constant $c$ independent of $n$, the tree(path)-width of each graph in the sequence is bounded by $c$.
A \emph{homomorphism} from $G$ to $H$ is a map from $V(G)$ to $V(H)$ preserving edges. A graph is called \emph{rigid} if it has \emph{no} homomorphism to itself other than the identity map. Two graphs $G$ and $H$ are called \emph{incomparable} if there are \emph{no} homomorphisms from $G \to H$ as well as $H \to G$. It is known that asymptotically almost all graphs are rigid, and almost all pairs of nonisomorphic graphs are also incomparable. For the purposes of this paper, we only need a collection of three rigid and mutually incomparable graphs. For more details, we refer to~\cite{hn-book04}. \section{$\mathsf{VNP}$-intermediate} \label{sec:intermediate} In \cite{Burgisser99-j}, B\"{u}rgisser showed that unless PH collapses to the second level, an explicit family of polynomials, called the cut enumerator polynomial, is $\mathsf{VNP}$-intermediate. He raised the question, recently highlighted again in \cite{Grochow15}, of whether there are other such natural $\mathsf{VNP}$-intermediate polynomials. In this section we show that in fact his proof strategy itself can be adapted to other polynomial families as well. The strategy can be described abstractly as follows: Find an explicit polynomial family $h=(h_n)$ satisfying the following properties. \begin{description} \item[M: Membership.] The family is in $\mathsf{VNP}$. \item[E: Ease.] Over a field $\mathbb{F}_q$ of size $q$ and characteristic $p$, $h$ can be evaluated in $\mathsf{P}$. Thus if $h$ is $\mathsf{VNP}$-hard, then we can efficiently compute \#$\mathsf{P}$-hard functions, modulo $p$. \item[H: Hardness.] The monomials of $h$ encode solutions to a problem that is \#$\mathsf{P}$-hard via parsimonious reductions. Thus if $h$ is in $\mathsf{VP}$, then the number of solutions, modulo $p$, can be extracted using coefficient computation. 
\end{description} Then, unless $\mathsf{Mod}_p\mathsf{P} \subseteq \mathsf{P/poly}$\ (which in turn implies that PH collapses to the second level, \cite{KL82}), $h$ is $\mathsf{VNP}$-intermediate. We provide a list of $p$-families that, under the same condition $\mathsf{Mod}_p\mathsf{P} \not\subseteq \mathsf{P/poly}$, are $\mathsf{VNP}$-intermediate. All these polynomials are based on basic combinatorial $\mathsf{NP}$-complete problems that are complete under parsimonious reductions. \noindent (1)~The \emph{satisfiability} polynomial $\mathsf{Sat^q} = (\mathsf{Sat^q}_n)$: For each $n$, let $\mathsf{Cl}_n$ denote the set of all possible clauses of size 3 over $2n$ literals. There are $n$ variables $\tilde{X} = \{X_i\}_{i=1}^n$, and also $8n^3$ clause-variables $\tilde{Y} = \{Y_c\}_{c \in \mathsf{Cl}_n}$, one for each 3-clause $c$. \[\mathsf{Sat^q}_n := \sum_{a \in \{0,1\}^n} \left(\prod_{i\in[n]: a_i=1} X_i^{q-1}\right) \left(\prod_{\substack{c~\in \mathsf{Cl}_n \\ ~a\textrm{ satisfies }c}} Y_c^{q-1} \right).\] For the next three polynomials, we consider the complete graph $G_n$ on $n$ nodes, and we have the set of variables $\tilde{X}=\{X_e\}_{e \in E_n}$ and $\tilde{Y}=\{Y_v\}_{v \in V_n}$. \noindent (2)~The \emph{vertex cover} polynomial $\mathsf{VC^q} = (\mathsf{VC^q}_n)$: \[\mathsf{VC^q}_n := \sum_{S\subseteq V_n} \left(\prod_{e \in E_n \colon e \textrm{ is incident on }S} X_e^{q-1}\right) \left(\prod_{v \in S} Y_v^{q-1} \right).\] \noindent (3)~The \emph{clique/independent set} polynomial $\mathsf{CIS^q} = (\mathsf{CIS^q}_n)$: \[\mathsf{CIS^q}_n := \sum_{T\subseteq E_n} \left(\prod_{e \in T} X_e^{q-1}\right) \left(\prod_{v\textrm{ incident on }T} Y_v^{q-1} \right).\] \noindent (4)~The \emph{clow} polynomial $\mathsf{Clow^q} = (\mathsf{Clow^q}_n)$: A clow in an $n$-vertex graph is a closed walk of length exactly $n$, in which the minimum numbered vertex (called the head) appears exactly once.
\[\mathsf{Clow^q}_n := \sum_{w:\textrm{ clow of length }n} \left(\prod_{e:\textrm{ edges in }w} X_e^{q-1}\right) \left(\prod_{\substack{v:\textrm{ vertices in }w\\ \textrm{(counted only once)}}} Y_v^{q-1} \right).\] If an edge $e$ is used $k$ times in a clow, it contributes $X_e^{k(q-1)}$ to the monomial. But a vertex $v$ contributes only $Y_v^{q-1}$ even if it appears more than once. More precisely, \[\mathsf{Clow^q}_n := \sum_{\substack{w = \langle v_0, v_1, \ldots , v_{n-1} \rangle: \\ \forall j > 0, ~~v_0 < v_j }} \left(\prod_{i\in [n]} X_{(v_{i-1},v_{i\bmod n})}^{q-1}\right) \left(\prod_{v \in \{v_0, v_1, \ldots , v_{n-1} \}} Y_v^{q-1} \right).\] \noindent (5)~The \emph{3D-matching} polynomial $\mathsf{3DM^q} = (\mathsf{3DM^q}_n)$: Consider the complete tripartite hyper-graph, where each part in the partition $(A_n, B_n, C_n)$ contains $n$ nodes, and each hyperedge has exactly one node from each part. We have variables $X_e$ for hyperedge $e$ and $Y_v$ for node $v$. \[\mathsf{3DM^q}_n := \sum_{M \subseteq A_n \times B_n \times C_n} \left(\prod_{e \in M} X_e^{q-1}\right) \left(\prod_{\substack{v \in M\\ \textrm{(counted only once)}}} Y_v^{q-1} \right).\] We show that if $\mathsf{Mod}_p\mathsf{P} \not\subseteq \mathsf{P/poly}$, then all five polynomials defined above are $\mathsf{VNP}$-intermediate. \begin{theorem} \label{thm:intermediate} Over a finite field $\mathbb{F}_q$ of characteristic $p$, the polynomial families $\mathsf{Sat^q}$, $\mathsf{VC^q}$, $\mathsf{CIS^q}$, $\mathsf{Clow^q}$, and $\mathsf{3DM^q}$, are in $\mathsf{VNP}$. Further, if $\mathsf{Mod}_p\mathsf{P} \not\subseteq \mathsf{P/poly}$, then they are all $\mathsf{VNP}$-intermediate; that is, neither in $\mathsf{VP}$ nor $\mathsf{VNP}$-hard with respect to $c$-reductions.
\end{theorem} \begin{proof} (M) An easy way to see membership in $\mathsf{VNP}$ is to use Valiant's criterion (\cite{Valiant-stoc79a}; see also Proposition 2.20 in \cite{Burgisser-book00}); the coefficient of any monomial can be computed efficiently, hence the polynomial is in $\mathsf{VNP}$. This establishes membership for all families. We first illustrate the rest of the proof by showing that the polynomial $\mathsf{Sat^q}$ satisfies the properties (H), (E). (H): Assume $(\mathsf{Sat^q}_n)$ is in $\mathsf{VP}$, via polynomial-sized circuit family $\{C_n\}_{n \ge 1}$. We will use $C_n$ to give a $\mathsf{P/poly}$ upper bound for computing the number of satisfying assignments of a 3-CNF formula, modulo $p$. Since this question is complete for $\mathsf{Mod}_p\mathsf{P}$, the upper bound implies $\mathsf{Mod}_p\mathsf{P}$ is in $\mathsf{P/poly}$. Given an instance $\Phi$ of 3SAT, with $n$ variables and $m$ clauses, consider the projection of $\mathsf{Sat^q}_n$ obtained by setting all $Y_c$ for $c \in \Phi$ to $t$, and all other variables to 1. This gives the polynomial $\mathsf{Sat^q}_{\Phi}(t) = \sum_{j=1}^m d_j t^{j(q-1)}$ where $d_j$ is the number of assignments (modulo $p$) that satisfy exactly $j$ clauses in $\Phi$. Our goal is to compute $d_m$. We convert the circuit $C$ into a circuit $D$ that computes elements of $\mathbb{F}_q[t]$ by explicitly giving their coefficient vectors, so that we can pull out the desired coefficient. (Note that after the projection described above, $C$ works over the polynomial ring $\mathbb{F}_q[t]$.) Since the polynomial computed by $C$ is of degree $m(q-1)$, we need to compute the coefficients of all intermediate polynomials too only up to degree $m(q-1)$. Replacing $+$ by gates performing coordinate-wise addition, $\times$ by a sub-circuit performing (truncated) convolution, and supplying appropriate coefficient vectors at the leaves gives the desired circuit.
Since the number of clauses, $m$, is polynomial in $n$, the circuit $D$ is also of polynomial size. Given the description of $C$ as advice, the circuit $D$ can be evaluated in $\mathsf{P}$, giving a $\mathsf{P/poly}$ algorithm for computing \#3-SAT($\Phi$) $\bmod~p$. Hence $\mathsf{Mod}_p\mathsf{P} \subseteq \mathsf{P/poly}$. (E) Consider an assignment to $\tilde{X}$ and $\tilde{Y}$ variables in $\mathbb{F}_q$. Since all exponents are multiples of $(q-1)$, it suffices to consider $0/1$ assignments to $\tilde{X}$ and $\tilde{Y}$. Each assignment $a$ contributes 0 or 1 to the final value; call it a contributing assignment if it contributes 1. So we just need to count the number of contributing assignments. An assignment $a$ is contributing exactly when $\forall i\in[n]$, $X_i = 0 \Longrightarrow a_i = 0$, and $\forall c \in \mathsf{Cl}_n$, $ Y_c=0 \Longrightarrow a \textrm{ does not satisfy } c $. These two conditions, together with the values of the $X$ and $Y$ variables, constrain many bits of a contributing assignment; an inspection reveals how many (and which) bits are so constrained. If any bit is constrained in conflicting ways (for example, $X_i = 0$, and $Y_c = 0$ for some clause $c$ containing the literal $\bar{x}_i$), then no assignment is contributing (either $a_i=1$ and the $X$ part becomes zero due to $X_i^{a_i}$, or $a_i=0$ and the $Y$ part becomes zero due to $Y_c$). Otherwise, some bits of a potentially contributing assignment are constrained by $X$ and $Y$, and the remaining bits can be set in any way. Hence the total sum is precisely \(2^{(\textrm{\# unconstrained bits})} \bmod~p\). Now assume $\mathsf{Sat^q}$ is $\mathsf{VNP}$-hard. Let $L$ be any language in $\mathsf{Mod}_p\mathsf{P}$, witnessed via \#$\mathsf{P}$-function $f$. (That is, $x\in L \Longleftrightarrow f(x) \equiv 1 \bmod p$.)
By the results of \cite{Burg00, Burgisser-book00}, there exists a $p$-family $r=(r_n) \in \mathsf{VNP}_{\mathbb{F}_p}$ such that \(\forall n,~\forall x \in \{0,1\}^n,~r_n(x) = f(x) \bmod p.\) By assumption, there is a $c$-reduction from $r$ to $\mathsf{Sat^q}$. We use the oracle circuits from this reduction to decide instances of $L$. On input $x$, the advice is the circuit $C$ of appropriate size reducing $r$ to $\mathsf{Sat^q}$. We evaluate this circuit bottom-up. At the leaves, the values are known. At $+$ and $\times$ gates, we perform these operations in $\mathbb{F}_q$. At an oracle gate, the paragraph above tells us how to evaluate the gate. So the circuit can be evaluated in polynomial time, showing that $L$ is in $\mathsf{P/poly}$. Thus $\mathsf{Mod}_p\mathsf{P} \subseteq \mathsf{P/poly}$. For the other four families, it suffices to show the following, since the rest is identical as for $\mathsf{Sat^q}$. \begin{description} \item[H'.] The monomials of $h$ encode solutions to a problem that is \#$\mathsf{P}$-hard via parsimonious reductions. \item[E'.] Over $\mathbb{F}_q$, $h$ can be evaluated in $\mathsf{P}$. \end{description} We describe this for the polynomial families one by one. \subsubsection*{The \emph{vertex cover} polynomial $\mathsf{VC^q} = (\mathsf{VC^q}_n)$:} \[\mathsf{VC^q}_n := \sum_{S\subseteq V_n} \left(\prod_{e \in E_n \colon e \textrm{ is incident on }S} X_e^{q-1}\right) \left(\prod_{v \in S} Y_v^{q-1} \right).\] \noindent (H'): Given an instance of vertex cover $A = (V(A),E(A))$ such that $|V(A)| = n$ and $|E(A)| = m$, we show how $\mathsf{VC^q}_n$ encodes the number of solutions of instance $A$. Consider the following projection of $\mathsf{VC^q}_n$. Set $Y_v = t$, for $v \in V(A)$. For $e \in E(A),$ set $X_e = z$; otherwise $e \notin E(A)$ and set $X_e = 1$.
Thus, we have \[\mathsf{VC^q}_n(z,t) = \sum_{S\subseteq V_n} z^{(\textrm{\# edges incident on }S)(q-1)} t^{|S|(q-1)}.\] Hence, it follows that the number of vertex covers of size $k$, modulo $p$, is the coefficient of $z^{m(q-1)}t^{k(q-1)}$ in $\mathsf{VC^q}_n(z,t)$. \noindent (E'): Consider the weighted graph given by the values of $\tilde{X}$ and $\tilde{Y}$ variables. Each subset $S\subseteq V_n$ contributes $0$ or $1$ to the total. A subset $S\subseteq V_n$ contributes $1$ to $\mathsf{VC^q}_n$ if and only if every vertex in $S$ has non-zero weight, and every edge incident on each vertex in $S$ has non-zero weight. That is, $S$ is a subset of full-degree vertices. Therefore, the total sum is $2^{(\textrm{\# full-degree vertices})} \bmod p$. \subsubsection*{The \emph{clique/independent set} polynomial $\mathsf{CIS^q} = (\mathsf{CIS^q}_n)$:} \[\mathsf{CIS^q}_n := \sum_{T\subseteq E_n} \left(\prod_{e \in T} X_e^{q-1}\right) \left(\prod_{v\textrm{ incident on }T} Y_v^{q-1} \right).\] \noindent (H'): Given an instance of clique $A = (V(A),E(A))$ such that $|V(A)| = n$ and $|E(A)| = m$, we show how $\mathsf{CIS^q}_n$ encodes the number of solutions of instance $A$. Consider the following projection of $\mathsf{CIS^q}_n$. Set $Y_v = t$, for $v \in V(A)$. For $e \in E(A),$ set $X_e = z$; otherwise $e \notin E(A)$ and set $X_e = 1$. (This is the same projection as used for vertex cover.) Thus, we have \[\mathsf{CIS^q}_n(z,t) = \sum_{T\subseteq E_n} z^{|T\cap E(A)|(q-1)} t^{(\textrm{\# vertices incident on }T)(q-1)}.\] Now it follows easily that the number of cliques of size $k$, modulo $p$, is the coefficient of $z^{{k \choose 2}(q-1)} t^{k(q-1)}$ in $\mathsf{CIS^q}_n(z,t)$. \noindent (E'): Consider the weighted graph given by the values of $\tilde{X}$ and $\tilde{Y}$ variables. Each subset $T\subseteq E_n$ contributes $0$ or $1$ to the sum.
A subset $T\subseteq E_n$ contributes $1$ to the sum if and only if all edges in $T$ have non-zero weight, and every vertex incident on $T$ must have non-zero weight. Therefore, we consider the graph induced on vertices with non-zero weights. Any subset of edges in this induced graph contributes $1$ to the total sum; all other subsets contribute 0. Let $\ell$ be the number of edges in the induced graph with non-zero weights. Thus, the total sum is $2^{\ell} \bmod p$. \subsubsection*{The \emph{clow} polynomial $\mathsf{Clow^q} = (\mathsf{Clow^q}_n)$:} A clow in an $n$-vertex graph is a closed walk of length exactly $n$, in which the minimum numbered vertex (called the head) appears exactly once. \[\mathsf{Clow^q}_n := \sum_{w:\textrm{ clow of length }n} \left(\prod_{e:\textrm{ edges in }w} X_e^{q-1}\right) \left(\prod_{\substack{v:\textrm{ vertices in }w\\ \textrm{(counted only once)}}} Y_v^{q-1} \right).\] (If an edge $e$ is used $k$ times in a clow, it contributes $X_e^{k(q-1)}$ to the monomial.) \noindent (H'): Given an instance $A = (V(A),E(A))$ of the Hamiltonian cycle problem with $|V(A)| = n$ and $|E(A)| = m$, we show how $\mathsf{Clow^q}_n$ encodes the number of Hamiltonian cycles in $A$. Consider the following projection of $\mathsf{Clow^q}_n$. Set $Y_v = t$, for $v \in V(A)$. For $e \in E(A),$ set $X_e = z$; otherwise $e \notin E(A)$ and set $X_e = 1$. (The same projection was used for $\mathsf{VC^q}$ and $\mathsf{CIS^q}$.) Thus, we have \[\mathsf{Clow^q}_n (z,t) = \sum_{w:\textrm{ clow of length }n} \left(\prod_{e:\textrm{ edges in }w\cap E(A)} z^{q-1}\right) \left(\prod_{\substack{v:\textrm{ vertices in }w\\ \textrm{(counted only once)}}} t^{q-1} \right).\] From the definition, it now follows that the number of Hamiltonian cycles in $A$, modulo $p$, is the coefficient of $z^{n(q-1)}t^{n(q-1)}$.
\noindent (E'): To evaluate $\mathsf{Clow^q}_n$ on instantiations of $\tilde{X}$ and $\tilde{Y}$ variables, we consider the weighted graph given by the values to the variables. We modify the edge weights as follows: if an edge is incident on a node with zero weight, we make its weight $0$ irrespective of the value of the corresponding $X$ variable. Thus, all zero weight vertices are isolated in the modified graph $G$. Hence, the total sum is equal to the number of closed walks of length $n$, modulo $p$, in this modified graph. This can be computed in polynomial time using matrix powering as follows: Let $G_i$ denote the induced subgraph of $G$ with vertices $\{ i, \ldots ,n\}$, and let $A_i$ be its adjacency matrix. We represent $A_i$ as an $n\times n$ matrix with the first $i-1$ rows and columns having only zeroes. Now the number of clows with head $i$ is given by the $[i,i]$ entry of $ A_i A_{i+1}^{n-2} A_i$. \subsubsection*{The \emph{3D-matching} polynomial $\mathsf{3DM^q} = (\mathsf{3DM^q}_n)$:} Consider the complete tripartite hyper-graph, where each part of the partition contains $n$ nodes, and each hyperedge has exactly one node from each part. As before, there are variables $X_e$ for hyperedge $e$ and $Y_v$ for node $v$. \[\mathsf{3DM^q}_n := \sum_{M \subseteq A_n \times B_n \times C_n} \left(\prod_{e \in M} X_e^{q-1}\right) \left(\prod_{\substack{v \in M\\ \textrm{(counted only once)}}} Y_v^{q-1} \right).\] \noindent (H'): Given an instance of 3D-Matching $\mathcal{H}$, we consider the usual projection. The variables corresponding to the vertices are all set to $t$. The edges present in $\mathcal{H}$ are all set to $z$, and the ones not present are set to $1$. Then the number of 3D-matchings in $\mathcal{H}$, modulo $p$, is equal to the coefficient of $z^{n(q-1)}t^{3n(q-1)}$ in $\mathsf{3DM^q}_n (z,t)$.
\noindent (E'): To evaluate $\mathsf{3DM^q}_n$ over $\mathbb{F}_q$, consider the hypergraph obtained after removing the vertices with zero weight, edges with zero weight, and edges that contain a vertex with zero weight (even if the edges themselves have non-zero weight). Every subset of hyperedges in this modified hypergraph contributes $1$ to the total sum, and all other subsets contribute 0. Hence, the evaluation equals $2^{(\textrm{\# edges in the modified hypergraph})} \bmod p$. \qed\end{proof} It is worth noting that the cut enumerator polynomial $\mathsf{Cut^q}$, shown by B\"{u}rgisser to be $\mathsf{VNP}$-intermediate over field $\mathbb{F}_q$, is in fact $\mathsf{VNP}$-complete over the rationals when $q=2$, \cite{dRA12}. Thus the above technique is specific to finite fields. \section{Monotone projection lower bounds} \label{sec:monotone} We now show that some of our intermediate polynomials are not \emph{monotone} $p$-projections of the $\textsc{Permanent}$ polynomial. The results here are motivated by the recent results of Grochow~\cite{Grochow15}. Recall that a polynomial $f(x_1,\ldots ,x_n)$ is a \emph{projection} of a polynomial $g(y_1,\ldots,y_m)$ if $f(x_1,\ldots ,x_n) = g(a_1,\ldots ,a_m)$, where $a_i$'s are either constants or $x_j$ for some $j$. The polynomial $f$ is an \emph{affine} projection of $g$ if $f$ can be obtained from $g$ by replacing each $y_i$ with an affine linear function $\ell_i(\tilde{x})$. Over any subring of $\mathbb{R}$, or more generally any totally ordered semi-ring, a \emph{monotone projection} is a projection in which all constants appearing in the projection are non-negative. We say that the family $(f_n)$ is a (monotone affine) projection of the family $(g_n)$ with \emph{blow-up} $t(n)$ if for all sufficiently large $n$, $f_n$ is a (monotone affine) projection of $g_{t(n)}$.
\begin{theorem} \label{thm:mon-proj-lb} Over the reals (or any totally ordered semi-ring), for any $q$, the families $\mathsf{Sat^q}$ and $\mathsf{Clow^q}$ are not monotone affine $p$-projections of the $\textsc{Permanent}$ family. Any monotone affine projection from $\textsc{Permanent}$ to $\mathsf{Sat^q}$ must have a blow-up of at least $2^{\Omega(\sqrt n)}$. Any monotone affine projection from $\textsc{Permanent}$ to $\mathsf{Clow^q}$ must have a blow-up of at least $2^{\Omega(n)}$. \end{theorem} Before giving the proof, we set up some notation. For more details, see \cite{AT13,Rothvoss14,Grochow15}. For any polynomial $p$ in $n$ variables, let $\mathsf{Newt}(p)$ denote the polytope in $\mathbb{R}^n$ that is the convex hull of the vectors of exponents of monomials of $p$. For any Boolean formula $\Phi$ on $n$ variables, let \textsf{p-SAT}($\Phi$) denote the polytope in $\mathbb{R}^n$ that is the convex hull of all satisfying assignments of $\Phi$. Let $K_n = (V_n,E_n)$ denote the $n$-vertex complete graph. The travelling salesperson (TSP) polytope is defined as the convex hull of the characteristic vectors of all subsets of $E_n$ that define a Hamiltonian cycle in $K_n$. For a polytope $P$, let $\mathsf{c}(P)$ denote the minimal number of linear inequalities needed to define $P$. A polytope $Q \subseteq \mathbb{R}^m$ is an \emph{extension} of $P\subseteq \mathbb{R}^n$ if there is an affine linear map $\pi\colon \mathbb{R}^m \to \mathbb{R}^n$ such that $\pi(Q) = P$. The \emph{extension complexity} of $P$, denoted $\mathsf{xc}(P)$, is the minimum size $\mathsf{c}(Q)$ of any extension $Q$ (of any dimension) of $P$. The following are straightforward, see for instance \cite{Grochow15,FMPTW15}. \begin{fact} \label{fact:xc} \begin{enumerate} \item $\mathsf{c}(\mathsf{Newt}(\mathsf{Perm}_n)) \leqslant 2n$. \item If polytope $Q$ is an extension of polytope $P$, then \(\mathsf{xc}(P) \leqslant \mathsf{xc}(Q)\).
\end{enumerate} \end{fact} We use the following recent results. \begin{proposition} \label{prop:xc} \begin{enumerate} \item Let $f(x_1,\ldots ,x_n)$ and $g(y_1,\ldots ,y_m)$ be polynomials over a totally ordered semi-ring $R$, with non-negative coefficients. If $f$ is a monotone projection of $g$, then the intersection of $\mathsf{Newt}(g)$ with some linear subspace is an extension of $\mathsf{Newt}(f)$. In particular, $\mathsf{xc}(\mathsf{Newt}(f)) \leqslant m + \mathsf{c}(\mathsf{Newt}(g))$. \cite{Grochow15} \item For every $n$ there exists a 3SAT formula $\Phi$ with $O(n)$ variables and $O(n)$ clauses such that \(\mathsf{xc}(\mathsf{p\text{-}SAT}(\Phi)) \geqslant 2^{\Omega(\sqrt{n})}.\) \cite{AT13} \item The extension complexity of the TSP polytope is $2^{\Omega(n)}$. \cite{Rothvoss14} \end{enumerate} \end{proposition} \begin{proof}(of Theorem~\ref{thm:mon-proj-lb}.) Let $\Phi$ be a 3SAT formula with $n$ variables and $m$ clauses as given by Proposition~\ref{prop:xc}~(2). For the polytope $P=\mathsf{p\text{-}SAT}(\Phi)$, $\mathsf{xc}(P)$ is high. Let $Q$ be the Newton polytope of $\mathsf{Sat^q}_n$. It resides in $N$ dimensions, where $N = n+ |\mathsf{Cl}_n|= n + 8n^3$, and is the convex hull of vectors of the form $(q-1)\langle \tilde{a}\tilde{b}\rangle$ where $\tilde{a} \in \{0,1\}^n$, $\tilde{b} \in \{0,1\}^{N-n}$, and for all $c \in \mathsf{Cl}_n$, $\tilde{a}$ satisfies $c$ if and only if $b_c=1$. For each $\tilde{a} \in \{0,1\}^n$, there is a unique $\tilde{b} \in \{0,1\}^{N-n}$ such that $(q-1)\langle \tilde{a}\tilde{b}\rangle$ is in $Q$. Define the polytope $R$, also in $N$ dimensions, to be the convex hull of vectors that are vertices of $Q$ and also satisfy the constraint $\sum_{c\in \Phi} b_c \ge m$. This constraint discards vertices of $Q$ where $\tilde{a}$ does not satisfy $\Phi$.
Thus $R$ is an extension of $P$ (projecting the first $n$ coordinates of points in $R$ gives a $(q-1)$-scaled version of $P$), so by Fact~\ref{fact:xc}(2), $\mathsf{xc}(P) \le \mathsf{xc}(R)$. Further, we can obtain an extension of $R$ from any extension of $Q$ by adding just one inequality; hence $\mathsf{xc}(R) \le 1 + \mathsf{xc}(Q)$. Suppose $\mathsf{Sat^q}$ is a monotone affine projection of the $\mathsf{Perm}$ family with blow-up $t(n)$. By Fact~\ref{fact:xc}(1) and Proposition~\ref{prop:xc}(1), $\mathsf{xc}(\mathsf{Newt}(\mathsf{Sat^q})) = \mathsf{xc}(Q) \le t(n) + \mathsf{c}(\mathsf{Newt}(\mathsf{Perm}_{t(n)})) \le O(t(n))$. From the preceding discussion and by Proposition~\ref{prop:xc}(2), we get $2^{\Omega(\sqrt{n})} \le \mathsf{xc}(P) \le \mathsf{xc}(R) \le 1 + \mathsf{xc}(Q) \le O(t(n))$. It follows that $t(n)$ is at least $2^{\Omega(\sqrt{n})}$. For the $\mathsf{Clow^q}$ polynomial, let $P$ be the TSP polytope and $Q$ be $\mathsf{Newt}(\mathsf{Clow^q})$. The vertices of $Q$ are of the form $(q-1)\tilde{a}\tilde{b}$ where $\tilde{a} \in \{0,1\}^{{n \choose 2}}$ picks a subset of edges, $\tilde{b} \in \{0,1\}^{n}$ picks a subset of vertices, and the picked edges form a length-$n$ clow touching exactly the picked vertices. Define polytope $R$ by discarding vertices of $Q$ where $\sum_{i\in [n]}b_i < n$. Now the same argument as above works, using Proposition~\ref{prop:xc}(3) instead of (2). \qed\end{proof} \section{Complete families for VP and VBP} \label{sec:completeness} The quest for a natural $\mathsf{VP}$-complete polynomial has generated a significant amount of research~\cite{Burgisser-book00,Raz10,Men11,CDM13,DMMRS14}. The first success story came from~\cite{DMMRS14}, where some naturally defined homomorphism polynomials were studied, and a host of them were shown to be complete for the class $\mathsf{VP}$. But the results came with minor caveats.
When the completeness was established under projections, there were non-trivial restrictions on the set of homomorphisms $\mathcal{H},$ and sometimes even on the target graph $H$. On the other hand, when all homomorphisms were allowed, completeness could only be shown under seemingly more powerful reductions, namely, constant-depth $c$-reductions. Furthermore, the graphs were either directed or had weights on nodes. It is worth noting that the reductions in \cite{DMMRS14} actually do not use the full power of generic constant-depth $c$-reductions; a closer analysis reveals that they are in fact \emph{linear p-projection}. That is, the reductions are linear combinations of polynomially many $p$-projections (see Chapter 3,~\cite{Burgisser-book00}). Still, this falls short of $p$-projections. In this work, we remove all such restrictions and show that there is a simple explicit homomorphism polynomial family that is complete for $\mathsf{VP}$ under $p$-projections. In this family, the source graphs $G$ are specific bounded-tree-width graphs, and the target graphs $H$ are complete graphs. We also show that a similar family with bounded-path-width source graphs is complete for $\mathsf{VBP}$ under $p$-projections. Thus, homomorphism polynomials are rich enough to characterise computations by circuits as well as algebraic branching programs. The polynomials we consider are defined formally as follows. \begin{defi} \label{def:hom} Let $G = (V(G),E(G))$ and $H = (V(H),E(H))$ be two graphs. Consider the set of variables $\bar{Z} := \{Z_{u,a} \mid u \in V(G) \mbox{ and } a \in V(H)\}$ and $\bar{Y} := \{Y_{(u,v)}\mid (u,v) \in E(H)\}$. Let $\mathcal{H}$ be a set of homomorphisms from $G$ to $H$. 
The homomorphism polynomial $f_{G,H,\mathcal{H}}$ in the variable set $\bar{Y}$, and the generalised homomorphism polynomial $\hat{f}_{G,H,\mathcal{H}}$ in the variable set $\bar{Z} \cup \bar{Y}$, are defined as follows: \begin{align*} f_{G,H,\mathcal{H}} & = \sum_{\Phi \in \mathcal{H}} \left(\prod_{(u,v) \in E(G)}Y_{(\Phi(u),\Phi(v))} \right). \\ \hat{f}_{G,H,\mathcal{H}} & = \sum_{\Phi \in \mathcal{H}} \left(\prod_{u \in V(G)} Z_{u,\Phi(u)}\right) \left(\prod_{(u,v) \in E(G)}Y_{(\Phi(u),\Phi(v))} \right). \end{align*} Let $\mathbf{Hom}$ denote the set of all homomorphisms from $G$ to $H$. If $\mathcal{H}$ equals $\mathbf{Hom}$, then we drop it from the subscript and write $f_{G,H}$ or $\hat{f}_{G,H}$. \end{defi} Note that for every $G,H,\mathcal{H}$, $f_{G,H,\mathcal{H}}(\bar{Y})$ equals $\hat{f}_{G,H,\mathcal{H}}(\bar{Y}) \mid_{\bar{Z}=\bar{1}}$. Thus upper bounds for $\hat{f}$ give upper bounds for $f$, while lower bounds for $f$ give lower bounds for $\hat{f}$. We show in Theorem~\ref{thm:ub} that for any $p$-family $(H_m)$, and any bounded tree-width (path-width, respectively) $p$-family $(G_m)$, the polynomial family $(f_m)$ where $f_m=\hat{f}_{G_m,H_m}$ is in $\mathsf{VP}$ ($\mathsf{VBP}$, respectively). We then show in Theorem~\ref{thm:lb-vp} that for a specific bounded tree-width family $(G_m)$, and for $H_m=K_{m^6}$, the polynomial family $(f_{G_m,H_m})$ is hard, and hence complete, for $\mathsf{VP}$ with respect to projections. An analogous statement is shown in Theorem~\ref{thm:lb-vbp} for a specific bounded path-width family $(G_m)$ and for $H_m=K_{m^2}$. Over fields of characteristic other than 2, $\mathsf{VBP}$-hardness is obtained for a simpler family of source graphs $G_m$, as described in Theorem~\ref{thm:lb-vbp-char-not-2}.
\subsection{Upper Bound} In~\cite{DMMRS14}, it was shown that the homomorphism polynomial $\hat{f}_{T_m,K_n}$ where $T_m$ is a binary tree on $m$ leaves, and $K_n$ is a complete graph on $n$ nodes, is computable by an arithmetic circuit of size $O(m^3n^3)$. Their proof idea is based on recursion: group the homomorphisms based on where they map the root of $T_m$ and its children, and recursively compute the sub-polynomials within each group. The sub-polynomials of a specific group have a special set of variables in their monomials. Hence, the homomorphism polynomial can be computed by suitably combining partial derivatives of the sub-polynomials. The partial derivatives themselves can be computed efficiently using the technique of Baur and Strassen~\cite{BS83}. Generalizing the above idea to polynomials where the source graph is not a binary tree $T_m$ but a bounded tree-width graph $G_m$ seems hard. The very first obstacle we encounter is to generalize the concept of partial derivative to monomial extension. Combining sub-polynomials to obtain the original polynomial also gets rather complicated. We sidestep this difficulty by using a dynamic programming approach~\cite{DST02} based on a ``nice'' tree decomposition of the source graph. This shows that the homomorphism polynomial $\hat{f}_{G,H}$ is computable by an arithmetic circuit of size at most \(2|V(G)|\cdot|V(H)|^{tw(G)+1}\cdot (2|V(H)|+2|E(H)|),\) where $tw(G)$ is the tree-width of $G$. Let $\mathcal{T} = (T, \{X_t\}_{t\in V(T)})$ be a nice tree decomposition of $G$ of width $\tau$. For each $t \in V(T)$, let $M_t = \{ \Phi \mid \Phi \colon X_t \to V(H)\}$ be the set of all mappings from $X_t$ to $V(H)$. Since $|X_t| \leqslant \tau+1$, we have $|M_t| \leqslant |V(H)|^{\tau+1}$. For each node $t \in V(T)$, let $T_t$ be the subtree of $T$ rooted at node $t$, $V_t := \bigcup_{t' \in V(T_t)}X_{t'}$, and $G_t := G[V_t]$ be the subgraph of $G$ induced on $V_t$.
Note that $G_r = G.$ We will build the circuit inductively. For each $t \in V(T)$ and $\Phi \in M_t$, we have a gate $\langle t, \Phi \rangle$ in the circuit. Such a gate will compute the homomorphism polynomial from $G_t$ to $H$ such that the mapping of $X_t$ in $H$ is given by $\Phi$. For each such gate $\langle t, \Phi \rangle$ we introduce another gate $\langle t, \Phi \rangle'$ which computes the ``partial derivative'' (or, quotient) of the polynomial computed at $\langle t, \Phi\rangle$ with respect to the monomial given by $\Phi$. As we mentioned before, the construction is inductive, starting at the leaf nodes and proceeding towards the root. \paragraph{Base case (Leaf nodes):} Let $\ell \in V(T)$ be a leaf node. Then, $X_\ell = \{u\}$ such that $u \in V(G)$. Note that any $\Phi \in M_\ell$ is just a mapping of $u$ to some node in $V(H)$. Hence, the set $M_\ell$ can be identified with $V(H)$. Therefore, for all $h \in V(H)$, we label the gate $\langle \ell, h \rangle$ by the variable $Z_{u,h}$. The derivative gate $\langle \ell,h \rangle'$ in this case is set to $1$. \paragraph{Introduce nodes:} Let $t \in V(T)$ be an introduce node, and $t'$ be its unique child. Then, $X_t \setminus X_{t'} = \{u\}$ for some $u\in V(G)$. Let $N(u) := \{ v | v \in X_{t'} \mbox{ and }(v,u) \in E(G_t)\}$. Note that there is a one-to-one correspondence between $\Phi \in M_t$ and pairs $(\Phi', h) \in M_{t'} \times V(H)$.
Therefore, for all $\Phi (= (\Phi',h)) \in M_{t}$ such that \(\forall v \in N(u), (\Phi'(v),h) \in E(H),\) we set \begin{align*} \langle t, \Phi\rangle & := Z_{u,h}\cdot\left(\prod_{v \in N(u)}Y_{(\Phi'(v),h)}\right)\cdot \langle t', \Phi' \rangle \;\;\;\;\;\mbox{ and}, \\ \langle t, \Phi\rangle' & := \langle t', \Phi'\rangle', \end{align*} otherwise we set \(\langle t ,\Phi\rangle = \langle t,\Phi\rangle' := 0.\) \paragraph{Forget nodes:} Let $t \in V(T)$ be a forget node and $t'$ be its unique child. Then, $X_{t'} \setminus X_{t} = \{u\}$ for some $u \in V(G)$. Again note that there is a one-to-one correspondence between pairs \((\Phi, h) \in M_t \times V(H)\) and $\Phi' \in M_{t'}$. Let $N(u) := \{v| v \in X_{t}\mbox{ and }(v,u) \in E(G_{t'})\}.$ Therefore, for all $\Phi \in M_t,$ we set \begin{align*} \langle t,\Phi\rangle & := \sum_{h \in V(H)} \langle t',(\Phi,h)\rangle \;\;\;\;\;\mbox{ and}, \\ \langle t,\Phi \rangle' & := \sum_{\substack{h\in V(H)\mbox{ such that}\\ \forall v \in N(u), (\Phi(v),h) \in E(H)}} Z_{u,h}\cdot\left(\prod_{v\in N(u)}Y_{(\Phi(v),h)}\right) \cdot\langle t', (\Phi,h)\rangle'. \end{align*} \paragraph{Join nodes:} Let $t \in V(T)$ be a join node, and $t_1$ and $t_2$ be its two children; we have $X_t = X_{t_1} = X_{t_2}$. Then, for all $\Phi \in M_t,$ we set \begin{align*} \langle t,\Phi\rangle & := \langle t_1, \Phi\rangle\cdot\langle t_2,\Phi\rangle' \left( =\langle t_1,\Phi\rangle'\cdot \langle t_2,\Phi\rangle\right) \\ \langle t,\Phi\rangle' & := \langle t_1,\Phi\rangle'\cdot\langle t_2,\Phi \rangle'. \end{align*} The output gate of the circuit is $\langle r, \emptyset\rangle$. The correctness of the algorithm is readily seen via induction in a similar way.
The bound on the size also follows easily from the construction. We observe some properties of our construction. First, the circuit constructed is a constant-free circuit. This was the case with the algorithm from~\cite{DMMRS14} too. Second, if we start with a path decomposition, we obtain \emph{skew} circuits, since the \emph{join} nodes are absent. The algorithm from~\cite{DMMRS14} does not give skew circuits when $T_m$ is a path. (It seems the obstacle there lies in computing partial-derivatives using skew circuits.) From the above algorithm and its properties, we obtain the following theorem. \begin{theorem} \label{thm:ub} Consider the family of homomorphism polynomials $(f_m),$ where $f_m = \hat{f}_{G_m,H_m}(\bar{Z},\bar{Y})$, and $(H_m)$ is a $p$-family of complete graphs. \begin{itemize} \item If $(G_m)$ is a $p$-family of graphs of bounded tree-width, then $(f_m) \in \mathsf{VP}$. \item If $(G_m)$ is a $p$-family of graphs of bounded path-width, then $(f_m) \in \mathsf{VBP}$. \end{itemize} \end{theorem} \subsection{$\mathsf{VP}$-completeness} We now turn our attention towards establishing $\mathsf{VP}$-\emph{hardness} of the homomorphism polynomials. We need to show that there exists a $p$-family $(G_m)$ of bounded tree-width graphs such that $(f_{G_m,H_m}(\bar{Y}))$ is hard for $\mathsf{VP}$ under projections. We use \emph{rigid} and mutually \emph{incomparable} graphs in the construction of $G_m$. Let $I := \{I_0, I_1, I_2\}$ be a fixed set of three connected, rigid and mutually incomparable graphs. Note that they are necessarily \emph{non-bipartite}. Let $c_{I_i} = |V(I_i)|$. Choose an integer $c_{\max} > \max\;\{ c_{I_0},c_{I_1},c_{I_2} \}$. Identify two distinct vertices $\{v_{\ell}^0, v_{r}^0\}$ in $I_0$, three distinct vertices $\{v_\ell^1,v_r^1,v_p^1\}$ in $I_1$, and three distinct vertices $\{v_\ell^2,v_r^2,v_p^2\}$ in $I_2$. For every $m$ a power of 2, we denote a complete (perfect) binary tree with $m$ leaves by $\mathsf{T}_m$.
We construct a sequence of graphs $G_m$ (Fig.~\ref{fig:rigid-tree}) from $\mathsf{T}_m$ as follows: first replace the root by the graph $I_0$, then all the nodes on a particular level are replaced by either $I_1$ or $I_2$ alternately (cf.~Fig.~\ref{fig:rigid-tree}). Now we add edges; suppose we are at a `node' which is labeled $I_i$ and the left child and right child are labeled $I_j$, we add an edge between $v^i_\ell$ and $v^j_p$ in the left child, and an edge between $v^i_r$ and $v^j_p$ in the right child. Finally, to obtain $G_m$ we expand each added edge into a simple path with $c_{\max}$ vertices on it (cf.~Fig.~\ref{fig:rigid-tree}). That is, a left-edge connection between two incomparable graphs in the tree looks like, $I_i(v^i_\ell)\relbar\mbox{(path with }c_{\max}\mbox{ vertices)}\relbar (v^j_p)I_j.$ \begin{figure} \caption{The graph $G_m$.} \label{fig:rigid-tree} \end{figure} \begin{theorem} \label{thm:lb-vp} Over any field, the family of homomorphism polynomials $(f_m)$, with $f_m(\bar{Y}) = f_{G_m,H_m}(\bar{Y})$, where \begin{itemize} \item $G_m$ is defined as above (see~Fig.~\ref{fig:rigid-tree}), and \item $H_m$ is an undirected complete graph on $\mathsf{poly}(m)$, say $m^6$, vertices, \end{itemize} is complete for $\mathsf{VP}$ under $p$-projections. \end{theorem} \begin{proof} \emph{Membership} in $\mathsf{VP}$ follows from Theorem~\ref{thm:ub}. We proceed with the \emph{hardness} proof. The idea is to obtain the $\mathsf{VP}$-complete universal polynomial from~\cite{Raz10} as a projection of $f_m$. This universal polynomial is computed by a normal-form homogeneous circuit with alternating unbounded fan-in $+$ and bounded fan-in $\times$ gates. We would like to put its parse trees in bijection with homomorphisms from $G$ to $H$. This becomes easier if we use an equivalent universal circuit in a nice normal form as described in \cite{DMMRS14}.
The normal form circuit is {\em multiplicatively disjoint}; sub-circuits of $\times$ gates are disjoint (see \cite{MP08}). This ensures that even though $C_n$ itself is not a formula, all its parse trees are already subgraphs of $C_n$ even without unwinding it into a formula. Our starting point is the related graph $J_n'$ in~\cite{DMMRS14}. The parse trees in $C_n$ are complete alternating unary-binary trees. The graph $J_n'$ is constructed in such a way that the parse trees are now in bijection with complete binary trees. To achieve this, we ``shortcut'' the $+$ gates, while preserving information about whether a subtree came in from the left or the right. For completeness sake we describe the construction of $J_n'$ from \cite{DMMRS14}. We obtain a sequence of graphs $(J_n')$ from the undirected graphs underlying $(C_n)$ as follows. Retain the multiplication and input gates of $C_n$. Let us make two copies of each. For each retained gate, $g$, in $C_n$; let $g_L$ and $g_R$ be the two copies of $g$ in $J'_n$. We now define the edge connections in $J'_n$. Assume $g$ is a $\times$ gate retained in $J'_n$. Let $\alpha$ and $\beta\,$ be two $+$ gates feeding into $g$ in $C_n$. Let $\{\alpha_1,\ldots,\alpha_i\}$ and $\{\beta_1,\ldots,\beta_j\}$ be the gates feeding into $\alpha$ and $\beta$, respectively. Assume without loss of generality that $\alpha$ and $\beta$ feed into $g$ from left and right, respectively. We add the following set of edges to $J'_n$: $\{(\alpha_{1L},g_L),\ldots,(\alpha_{iL},g_L)\}$, $\{(\beta_{1R},g_L),\ldots,(\beta_{jR},g_L)\}$, $\{(\alpha_{1L},g_R),\ldots,(\alpha_{iL},g_R)\}$ and $ \{(\beta_{1R},g_R),\ldots,(\beta_{jR},g_R) \}$. We now would like to keep a single copy of $C_n$ in these set of edges. So we remove the vertex $root_R$ and we remove the remaining spurious edges in following way. 
If we assume that all edges are directed from root towards leaves, then we keep only edges induced by the vertices reachable from $root_L$ in this directed graph. In~\cite{DMMRS14}, it was observed that there is a one-to-one correspondence between parse trees of $C_n$ and subgraphs of $J_n'$ that are rooted at $root_L$ and isomorphic to $\mathsf{T}_{2^{k(n)}}$. We now transform $J_n'$ using the set $I = \{I_0,I_1,I_2\}$. This is similar to the transformation we did to the balanced binary tree $\mathsf{T}_m$. We replace each vertex by a graph in $I$; $root_L$ gets $I_0$ and the rest of the layers get $I_1$ or $I_2$ alternately (as in Fig.~\ref{fig:rigid-tree}). Edge connections are made so that a left/right child is connected to its parent via the edge $(v^j_p,v^i_\ell)/(v^j_p,v^i_r)$. Finally we replace each edge connection by a path with $c_{\max}$ vertices on it (as in Fig.~\ref{fig:rigid-tree}), to obtain the graph $J_n$. All edges of $J_n$ are labeled 1, with the following exceptions: Every input node contains the same rigid graph $I_i$. It has a vertex $v^i_p$. Each path connection to other nodes has this vertex as its end point. Label such path edges that are incident on $v^i_p$ by the label of the input gate. Let $m := 2^{k(n)}$. The choice of $\mathsf{poly}(m)$ is such that $4s_n \leqslant \mathsf{poly}(m)$, where $s_n$ is the size of $J_n$. The $\bar{Y}$ variables are set to $\{0,1,\bar{x}\}$ such that the non-zero variables pick out the graph $J_n$. From the observations of~\cite{DMMRS14} it follows that for each parse tree $p$-$\mathsf{T}$ of $C_n$, there exists a homomorphism $\Phi \colon G_{2^{k(n)}} \to J_n$ such that ${\emph{mon}} (\Phi)$ is exactly equal to ${\emph{mon}} (p\mbox{-}\mathsf{T})$. By ${\emph{mon}}(\cdot)$ we mean the monomial associated with an object. We claim that these are the only valid homomorphisms from $G_{2^{k(n)}} \to J_n$.
We observe the following properties of homomorphisms from $G_{2^{k(n)}} \to J_n$, from which the claim follows. In the following by a rigid-node-subgraph we mean a graph in $\{I_0,I_1,I_2\}$ that replaces a vertex. \begin{enumerate} \item[$(i)$] Any homomorphic image of a rigid-node-subgraph of $G_{2^{k(n)}}$ in $J_n$, cannot split across two mutually incomparable rigid-node-subgraphs in $J_n$. That is, there cannot be two vertices in a rigid subgraph of $G_{2^{k(n)}}$ such that one of them is mapped into a rigid subgraph say $n_1$, and the other one is mapped into another rigid subgraph say $n_2$. This follows because homomorphisms do not increase distance. \item[$(ii)$] Because of $(i)$, with each homomorphic image of a rigid node $g_i \in G_{2^{k(n)}}$, we can associate at most one rigid node of $J_n$, say $n_i$, such that the homomorphic image of $g_i$ is a subgraph of $n_i$ and the paths (corresponding to incident edges) emanating from it. But such a subgraph has a homomorphism to $n_i$ itself: fold each hanging path into an edge and then map this edge into an edge within $n_i$. (For instance, let $\rho$ be a path hanging off $n_i$ and attached to $n_i$ at $u$, and let $v$ be any neighbour of $u$ within $n_i$. Mapping vertices of $\rho$ to $u$ and $v$ alternately preserves all edges and hence is a homomorphism.) Therefore, we note that in such a case we have a homomorphism from $g_i \to n_i$. By rigidity and mutual incomparability, $g_i$ must be the same as $n_i$, and this folded-path homomorphism must be the identity map. The other scenario, where we cannot associate any $n_i$ because $g_i$ is mapped entirely within connecting paths, is not possible since it contradicts \emph{non-bipartiteness} of mutually-incomparable graphs. \end{enumerate} \textbf{Root must be mapped to the root:} The rigidity of $I_0$ and Property $(ii)$ implies that $I_0 \in G_{2^{k(n)}}$ is mapped identically to $I_0$ in $J_n$. 
\\ \textbf{Every level must be mapped within the same level:} The children of $I_0$ in $G_{2^{k(n)}}$ are mapped to the children of the root while respecting left-right behaviour. Firstly, the left child cannot be mapped to the root because of incomparability of the graphs $I_1$ and $I_0$. Secondly, the left child cannot be mapped to the right child (or vice versa) even though they are the same graphs, because the minimum distance between the vertex in $I_0$ where the left path emanates and the right child is $c_{\max} + 1$ whereas the distance between the vertex in $I_0$ where the left path emanates and the left child is $c_{\max}$. So some vertex from the left child must be mapped into the path leading to the right child and hence the rest of the left child must be mapped into a proper subgraph of right child. But this contradicts rigidity of $I_1$. Continuing like this, we can show that every level must map within the same level and that the mapping within a level is correct. \qed\end{proof} \subsection{$\mathsf{VBP}$-completeness} Finally, we show that homomorphism polynomials are also rich enough to characterize computation by algebraic branching programs. Here we establish that there exists a $p$-family $(G_k)$ of undirected \emph{bounded path-width} graphs such that the family $(f_{G_k,H_k}(\bar{Y}))$ is $\mathsf{VBP}$-complete with respect to $p$-projections. We note that for $\mathsf{VBP}$-completeness under projections, the construction in~\cite{DMMRS14} required directed graphs. In the undirected setting they could establish hardness only under \emph{linear p-projection}, that too using $0$-$1$ valued weights. As before, we use rigid and mutually incomparable graphs in the construction of $G_k$. Let $I := \{I_1, I_2\}$ be two connected, non-bipartite, rigid and mutually incomparable graphs. Arbitrarily pick vertices $u \in V(I_1)$ and $v \in V(I_2)$. Let $c_{I_i} = |V(I_i)|$, and $c_{max} = \max\{c_{I_1},c_{I_2}\}$. 
Consider the sequence of graphs G$_k$~(Fig.~\ref{fig:r8-path-r8}); for every $k$, there is a simple path with $(k-1)+2c_{max}$ edges between a copy of $I_1$ and $I_2$. The path is between the vertices $u \in V(I_1)$ and $v \in V(I_2)$. The path between vertices $a$ and $b$ in G$_k$ contains $(k-1)$ edges. \begin{figure} \caption{The graph G$_k$.} \label{fig:r8-path-r8} \end{figure} In other words, connect $I_1$ and $I_2$ by stringing together a path with $c_{max}$ edges between $u$ and $a$, a path with $k-1$ edges between $a$ and $b$, and a path with $c_{max}$ edges between $b$ and $v$. \begin{theorem} \label{thm:lb-vbp} Over any field, the family of homomorphism polynomials $(f_k)$, where \begin{itemize} \item $\textrm{G}_k$ is defined as above (see Fig.~\ref{fig:r8-path-r8}), \item $H_k$ is the undirected complete graph on $O(k^2)$ vertices, \item $f_k(\bar{Y}) = f_{\text{G}_k,H_k}(\bar{Y})$, \end{itemize} is complete for $\mathsf{VBP}$ with respect to $p$-projections. \end{theorem} \begin{proof} \noindent\textbf{Membership:} It follows from Theorem~\ref{thm:ub}. \noindent\textbf{Hardness:} Let $(g_n) \in \mathsf{VBP}$. Without loss of generality, we can assume that $g_n$ is computable by a layered branching program of polynomial size such that the number of layers, $\ell$, is more than the width of the algebraic branching program. Let $B_n'$ be the undirected graph underlying the layered branching program $A_n$ for $g_n$. Let $B_n$ be the following graph: \(I_1(u) \relbar (s)B_n'(t) \relbar (v)I_2\), that is, $u \in I_1$ is connected to $ s \in B_n'$ via a path with $c_{max}$ edges and $ t \in B_n'$ is connected to $v \in I_2$ via a path with $c_{max}$ edges (cf.~Fig.~\ref{fig:r8-path-r8}). The edges in $B_n'$ inherit the weight from $A_n$, and the rest of the edges in $B_n$ have weight $1$.
Let us now consider $f_\ell$ when the variables on the edges of $H_\ell$ are instantiated to values in $\{0,1\}$ or variables of $g_n$ so that we obtain $B_\ell$ as a subgraph of $H_\ell$. We claim that a valid homomorphism from G$_\ell \to B_\ell$ must satisfy the following properties: \begin{itemize} \item[(P1)] $I_1$ in G$_\ell$ must be mapped to $I_1$ in $B_\ell$ using the identity homomorphism, \item[(P2)] $I_2$ in G$_\ell$ must be mapped to $I_2$ in $B_\ell$ using the identity homomorphism. \end{itemize} Assuming the claim, it follows that homomorphisms from G$_\ell \to B_\ell$ are in one-to-one correspondence with $s$-$t$ paths in $A_n$. In particular, the vertex $a \in \text{G}_\ell$ is mapped to the vertex $s$ in $B_\ell$, and the vertex $b \in \text{G}_\ell$ is mapped to the vertex $t$ in $B_\ell$. Also, the monomial associated with a homomorphism and its corresponding path are the same. Therefore, we have, \[ f_{\text{G}_\ell,B_\ell} = g_n .\] Since $\ell$ is polynomially bounded, we obtain $\mathsf{VBP}$-completeness of $(f_k)$ over any field. Let us now prove the claim. We first prove that a valid homomorphism from G$_\ell \to B_\ell$ must satisfy the property~(P1). There are three cases to consider. \begin{itemize} \item \textbf{Case 1:} \emph{Some vertex of $V(I_1) \subseteq V(G_\ell)$ is mapped to $u$ in $B_\ell$.} Since homomorphisms cannot increase distances between two vertices, we conclude that $V(I_1)$ must be mapped within the subgraph $I_1(u)-(a)$. Suppose further that some vertex on the $(u)-(a)$ path other than $u$ is also in the homomorphic image of $V(I_1)$. Some neighbour of $u$ in $V(I_1) \subseteq V(B_\ell)$, say $u'$, must also be in the homomorphic image, since otherwise we have a homomorphism from the non-bipartite $I_1$ to a path, a contradiction. But note that $I_1(u)-(a)$ has a homomorphism to $I_1$: fold the $(u)-(a)$ path onto the edge $u-u'$ in $I_1$. 
Hence, composing the two homomorphisms we obtain a homomorphism from $I_1$ to $I_1$ which is not surjective. This contradicts the rigidity of $I_1$. So in fact the homomorphism must map $V(I_1)$ from $G_\ell$ entirely within $I_1$ from $B_\ell$, and by rigidity of $I_1$, this must be the identity map. \item \textbf{Case 2:} \emph{Some vertex of $V(I_1)\subseteq V(G_\ell)$ is mapped to $v$ in $B_\ell$.} Since homomorphisms cannot increase distances between two vertices, we conclude that $V(I_1)$ must be mapped within the subgraph $(b)-(v)I_2$. But note that $(b)-(v)I_2$ has a homomorphism to $I_2$ (fold the $(b)-(v)$ path onto any edge incident on $v$ within $I_2$). Hence, composing the two homomorphisms, we obtain a homomorphism from $I_1$ to $I_2$. This is a contradiction, since $I_1$ and $I_2$ were incomparable graphs to start with. \item \textbf{Case 3:} \emph{No vertex of $V(I_1) \subseteq V(G_\ell)$ is mapped to $u$ or $v$ in $B_\ell$.} Then $V(I_1)\subseteq V(\text{G}_\ell)$ must be mapped entirely within one of the following disjoint regions of $B_\ell$: $(a)$~$I_1 \setminus \{u\},$ $(b)$~bipartite graph between vertices $u$ and $v,$ and $(c)$~$I_2\setminus \{v\}$. But then we contradict \emph{rigidity of }$I_1$ in the first case, \emph{non-bipartiteness of }$I_1$ in the second case, and \emph{incomparability of }$I_1$ \emph{and} $I_2$ in the last. \end{itemize} In a similar way, we could also prove that a valid homomorphism from G$_\ell \to B_\ell$ must satisfy the property (P2). \qed\end{proof} In the above proof, we crucially used incomparability of $I_1$ and $I_2$ to rule out flipping an undirected path. It turns out that over fields of characteristic not equal to 2, this is not crucial, since we can divide by 2. We show that if the characteristic of the underlying field is not equal to 2, then the sequence $(G_k)$ in the preceding theorem can be replaced by a sequence of simple undirected cycles of appropriate length. 
In particular, we establish the following result. \begin{theorem} \label{thm:lb-vbp-char-not-2} Over fields of char $\neq2$, the family of homomorphism polynomials $(f_k)$, $f_k = f_{G_k,H_k},$ where \begin{itemize} \item $G_k$ is a simple undirected cycle of length $2k+1$ and, \item $H_k$ is an undirected complete graph on $(2k+1)^2$ vertices, \end{itemize} is complete for $\mathsf{VBP}$ under $p$-projections. \end{theorem} \begin{proof} \noindent\textbf{Membership:} As before, it follows from Theorem~\ref{thm:ub}. \noindent\textbf{Hardness:} Let $(g_n) \in \mathsf{VBP}$. Without loss of generality, we can assume that $g_n$ is computable by a layered branching program of polynomial size satisfying the following properties: \begin{itemize} \item The number of layers, $\ell \geqslant 3$, is odd; say $\ell=2m+1$. So every path from $s$ to $t$ in the branching program has exactly $2m$ edges. \item The number of layers, is more than the width of the algebraic branching program, \end{itemize} Let us consider $f_m$ when the variables on the edges of $H_m$ have been set to 0, 1, or variables of $g_n$ so that we obtain the undirected graph underlying the layered branching program $A_n$ for $g_n$ as a subgraph of $H_m$. Now change the weight of the $(s,t)$ edge from 0 to weight $y$, where $y$ is a new variable distinct from all the other variables of $g_n$. Call this modified graph $B_m$. Note that without the new edge, $B_m$ would be bipartite. Let us understand the homomorphisms from $G_m$ to $B_m$. Homomorphisms from a simple cycle $C$ to a graph $\mathcal{G}$ are in one-to-one correspondence with closed walks of the same length in $\mathcal{G}$. Moreover, if the cycle $C$ is of odd length, the closed walk must contain a simple odd cycle of at most the same length. Therefore, the only valid homomorphism from $G_m$ to $B_m$ are walks of length $\ell=2m+1$, and they all contain the edge $(s,t)$ with weight $y$. 
But the cycles of length $\ell$ in $B_m$ are in one-to-one correspondence with $s$-$t$ paths in $A_n$. Each cycle contributes $2\ell$ walks: we can start the walk at any of the $\ell$ vertices, and we can follow the directions from $A_n$ or go against those directions. Thus we have, \[f_{G_m,B_m} = (2 (2m+1)) \cdot y\cdot g_n = (2\ell) \cdot y\cdot g_n.\] Let $p$ be the characteristic of the underlying field. If $p=0$, we substitute $y = (2\ell)^{-1}$ to obtain $g_n$. If $p > 2$, then $2\ell$ has an inverse if and only if $\ell$ has an inverse. Since $\ell \geqslant 3$ is an odd number, either $p$ does not divide $\ell$ or it does not divide $\ell+2$. Hence, at least one of $\ell$, $\ell+2$ has an inverse. Thus $g_n$ is a projection of $f_m$ or $f_{m+1}$ depending on whether $\ell$ or $\ell+2$ has an inverse in characteristic $p$. Since $\ell=2m+1$ is polynomially bounded in $n$, we therefore show $(f_k)$ is $\mathsf{VBP}$-complete with respect to $p$-projections over any field of characteristic not equal to 2. \qed\end{proof} \section{Conclusion} \label{sec:concl} In this paper, we have shown that over finite fields, five families of polynomials are intermediate in complexity between $\mathsf{VP}$ and $\mathsf{VNP}$, assuming the PH does not collapse. Over rationals and reals, we have established that two of these families are provably not monotone $p$-projections of the permanent polynomials. Finally, we have obtained a natural family of polynomials, defined via graph homomorphisms, that is complete for $\mathsf{VP}$ with respect to projections; this is the first family defined independent of circuits and with such hardness. An analogous family is also shown to be complete for $\mathsf{VBP}$. Several interesting questions remain. The definitions of our intermediate polynomials use the size $q$ of the field $\mathbb{F}_q$, not just the characteristic $p$. 
Can we find families of polynomials with integer coefficients, that are $\mathsf{VNP}$-intermediate (under some natural complexity assumption of course) over all fields of characteristic $p$? Even more ambitiously, can we find families of polynomials with integer coefficients, that are $\mathsf{VNP}$-intermediate over all fields with non-zero characteristic? at least over all finite fields? over fields $\mathbb{F}_p$ for all (or even for infinitely many) primes $p$? Equally interestingly, can we find an explicit family of polynomials that is $\mathsf{VNP}$-intermediate in characteristic zero? A related question is whether there are any polynomials defined over the integers, that are $\mathsf{VNP}$-intermediate over $\mathbb{F}_q$ (for some fixed $q$) but that are monotone $p$-projections of the permanent. Can we show that the remaining intermediate polynomials are also not polynomial-sized monotone projections of the permanent? Do such results have any interesting consequences, say, improved circuit lower bounds? \end{document}
\begin{document} \title[Stokes systems]{The Green function for the Stokes system with measurable coefficients} \author[J. Choi]{Jongkeun Choi} \address[J. Choi]{Department of Mathematics, Korea University, Seoul 02841, Republic of Korea} \email{jongkeun\[email protected]} \thanks{} \author[K.-A. Lee]{Ki-Ahm Lee} \address[K.-A. Lee]{Department of Mathematical Sciences, Seoul National University, Seoul 151-747, Republic of Korea \& Center for Mathematical Challenges, Korea Institute for Advanced Study, Seoul 130-722, Republic of Korea} \email{[email protected]} \thanks{} \subjclass[2010]{Primary 35J08, 35J57, 35R05} \keywords{Stokes system; Green function; $L^q$-estimate; $\mathrm{VMO}$ coefficients; Reifenberg flat domain} \begin{abstract} We study the Green function for the stationary Stokes system with bounded measurable coefficients in a bounded Lipschitz domain $\Omega\subset \mathbb{R}^n$, $n\ge 3$. We construct the Green function in $\Omega$ under the condition $(\bf{A1})$ that weak solutions of the system enjoy interior H\"older continuity. We also prove that $(\bf{A1})$ holds, for example, when the coefficients are $\mathrm{VMO}$. Moreover, we obtain the global pointwise estimate for the Green function under the additional assumption $(\bf{A2})$ that weak solutions of Dirichlet problems are locally bounded up to the boundary of the domain. By proving a priori $L^q$-estimates for Stokes systems with $\mathrm{BMO}$ coefficients on a Reifenberg domain, we verify that $(\bf{A2})$ is satisfied when the coefficients are $\mathrm{VMO}$ and $\Omega$ is a bounded $C^1$ domain. \end{abstract} \maketitle \section{Introduction} We consider the Dirichlet boundary value problem for the stationary Stokes system \begin{equation} \label{1j} \left\{ \begin{aligned} \sL\vec u+Dp=\vec f +D_\alpha \vec f_\alpha\quad &\text{in }\, \Omega,\\ \operatorname{div} \vec u=g \quad &\text{in }\, \Omega,\\ \vec u=0 \quad &\text{on }\, \partial \Omega, \end{aligned} \right. 
\end{equation} where $\Omega$ is a domain in $\mathbb R^n$. Here, $\sL$ is an elliptic operator of the form \[ \sL\vec u=-D_\alpha(A_{\alpha\beta}D_\beta \vec u), \] where the coefficients $A_{\alpha\beta}=A_{\alpha\beta}(x)$ are $n\times n$ matrix valued functions on $\mathbb R^n$ with entries $a_{\alpha\beta}^{ij}$ satisfying the strong ellipticity condition; i.e., there is a constant $\lambda\in (0,1]$ such that for any $x\in \mathbb R^n$ and $\vec \xi, \, \vec \eta\in \mathbb R^{n\times n}$, we have \begin{equation} \label{ubc} \lambda\abs{\vec \xi}^2\le a_{\alpha\beta}^{ij}(x)\xi^j_\beta \xi^i_\alpha, \qquad \bigabs{a_{\alpha\beta}^{ij}(x)\xi^j_\beta \eta^i_\alpha}\le \lambda^{-1}\abs{\vec \xi}\abs{\vec\eta}. \end{equation} We do not assume that the coefficients $A_{\alpha\beta}$ are symmetric. The adjoint operator $\sL^*$ of $\sL$ is given by \[ \sL^*\vec u=-D_\alpha(A_{\beta \alpha}(x)^{\operatorname{tr}} D_\beta \vec u). \] We remark that the coefficients of $\sL^*$ also satisfy \eqref{ubc} with the same constant $\lambda$. There has been some interest in studying boundary value problems for Stokes systems with bounded coefficients; see, for instance, Giaquinta-Modica \cite{MR0641818}. They obtained various interior and boundary estimates for both linear and nonlinear systems of the type of the stationary Navier-Stokes system. Our first focus is the study of the Green function for the Stokes system with $L^\infty$ coefficients in a bounded Lipschitz domain $\Omega\subset \mathbb R^n$, $n\ge 3$. More precisely, we consider a pair $(\vec G(x,y), \vec \Pi(x,y))$, where $\vec G(x,y)$ is an $n\times n$ matrix valued function and $\vec \Pi(x,y)$ is an $n\times1$ vector valued function on $\Omega\times \Omega$, satisfying \[ \left\{ \begin{aligned} \sL_x\vec G(x,y)+D_x \vec \Pi(x,y)=\delta_y(x)\vec I &\quad \text{in }\, \Omega,\\ \operatorname{div}_x\vec G(x,y)=0 &\quad \text{in }\, \Omega,\\ \vec G(x,y)=0& \quad \text{on }\, \partial \Omega. 
\end{aligned} \right. \] Here, $\delta_y(\cdot)$ is the Dirac delta function concentrated at $y$ and $\vec I$ is the $n\times n$ identity matrix. See Definition \ref{0110.def} for the precise definition of the Green function. We prove that if weak solutions of either \[ \sL\vec u+Dp=0, \quad \operatorname{div} \vec u=0 \quad \text{in }\, B_R \] or \[ \sL^*\vec u+Dp=0, \quad \operatorname{div} \vec u=0 \quad \text{in }\, B_R \] satisfy the following De Giorgi-Moser-Nash type estimate \begin{equation} \label{0211.eq1} [\vec u]_{C^\mu(B_{R/2})}\le CR^{-n/2-\mu}\norm{\vec u}_{L^2(B_R)}, \end{equation} then the Green function $(\vec G(x,y), \vec \Pi(x,y))$ exists and satisfies a natural growth estimate near the pole; see Theorem \ref{1226.thm1}. It can be shown, for example, that if the coefficients of $\sL$ belong to the class of $\mathrm{VMO}$ (vanishing mean oscillations), then the interior H\"older estimate \eqref{0211.eq1} above holds; see Theorem \ref{0110.thm1}. Also, we are interested in the following global pointwise estimate for the Green function: there exists a positive constant $C$ such that \begin{equation} \label{0304.eq2} \abs{\vec G(x,y)}\le C\abs{x-y}^{2-n}, \quad \forall x,\, y\in \Omega, \quad x\neq y. \end{equation} If we assume further that the operator $\sL$ has the property that the weak solution of \[ \left\{ \begin{aligned} \sL\vec u+Dp=\vec f &\quad \text{in }\, \Omega,\\ \operatorname{div}{\vec u}=g &\quad \text{in }\, \Omega,\\ \vec u=0 &\quad \text{on }\, \partial \Omega, \end{aligned} \right. \] is locally bounded up to the boundary, then we obtain the pointwise estimate \eqref{0304.eq2} of the Green function. This local boundedness condition $(\bf{A2})$ is satisfied when the coefficients of $\sL$ belong to the class of $\mathrm{VMO}$ and $\Omega$ is a bounded $C^1$ domain. 
To see this, we employ the standard localization method and the global $L^q$-estimate for the Stokes system with Dirichlet boundary condition, which is our second focus in this paper. Green functions for the linear equation and system have been studied by many authors. In \cite{MR0161019}, Littman-Stampacchia-Weinberger obtained the pointwise estimate of the Green function for elliptic equation. Gr\"uter-Widman \cite{MR0657523} proved existence and uniqueness of the Green function for elliptic equation, and the corresponding results for elliptic systems with continuous coefficients were obtained in \cite{MR1354111,MR0894243}. Hofmann-Kim proved the existence of Green functions for elliptic systems with variable coefficients on any open domain. Their methods are general enough to allow the coefficients to be $\mathrm{VMO}$. For more details, see \cite{MR2341783}. We also refer the reader to \cite{MR2718661, MR2763343} and references therein for the study of Green functions for elliptic systems. Regarding the study of the Green function for the Stokes system with the Laplace operator, we refer the reader to \cite{MR2465713,MR2718661}. In those papers, the authors obtained the global pointwise estimate \eqref{0304.eq2} for the Green function on a three dimensional Lipschitz domain. Mitrea-Mitrea \cite{MR2763343} established regularity properties of the Green function for the Stokes system with Dirichlet boundary condition in a two or three dimensional Lipschitz domain. Recent progress may be found in the article of Ott-Kim-Brown \cite{MR3320459}. This work includes a construction of the Green function with mixed boundary value problem for the Stokes system in two dimensions. Our second focus in this paper is the global $L^q$-estimates for the Stokes systems of divergence form with the Dirichlet boundary condition. As mentioned earlier, the $L^q$-estimate for the Stokes system is the key ingredient in establishing the global pointwise estimate for the Green function. 
Moreover, the study of the regularity of solutions to the Stokes system plays an essential role in the mathematical theory of viscous fluid flows governed by the Navier-Stokes system. For this reason, the $L^q$-estimate for the Stokes system with the Laplace operator was discussed in many papers. We refer the reader to Galdi-Simader-Sohr \cite{MR1313554}, Maz'ya-Rossmann \cite{MR2321139}, and references therein. Recently, estimates in Besov spaces for the Stokes system were obtained by Mitrea-Wright \cite{MR2987056}. In this paper, we consider the $L^q$-estimates for Stokes systems with variable coefficients in non-smooth domains. More precisely, we prove that if the coefficients of $\sL$ have small bounded mean oscillations on a Reifenberg flat domain $\Omega$, then the solution $(\vec u, p)$ of the problem \eqref{1j} satisfies the following $L^q$-estimate: \begin{equation*} \norm{p}_{L^q(\Omega)}+\norm{D\vec u}_{L^q(\Omega)}\le C\big(\norm{\vec f}_{L^q(\Omega)}+\norm{\vec f_\alpha}_{L^q(\Omega)}+\norm{g}_{L^q(\Omega)}\big). \end{equation*} Moreover, we obtain the solvability in Sobolev space for the systems on a bounded Lipschitz domain. The $L^q$-estimates for elliptic and parabolic systems with variable coefficients on a Reifenberg flat domain have been studied by many authors. We refer the reader to Dong-Kim \cite{MR2835999, MR3013054} and Byun-Wang \cite{MR2069724}. In particular, in \cite{MR2835999}, the authors proved $L^q$-estimates for divergence form higher order systems with partially BMO coefficients on a Reifenberg flat domain. Their argument is based on mean oscillation estimates and $L^\infty$-estimates combined with the measure theory on the ``crawling of ink spots'' which can be found in \cite{MR0563790}. We mainly follow the arguments in \cite{MR2835999}, but the technical details are different due to the pressure term $p$. The presence of the pressure term $p$ makes the argument more involved. The organization of the paper is as follows. 
In Section \ref{sec_mr}, we introduce some notation and state our main theorems, including the existence and global pointwise estimates for Green functions, and their proofs are presented in Section \ref{1006@sec1}. Section \ref{sec_es} is devoted to the study of the $L^q$-estimate for the Stokes system with the Dirichlet boundary condition. In the Appendix, we provide some technical lemmas. \section{Main results} \label{sec_mr} Before we state our main theorems, we introduce some necessary notation. Throughout the article, we use $\Omega$ to denote a bounded domain in $\mathbb R^n$, where $n\ge 2$. For any $x=(x_1,\ldots,x_n)\in \Omega$ and $r>0$, we write $\Omega_r(x)=\Omega \cap B_r(x)$, where $B_r(x)$ is the usual Euclidean ball of radius $r$ centered at $x$. We also denote $$ B_r^+(x)=\{y=(y_1,\ldots,y_n)\in B_r(x):y_1>x_1\}. $$ We define $d_x=\operatorname{dist}(x,\partial \Omega)=\inf\set{\abs{x-y}:y\in \partial \Omega}$. For a function $f$ on $\Omega$, we denote the average of $f$ in $\Omega$ to be \[ (f)_\Omega=\fint_\Omega f\,dx. \] We use the notation \[ \operatorname{sgn} z= \left\{ \begin{aligned} z/\abs{z} &\quad \text{if }\, z\neq 0,\\ 0 &\quad \text{if }\, z=0. \end{aligned} \right. \] For $1\le q\le \infty$, we define the space $L^q_0(\Omega)$ as the family of all functions $u\in L^q(\Omega)$ satisfying $(u)_\Omega=0$. We denote by $W^{1,q}(\Omega)$ the usual Sobolev space and $W^{1,q}_0(\Omega)$ the closure of $C^\infty_0(\Omega)$ in $W^{1,q}(\Omega)$. Let $\vec f,\, \vec f_\alpha\in L^q(\Omega)^n$ and $g\in L^q_0(\Omega)$. We say that $(\vec u,p)\in W^{1,q}_0(\Omega)^n\times {L}^q_0(\Omega)$ is a weak solution of the problem \begin{equation}\tag{SP}\label{dp} \left\{ \begin{aligned} \sL \vec u+D p=\vec f+D_\alpha\vec f_\alpha &\quad \text{in }\, \Omega,\\ \operatorname{div} \vec u=g &\quad \text{in }\, \Omega, \end{aligned} \right. 
\end{equation} if we have \begin{equation} \label{1218.eq0} \operatorname{div} \vec u=g\quad \text{in }\, \Omega \end{equation} and \begin{equation*} \int_\Omega A_{\alpha \beta}D_\beta \vec u\cdot D_\alpha \vec \varphi\,dx-\int_\Omega p\operatorname{div} \vec\varphi\,dx =\int_\Omega \vec f\cdot \vec \varphi\,dx-\int_\Omega \vec f_\alpha \cdot D_\alpha \vec \varphi\,dx\end{equation*} for any $\vec \varphi\in C^\infty_0(\Omega)^n$. Similarly, we say that $(\vec u,p)\in W^{1,q}_0(\Omega)^n\times {L}^q_0(\Omega)$ is a weak solution of the problem \begin{equation}\tag{SP$^*$}\label{dps} \left\{ \begin{aligned} \sL^* \vec u+D p=\vec f+D_\alpha\vec f_\alpha &\quad \text{in }\, \Omega,\\ \operatorname{div} \vec u=g &\quad \text{in }\, \Omega, \end{aligned} \right. \end{equation} if we have \eqref{1218.eq0} and \begin{equation} \label{1218.eq1} \int_\Omega A_{\alpha \beta}D_\beta \vec \varphi\cdot D_\alpha \vec u\,dx-\int_\Omega p\operatorname{div} \vec\varphi\,dx=\int_\Omega \vec f\cdot \vec \varphi\,dx-\int_\Omega \vec f_\alpha \cdot D_\alpha \vec \varphi\,dx \end{equation} for any $\vec \varphi\in C^\infty_0(\Omega)^n$. \begin{definition}[Green function] \label{0110.def} Let $\vec G(x,y)$ be an $n\times n$ matrix valued function and $\vec \Pi(x,y)$ be an $n\times 1$ vector valued function on $\Omega\times\Omega$. We say that a pair $(\vec G(x,y),\vec\Pi(x,y))$ is a Green function for the Stokes system $\eqref{dp}$ if it satisfies the following properties: \begin{enumerate}[a)] \item $\vec G(\cdot,y)\in W^{1,1}_0(\Omega)^{n\times n}$ and $\vec G(\cdot,y)\in W^{1,2}(\Omega\setminus B_R(y))^{n\times n}$ for all $y\in \Omega$ and $R>0$. Moreover, $\vec \Pi(\cdot,y)\in L^1_0(\Omega)^n$ for all $y\in \Omega$. 
\item For any $y\in \Omega$, $(\vec G(\cdot,y),\vec \Pi(\cdot,y))$ satisfies \begin{equation*} \operatorname{div} \vec G(\cdot,y)=0 \quad \text{in }\, \Omega \end{equation*} and \begin{equation*} \sL\vec G(\cdot,y)+D \vec \Pi(\cdot,y)=\delta_y\vec I \quad \text{in }\, \Omega \end{equation*} in the sense that for any $1\le k\le n$ and $\vec \varphi\in C^\infty_0(\Omega)^n$, we have \[ \int_\Omega a_{\alpha\beta}^{ij}D_\beta G^{jk}(x,y)D_\alpha \varphi^i(x)\,dx-\int_\Omega \Pi^k(x,y)\operatorname{div} \vec \varphi(x)\,dx=\varphi^k(y). \] \item If $(\vec u,p)\in W^{1,2}_0(\Omega)^n\times L^2_0(\Omega)$ is the weak solution of \eqref{dps} with $\vec f,\, \vec f_\alpha\in L^\infty(\Omega)^n$ and $g\in {L}^\infty_0(\Omega)$, then we have \[ \vec u(x)=\int_\Omega \vec G(y,x)^{\operatorname{tr}}\vec f(y)\,dy-\int_\Omega D_\alpha \vec G(y,x)^{\operatorname{tr}}\vec f_\alpha(y)\,dy-\int_\Omega \vec\Pi(x,y)g(y)\,dy. \] \end{enumerate} \end{definition} \begin{remark} The $L^2$-solvability of the Stokes system with the Dirichlet boundary condition (see Section \ref{1006@sec2}) and the part c) of the above definition give the uniqueness of a Green function. Indeed, if $(\tilde{\vec G}(x,y), \tilde{\vec\Pi}(x,y))$ is another Green function for $\eqref{dp}$, then by the uniqueness of the solution, we have \[ \int_\Omega \vec G(y,x)^{\operatorname{tr}}\vec f(y)\,dy-\int_\Omega \vec \Pi(x,y)g(y)\,dy=\int_\Omega \tilde{\vec G}(y,x)^{\operatorname{tr}}\vec f(y)\,dy-\int_\Omega \tilde{\vec \Pi}(x,y)g(y)\,dy \] for any $\vec f\in C^\infty_0(\Omega)^n$ and $g\in C^\infty_0(\Omega)$. Therefore, we conclude that $(\vec G, \vec \Pi)=(\tilde{\vec G},\tilde{\vec \Pi})$ a.e. in $\Omega\times \Omega$. \end{remark} \subsection{Existence of the Green function} \label{0110.sec1} To construct the Green function, we impose the following conditions. 
\begin{A0} There exist positive constants $R_1$ and $K_1$ such that the following holds: for any $x_0\in \partial \Omega$ and $0<r\le R_1$, there is a coordinate system depending on $x_0$ and $r$ such that in the new coordinate system, we have $$ \Omega_r(x_0)=\{x\in B_r(x_0):x_1>\psi(x')\}, $$ where $\psi:\mathbb R^{n-1}\to \mathbb R$ is a Lipschitz function with $\operatorname{Lip}(\psi)\le K_1$. \end{A0} \begin{A1} There exist constants $\mu\in (0,1]$ and $A_1>0$ such that the following holds: if $(\vec u,p)\in W^{1,2}(B_R(x_0))^n\times L^2(B_R(x_0))$ satisfies \begin{equation} \label{160907@eq2} \left\{ \begin{aligned} \sL\vec u+Dp=0 \quad \text{in }\, B_R(x_0),\\ \operatorname{div} \vec u=0 \quad \text{in }\, B_R(x_0), \end{aligned} \right. \end{equation} where $x_0\in \Omega$ and $R\in (0,d_{x_0}]$, then we have \begin{equation} \label{160907@eq3} [\vec u]_{C^{\mu}(B_{R/2}(x_0))}\le A_1R^{-\mu}\left(\fint_{B_R(x_0)}\abs{\vec u}^2\,dx\right)^{1/2}, \end{equation} where $[\vec u]_{C^\mu(B_{R/2}(x_0))}$ denotes the usual H\"older seminorm. The statement is valid, provided that $\sL$ is replaced by $\sL^*$. \end{A1} \begin{theorem} \label{1226.thm1} Let $\Omega$ be a domain in $\mathbb R^n$ with $\operatorname{diam}(\Omega)\le K_0$, where $n\ge 3$. Assume conditions $(\bf{A0})$ and $(\bf{A1})$. Then there exist Green functions $(\vec G(x,y), \vec \Pi(x,y))$ and $(\vec G^*(x,y),\vec \Pi^*(x,y))$ for $\eqref{dp}$ and $\eqref{dps}$, respectively, satisfying the following identity: \begin{equation} \label{1226.eq1a} \vec G(x,y)=\vec G^*(y,x)^{\operatorname{tr}}, \quad \forall x,\, y\in \Omega, \quad x\neq y. \end{equation} Also, for any $x,\, y\in \Omega$ satisfying $0<\abs{x-y}<d_y/2$, we have \begin{equation*} \abs{\vec G(x,y)}\le C\abs{x-y}^{2-n}. 
\end{equation*} Moreover, for any $y\in \Omega$ and $R\in (0, d_y]$, we obtain \begin{enumerate}[i)] \item $\norm{\vec G(\cdot,y)}_{L^{2n/(n-2)}(\Omega\setminus B_R(y))}+\norm{D\vec G(\cdot,y)}_{L^2(\Omega\setminus B_R(y))}\le CR^{(2-n)/2}$. \item $\abs{\set{x\in \Omega:\abs{\vec G(x,y)}>t}}\le Ct^{-n/(n-2)}$ for all $t>d_y^{2-n}$. \item $\abs{\set{x\in \Omega:\abs{D_x\vec G(x,y)}>t}}\le Ct^{-n/(n-1)}$ for all $t>d^{1-n}_y$. \item $\norm{\vec G(\cdot,y)}_{L^q(B_R(y))}\le C_qR^{2-n+n/q}$, where $ q\in [1,n/(n-2))$. \item $\norm{D\vec G(\cdot,y)}_{L^q(B_R(y))}\le C_qR^{1-n+n/q}$, where $q\in [1,n/(n-1))$ \item $\norm{\vec \Pi(\cdot,y)}_{L^q(\Omega)}\le C_{y,q}$, where $q\in[1,n/(n-1))$. \end{enumerate} In the above, $C=C(n,\lambda,K_0,K_1,R_1,\mu,A_1)$, $C_q=C_q(n,\lambda,K_0,K_1,R_1,\mu, A_1,q)$, and $C_{y,q}=C_{y,q}(n,\lambda,K_0,K_1,R_1,\mu,A_1,q,d_y)$. The same estimates are also valid for $(\vec G^*(x,y), \vec \Pi^*(x,y))$. \end{theorem} \begin{remark} Let $(\vec u,p)\in W^{1,2}_0(\Omega)^n \times L^2_0(\Omega)$ be the weak solution of the problem \[ \left\{ \begin{aligned} \sL \vec u+D p=\vec f+D_\alpha\vec f_\alpha &\quad \text{in }\, \Omega,\\ \operatorname{div} \vec u=0 &\quad \text{in }\, \Omega. \end{aligned} \right. \] Then by the property c) of Definition \ref{0110.def} and the identity \eqref{1226.eq1a}, we have the following representation for $\vec u$: \begin{equation*} \vec u(x):=\int_\Omega \vec G(x,y)\vec f(y)\,dy-\int_\Omega D_\alpha \vec G(x,y)\vec f_\alpha(y)\,dy. \end{equation*} Also, the following estimates are easy consequences of the identity \eqref{1226.eq1a} and the estimates i) -- v) in Theorem \ref{1226.thm1} for $\vec G^*(\cdot,x)$: \begin{enumerate}[a)] \item $\norm{\vec G(x,\cdot)}_{L^{2n/(n-2)}(\Omega\setminus B_R(x))}+\norm{D\vec G(x,\cdot)}_{L^2(\Omega\setminus B_R(x))}\le CR^{(2-n)/2}$. \item $\abs{\set{y\in \Omega:\abs{\vec G(x,y)}>t}}\le Ct^{-n/(n-2)}$ for all $t>d_x^{2-n}$. 
\item $\abs{\set{y\in \Omega:\abs{D_y\vec G(x,y)}>t}}\le Ct^{-n/(n-1)}$ for all $t>d^{1-n}_x$. \item $\norm{\vec G(x,\cdot)}_{L^q(B_R(x))}\le C_qR^{2-n+n/q}$, where $ q\in [1,n/(n-2))$. \item $\norm{D\vec G(x,\cdot)}_{L^q(B_R(x))}\le C_qR^{1-n+n/q}$, where $q\in [1,n/(n-1))$. \end{enumerate} \end{remark} In the theorem and the remark below, we show that if the coefficients have a vanishing mean oscillation $(\mathrm{VMO})$, then the condition $(\bf{A1})$ holds. \begin{theorem} \label{0110.thm1} Suppose that the coefficients of $\sL$ belong to the class of $\mathrm{VMO}$; i.e., we have \[ \lim_{\rho\to 0}\omega_\rho(A_{\alpha\beta}):=\lim_{\rho\to 0}\sup_{x\in \mathbb R^n}\sup_{s\le \rho}\fint_{B_s(x)}\bigabs{A_{\alpha\beta}-(A_{\alpha\beta})_{B_s(x)}}=0. \] If $(\vec u, p)\in W^{1,2}(B_R(x_0))^n\times L^2(B_R(x_0))$ satisfies \eqref{160907@eq2} with $x_0\in \Omega$ and $0<R\le \min\{d_{x_0},1\}$, then for any $\mu\in (0,1)$, the estimate \eqref{160907@eq3} holds with the constant $A_1$ depending only on $n$, $\lambda$, $\mu$, and the $\mathrm{VMO}$ modulus of the coefficients. \end{theorem} \begin{remark} \label{160907@rem1} In the above theorem, the constant $\min\{d_{x_0},1\}$ is interchangeable with $\min\{d_{x_0},c\}$ for any fixed $c\in (0, \infty)$, possibly at the cost of increasing the constant $A_1$. Setting $c=\operatorname{diam}\Omega$, the condition $(\bf{A1})$ holds with the constant $A_1$ depending on $n$, $\lambda$, $\operatorname{diam}\Omega$, $\mu$, and the $\mathrm{VMO}$ modulus $\omega_\rho$ of the coefficients. \end{remark} The following corollary is an immediate consequence of Theorem \ref{1226.thm1} and Remark \ref{160907@rem1}. \begin{corollary} Let $\Omega$ be a Lipschitz domain in $\mathbb R^n$, where $n\ge 3$. Suppose the coefficients of $\sL$ belong to the class of $\mathrm{VMO}$. Then there exists the Green function for \eqref{dp} and it satisfies the assertions in Theorem \ref{1226.thm1}. 
\end{corollary} \subsection{Global estimate of the Green function} \label{0110.sec2} We impose the following assumption to obtain the global pointwise estimate for the Green function. \begin{A2} There exists a constant $A_2>0$ such that if $(\vec u, p)\in W^{1,2}_0(\Omega)^n\times L^2_0(\Omega)$ satisfies \begin{equation} \label{170418@eq1} \left\{ \begin{aligned} \sL\vec u+Dp=\vec f \quad \text{in }\, \Omega,\\ \operatorname{div} \vec u=0 \quad \text{in }\,\Omega, \end{aligned} \right. \end{equation} where $\vec f\in L^\infty(\Omega)^n$, then $\vec u\in L^\infty(\Omega)^n$ with the estimate $$ \|\vec u\|_{L^\infty(\Omega_{R/2}(x_0))}\le A_2\left(R^{-n/2}\|\vec u\|_{L^2(\Omega_R(x_0))}+R^2\|\vec f\|_{L^\infty(\Omega_R(x_0))}\right) $$ for any $x_0\in \Omega$ and $0<R<\operatorname{diam}\Omega$. The statement is valid, provided that $\sL$ is replaced by $\sL^*$. \end{A2} \begin{theorem} \label{1226.thm2} Let $\Omega$ be a domain in $\mathbb R^n$ with $\operatorname{diam}(\Omega)\le K_0$, where $n\ge 3$. Assume conditions $(\bf{A0})$, $(\bf{A1})$, and $(\bf{A2})$. Let $(\vec G(x,y),\vec \Pi(x,y))$ be the Green function for $\eqref{dp}$ in $\Omega$ as constructed in Theorem \ref{1226.thm1}. Then we have the global pointwise estimate for $\vec G(x,y)$: \begin{equation} \label{0109.eq1} \abs{\vec G(x,y)}\le C\abs{x-y}^{2-n}, \quad \forall x,\,y\in \Omega, \quad x\neq y, \end{equation} where $C=C(n,\lambda,K_0,K_1, R_1,A_2)$. \end{theorem} From the global $L^q$-estimates for the Stokes systems in Section \ref{sec_es}, we obtain an example of the condition $(\bf{A2})$ in the theorem below. The proof of the theorem follows a standard localization argument; see Section \ref{0304.sec1} for the details. Similar results for elliptic systems are given for the Dirichlet problem in \cite{MR2718661} and for the Neumann problem in \cite{MR3105752}. \begin{theorem} \label{0110.thm2} Let $\Omega$ be a domain in $\mathbb R^n$ with $\operatorname{diam}(\Omega)\le K_0$, where $n\ge 3$. 
Assume the condition $(\bf{A0})$ with a sufficiently small $K_1$, depending only on $n$ and $\lambda$. If the coefficients of $\sL$ belong to the class of $\mathrm{VMO}$, then the condition $(\bf{A2})$ holds with the constant $A_2$ depending only on $n$, $\lambda$, $K_0$, $R_1$, and the $\mathrm{VMO}$ modulus of the coefficients. \end{theorem} By combining Theorems \ref{1226.thm2} and \ref{0110.thm2}, we immediately obtain the following result. \begin{corollary} Let $\Omega$ be a bounded $C^1$ domain in $\mathbb R^n$, where $n\ge 3$. Suppose that the coefficients of $\sL$ belong to the class of $\mathrm{VMO}$. Then there exists the Green function for $\eqref{dp}$ and it satisfies the global pointwise estimate \eqref{0109.eq1}. \end{corollary} \section{Some auxiliary results} \subsection{$L^2$-solvability} \label{1006@sec2} In this subsection, we consider the existence theorem for weak solutions of the Stokes system with measurable coefficients. For the solvability of the Stokes system, we impose the following condition. \begin{D} Let $\Omega$ be a bounded domain in $\mathbb R^n$, where $n\ge 2$. There exist a linear operator $B:L^2_0(\Omega)\to W^{1,2}_0(\Omega)^n$ and a constant $A>0$ such that \[ \operatorname{div} Bg=g\, \text{ in }\, \Omega \quad \text{and} \quad \norm{Bg}_{W^{1,2}_0(\Omega)}\le A\norm{g}_{L^2(\Omega)}. \] \end{D} \begin{remark} \label{K0122.rmk2} It is well known that if $\Omega$ is a Lipschitz domain with $\operatorname{diam}(\Omega)\le K_0$, which satisfies the condition $(\bf{A0})$, then for any $1<q<\infty$, there exists a bounded linear operator $B_q:L^q_0(\Omega)\to W^{1,q}_0(\Omega)^n$ such that \[ \operatorname{div} B_q g=g \,\text{ in }\,\Omega, \quad \norm{D(B_q g)}_{L^q(\Omega)}\le C\norm{g}_{L^q(\Omega)}, \] where the constant $C$ depends only on $n$, $q$, $K_0$, $K_1$, and $R_1$; see e.g., \cite{MR2263708}. 
We point out that if $\Omega=B_R(x)$ or $\Omega=B^+_R(x)$, then \begin{equation} \label{0123.eq1a} \norm{D(B_q g)}_{L^q(\Omega)}\le C\norm{g}_{L^q(\Omega)}, \end{equation} where $C=C(n,q)$. \end{remark} \begin{lemma} \label{122.lem1} Assume the condition $(\bf{D})$. Let $$ q=\frac{2n}{n+2} \quad \text{if }\, n\ge3 \quad \text{and} \quad q=2 \quad \text{if }\,n=2. $$ For $\vec f\in L^q(\Omega)^n$, $\vec f_\alpha\in L^2(\Omega)^n$, and $g\in L^2_0(\Omega)$, there exists a unique solution $(\vec u,p)\in W^{1,2}_0(\Omega)^n\times L^2_0(\Omega)$ of the problem \begin{equation} \label{0204.eq2} \left\{ \begin{aligned} \sL \vec u+D p=\vec f+D_\alpha\vec f_\alpha &\quad \text{in }\, \Omega,\\ \operatorname{div} \vec u=g &\quad \text{in }\, \Omega. \end{aligned} \right. \end{equation} Moreover, we have \begin{equation} \label{1227.eq1} \norm{p}_{L^2(\Omega)}+\norm{D\vec u}_{L^2(\Omega)}\le C\left(\norm{\vec f}_{L^{q}(\Omega)}+\norm{\vec f_\alpha}_{L^2(\Omega)}+\norm{g}_{L^2(\Omega)}\right), \end{equation} where $C=C(n,\lambda,A)$ if $n\ge 3$ and $C=C(\lambda,A,|\Omega|)$ if $n=2$. In the case when $\Omega=B_R(x)$ or $\Omega=B^+_R(x)$, if $\vec f\in L^2(\Omega)^n$, then we have \begin{equation} \label{122.eq1a} \norm{p}_{L^2(\Omega)}+\norm{D\vec u}_{L^2(\Omega)}\le C'\left(R\norm{\vec f}_{L^{2}(\Omega)}+\norm{\vec f_\alpha}_{L^2(\Omega)}+\norm{g}_{L^2(\Omega)}\right), \end{equation} where $C'=C'(n,\lambda)$. \end{lemma} \begin{proof} We mainly follow the argument given by Maz'ya-Rossmann \cite[Theorem 5.2]{MR2321139}. Also see \cite[Theorem 3.1]{MR3320459}. Let $H(\Omega)$ be the Hilbert space consisting of functions $\vec u\in W^{1,2}_0(\Omega)^n$ such that $\operatorname{div} \vec u=0$ and $H^\bot(\Omega)$ be the orthogonal complement of $H(\Omega)$ in $W^{1,2}_0(\Omega)^n$. We also define $P$ as the orthogonal projection from $W^{1,2}_0(\Omega)^n$ onto $H^\bot(\Omega)$. Then, one can easily show that the operator $\cB=P\circ B:L^2_0(\Omega)\to H^\bot(\Omega)$ is bijective. 
Moreover, we obtain for $g\in L^2_0(\Omega)$ that \begin{equation} \label{0123.eq1} \operatorname{div} \cB g=g \,\text{ in }\,\Omega, \quad \norm{\cB g}_{W^{1,2}(\Omega)}\le A\norm{g}_{L^2(\Omega)}. \end{equation} Now, let $\vec f,\, \vec f_\alpha\in L^2(\Omega)^n$ and $g\in L^2_0(\Omega)$. Then from the above argument, there exists a unique function $\vec w:=\cB g\in H^\bot(\Omega)$ such that \eqref{0123.eq1} is satisfied. Also, by the Lax-Milgram theorem, one can find the function $\vec v\in H(\Omega)$ that satisfies \begin{equation*} \int_\Omega A_{\alpha\beta}D_\beta \vec v\cdot D_\alpha \vec \varphi\,dx=\int_\Omega \vec f\cdot \vec \varphi\,dx-\int_\Omega \vec f_\alpha \cdot D_\alpha \vec \varphi\,dx-\int_\Omega A_{\alpha\beta}D_\beta \vec w\cdot D_\alpha \vec \varphi\,dx \end{equation*} for all $\vec \varphi\in H(\Omega)$. By setting $\vec \varphi=\vec v$ in the above identity, and then, using H\"older's inequality and the Sobolev inequality, we have \[ \norm{D\vec v}_{L^2(\Omega)}\le C\left(\norm{\vec f}_{L^q(\Omega)}+\norm{\vec f_\alpha}_{L^2(\Omega)}+\norm{D\vec w}_{L^2(\Omega)}\right), \] where $q=2$ if $n=2$ and $q=2n/(n+2)$ if $n\ge 3$. Therefore, the function $\vec u=\vec v+\vec w$ satisfies $\operatorname{div} \vec u= g$ in $\Omega$ and the following identity: \begin{equation} \label{1227.eq1b} \int_\Omega A_{\alpha\beta}D_\beta \vec u\cdot D_\alpha \vec \varphi\,dx=\int_\Omega \vec f\cdot \vec \varphi\,dx-\int_\Omega \vec f_\alpha \cdot D_\alpha \vec \varphi\,dx, \quad \forall \vec \varphi\in H(\Omega). \end{equation} Moreover, we have \begin{equation} \label{1227.eq2a} \norm{D\vec u}_{L^2(\Omega)}\le C\left(\norm{\vec f}_{L^{q}(\Omega)}+\norm{\vec f_\alpha}_{L^2(\Omega)}+\norm{g}_{L^2(\Omega)}\right). 
\end{equation} To find $p$, we let \[ \ell(\phi)=\int_\Omega A_{\alpha\beta}D_\beta \vec u\cdot D_\alpha (\cB\tilde{\phi})\,dx-\int_\Omega \vec f\cdot \cB\tilde{\phi}\,dx+\int_\Omega \vec f_\alpha \cdot D_\alpha(\cB\tilde{\phi})\,dx, \] where $\phi\in L^2(\Omega)$ and $\tilde{\phi}=\phi-(\phi)_\Omega\in L^2_0(\Omega)$. Since \[ \norm{\cB\tilde{\phi}}_{W^{1,2}(\Omega)}\le A\norm{\tilde{\phi}}_{L^2(\Omega)}\le C(n,A)\norm{\phi}_{L^2(\Omega)}, \] $\ell$ is a bounded linear functional on $L^2(\Omega)$. Therefore, there exists a function $p_0\in L^2(\Omega)$ so that \[ \int_\Omega p_0 \tilde{\phi}\,dx=\ell (\tilde\phi), \quad \forall \tilde{\phi}\in L^2_0(\Omega), \] and thus, $p=p_0-(p_0)_\Omega\in L^2_0(\Omega)$ also satisfies the above identity. Then by using the fact that $\cB(L^2_0(\Omega))=H^\bot(\Omega)$, we obtain \begin{equation} \label{122.eq1} \int_\Omega A_{\alpha\beta}D_\beta \vec u\cdot D_\alpha \vec \varphi\,dx-\int_\Omega p \operatorname{div} \vec \varphi\,dx=\int_\Omega \vec f \cdot \vec \varphi\,dx-\int_\Omega \vec f_\alpha \cdot D_\alpha \vec \varphi\,dx \end{equation} for all $\vec \varphi\in H^\bot(\Omega)$. From \eqref{1227.eq1b} and \eqref{122.eq1}, we find that $(\vec u,p)$ is the weak solution of the problem \eqref{0204.eq2}. Moreover, by setting $\vec \varphi=\cB p$ in \eqref{122.eq1}, we have \begin{equation*} \norm{p}_{L^2(\Omega)}\le C\left(\norm{D\vec u}_{L^2(\Omega)}+\norm{\vec f}_{L^{q}(\Omega)}+\norm{\vec f_\alpha}_{L^2(\Omega)}\right), \end{equation*} and thus, we get \eqref{1227.eq1} from \eqref{1227.eq2a}. To establish \eqref{122.eq1a}, we observe that \[ \norm{u}_{L^2(\Omega)}\le C(n)R\norm{Du}_{L^2(\Omega)}, \quad \forall u\in W^{1,2}_0(\Omega), \] provided that $\Omega=B_R(x)$ or $\Omega=B_R^+(x)$. By using the above inequality and \eqref{0123.eq1a}, and following the same argument as above, one can easily show that the estimate \eqref{122.eq1a} holds. The lemma is proved. 
\end{proof} \subsection{Interior estimates} In this subsection we derive some interior estimates of $\vec u$ and $p$. We start with the following Caccioppoli type inequality that can be found, for instance, in \cite{arXiv:1604.02690v2,MR0641818}. \begin{lemma} \label{1006@lem1} Assume that $(\vec u,p)\in W^{1,2}(B_R(x_0))^n\times L^2(B_R(x_0))$ satisfies $$ \left\{ \begin{aligned} \sL\vec u+Dp=0 &\quad \text{in }\, B_R(x_0),\\ \operatorname{div} \vec u=0 &\quad \text{in }\, B_R(x_0), \end{aligned} \right. $$ where $x_0\in \mathbb R^n$ and $R>0$. Then we have $$ \int_{B_{R/2}(x_0)}\bigabs{p-(p)_{B_{R/2}(x_0)}}^2\,dx+\int_{B_{R/2}(x_0)}\abs{D\vec u}^2\,dx\le CR^{-2}\int_{B_R(x_0)}\abs{\vec u}^2\,dx, $$ where $C=C(n,\lambda)$. \end{lemma} \begin{proof} Let $r\in (0, R]$ and denote $B_r=B_r(x_0)$. By Remark \ref{K0122.rmk2}, there exists $\vec \phi\in W^{1,2}_0(B_r)^n$ such that \begin{equation*} \operatorname{div} \vec \phi=p-(p)_{B_{r}} \,\text{ in }\, B_{r} \end{equation*} and \begin{equation*} \norm{\vec\phi}_{L^{2n/(n-2)}(B_{r})}\le C\norm{D\vec \phi}_{L^2(B_{r})}\le C\norm{p-(p)_{B_{r}}}_{L^2(B_{r})}, \end{equation*} where $C=C(n)$. Since \begin{equation} \label{160831@eq4} \sL\vec u+D(p-(p)_{B_{r}})=0 \quad \text{in }\, B_r, \end{equation} by testing with $\vec \phi$ in \eqref{160831@eq4}, we get \begin{equation} \label{1006@eq1b} \int_{B_{r}}\abs{p-(p)_{B_{r}}}^2\,dx\le C_1\int_{B_{r}}\abs{D\vec u}^2\,dx, \quad \forall r\in (0,R], \end{equation} where $C_1=C_1(n,\lambda)$. From the above inequality, it remains to show that \begin{equation} \label{160831@eq4a} \int_{B_{R/2}}|D\vec u|^2\,dx\le CR^{-2}\int_{B_R}|\vec u|^2\,dx. \end{equation} Let $0<\rho_1<\rho_2\le R$ and $\delta\in (0,1)$. Let $\eta$ be a smooth function on $\mathbb R^n$ such that $$ 0\le \eta\le 1, \quad \eta=1 \quad \text{in }\, B_{\rho_1}, \quad \operatorname{supp} \eta\subset B_{\rho_2}, \quad |D\eta|\le C(n)(\rho_2-\rho_1)^{-1}. 
$$ Then by applying $\eta^2 \vec u$ as a test function to $$ \sL\vec u+D(p-(p)_{B_{\rho_2}})=0 \quad \text{in }\, B_R $$ and using the fact that $\operatorname{div} u=0$, we have $$ \int_{B_R}A_{\alpha\beta}\eta D_\beta \vec u\cdot \eta D_\alpha \vec u\,dx=-2\int_{B_R}A_{\alpha\beta}\eta D_\beta \vec u\cdot D_\alpha \eta \vec u\,dx+2\int_{B_R}(p-(p)_{B_{\rho_2}})\eta D\eta \cdot \vec u\,dx, $$ and thus, by the ellipticity condition, H\"older's inequality, and Young's inequality, we obtain $$ \int_{B_{\rho_1}}|D\vec u|^2\,dx\le \frac{C_\delta}{(\rho_2-\rho_1)^2}\int_{B_{\rho_2}}|\vec u|^2\,dx+\frac{\delta}{C_1}\int_{B_{\rho_2}}|p-(p)_{B_{\rho_2}}|^2\,dx, $$ where $C_\delta=C_\delta(n,\lambda,\delta)$, and $C_1$ is the constant in \eqref{1006@eq1b}. From this together with \eqref{1006@eq1b}, it follows that \begin{equation} \label{160831@eq5a} \int_{B_{\rho_1}}|D\vec u|^2\,dx\le \frac{C_\delta}{(\rho_2-\rho_1)^2}\int_{B_{\rho_2}}|\vec u|^2\,dx+\delta\int_{B_{\rho_2}}|D\vec u|^2\,dx. \end{equation} Let us set $$ \delta=\frac{1}{8}, \quad \rho_k=\frac{R}{2}\left(2-\frac{1}{2^k}\right), \quad k=0,1,2,\ldots. $$ Then by \eqref{160831@eq5a}, we have $$ \int_{B_{\rho_k}}|D\vec u|^2\,dx\le \frac{C4^k}{R^2}\int_{B_{\rho_{k+1}}}|\vec u|^2\,dx+\delta\int_{B_{\rho_{k+1}}}|D\vec u|^2\,dx, \quad k\in \{0,1,2,\ldots\}, $$ where $C=C(n,\lambda)$. By multiplying both sides of the above inequality by $\delta^k$ and summing the terms with respect to $k=0,1,\ldots$, we obtain $$ \sum_{k=0}^\infty \delta^k\int_{B_{\rho_k}}|D\vec u|^2\,dx\le \frac{C}{R^2}\sum_{k=0}^\infty (4\delta)^k\int_{B_{\rho_{k+1}}}|\vec u|^2\,dx+\sum_{k=1}^\infty \delta^k\int_{B_{\rho_{k}}}|D\vec u|^2\,dx. $$ By subtracting the last term of the right-hand side in the above inequality, we obtain the desired estimate \eqref{160831@eq4a}. The lemma is proved. \end{proof} \begin{lemma} \label{1227.lem1} Assume the condition $(\bf{A1})$. 
Let $(\vec u,p)\in W^{1,2}(B_R(x_0))^n\times L^2(B_R(x_0))$ satisfy \begin{equation} \label{0302.eq1} \left\{ \begin{aligned} \sL\vec u+Dp=0 &\quad \text{in }\, B_R(x_0),\\ \operatorname{div} \vec u=0 &\quad \text{in }\, B_R(x_0), \end{aligned} \right. \end{equation} where $x_0\in \Omega$ and $R\in(0,d_{x_0}]$. Then we have \begin{equation} \label{0929.eq3} \int_{B_r(x_0)}\abs{D\vec u}^2\,dx\le C_1\left(\frac{r}{s}\right)^{n-2+2\mu}\int_{B_s(x_0)}\abs{D\vec u}^2\,dx, \quad 0<r<s\le R, \end{equation} where $C_1=C_1(n,\lambda,A_1)$. Moreover, we get \begin{equation} \label{0929=e1} \norm{\vec u}_{L^\infty(B_{R/2}(x_0))}\le C_2 R^{-n}\norm{\vec u}_{L^1(B_R(x_0))}, \end{equation} where $C_2=C_2(n,\mu, A_1)$. The statement is valid, provided that $\sL$ is replaced by $\sL^*$. \end{lemma} \begin{proof} To prove \eqref{0929.eq3}, we only need to consider the case $0<r\le s/4$. Also, by replacing $\vec u$ with $\vec u-(\vec u)_{B_s(x_0)}$ if necessary, we may assume that $(\vec u)_{B_s(x_0)}=0$. Since $(\vec u-(\vec u)_{B_{2r}(x_0)},p)$ is a weak solution of \eqref{0302.eq1}, we get from Lemma \ref{1006@lem1} that \begin{equation*} \int_{B_{r}(x_0)}\abs{D\vec u}^2\,dx\le Cr^{-2}\int_{B_{2r}(x_0)}\abs{\vec u-(\vec u)_{B_{2r}(x_0)}}^2\,dx. \end{equation*} By $(\bf{A1})$, the Poincar\'e inequality, and the above inequality, we have \begin{align*} \int_{B_r(x_0)}\abs{D\vec u}^2\,dx&\le Cr^{n-2+2\mu}[\vec u]^2_{C^{\mu}(B_{2r}(x_0))}\le Cr^{n-2+2\mu}[\vec u]^2_{C^{\mu}(B_{s/2}(x_0))}\\ &\le CA_1^2 r^{n-2+2\mu}s^{-n-2\mu}\int_{B_s(x_0)}\abs{\vec u}^2\,dx\le CA_1^2 \left(\frac{r}{s}\right)^{n-2+2\mu}\int_{B_s(x_0)}\abs{D\vec u}^2\,dx, \end{align*} which establishes \eqref{0929.eq3}. We observe that $(\bf{A1})$ and a well known averaging argument yield \begin{equation} \label{0316.eq1} \norm{\vec u}_{L^\infty(B_{R/2}(x_0))}\le C\left(\fint_{B_R(x_0)}\abs{\vec u}^2\,dx\right)^{1/2}, \end{equation} for any $R\in (0,d_{x_0}]$, where $C=C(n, \mu, A_1)$. 
For the proof that \eqref{0316.eq1} implies \eqref{0929=e1}, we refer to \cite[pp. 80-82]{MR1239172}. \end{proof} \begin{lemma} Let $\Omega$ be a domain in $\mathbb R^n$ with $\operatorname{diam}(\Omega)\le K_0$, where $n\ge 3$. Assume conditions $(\bf{A0})$ and $(\bf{A1})$. Let $(\vec u,p)\in W^{1,2}_0(\Omega)^n \times L^2_0(\Omega)$ be a solution of the problem \begin{equation*} \left\{ \begin{aligned} \sL \vec u+D p=\vec f &\quad \text{in }\, \Omega,\\ \operatorname{div} \vec u=0 &\quad \text{in }\, \Omega, \end{aligned} \right. \end{equation*} where $\vec f\in L^\infty(\Omega)^n$. Then for any $x_0\in \Omega$ and $R\in (0,d_{x_0}]$, $\vec u$ is continuous in $B_R(x_0)$ with the estimate \begin{equation} \label{1229.eq1} [\vec u]_{C^{\mu_1}(B_{R/2}(x_0))}\le C\left(R^{-n/2+1-\mu_1}\norm{D\vec u}_{L^2(\Omega)}+\norm{\vec f}_{L^q(\Omega)}\right) \end{equation} for any $q\in\big(\frac{n}{2},\frac{n}{2-\mu}\big)$, where $\mu_1:=2-n/q$ and $C=C(n,\lambda,\mu,A_1,q)$. Moreover, if $\vec f$ is supported in $B_R(x_0)$, then we have \begin{equation} \label{1229.eq1a} \norm{\vec u}_{L^\infty(B_{R/2}(x_0))}\le CR^2\norm{\vec f}_{L^\infty(B_R(x_0))}, \end{equation} where $C=C(n,\lambda,K_0, K_1,R_1,\mu, A_1)$. The statement is valid, provided that $\sL$ is replaced by $\sL^*$. \end{lemma} \begin{proof} Let $x\in B_{R/2}(x_0)$ and $0<s\le R/2$. We decompose $(\vec u,p)$ as $(\vec u_1,p_1)+(\vec u_2,p_2)$, where $(\vec u_2,p_2)\in W^{1,2}_0(B_s(x))^n\times {L}^2_0(B_s(x))$ satisfies \[ \left\{ \begin{aligned} \sL \vec u_2+D p_2=\vec f &\quad \text{in }\, B_s(x),\\ \operatorname{div} \vec u_2=0 &\quad \text{in }\, B_s(x). \end{aligned} \right. \] And then $(\vec u_1, p_1)\in W^{1,2}(B_s(x))^n\times L^2(B_s(x))$ satisfies \[ \left\{ \begin{aligned} \sL \vec u_1+D p_1=0 &\quad \text{in }\, B_s(x),\\ \operatorname{div} \vec u_1=0 &\quad \text{in }\, B_s(x). \end{aligned} \right. 
\] From the estimate \eqref{1227.eq1} and H\"older's inequality, it follows that \begin{equation} \label{0929.eq5a} \norm{D\vec u_2}_{L^2(B_s(x))}\le C\norm{\vec f}_{L^{2n/(n+2)}(B_s(x))}\le Cs^{n/2-1+\mu_1}\norm{\vec f}_{L^{q}(B_R(x_0))}, \end{equation} where $q\in \big(\frac{n}{2}, \frac{n}{2-\mu}\big)$, $\mu_1=2-n/q$, and $C=C(n,\lambda, q)$. For $0<r<s$, we obtain by Lemma \ref{1227.lem1} that \begin{align} \nonumber \int_{B_r(x)}\abs{D\vec u}^2\,dx&\le 2\int_{B_r(x)}\abs{D\vec u_1}^2\,dx+2\int_{B_r(x)}\abs{D\vec u_2}^2\,dx\\ \nonumber &\le C\left(\frac{r}{s}\right)^{n-2+2\mu}\int_{B_s(x)}\abs{D\vec u_1}^2\,dx+2\int_{B_s(x)}\abs{D\vec u_2}^2\,dx\\ \label{0929.eq5} &\le C\left(\frac{r}{s}\right)^{n-2+2\mu}\int_{B_s(x)}\abs{D\vec u}^2\,dx+C\int_{B_s(x)}\abs{D\vec u_2}^2\,dx, \end{align} where $C=C(n,\lambda,A_1)$. Therefore we get from \eqref{0929.eq5a} and \eqref{0929.eq5} that \begin{equation*} \int_{B_r(x)}\abs{D\vec u}^2\,dx\le C\left(\frac{r}{s}\right)^{n-2+2\mu}\int_{B_s(x)}\abs{D\vec u}^2\,dx+Cs^{n-2+2\mu_1}\norm{\vec f}^2_{L^{q}(B_R(x_0))}. \end{equation*} Then by \cite[Lemma 2.1, p. 86]{MR0717034}, we have \begin{equation*} \int_{B_r(x)}\abs{D\vec u}^2\,dx\le C\left(\frac{r}{R}\right)^{n-2+2\mu_1}\int_\Omega \abs{D\vec u}^2\,dx+Cr^{n-2+2\mu_1}\norm{\vec f}_{L^{q}(B_R(x_0))}^2 \end{equation*} for any $x\in B_{R/2}(x_0)$ and $r\in (0,R/2)$. From this together with Morrey-Campanato's theorem, we prove \eqref{1229.eq1}. To see \eqref{1229.eq1a}, assume $\vec f$ is supported in $B_R(x_0)$. Notice from the Sobolev inequality that \[ \norm{\vec u}_{L^2(B_R(x_0))}\le C(n)R\norm{D\vec u}_{L^2(\Omega)}. 
\] Then we obtain by \eqref{1229.eq1} and the above estimate that \begin{align} \nonumber \norm{\vec u}_{L^\infty(B_{R/2}(x_0))}&\le CR^{\mu_1}[\vec u]_{C^{\mu_1}(B_{R/2}(x_0))}+CR^{-n/2}\norm{\vec u}_{L^2(B_R(x_0))}\\ \nonumber &\le CR^{-n/2+1}\norm{D\vec u}_{L^2(\Omega)}+CR^2\norm{\vec f}_{L^{\infty}(B_R(x_0))}, \end{align} and thus, we get the desired estimate from the inequality \eqref{1227.eq1}. The lemma is proved. \end{proof} \section{Proofs of main theorems} \label{1006@sec1} In this section, we prove the main theorems stated in Sections \ref{0110.sec1} and \ref{0110.sec2}. \subsection{Proof of Theorem \ref{1226.thm1}} \label{0204.sec1} \subsubsection{Averaged Green function} \label{0108.sec1} Let $y\in \Omega$ and $\varepsilon>0$ be fixed, but arbitrary. Fix an integer $1\le k\le n$ and let $(\vec v_\varepsilon,\pi_\varepsilon)=(\vec v_{\varepsilon;y,k},\pi_{\varepsilon;y,k})$ be the solution in $W^{1,2}_0(\Omega)^n\times {L}^2_0(\Omega)$ of \begin{equation*} \left\{ \begin{aligned} \sL \vec u+D p=\frac{1}{\abs{\Omega_\varepsilon(y)}}1_{\Omega_\varepsilon(y)} \vec e_k &\quad \text{in }\, \Omega,\\ \operatorname{div} \vec u=0 &\quad \text{in }\, \Omega, \end{aligned} \right. \end{equation*} where $\vec e_k$ is the $k$-th unit vector in $\mathbb R^n$. We define \emph{the averaged Green function} $(\vec G_\varepsilon(\cdot,y), \vec \Pi_\varepsilon(\cdot,y))$ for $\eqref{dp}$ by setting \begin{equation} \label{0102.eq1b} G_\varepsilon^{jk}(\cdot,y)=v_{\varepsilon;y,k}^j \quad \text{and}\quad \Pi_\varepsilon^k(\cdot,y)=\pi_{\varepsilon;y,k}. \end{equation} Then $(\vec G_\varepsilon(\cdot,y), \vec \Pi_\varepsilon(\cdot,y))$ satisfies \begin{equation} \label{0927.eq2a} \int_\Omega a_{\alpha\beta}^{ij}D_\beta G^{jk}_\varepsilon(\cdot,y)D_\alpha \varphi^i\,dx-\int_\Omega \Pi_\varepsilon^k(\cdot,y)\operatorname{div} \vec \varphi\,dx=\fint_{\Omega_{\varepsilon}(y)} \varphi^k\,dx \end{equation} for any $\vec \varphi\in W^{1,2}_0(\Omega)^n$. 
We also obtain by \eqref{1227.eq1} that \begin{equation} \label{0927.eq2b} \norm{\vec \Pi_\varepsilon(\cdot,y)}_{L^2(\Omega)}+\norm{D\vec G_\varepsilon(\cdot,y)}_{L^2(\Omega)}\le C\varepsilon^{(2-n)/2}, \quad \forall \varepsilon>0, \end{equation} where $C=C(n,\lambda,K_0, K_1,R_1)$. The following lemma is an immediate consequence of Lemma \ref{1006@lem1}. \begin{lemma} \label{0102-lem1} Let $y\in \Omega$ and $\varepsilon>0$. \begin{enumerate}[(i)] \item For any $x_0\in \Omega$ and $R\in (0,d_{x_0}]$ satisfying $B_{R}(x_0)\cap B_\varepsilon(y)=\emptyset$, we have \begin{equation*} \int_{B_{R/2}(x_0)}\abs{D\vec G_\varepsilon(x,y)}^2\,dx \le CR^{-2} \int_{B_{R}(x_0)}\abs{\vec G_\varepsilon(x,y)}^2\,dx, \end{equation*} where $C=C(n,\lambda)$. \item Let $R\in (0,2d_y/3]$ and $\varepsilon\in (0,R/4)$. Then we have \begin{equation*} \int_{B_{R}(y)\setminus B_{R/2}(y)}\abs{D\vec G_\varepsilon(x,y)}^2\,dx\le CR^{-2}\int_{B_{3R/2}(y)\setminus B_{R/4}(y)}\abs{\vec G_\varepsilon(x,y)}^2\,dx, \end{equation*} where $C=C(n,\lambda)$. \end{enumerate} \end{lemma} With the preparations in the previous section, we obtain the pointwise estimate of the averaged Green function $\vec G_\varepsilon(\cdot,y)$. \begin{lemma} \label{0929-thm1} There exists a constant $C=C(n,\lambda,K_0, K_1,R_1,\mu, A_1)>0$ such that for any $x,\, y\in \Omega$ satisfying $0<\abs{x-y}<d_y/2$, we have \begin{equation} \label{0929-e2} \abs{\vec G_\varepsilon(x,y)}\le C\abs{x-y}^{2-n}, \quad \forall \varepsilon\in(0,\abs{x-y}/3). \end{equation} \end{lemma} \begin{proof} Let $y\in \Omega$, $R\in (0,d_y)$, and $\varepsilon\in(0,R/2)$. We denote $\vec v_\varepsilon$ to be the $k$-th column of $\vec G_\varepsilon(\cdot,y)$. Assume that $(\vec u,p)\in W^{1,2}_0(\Omega)^n\times L^2_0(\Omega)$ is the solution of \begin{equation} \label{1229.eq2} \left\{ \begin{aligned} \sL^* \vec u+D p=\vec f &\quad \text{in }\, \Omega,\\ \operatorname{div} \vec u=0 &\quad \text{in }\, \Omega, \end{aligned} \right. 
\end{equation} where $f^i(x)= 1_{B_R(y)}\operatorname{sgn} (v_\varepsilon^i(x))$ and $\vec f=(f^1,\ldots,f^n)\in L^\infty(\Omega)^n$. Then by testing with $\vec v_\varepsilon$ in \eqref{1229.eq2}, we have \begin{equation*} \int_\Omega A_{\alpha\beta}D_\beta \vec v_\varepsilon\cdot D_\alpha \vec u\,dx=\int_{B_R(y)} \vec f\cdot \vec v_\varepsilon\,dx. \end{equation*} Similarly, we set $\vec \varphi=\vec u$ in \eqref{0927.eq2a} to obtain \begin{equation*} \int_\Omega A_{\alpha\beta}D_\beta \vec v_\varepsilon\cdot D_\alpha \vec u\,dx=\fint_{B_\varepsilon(y)} u^k\,dx. \end{equation*} From the above two identities, we get \begin{equation} \label{0105.eq0} \int_{B_R(y)}\vec f\cdot \vec v_\varepsilon\,dx=\fint_{B_\varepsilon(y)} u^k\,dx, \end{equation} and thus, by \eqref{1229.eq1a}, we derive \begin{equation} \label{1229.eq2a} \norm{\vec G_\varepsilon(\cdot,y)}_{L^1(B_R(y))}\le CR^2, \quad R\in (0,d_y), \quad \varepsilon\in (0,R/2), \end{equation} where $C=C(n,\lambda,K_0, K_1,R_1,\mu,A_1)$. Now, we are ready to prove the lemma. Let $x,\, y\in \Omega$ satisfy $0<\abs{x-y}<d_y/2$. We write $R:=2\abs{x-y}/3$. Note that if $\varepsilon <R/2$, then $(\vec G_\varepsilon(\cdot,y),\vec \Pi_\varepsilon(\cdot,y))$ satisfies \[ \left\{ \begin{aligned} \sL\vec G_\varepsilon(\cdot,y)+D\vec \Pi_\varepsilon(\cdot,y)=0 &\quad \text{in }\, B_{R}(x),\\ \operatorname{div} \vec G_\varepsilon(\cdot,y)=0 &\quad \text{in }\, B_R(x). \end{aligned} \right. \] Then by Lemma \ref{1227.lem1}, we have \begin{equation*} \abs{\vec G_\varepsilon(x,y)}\le CR^{-n}\norm{\vec G_\varepsilon(\cdot,y)}_{L^1(B_R(x))}\le CR^{-n}\norm{\vec G_\varepsilon(\cdot,y)}_{L^1(B_{3R}(y))}. \end{equation*} This together with \eqref{1229.eq2a} yields \eqref{0929-e2}. The lemma is proved. \end{proof} Based on the pointwise estimate \eqref{0929-e2}, we prove that $\vec G_\varepsilon(\cdot,y)$ and $\vec \Pi_\varepsilon(\cdot,y)$ satisfy the following $L^q$-estimates uniformly in $\varepsilon>0$. 
\begin{lemma} \label{1229-lem1} Let $y\in \Omega$, $R\in (0,d_y]$, and $\varepsilon>0$. Then we have \begin{equation} \label{1229.eq2b} \norm{\vec G_\varepsilon(\cdot,y)}_{L^{2n/(n-2)}(\Omega\setminus B_R(y))}+\norm{D\vec G_\varepsilon(\cdot,y)}_{L^2(\Omega\setminus B_R(y))}\le CR^{(2-n)/2}. \end{equation} Also, we obtain \begin{align} \label{0103.eq3} \abs{\set{x\in \Omega:\abs{\vec G_\varepsilon(x,y)}>t}}&\le Ct^{-n/(n-2)}, \quad \forall t>d_y^{2-n},\\ \label{0103.eq3a} \abs{\set{x\in \Omega:\abs{D_x\vec G_\varepsilon(x,y)}>t}}&\le Ct^{-n/(n-1)}, \quad \forall t>d_y^{1-n}. \end{align} Moreover, we derive the following uniform $L^q$ estimates: \begin{align} \label{1229.eq2c} \norm{\vec G_\varepsilon(\cdot,y)}_{L^q(B_R(y))}\le C_qR^{2-n+n/q}, &\quad q\in [1,n/(n-2)),\\ \label{1229.eq2d} \norm{D\vec G_\varepsilon(\cdot,y)}_{L^q(B_R(y))}\le C_qR^{1-n+n/q}, &\quad q\in [1,n/(n-1)),\\ \label{0103.eq6} \norm{\vec \Pi_\varepsilon(\cdot,y)}_{L^q(\Omega)}\le C_{y,q}, &\quad q\in [1,n/(n-1)). \end{align} In the above, $C=C(n,\lambda,K_0,K_1,R_1, \mu, A_1)$, $C_q=C_q(n,\lambda,K_0, K_1,R_1, \mu,A_1,q)$, and $C_{y,q}=C_{y,q}(n,\lambda,K_0, K_1,R_1, \mu, A_1,q,d_y)$. \end{lemma} \begin{proof} Recall the notation \eqref{0102.eq1b}. We first prove the estimate \eqref{1229.eq2b}. From the obvious fact that $d_y/3$ and $d_y$ are comparable to each other, we only need to prove the estimate \eqref{1229.eq2b} for $R\in (0,d_y/3]$. If $\varepsilon\ge R/12$, then by \eqref{0927.eq2b} and the Sobolev inequality, we have \begin{equation} \label{K1230.eq1} \norm{\vec G_\varepsilon(\cdot,y)}_{L^{2n/(n-2)}(\Omega\setminus B_R(y))}+\norm{D\vec G_\varepsilon(\cdot,y)}_{L^2(\Omega\setminus B_R(y))}\le C\norm{D\vec G_\varepsilon(\cdot,y)}_{L^2(\Omega)}\le CR^{(2-n)/2}. 
\end{equation} On the other hand, if $\varepsilon\in (0,R/12)$, then by setting $\vec \varphi=\eta^2\vec v_\varepsilon$ in \eqref{0927.eq2a}, where $\eta$ is a smooth function satisfying \begin{equation*} 0\le \eta\le1, \quad \eta\equiv 1 \,\text{ on }\, \mathbb R^n\setminus B_{R}(y), \quad \eta\equiv 0 \,\text{ on }\,B_{R/2}(y), \quad \abs{D\eta}\le CR^{-1}, \end{equation*} we have \begin{equation} \label{K1229.eq3a} \int_\Omega \eta^2\abs{D\vec v_\varepsilon}^2\,dx\le C\int_\Omega \abs{D\eta}^2\abs{\vec v_\varepsilon}^2\,dx+C\int_{D}\abs{\pi_\varepsilon-(\pi_\varepsilon)_D}^2\,dx, \end{equation} where $D=B_R(y)\setminus B_{R/2}(y)$. By Remark \ref{K0122.rmk2}, there exists a function $\vec \phi_\varepsilon\in W^{1,2}_0(D)^n$ such that \begin{equation*} \operatorname{div} \vec \phi_\varepsilon=\pi_\varepsilon-(\pi_\varepsilon)_{D} \,\text{ in }\, D, \quad \norm{D\vec \phi_\varepsilon}_{L^2(D)}\le C\norm{\pi_\varepsilon-(\pi_\varepsilon)_D}_{L^2(D)}, \end{equation*} where $C=C(n)$. Therefore, by setting $\vec \varphi=\vec \phi_\varepsilon $ in \eqref{0927.eq2a}, we get from Lemma \ref{0102-lem1} (ii) that \begin{equation} \label{1229.eq3b} \int_{D}\abs{\pi_\varepsilon-(\pi_\varepsilon)_D}^2\,dx\le C\int_{D}\abs{D\vec v_\varepsilon}^2\,dx\le CR^{-2}\int_{B_{3R/2}(y)\setminus B_{R/4}(y)}\abs{\vec v_\varepsilon}^2\,dx. \end{equation} Then by combining \eqref{K1229.eq3a} and \eqref{1229.eq3b}, we find that \begin{equation} \label{1229.eq3c} \int_\Omega \eta^2|D\vec v_\varepsilon|^2\,dx\le C R^{-2}\int_{B_{3R/2}(y)\setminus B_{R/4}(y)} \abs{\vec v_\varepsilon}^2\,dx \le CR^{2-n}, \end{equation} where we used Lemma \ref{0929-thm1} in the last inequality. 
Also, by using the fact that \[ \norm{\eta \vec v_\varepsilon}_{L^{2n/(n-2)}(\Omega)}\le C\norm{D(\eta \vec v_\varepsilon)}_{L^2(\Omega)}\le C\norm{\eta D\vec v_\varepsilon}_{L^2(\Omega)}+C\norm{D\eta \vec v_\varepsilon}_{L^2(\Omega)}, \] the inequality \eqref{1229.eq3c} implies \[ \norm{\vec G_\varepsilon(\cdot,y)}_{L^{2n/(n-2)}(\Omega\setminus B_R(y))}+\norm{D\vec G_\varepsilon(\cdot,y)}_{L^2(\Omega\setminus B_R(y))}\le CR^{(2-n)/2}. \] This together with \eqref{K1230.eq1} gives \eqref{1229.eq2b} for $R\in (0,d_y/3]$. Now, let $A_t=\set{x\in \Omega:\abs{\vec G_\varepsilon(x,y)}>t}$ and choose $t=R^{2-n}>d_y^{2-n}$. Then by \eqref{1229.eq2b}, we have \begin{equation*} \abs{A_t\setminus B_R(y)}\le t^{-2n/(n-2)}\int_{A_t\setminus B_R(y)}\abs{\vec G_\varepsilon(x,y)}^{2n/(n-2)}\,dx\le C t^{-n/(n-2)}. \end{equation*} From this inequality and the fact that $\abs{A_t\cap B_R(y)}\le CR^n=Ct^{-n/(n-2)}$, we get \eqref{0103.eq3}. Let us fix $q\in [1,n/(n-2))$. Note that \begin{align} \nonumber \int_{B_R(y)}\abs{\vec G_\varepsilon(x,y)}^q\,dx&=\int_{B_R(y)\cap A_t^c}\abs{\vec G_\varepsilon(x,y)}^q\,dx+\int_{B_R(y)\cap A_t}\abs{\vec G_\varepsilon(x,y)}^q\,dx\\ \label{0103.eq5} &\le C R^{(2-n)q+n}+\int_{A_t}\abs{\vec G_\varepsilon(x,y)}^q\,dx, \end{align} where $t=R^{2-n}>d^{2-n}_y$. From \eqref{0103.eq3} it follows that \begin{align} \nonumber \int_{A_t}\abs{\vec G_\varepsilon(x,y)}^q\,dx&=q\int_0^\infty s^{q-1}\bigabs{\set{x\in \Omega:\abs{\vec G_\varepsilon(x,y)}>\max(t,s)}}\,ds\\ \nonumber &\le C_q t^{-n/(n-2)}\int_0^t s^{q-1}\,ds+C_q\int_t^\infty s^{q-1-n/(n-2)}\,ds\\ \label{0103.eq5a} &\le C_qR^{(2-n)q+n}, \end{align} where $C_q=C_q(n,\lambda,K_0, K_1,R_1,\mu,A_1,q)$. Therefore, by combining \eqref{0103.eq5} and \eqref{0103.eq5a}, we obtain \eqref{1229.eq2c}. Moreover, by utilizing \eqref{1229.eq2b}, and following the same steps as in the above, we get \eqref{0103.eq3a} and \eqref{1229.eq2d}. It only remains to establish \eqref{0103.eq6}. 
From H\"older's inequality, we only need to prove the inequality with $q\in (1,n/(n-1))$. Let $q\in (1,n/(n-1))$ and $q'=q/(q-1)$, and denote \[ w:= \operatorname{sgn} (\pi_\varepsilon) \abs{\pi_\varepsilon}^{q-1}. \] Then we have \[ w\in L^{q'}(\Omega), \quad n<q'<\infty. \] Therefore by Remark \ref{K0122.rmk2} and the Sobolev inequality, there exists a function $\vec \phi\in W^{1,q'}_0(\Omega)^n$ such that \begin{equation} \label{0103.eq6b} \begin{aligned} &\operatorname{div} \vec \phi=w-(w)_{\Omega} \,\text{ in }\, \Omega,\\ & \norm{\vec \phi}_{L^\infty(\Omega)}\le C\|D\vec \phi\|_{L^{q'}(\Omega)}\le C\norm{w}_{L^{q'}(\Omega)}. \end{aligned} \end{equation} We observe that \begin{equation} \label{0103.eq6c} \int_\Omega \pi_\varepsilon \operatorname{div} \vec \phi\,dx=\int_{\Omega}\pi_\varepsilon (w-(w)_{\Omega})\,dx =\int_{\Omega}\pi_\varepsilon w\,dx=\int_{\Omega}\abs{w}^{q'}\,dx. \end{equation} By setting $\vec \varphi=\vec \phi$ in \eqref{0927.eq2a}, we get from \eqref{0103.eq6b} and \eqref{0103.eq6c} that \begin{equation} \label{0105.eq2} \int_{\Omega}\abs{w}^{q'}\,dx\le C\big(1+\norm{D\vec v_\varepsilon}_{L^q(\Omega)}\big)\norm{w}_{L^{q'}(\Omega)}. \end{equation} Notice from \eqref{1229.eq2b} and \eqref{1229.eq2d} that \[ \norm{D\vec v_\varepsilon}_{L^q(\Omega)}\le C_{y,q} \] for all $\varepsilon>0$, where $C_{y,q}=C_{y,q}(n,\lambda,K_0, K_1,R_1, \mu,A_1,q,d_y)$. This together with \eqref{0105.eq2} gives \eqref{0103.eq6}. The lemma is proved. \end{proof} \subsubsection{Construction of the Green function} \label{0108.sec2} Let $y\in \Omega$ be fixed, but arbitrary. 
Notice from Lemma \ref{1229-lem1} and the weak compactness theorem that there exist a sequence $\set{\varepsilon_\rho}_{\rho=1}^\infty$ tending to zero and functions $\vec G(\cdot,y)$ and $\hat{\vec G}(\cdot,y)$ such that \begin{align} \nonumber &\vec G_{\varepsilon_\rho}(\cdot,y) \rightharpoonup \vec G(\cdot,y) \quad \text{weakly in }\, W^{1,2}(\Omega \setminus \overline{B_{d_y/2}(y)})^{n\times n},\\ \label{0103.e1a} &\vec G_{\varepsilon_\rho}(\cdot,y) \rightharpoonup \hat{\vec G}(\cdot,y) \quad \text{weakly in }\, W^{1,q}(B_{d_y}(y))^{n\times n}, \end{align} where $q\in (1,n/(n-1))$. Since $\vec G(\cdot,y)\equiv \hat{\vec G}(\cdot,y)$ on $B_{d_y}(y)\setminus \overline{B_{d_y/2}(y)}$, we shall extend $\vec G(\cdot,y)$ to entire $\Omega$ by setting $\vec G(\cdot,y)\equiv \hat{\vec G}(\cdot,y)$ on $\overline{B_{d_y/2}(y)}$. By applying a diagonalization process and passing to a subsequence, if necessary, we may assume that \begin{equation} \label{0103.e1b} \vec G_{\varepsilon_\rho}(\cdot,y) \rightharpoonup \vec G(\cdot,y) \quad \text{weakly in }\, W^{1,2}(\Omega \setminus \overline{B_R(y)})^{n\times n}, \quad \forall R\in (0,d_y]. \end{equation} Indeed, if we consider a sequence $\{R_i\}_{i=1}^\infty$ satisfying $R_i\in (0, d_y]$ and $R_i \searrow 0$, then for each $i\in \{1,2,\ldots\}$, there exists a subsequence of $\{\vec G_{\varepsilon_\rho}(\cdot,y)\}$, denoted by $\big\{\vec G_{\varepsilon_{\rho_{i,j}}}(\cdot,y)\big\}$, such that $$ \big\{\vec G_{\varepsilon_{\rho_{i+1,j}}}(\cdot,y)\big\}\subset \big\{\vec G_{\varepsilon_{\rho_{i,j}}}(\cdot,y)\big\} $$ and $$ \vec G_{\varepsilon_{\rho_{i,j}}}(\cdot,y) \rightharpoonup \vec G(\cdot,y) \quad \text{weakly in }\, W^{1,2}(\Omega \setminus \overline{B_{R_i}(y)})^{n\times n} \quad \text{as }\, j\to \infty. $$ Taking the subsequence as $\big\{\vec G_{\varepsilon_{\rho_{i,i}}}(\cdot,y)\big\}$, we see that \eqref{0103.e1b} holds. 
By \eqref{0103.eq6}, there exists a function $\vec \Pi(\cdot,y)\in L^q_0(\Omega)^n$ such that, by passing to a subsequence, \begin{equation} \label{0103.eq1c} \vec \Pi_{\varepsilon_\rho}(\cdot,y) \rightharpoonup {\vec \Pi}(\cdot,y) \quad \text{weakly in }\, L^q(\Omega)^n. \end{equation} We shall now claim that $(\vec G(x,y),\vec \Pi(x,y))$ satisfies the properties a) -- c) in Definition \ref{0110.def} so that $(\vec G(x,y),\vec \Pi(x,y))$ is indeed the Green function for $\eqref{dp}$. Notice from \eqref{0103.e1b} that for any $\zeta\in C^\infty_0(\Omega)$ satisfying $\zeta\equiv 1$ on $B_R(y)$, where $R\in (0,d_y)$, we have \[ (1-\zeta)\vec G_{\varepsilon_\rho}(\cdot,y)\rightharpoonup (1-\zeta)\vec G(\cdot,y) \quad \text{weakly in }\, W^{1,2}(\Omega)^{n\times n}. \] Since $W^{1,2}_0(\Omega)$ is weakly closed in $W^{1,2}(\Omega)$, we have $(1-\zeta)\vec G(\cdot,y)\in W^{1,2}_0(\Omega)^{n\times n}$, and thus the property a) is verified. Let $\eta$ be a smooth cut-off function satisfying $\eta\equiv 1$ on $B_{d_y/2}(y)$ and $\operatorname{supp} \eta\subset B_{d_y}(y)$. Then by \eqref{0927.eq2a}, \eqref{0103.e1a} -- \eqref{0103.eq1c}, we obtain for $\vec \varphi\in C^\infty_0(\Omega)^n$ that \begin{align} \nonumber \varphi^k(y)=&\lim_{\rho\to \infty}\fint_{\Omega_{\varepsilon_\rho}(y)} \varphi^k\\ \nonumber =&\lim_{\rho\to \infty}\left(\int_\Omega a^{ij}_{\alpha\beta}D_\beta G^{jk}_{\varepsilon_\rho}(\cdot,y)D_\alpha(\eta \varphi^i)+\int_\Omega a^{ij}_{\alpha\beta}D_\beta G^{jk}_{\varepsilon_\rho}(\cdot,y)D_\alpha((1-\eta) \varphi^i)\right)\\ \nonumber &-\lim_{\rho\to \infty}\int_\Omega \Pi^k_{\varepsilon_\rho}(\cdot,y)\operatorname{div} \vec\varphi\\ \nonumber =&\int_\Omega a^{ij}_{\alpha\beta}D_\beta G^{jk}(\cdot,y)D_\alpha \varphi^i-\int_\Omega \Pi^k(\cdot,y)\operatorname{div} \vec \varphi. \end{align} Similarly, we get \[ \int_\Omega \phi(x) \operatorname{div}_x \vec G(x,y)\,dx=0, \quad \forall \phi\in C^\infty(\Omega). 
\] From the above two identities, the property b) is satisfied. Finally, if $(\vec u,p)\in W^{1,2}_0(\Omega)^n\times L^2_0(\Omega)$ is the weak solution of the problem \eqref{dps}, then by setting $\vec \varphi$ to be the $k$-th column of $\vec G_{\varepsilon_\rho}(\cdot,y)$ in \eqref{1218.eq1} and setting $\vec \varphi=\vec u$ in \eqref{0927.eq2a}, we have (see e.g., Eq. \eqref{0105.eq0}) \begin{equation} \label{160906@eq10} \fint_{\Omega_{\varepsilon_\rho}(y)} \vec u=\int_\Omega \vec G_{\varepsilon_\rho}(\cdot,y)^{\operatorname{tr}}\vec f-\int_\Omega D_\alpha \vec G_{\varepsilon_\rho}(\cdot,y)^{\operatorname{tr}}\vec f_\alpha-\int_\Omega \vec \Pi_{\varepsilon_\rho}(\cdot,y)g. \end{equation} By letting $\rho\to \infty$ in the above identity, we find that $(\vec G(x,y),\vec \Pi(x,y))$ satisfies the property c) in Definition \ref{0110.def}. Next, let $y\in \Omega$ and $R\in (0,d_y]$. Let $\vec v$ and $\vec v_{\varepsilon}$ be the $k$-th column of $\vec G(\cdot,y)$ and $\vec G_\varepsilon(\cdot,y)$, respectively. Then for any $\vec g\in C^\infty_0(B_R(y))^n$, we obtain by \eqref{1229.eq2c} and \eqref{0103.e1a} that \begin{equation*} \Abs{\int_{B_R(y)} \vec v\cdot \vec g\,dx}=\lim_{\rho\to \infty}\Abs{\int_{B_R(y)}\vec v_{\varepsilon_\rho}\cdot \vec g\,dx}\le C_q R^{2-n+n/q}\norm{\vec g}_{L^{q'}(B_R(y))}, \end{equation*} where $q\in [1,n/(n-2))$ and $q'=q/(q-1)$. Therefore, by a duality argument, we obtain the estimate iv) in Theorem \ref{1226.thm1}. Similarly, from Lemma \ref{1229-lem1}, \eqref{0103.e1a}, and \eqref{0103.e1b}, we have the estimates i) and v) in the theorem. Also, ii) and iii) are deduced from i) in the same way as \eqref{0103.eq3} and \eqref{0103.eq3a} are deduced from \eqref{1229.eq2b}. Therefore, $\vec G(x,y)$ satisfies the estimates i) -- v) in Theorem \ref{1226.thm1}. For $x, y\in \Omega$ satisfying $0<\abs{x-y}<d_y/2$, set $r:=\abs{x-y}/4$. 
Notice from the property b) in Definition \ref{0110.def} that $(\vec G(\cdot,y), \vec \Pi(\cdot,y))$ satisfies \[ \left\{ \begin{aligned} \sL\vec G(\cdot,y)+D\vec \Pi(\cdot,y)=0 &\quad \text{in }\, B_{r}(x),\\ \operatorname{div} \vec G(\cdot,y)=0 &\quad \text{in }\, B_{r}(x). \end{aligned} \right. \] Then by Lemma \ref{1227.lem1} and H\"older's inequality, we have \begin{equation*} \abs{\vec G(x,y)}\le Cr^{(2-n)/2}\norm{\vec G(\cdot,y)}_{L^{2n/(n-2)}(B_{2r}(x))}\le Cr^{(2-n)/2}\norm{\vec G(\cdot,y)}_{L^{2n/(n-2)}(\Omega \setminus B_{r}(y))}. \end{equation*} This together with the estimate i) in Theorem \ref{1226.thm1} implies \[ \abs{\vec G(x,y)}\le C\abs{x-y}^{2-n}, \quad 0<\abs{x-y}<d_y/2. \] \begin{lemma} \label{0108.lem1} For each compact set $K\subset \Omega \setminus \set{y}$, there is a subsequence of $\{\vec G_{\varepsilon_\rho}(\cdot,y)\}$ that converges to $\vec G(\cdot,y)$ uniformly on $K$. \end{lemma} \begin{proof} Let $x\in \Omega$ and $R\in (0,d_x]$ satisfying $\overline{B_R(x)}\subset \Omega \setminus \set{y}$. Notice that there exists $\varepsilon_B>0$ such that for $\varepsilon<\varepsilon_B$, we have \[ \left\{ \begin{aligned} \sL\vec G_\varepsilon(\cdot,y)+D\vec \Pi_\varepsilon(\cdot,y)=0 &\quad \text{in }\, B_R(x),\\ \operatorname{div} \vec G_\varepsilon(\cdot,y)=0 &\quad \text{in }\, B_R(x). \end{aligned} \right. \] By $(\bf{A1})$ and \eqref{1229.eq2b}, $\set{\vec G_\varepsilon(\cdot,y)}_{\varepsilon\le\varepsilon_B}$ is equicontinuous on $\overline{B_{R/2}(x)}$. Also, it follows from Lemma \ref{1227.lem1} that $\set{\vec G_\varepsilon(\cdot,y)}_{\varepsilon\le\varepsilon_B}$ is uniformly bounded on $\overline{B_{R/2}(x)}$. By the Arzel\`a-Ascoli theorem, we obtain the desired conclusion. 
\end{proof} \subsubsection{Proof of the identity \eqref{1226.eq1a}} For any $x\in \Omega$ and $\sigma>0$, we define the averaged Green function $(\vec G^*_{\sigma}(\cdot,x), \vec \Pi_\sigma^*(\cdot,x))$ for $\eqref{dps}$ by letting its $l$-th column be the unique weak solution in $W^{1,2}_0(\Omega)^n\times L^2_0(\Omega)$ of the problem \[ \left\{ \begin{aligned} \sL^*\vec u+Dp=\frac{1}{\abs{\Omega_\sigma(x)}}1_{\Omega_\sigma(x)}\vec e_l &\quad \text{in }\, \Omega,\\ \operatorname{div} \vec u=0 &\quad \text{in }\, \Omega, \end{aligned} \right. \] where $\vec e_l$ is the $l$-th unit vector in $\mathbb R^n$. Then by following the same argument as in Sections \ref{0108.sec1} and \ref{0108.sec2}, there exist a sequence $\set{\sigma_\nu}_{\nu=1}^\infty$ tending to zero and the Green function $(\vec G^*(\cdot,x), \vec \Pi^*(\cdot,x))$ for $\eqref{dps}$ satisfying the counterparts of \eqref{0103.e1a}, \eqref{0103.e1b}, \eqref{0103.eq1c}, and Lemma \ref{0108.lem1}. Now, let $x,\,y \in \Omega$ and $x\neq y$. We then obtain for $\varepsilon\in (0,d_y]$ and $\sigma\in (0,d_x]$ that \begin{equation} \label{jk.eq2b} \fint_{B_\varepsilon(y)} (G^*_{\sigma})^{kl}(\cdot,x)=\int_\Omega a^{ij}_{\alpha\beta}D_\beta G^{jk}_\varepsilon(\cdot,y)D_\alpha \big((G^*_\sigma)^{il}(\cdot,x)\big)=\fint_{B_{\sigma}(x)}G_\varepsilon^{lk}(\cdot,y). \end{equation} We define \[ I^{kl}_{\rho,\nu}:=\fint_{B_{\varepsilon_\rho}(y)}(G^*_{\sigma_{\nu}})^{kl}(\cdot,x)=\fint_{B_{\sigma_\nu}(x)}G^{lk}_{\varepsilon_\rho}(\cdot,y). \] Then by the continuity of $\vec G_{\varepsilon_\rho}(\cdot,y)$ and Lemma \ref{0108.lem1}, we have \[ \lim_{\rho\to \infty}\lim_{\nu\to \infty}I^{kl}_{\rho,\nu}=\lim_{\rho\to \infty}G^{lk}_{\varepsilon_\rho}(x,y)=G^{lk}(x,y). \] Similarly, we get \[ \lim_{\rho\to \infty}\lim_{\nu\to \infty}I^{kl}_{\rho,\nu}=\lim_{\rho\to \infty}\fint_{\Omega_{\varepsilon_\rho}(y)}(G^*)^{kl}(\cdot,x)=(G^*)^{kl}(y,x). 
\] We have thus shown that \[ G^{lk}(x,y)=(G^*)^{kl}(y,x), \quad \forall x,\, y\in \Omega, \quad x\neq y, \] which gives the identity \eqref{1226.eq1a}. Therefore, we get from \eqref{jk.eq2b} that \begin{align*} G^{lk}_\varepsilon(x,y)&=\lim_{\nu\to \infty}\fint_{B_{\sigma_\nu}(x)}G^{lk}_\varepsilon(\cdot,y)=\lim_{\nu\to\infty}\fint_{B_\varepsilon(y)}(G^*_{\sigma_\nu})^{kl}(\cdot,x)\\ &=\fint_{B_\varepsilon(y)}(G^*)^{kl}(\cdot,x)=\fint_{B_\varepsilon(y)}G^{lk}(x,\cdot), \quad \varepsilon\in (0,d_y], \end{align*} and \begin{equation} \label{0109.eq2a} \lim_{\varepsilon\to 0}G^{lk}_\varepsilon(x,y)=G^{lk}(x,y), \quad \forall x,\,y\in \Omega,\quad x\neq y. \end{equation} The theorem is proved. $\blacksquare$ \subsection{Proof of Theorem \ref{0110.thm1}} The proof is based on $L^q$-estimates for Stokes systems with $\mathrm{VMO}$ coefficients. In this proof, we assume that $x_0\in \Omega$ and $0<R\le \min\{d_{x_0},1\}$, and denote $B_r=B_r(x_0)$ for $r>0$. \begin{lemma} \label{170112@lem1} Let $q>n$, $0<\rho<r\le R\le 1$, and $(\vec v,b)\in W^{1,q}(B_r)^n\times L^q(B_r)$ satisfy $$ \left\{ \begin{aligned} \sL\vec v+Db=0 \quad \text{in }\, B_r,\\ \operatorname{div} \vec v=0 \quad \text{in }\, B_r, \end{aligned} \right. $$ where the coefficients of $\sL$ belong to the class of $\mathrm{VMO}$. Then we have $$ \|D\vec v\|_{L^q(B_{\rho})}+\frac{1}{r-\rho}\|\vec v\|_{L^q(B_\rho)}\le \frac{C}{r-\rho}\left(\|D\vec v\|_{L^{nq/(n+q)}(B_{r})}+\frac{1}{r-\rho}\|\vec v\|_{L^{nq/(n+q)}(B_{r})}\right), $$ where $C$ depends on $n$, $\lambda$, $q$, and the $\mathrm{VMO}$ modulus of the coefficients. 
\end{lemma} \begin{proof} Let $\tau=(\rho+r)/2$ and $\eta$ be a smooth function in $\mathbb R^n$ such that $$ 0\le \eta\le 1, \quad \eta\equiv 1 \,\text{ on }\, B_{\rho}, \quad \operatorname{supp} \eta\subset B_{\tau}, \quad |D\eta|\le C(r-\rho)^{-1}. $$ Denote $b_0=(b)_{B_r}$ and observe that $(\eta\vec v,\eta (b-b_0))$ satisfies $$ \left\{ \begin{aligned} \sL(\eta \vec v)+D(\eta (b-b_0))=(b-b_0)D\eta-A_{\alpha\beta} D_\beta \vec v D_\alpha \eta-D_\alpha(A_{\alpha\beta}D_\beta \eta \vec v) &\quad \text{in }\, B_r,\\ \operatorname{div} (\eta \vec v)=D\eta \cdot \vec v &\quad \text{in }\, B_r,\\ \eta \vec v=0 &\quad \text{on }\partial B_r. \end{aligned} \right. $$ By Corollary \ref{0129.cor2} with scaling, we have $$ \|D\vec v\|_{L^q(B_\rho)}\le \frac{C}{r-\rho}\big(\|b-b_0\|_{L^{nq/(n+q)}(B_r)}+\|D\vec v\|_{L^{nq/(n+q)}(B_r)}+\|\vec v\|_{L^q(B_\tau)}\big), $$ where $C$ depends on $n$, $\lambda$, $q$, and the $\mathrm{VMO}$ modulus of the coefficients. Note that \begin{equation} \label{170112@eq10} \|\vec v\|_{L^q(B_{r_1})}\le \frac{C}{r_1}\|\vec v\|_{L^{nq/(n+q)}(B_{r_1})}+C\|D\vec v\|_{L^{nq/(n+q)}(B_{r_1})} \end{equation} for $0<r_1\le r$. Combining the above two estimates, we have \begin{equation} \label{170112@eq1} \begin{aligned} &\|D\vec v\|_{L^q(B_{\rho})}+\frac{1}{r-\rho}\|\vec v\|_{L^q(B_\rho)}\\ &\quad \le \frac{C}{r-\rho}\left(\|b-b_0\|_{L^{nq/(n+q)}(B_r)}+\|D\vec v\|_{L^{nq/(n+q)}(B_{r})}+\frac{1}{r-\rho}\|\vec v\|_{L^{nq/(n+q)}(B_{r})}\right). \end{aligned} \end{equation} Set $s=nq/(n+q)$ and $\tilde{b}=\operatorname{sgn}(b-b_0)|b-b_0|^{s-1}\in L^{s/(s-1)}(B_r)$. There exists $\vec \phi\in W^{1,s/(s-1)}_0(B_r)^n$ such that (see Remark \ref{K0122.rmk2}) $$ \operatorname{div} \vec \phi= \tilde{b}-(\tilde{b})_{B_r} \, \text{ in }\, B_r, \quad \|D\vec \phi\|_{L^{s/(s-1)}(B_r)}\le C(n,q)\|\tilde{b}\|_{L^{s/(s-1)}(B_r)}. 
$$ Using $\vec \phi$ as a test function, we obtain $$ \int_{B_r}|b-b_0|^s\,dx=\int_{B_r}(b-b_0)\operatorname{div} \vec \phi\,dx=\int_\Omega A_{\alpha\beta}D_\beta \vec v\cdot D_\alpha \vec \phi\,dx, $$ which implies that $$ \|b-b_0\|_{L^s(B_r)}^s\le C(n,\lambda,q)\|D\vec v\|_{L^s(B_r)}\|b-b_0\|_{L^s(B_r)}^{s-1}. $$ From this together with \eqref{170112@eq1}, we get the desired estimate. \end{proof} Now we are ready to prove Theorem \ref{0110.thm1}. Let $(\vec u, p)\in W^{1,2}(B_R)^n\times L^2(B_R)$ satisfy \eqref{160907@eq2}. Let $q>n$, $0<r\le R$, and $\rho=r/4$. Set $$ q_i=\frac{nq}{n+qi}, \quad r_i=\rho+\frac{ri}{4m}, \quad i\in \{0,\ldots,m\}, $$ where $m$ is the smallest integer such that $m\ge n(1/2-1/q)$. Then by applying Lemma \ref{170112@lem1} iteratively, we see that $(\vec u, p)\in W^{1,q}(B_\rho)^n\times L^q(B_\rho)$ and $$ \|D\vec u\|_{L^q(B_\rho)}+\frac{4m}{r}\|\vec u\|_{L^q(B_\rho)}\le \left(\frac{Cm}{r}\right)^m\left(\|D\vec u\|_{L^{q_m}(B_{r_m})}+\frac{4m}{r}\|\vec u\|_{L^{q_m}(B_{r_m})}\right). $$ Using H\"older's inequality and Lemma \ref{1006@lem1}, we have \begin{align*} \|D\vec u\|_{L^q(B_{r/4})}+\frac{1}{r}\|\vec u\|_{L^q(B_{r/4})}&\le \left(Cm\right)^mr^{n(1/q-1/2)}\left(\|D\vec u\|_{L^{2}(B_{r/2})}+\frac{1}{r}\|\vec u\|_{L^{2}(B_{r/2})}\right)\\ &\le \frac{Cr^{n(1/q-1/2)}}{r}\|\vec u\|_{L^2(B_r)}. \end{align*} By the Sobolev inequality with scaling, we get $$ [\vec u]_{C^{1-n/q}(B_{r/4}(x_0))}\le Cr^{-1+n/q}\left(\fint_{B_r(x_0)}|\vec u|^2\,dx\right)^{1/2}, $$ where $C$ depends on $n$, $\lambda$, $q$, and the $\mathrm{VMO}$ modulus of the coefficients. Since the above inequality holds for all $x_0\in \Omega$ and $0<r\le R\le \min\{d_{x_0},1\}$, we conclude that $$ [\vec u]_{C^{1-n/q}(B_{R/2}(x_0))}\le CR^{-1+n/q}\left(\fint_{B_R(x_0)}|\vec u|^2\,dx\right)^{1/2}. $$ This completes the proof of Theorem \ref{0110.thm1}. 
$\blacksquare$ \subsection{Proof of Theorem \ref{1226.thm2}} For $y\in \Omega$ and $\varepsilon>0$, let $(\vec G_{\varepsilon}(\cdot,y), \vec \Pi_{\varepsilon}(\cdot,y))$ be the averaged Green function on $\Omega$ as constructed in Section \ref{0108.sec1}, and let $\vec G^{\cdot k}_{\varepsilon}(\cdot,y)$ be the $k$-th column of $\vec G_\varepsilon(\cdot,y)$. Recall that $(\vec G_{\varepsilon}^{\cdot k}(\cdot,y), \Pi^k_\varepsilon(\cdot,y))$ satisfies \[ \left\{ \begin{aligned} \sL\vec G_\varepsilon^{\cdot k}(\cdot,y)+D\Pi_\varepsilon^k(\cdot,y)=\vec g_k &\quad \text{in }\, \Omega,\\ \operatorname{div} \vec G_\varepsilon^{\cdot k}(\cdot,y)=0 &\quad \text{in }\, \Omega, \end{aligned} \right. \] where $$ \vec g_k=\frac{1}{|\Omega_{\varepsilon}(y)|}1_{\Omega_{\varepsilon}(y)}\vec e_k. $$ By $(\bf{A2})$, we obtain for any $x_0\in \Omega$ and $0<r<\operatorname{diam}\Omega$ that $$ \|\vec G^{\cdot k}_\varepsilon(\cdot,y)\|_{L^\infty(\Omega_{r/2}(x_0))}\le A_2\left(r^{-n/2}\|\vec G^{\cdot k}_\varepsilon(\cdot,y)\|_{L^2(\Omega_r(x_0))}+r^2\|\vec g_k\|_{L^\infty(\Omega_r(x_0))}\right). $$ Applying a standard argument (see, for instance, \cite[pp. 80-82]{MR1239172}), we have $$ \|\vec G^{\cdot k}_\varepsilon(\cdot,y)\|_{L^\infty(\Omega_{r/2}(x_0))}\le C\left(r^{-n}\|\vec G^{\cdot k}_\varepsilon(\cdot,y)\|_{L^1(\Omega_r(x_0))}+r^2\|\vec g_k\|_{L^\infty(\Omega_r(x_0))}\right), $$ where $C=C(n,A_2)$. We remark that if $B_r(x_0)\cap B_\varepsilon(y)=\emptyset$, then \begin{equation} \label{170418@eq1a} \|\vec G^{\cdot k}_\varepsilon(\cdot,y)\|_{L^\infty(\Omega_{r/2}(x_0))}\le Cr^{-n}\|\vec G^{\cdot k}_\varepsilon(\cdot,y)\|_{L^1(\Omega_r(x_0))}. \end{equation} Next, let $y\in \Omega$ and $R\in (0,\operatorname{diam}\Omega)$. Assume that $\vec f\in L^\infty(\Omega)^n$ with $\operatorname{supp} \vec f\subset\Omega_R(y)$. 
Let $(\vec u,p)\in W^{1,2}_0(\Omega)^n\times L^2_0(\Omega)$ be the weak solution of the problem \[ \left\{ \begin{aligned} \sL^*\vec u+Dp=\vec f &\quad \text{in }\, \Omega,\\ \operatorname{div} \vec u=0 &\quad \text{in }\, \Omega. \end{aligned} \right. \] By $(\bf{A2})$, the Sobolev inequality, and \eqref{1227.eq1}, we have $$ \|\vec u\|_{L^\infty(\Omega_{R/2}(y))}\le A_2\left(R^{-n/2}\|\vec u\|_{L^2(\Omega_R(y))}+R^2\|\vec f\|_{L^\infty(\Omega_R(y))}\right)\le CR^2\|\vec f\|_{L^\infty(\Omega_R(y))}, $$ where $C=C(n,\lambda,K_0, K_1,R_1,A_2)$. Using this together with the fact that (see, for instance, \eqref{160906@eq10}) $$ \fint_{\Omega_{\varepsilon}(y)}u^k\,dx=\int_{\Omega_R(y)}G^{ik}_{\varepsilon}(\cdot,y)f^i\,dx, $$ we have $$ \left|\int_{\Omega_R(y)}G^{ik}_\varepsilon(\cdot,y)f^i\,dx\right|\le CR^2\|\vec f\|_{L^\infty(\Omega_R(y))} $$ for all $0<\varepsilon<R/2$ and $\vec f\in L^\infty(\Omega_R(y))^n$. Taking $$ f^i(x)=1_{\Omega_R(y)} \operatorname{sgn} (G^{ik}_\varepsilon(x,y)), $$ we have \begin{equation} \label{170418@eq1b} \norm{\vec G^{\cdot k}_\varepsilon(\cdot,y)}_{L^1(\Omega_R(y))}\le CR^2, \quad \forall \varepsilon\in (0,R/2). \end{equation} Now we are ready to prove the theorem. Let $x,\,y\in \Omega$ and $x\neq y$ and take $R=3r=3\abs{x-y}/2$. Then by \eqref{170418@eq1a} and \eqref{170418@eq1b}, we obtain for $\varepsilon\in (0,r)$ that \begin{align} \nonumber \abs{\vec G_\varepsilon(x,y)}&\le Cr^{-n}\norm{\vec G_\varepsilon(\cdot,y)}_{L^1(\Omega_r(x))}\le CR^{-n}\norm{\vec G_\varepsilon(\cdot,y)}_{L^1(\Omega_R(y))}\le CR^{2-n}, \end{align} where $C=C(n,\lambda,K_0, K_1,R_1,A_2)$. Therefore, by letting $\varepsilon\to 0$ and using \eqref{0109.eq2a}, we obtain that \[ \abs{\vec G(x,y)}\le C\abs{x-y}^{2-n}. \] The theorem is proved. $\blacksquare$ \subsection{Proof of Theorem \ref{0110.thm2}} \label{0304.sec1} Let $(\vec u, p)\in W^{1,2}_0(\Omega)^n\times L^2_0(\Omega)$ be the weak solution of \eqref{170418@eq1}. 
By Corollary \ref{0129.cor2}, $\vec u$ is H\"older continuous. To prove the theorem, we first consider the localized estimates for Stokes systems as below. For $y\in \Omega$ and $r>0$, we denote $B_r=B_r(y)$ and $\Omega_r=\Omega_r(y)$.\\ \\ {\bf{Step 1.}} Let $n/(n-1)<q\le t$, $0<\rho<r<\tau$, and $\eta,\,\zeta$ be smooth functions in $\mathbb R^n$ satisfying \begin{align*} 0\le \eta\le 1, \quad \eta\equiv 1 \, \text{ on }\, B_\rho, \quad \operatorname{supp} \eta\subset B_r, \quad |D\eta|\le C(r-\rho)^{-1},\\ 0\le \zeta\le 1, \quad \zeta\equiv 1 \, \text{ on }\, B_r, \quad \operatorname{supp} \zeta\subset B_\tau, \quad |D\zeta|\le C(\tau-r)^{-1}. \end{align*} Then $(\eta\vec u,\eta p)$ is the weak solution of the problem \begin{equation*} \left\{ \begin{aligned} \sL(\eta \vec u)+D(\eta p)=\eta \vec f+pD\eta- \vec\Psi-D_\alpha\vec\Phi_\alpha &\quad \text{in }\Omega,\\ \operatorname{div} \eta \vec u=D\eta \cdot \vec u &\quad \text{in }\Omega,\\ \eta \vec u=0 &\quad \text{on }\, \partial \Omega, \end{aligned} \right. \end{equation*} where \[ \vec \Psi=A_{\alpha\beta}D_\beta \vec uD_\alpha \eta \quad \text{and}\quad \vec \Phi_\alpha=A_{\alpha\beta}D_\beta \eta \vec u. \] By Corollary \ref{0129.cor2}, we have \begin{multline*} \norm{\eta p-(\eta p)_{\Omega}}_{L^{q}(\operatorname{supp} \eta\cap \Omega)}+\norm{D\vec u}_{L^{q}(\Omega_\rho)}\le \frac{C}{r-\rho}\big(\|p\|_{L^{nq/(n+q)}(\Omega_r)}+\norm{D\vec u}_{L^{nq/(n+q)}(\Omega_r)}\big)\\ +C\left( r^{1+n/q}\norm{\vec f}_{L^\infty(\Omega_r)}+\frac{1}{r-\rho}\norm{\vec u}_{L^{q}(\Omega_r)}\right). 
\end{multline*} Using the fact that \begin{align*} \|p\|_{L^{nq/(n+q)}(\Omega_r)}&=\|\zeta p-(\zeta p)_\Omega+(\zeta p)_\Omega\|_{L^{nq/(n+q)}(\Omega_r)}\\ &\le \|\zeta p-(\zeta p)_\Omega\|_{L^{nq/(n+q)}(\operatorname{supp} \zeta\cap \Omega)}+C(n,q)\tau^{1+n/q}|(\zeta p)_\Omega|, \end{align*} we have \begin{align} \nonumber &\norm{\eta p-(\eta p)_{\Omega}}_{L^{q}(\operatorname{supp} \eta\cap \Omega)}+\norm{D\vec u}_{L^{q}(\Omega_\rho)}\\ \nonumber &\le \frac{C}{r-\rho}\big(\|\zeta p-(\zeta p)_{\Omega}\|_{L^{nq/(n+q)}(\operatorname{supp} \zeta\cap \Omega)}+\norm{D\vec u}_{L^{nq/(n+q)}(\Omega_r)}\big)\\ \label{170112@eq5b} &\quad +\frac{C}{r-\rho}\tau^{1+n/q}|(\zeta p)_\Omega|+C\left( r^{1+n/q}\norm{\vec f}_{L^\infty(\Omega_r)}+\frac{1}{r-\rho}\norm{\vec u}_{L^{q}(\Omega_r)}\right), \end{align} where $C$ depends on $n$, $\lambda$, $K_0$, $R_1$, $q$, and the $\mathrm{VMO}$ modulus of the coefficients.\\ \\ {\bf{Step 2.}} Let $t>n$ and $0<\rho<r<\operatorname{diam}\Omega$. Set $$ t_i=\frac{nt}{n+ti}, \quad r_i=\rho+(r-\rho)i/m, \quad i\in\{0,\ldots,m+1\}, $$ where $m$ is the smallest integer such that $t_m\le 2$. Let $\eta_{r_i}$, $i\in\{0,\ldots,m\}$, be smooth functions in $\mathbb R^n$ satisfying $$ 0\le \eta_{r_i}\le 1, \quad \eta_{r_i}\equiv 1 \, \text{ on }\, B_{r_i}, \quad \operatorname{supp} \eta_{r_i}\subset B_{r_{i+1}}, \quad |D\eta_{r_i}|\le \frac{C(n,t)}{r-\rho}. $$ Applying \eqref{170112@eq5b} iteratively, we have \begin{align*} \|D\vec u\|_{L^{t_0}(\Omega_{r_0})}&\le C^m\left(\frac{m}{r-\rho}\right)^m\big(\|\eta_{r_m} p-(\eta_{r_m} p)_\Omega\|_{L^{t_m}(\operatorname{supp} \eta_{r_m}\cap \Omega)}+\|D\vec u\|_{L^{t_m}(\Omega_{r_m})}\big)\\ &\quad +\sum_{i=1}^m C^i\left(\frac{m}{r-\rho}\right)^{i}r^{1+n/t_{i-1}}|(\eta_{r_i} p)_\Omega|\\ &\quad +\sum_{i=1}^mC^i\left(\frac{m}{r-\rho}\right)^{i-1}\left(r^{1+n/t_{i-1}}\|\vec f\|_{L^\infty(\Omega_{r})}+\frac{m}{r-\rho}\|\vec u\|_{L^{t_{i-1}}(\Omega_r)}\right). 
\end{align*} Hence, by H\"older's inequality we obtain \begin{align*} \|D\vec u\|_{L^{t}(\Omega_{\rho})}&\le C_0\left(\frac{r}{r-\rho}\right)^mr^{n(1/t-1/2)}\big(\|p\|_{L^{2}(\Omega_r)}+\|D\vec u\|_{L^{2}(\Omega_{r})}\big)\\ &\quad +C_0\left(\frac{r}{r-\rho}\right)^mr^{1+n/t}\|\vec f\|_{L^\infty(\Omega_r)}+C_0\left(\frac{r}{r-\rho}\right)^mr^{-1}\|\vec u\|_{L^t(\Omega_r)}, \end{align*} where $C_0$ depends on $n$, $\lambda$, $K_0$, $R_1$, and the $\mathrm{VMO}$ modulus of the coefficients. By taking $\rho=r/2$, we have $$ \|D\vec u\|_{L^{t}(\Omega_{r/2})}\le C_0r^{n(1/t-1/2)}\big(\|p\|_{L^{2}(\Omega_r)}+\|D\vec u\|_{L^{2}(\Omega_{r})}\big) +C_0\big(r^{1+n/t}\|\vec f\|_{L^\infty(\Omega_r)}+r^{-1}\|\vec u\|_{L^t(\Omega_r)}\big). $$ We apply Caccioppoli's inequality (see, for instance, \cite{MR2027755}) to the above estimate to get \begin{equation} \label{170112@eq8} \|D\vec u\|_{L^{t}(\Omega_{r/4})}\le C_0\big(r^{1+n/t}\|\vec f\|_{L^\infty(\Omega_r)}+r^{-1}\|\vec u\|_{L^t(\Omega_r)}\big). \end{equation} {\bf{Step 3.}} We extend $\vec u$ to $\mathbb R^n$ by setting $\vec u\equiv 0$ on $\mathbb R^n\setminus \Omega$. For $y\in \Omega$ and $0<r<\operatorname{diam}\Omega$, we obtain by \eqref{170112@eq10} and \eqref{170112@eq8} that \begin{align*} r^{-1}\|\vec u\|_{L^t(B_{r/4})}+\|D\vec u\|_{L^t(B_{r/4})}&\le C\big(r^{1+n/t}\|\vec f\|_{L^\infty(\Omega_r)}+r^{-1}\|\vec u\|_{L^t(B_r)}\big). \end{align*} Using this together with the Sobolev inequality, we have $$ \|\vec u\|_{L^\infty(B_{r/4})}\le C\big(r^{2}\|\vec f\|_{L^\infty(\Omega_r)}+r^{-n/t}\|\vec u\|_{L^t(B_r)}\big). $$ Since the above estimate holds for any $y\in \Omega$ and $0<r<\operatorname{diam}\Omega$, by using a standard argument (see, for instance, \cite[pp. 80-82]{MR1239172}), we derive $$ \norm{\vec u}_{L^\infty(\Omega_{r/2})} \le C\big(r^2\norm{\vec f}_{L^\infty(\Omega_r)}+r^{-n/2}\norm{\vec u}_{L^2(\Omega_r)}\big). $$ This completes the proof of Theorem \ref{0110.thm2}. 
$\blacksquare$ \section{$L^q$-estimates for the Stokes systems} \label{sec_es} In this section, we consider the $L^q$-estimate for the solution to \begin{equation} \label{0123.eq3} \left\{ \begin{aligned} \sL\vec u+Dp=\vec f+D_\alpha\vec f_\alpha&\quad \text{in }\, \Omega,\\ \operatorname{div} \vec u= g &\quad \text{in }\, \Omega. \end{aligned} \right. \end{equation} We let $\Omega$ be a domain in $\mathbb R^n$, where $n\ge 2$. We denote \begin{equation} \label{160928@eq1} U:=\abs{p}+\abs{D\vec u}\quad \text{and}\quad F:=\abs{\vec f}+\abs{\vec f_\alpha}+\abs{g}, \end{equation} and we abbreviate $B_R=B_R(0)$ and $B_R^+=B_R^+(0)$, etc. \subsection{Main results} \label{123.sec1} \begin{A} There is a constant $R_0\in (0,1]$ such that the following hold. \begin{enumerate}[(a)] \item For any $x\in\overline{\Omega}$ and $R\in(0,R_0]$ so that either $B_R(x)\subset \Omega$ or $x\in \partial \Omega$, we have \begin{equation*} \fint_{B_R(x)}\bigabs{A_{\alpha\beta}-(A_{\alpha\beta})_{B_R(x)}}\le \gamma. \end{equation*} \item ($\gamma$-Reifenberg flat domain) For any $x\in \partial \Omega$ and $R\in (0,R_0]$, there is a spatial coordinate systems depending on $x$ and $R$ such that in this new coordinate system, we have \begin{equation*} \set{y:x_1+\gamma R<y_1}\cap B_R(x)\subset \Omega_R(x)\subset \set{y:x_1-\gamma R<y_1}\cap B_R(x). \end{equation*} \end{enumerate} \end{A} \begin{theorem} \label{0123.thm1} Assume the condition $(\bf{D})$ in Section \ref{1006@sec2} and $\operatorname{diam}(\Omega)\le K_0$. 
For $2<q<\infty$, there exists a constant $\gamma>0$, depending only on $n$, $\lambda$, and $q$, such that, under the condition $(\bf{A3}\,(\gamma))$, the following holds: if $(\vec u,p)\in W^{1,q}_0(\Omega)^n\times L^q_0(\Omega)$ satisfies \eqref{0123.eq3}, then we have \begin{equation} \label{1204.eq2} \norm{p}_{L^q(\Omega)}+\norm{D\vec u}_{L^q(\Omega)}\le C\big(\norm{\vec f}_{L^q(\Omega)}+\norm{\vec f_\alpha}_{L^q(\Omega)}+\norm{g}_{L^q(\Omega)}\big), \end{equation} where $C=C(n,\lambda,K_0,q, A,R_0)$. \end{theorem} \begin{remark} We remark that $\gamma$-Reifenberg flat domains with a small constant $\gamma>0$ satisfy the condition $(\bf{D})$. Indeed, $\gamma$-Reifenberg flat domains with sufficiently small $\gamma$ are John domains (and NTA-domains) that satisfy the condition $(\bf{D})$. We refer to \cite{MR2186550, MR2263708, MR1446617} for the details. \end{remark} Since Lipschitz domains with a small Lipschitz constant are Reifenberg flat, we obtain the following result from Theorem \ref{0123.thm1}. \begin{corollary} \label{0129.cor2} Let $\Omega$ be a domain in $\mathbb R^n$ with $\operatorname{diam}(\Omega)\le K_0$, where $n\ge 2$. Assume that the coefficients of $\sL$ belong to the class of $\mathrm{VMO}$. For $1<q<\infty$, there exists a constant $L=L(n,\lambda,q)>0$ such that, under the condition $(\bf{A0})$ with $R_1\in (0,1]$ and $K_1\in (0, L]$, the following holds: if $q_1\in (1,\infty)$, $q_1\ge \frac{qn}{q+n}$, $\vec f\in L^{q_1}(\Omega)^n$, $\vec f_\alpha\in L^q(\Omega)^n$, and $g\in L^q_0(\Omega)$, there exists a unique solution $(\vec u,p)\in W^{1,q}_0(\Omega)^n\times L^q_0(\Omega)$ of the problem \eqref{0123.eq3}. Moreover, we have $$ \norm{p}_{L^q(\Omega)}+\norm{D\vec u}_{L^q(\Omega)}\le C\big(\norm{\vec f}_{L^{q_1}(\Omega)}+\norm{\vec f_\alpha}_{L^q(\Omega)}+\norm{g}_{L^q(\Omega)}\big), $$ where the constant $C$ depends on $n$, $\lambda$, $K_0$, $R_1$, $q$, and the $\mathrm{VMO}$ modulus of the coefficients. 
\end{corollary} \begin{proof} It suffices to prove the corollary with $\vec f=(f^1,\ldots,f^n)=0$. Indeed, by the solvability of the divergence equation in Lipschitz domains, there exist $\vec \phi_i\in W^{1,q_1}_0(\Omega)^n$ such that $$ \operatorname{div}{\vec \phi_i}=f^i-(f^i)_\Omega \quad \text{in }\, \Omega, \quad \|D\vec \phi_i\|_{L^{q_1}(\Omega)}\le C\|f^i\|_{L^{q_1}(\Omega)}, $$ where $C=C(n,\lambda,K_0, R_1,q)$. If we define $\vec \Phi_\alpha=(\Phi_\alpha^1,\ldots, \Phi^n_\alpha)$ by $$ \Phi^i_\alpha(x)=\phi^\alpha_i(x)+\frac{(f^i)_\Omega}{n}x_\alpha, $$ then we have that $$ \sum_{\alpha=1}^nD_\alpha \vec \Phi_\alpha=\vec f $$ and $$ \|\vec \Phi_\alpha \|_{L^q(\Omega)}\le C\|D\vec\Phi_\alpha\|_{L^{q_1}(\Omega)}\le C\|\vec f\|_{L^{q_1}(\Omega)}. $$ Due to Lemma \ref{122.lem1}, it is enough to consider the case $q\neq 2$.\\ \\ {\bf{Case 1.}} $q>2$. Let $\gamma=\gamma(n,\lambda,q)$ and $M=M(n,q)$ be constants in Theorem \ref{0123.thm1} and \cite[Theorem 2.1]{MR1313554}, respectively. Set $L=\min\{\gamma, M\}$. If $K_1\in (0,L]$, then by Theorem \ref{0123.thm1}, the method of continuity, and the $L^q$-solvability of the Stokes systems with simple coefficients (see \cite[Theorem 2.1]{MR1313554}), there exists a unique solution $(\vec u,p)\in W^{1,q}_0(\Omega)^n\times L^q_0(\Omega)$ of the problem \eqref{0123.eq3} with $\vec f=0$.\\ \\ {\bf{Case 2.}} $1<q<2$. We use the duality argument. Set $q_0=\frac{q}{q-1}$, and let $L=L(n,\lambda,q_0)$ and $M=M(n,q)$ be constants from Case 1 and \cite[Theorem 2.1]{MR1313554}, respectively. Assume that $K_1\le L$ and $(\vec u, p)\in W^{1,q}_0(\Omega)^n\times L^q_0(\Omega)$ satisfies \eqref{0123.eq3} with $\vec f=0$. 
For $\vec h_\alpha\in L^{q_0}(\Omega)^n$, there exists $(\vec v,\pi)\in W^{1,q_0}_0(\Omega)^n\times L^{q_0}_0(\Omega)$ such that \begin{equation*} \left\{ \begin{aligned} \sL^*\vec v+D\pi=D_\alpha\vec h_\alpha&\quad \text{in }\, \Omega,\\ \operatorname{div} \vec v=0 &\quad \text{in }\, \Omega, \end{aligned} \right. \end{equation*} where $\sL^*$ is the adjoint operator of $\sL$. Then we have \begin{align*} \int D_\alpha \vec u\cdot \vec h_\alpha\,dx&=-\int_\Omega A_{\alpha\beta}D_\beta \vec u \cdot D_\alpha \vec v\,dx+\int_\Omega \pi \operatorname{div} \vec u\,dx\\ &=\int_\Omega \vec f_\alpha \cdot D_\alpha \vec v\,dx+\int_\Omega \pi g\,dx, \end{align*} which implies that $$ \left|\int D_\alpha \vec u\cdot \vec h_\alpha\,dx\right|\le C\big(\|\vec f_\alpha \|_{L^q(\Omega)}+\|g\|_{L^q(\Omega)}\big)\|\vec h_\alpha\|_{L^{q_0}(\Omega)}, $$ where the constant $C$ depends on $n$, $\lambda$, $K_0$, $R_1$, $q$, and the $\mathrm{VMO}$ modulus of the coefficients. Since $\vec h_\alpha$ was arbitrary, it follows that \begin{equation} \label{1007@e1} \norm{D\vec u}_{L^q(\Omega)}\le C\big(\norm{\vec f_\alpha }_{L^q(\Omega)}+\norm{g}_{L^q(\Omega)}\big). \end{equation} To estimate $p$, let $w\in L^{q_0}(\Omega)$ and $w_0=w-(w)_\Omega$. Then by Remark \ref{K0122.rmk2}, there exists $\vec \phi\in W^{1,q_0}(\Omega)^n$ such that \[ \operatorname{div} \vec \phi=w_0 \quad \text{in }\, \Omega, \quad \norm{\vec \phi}_{W^{1,q_0}(\Omega)}\le C\norm{w_0}_{L^{q_0}(\Omega)}. \] By testing $\vec \phi$ in \eqref{0123.eq3}, it is easy to see that \begin{align*} \Abs{\int_\Omega pw\,dx}&=\Abs{\int_\Omega pw_0\,dx}\\ &\le C\left(\norm{D\vec u}_{L^q(\Omega)}+\norm{\vec f_\alpha}_{L^q(\Omega)}\right)\norm{w_0}_{L^{q_0}(\Omega)}\\ &\le C\left(\norm{D\vec u}_{L^q(\Omega)}+\norm{\vec f_\alpha}_{L^q(\Omega)}\right)\norm{w}_{L^{q_0}(\Omega)}. 
\end{align*} This together with \eqref{1007@e1} yields $$ \|p\|_{L^q(\Omega)}+\norm{D\vec u}_{L^q(\Omega)}\le C\big(\norm{\vec f_\alpha }_{L^q(\Omega)}+\norm{g}_{L^q(\Omega)}\big). $$ Using the above $L^q$-estimate, the method of continuity, and the $L^q$-solvability of the Stokes systems with simple coefficients, there exists a unique solution $(\vec u,p)\in W^{1,q}_0(\Omega)^n\times L^q_0(\Omega)$ of the problem \eqref{0123.eq3} with $\vec f=0$. \end{proof} \subsection{Auxiliary results} \begin{lemma} \label{160924@lem1} Recall the notation \eqref{160928@eq1}. Suppose that the coefficients of $\sL$ are constants. Let $k$ be a constant. \begin{enumerate}[$(a)$] \item If $(\vec u, p)\in W^{1,2}(B_R)^n\times L^2(B_R)$ satisfies \begin{equation*} \left\{ \begin{aligned} \sL\vec u+Dp=0&\quad \text{in }\, B_{R},\\ \operatorname{div} \vec u= k &\quad \text{in }\, B_{R}, \end{aligned} \right. \end{equation*} then there exists a constant $C=C(n,\lambda)$ such that \begin{equation} \label{160927@eq4} \norm{U}_{L^\infty(B_{R/2})}\le CR^{-n/2}\norm{U}_{L^2(B_R)}+C\abs{k}. \end{equation} \item If $(\vec u, p)\in W^{1,2}(B_R^+)^n\times L^2(B_R^+)$ satisfies \begin{equation*} \left\{ \begin{aligned} \sL\vec u+Dp=0&\quad \text{in }\, B_{R}^+,\\ \operatorname{div} \vec u= k &\quad \text{in }\, B_{R}^+,\\ \vec u=0 &\quad \text{on }\, B_R\cap \set{x_1=0}, \end{aligned} \right. \end{equation*} then there exists a constant $C=C(n,\lambda)$ such that \begin{equation} \label{160927@eq3} \norm{U}_{L^\infty(B_{R/2}^+)}\le CR^{-n/2}\norm{U}_{L^2(B_R^+)}+C\abs{k}. \end{equation} \end{enumerate} \end{lemma} \begin{proof} The interior and boundary estimates for Stokes systems with variable coefficients were studied by Giaquinta \cite{MR0641818}. The proof of the assertion (a) is the same as that of \cite[Theorem 1.10, pp. 186--187]{MR0641818}. See also the proof of \cite[Theorem 2.8, p. 207]{MR0641818} for the boundary estimate \eqref{160927@eq3}. 
We note that in \cite{MR0641818}, he gives the complete proofs for the Neumann problem and mentions that the method works for other boundary value problems. Regarding the Dirichlet problem, we need to impose a normalization condition for $p$ because $(\vec u, p+c)$ satisfies the same system for any constant $c\in \mathbb R$. For this reason, the right-hand sides of the estimates \eqref{160927@eq4} and \eqref{160927@eq3} contain the $L^2$-norm of $p$. For a more detailed proof, one may refer to \cite{arXiv:1604.02690v2}. Their methods are general enough to allow the coefficients to be measurable in one direction and give more precise information on the dependence of the constant $C$. \end{proof} \begin{theorem} \label{123.thm1} Let $2<\nu<q<\infty$ and $\nu'=2\nu/(\nu-2)$. Assume $(\vec u,p)\in W^{1,q}_0(\Omega)^n\times L^q_0(\Omega)$ satisfies \begin{equation*} \left\{ \begin{aligned} \sL\vec u+Dp=\vec f+D_\alpha\vec f_\alpha&\quad \text{in }\, \Omega,\\ \operatorname{div} \vec u= g &\quad \text{in }\, \Omega, \end{aligned} \right. \end{equation*} where $\vec f,\, \vec f_\alpha\in L^2(\Omega)^n$ and $g\in L^2_0(\Omega)$. \begin{enumerate}[(i)] \item Suppose that $(\bf{A3}\,(\gamma))$ $(a)$ holds at $0\in \Omega$ with $\gamma>0$. Then, for $R\in (0, \min(R_0,d_0)]$, where $d_0=\operatorname{dist}(0,\partial \Omega)$, $(\vec u,p)$ admits a decomposition \[ (\vec u,p)=(\vec u_1,p_1)+(\vec u_2,p_2) \quad \text{in }\, B_R, \] and we have \begin{align} \label{123.eq1} (U_1^2)^{1/2}_{B_R}&\le C\left(\gamma^{1/\nu'}(U^\nu)^{1/\nu}_{B_R}+(F^2)^{1/2}_{B_R}\right),\\ \label{123.eq1a} \norm{U_2}_{L^\infty(B_{R/2})} &\le C\left(\gamma^{1/\nu'}(U^\nu)^{1/\nu}_{B_R}+(U^2)^{1/2}_{B_R}+(F^2)^{1/2}_{B_R}\right), \end{align} where $C=C(n,\lambda,\nu)$. \item Suppose that $(\bf{A3}\,(\gamma))$ $(a)$ and $(b)$ hold at $0\in \partial \Omega$ with $\gamma\in (0,1/2)$. 
Then, for $R\in (0, R_0]$, $(\vec u,p)$ admits a decomposition \[ (\vec u,p)=(\vec u_1,p_1)+(\vec u_2,p_2) \quad \text{in }\, \Omega_R, \] and we have \begin{align} \label{123.eq1b} (U_1^2)^{1/2}_{\Omega_R}&\le C\left(\gamma^{1/\nu'}(U^\nu)^{1/\nu}_{\Omega_R}+(F^2)^{1/2}_{\Omega_R}\right),\\ \label{123.eq1c} \norm{U_2}_{L^\infty(\Omega_{R/4})} &\le C\left(\gamma^{1/\nu'}(U^\nu)^{1/\nu}_{\Omega_R}+(U^2)^{1/2}_{\Omega_R}+(F^2)^{1/2}_{\Omega_R}\right), \end{align} where $C=C(n,\lambda,\nu)$. \end{enumerate} Here, we define $U_i$ in the same way as $U$ with $p$ and $\vec u$ replaced by $p_i$ and $\vec u_i$, respectively. \end{theorem} \begin{proof} The proof is an adaptation of that of \cite[Lemma 8.3]{MR2835999}. To prove assertion $(i)$, we denote \[ \sL_0\vec u=-D_\alpha(A_{\alpha\beta}^0D_\beta \vec u), \] where $A_{\alpha\beta}^0=(A_{\alpha\beta})_{B_R}$. By Lemma \ref{122.lem1}, there exists a unique solution $(\vec u_1,p_1)\in W^{1,2}_0(B_R)^n\times {L}_0^2(B_R)$ of the problem \begin{equation*} \left\{ \begin{aligned} \sL_0\vec u_1+Dp_1=\vec f+D_\alpha \vec f_\alpha+D_\alpha\vec h_\alpha&\quad \text{in }\, B_R,\\ \operatorname{div} \vec u_1= g-( g)_{B_R}&\quad \text{in }\, B_R, \end{aligned} \right. \end{equation*} where \[ \vec h_\alpha=(A_{\alpha\beta}^0-A_{\alpha\beta})D_\beta \vec u. \] We also get from \eqref{122.eq1a} that (recall $R\le R_0\le 1$) \begin{equation*} \norm{U_1}_{L^2(B_R)}\le C\left(\norm{\vec h_\alpha}_{L^2(B_R)}+\norm{F}_{L^2(B_R)}\right), \end{equation*} where $C=C(n,\lambda)$. Therefore, by using the fact that \begin{equation*} \norm{\vec h_\alpha}_{L^2(B_R)}\le C\bignorm{A_{\alpha\beta}^0-A_{\alpha\beta}}_{L^1(B_R)}^{1/\nu'}\norm{D\vec u}_{L^\nu(B_R)}\le C_\nu\gamma^{1/\nu'}\abs{B_R}^{1/\nu'}\norm{D\vec u}_{L^\nu(B_R)}, \end{equation*} we obtain \eqref{123.eq1}. 
To see \eqref{123.eq1a}, we note that $(\vec u_2,p_2)=(\vec u,p)-(\vec u_1,p_1)$ satisfies \begin{equation*} \left\{ \begin{aligned} \sL_0\vec u_2+Dp_2=0&\quad \text{in }\, B_{R},\\ \operatorname{div} \vec u_2= (g)_{B_R} &\quad \text{in }\, B_{R}. \end{aligned} \right. \end{equation*} Then by Lemma \ref{160924@lem1}, we get \[ \norm{U_2}_{L^\infty(B_{R/2})}\le C(U_2^2)^{1/2}_{B_R}+C(\abs{g}^2)^{1/2}_{B_R}, \] and thus, we conclude \eqref{123.eq1a} from \eqref{123.eq1}. Next, we prove assertion $(ii)$. Without loss of generality, we may assume that $(\bf{A3}(\gamma))$ $(b)$ holds at $0$ in the original coordinate system. Define $\sL_0$ as above. Let us fix $y:=(\gamma R,0,\ldots,0)$ and denote \[ B_R^\gamma:=B_R\cap \set{x_1> \gamma R}. \] Then we have \begin{equation*} B_{R/2}\cap \set{x_1>\gamma R}\subset B_{R/2}^+(y)\subset B_R^\gamma. \end{equation*} Take a smooth function $\chi$ defined on $\mathbb R$ such that \[ \chi(x_1)\equiv 0 \text{ for } x_1\le \gamma R, \quad \chi(x_1)\equiv 1 \text{ for } x_1\ge 2\gamma R, \quad \abs{\chi'}\le C(\gamma R)^{-1}. \] We then find that $(\hat{\vec u}(x), \hat{p}(x))=(\chi(x_1) \vec u(x), \chi(x_1)p(x))$ satisfies \[ \left\{ \begin{aligned} \sL_0 \hat{\vec u}+D\hat{p}=\vec \cF&\quad \text{in }\, B^\gamma_R,\\ \operatorname{div} \hat{\vec u}=\cG &\quad \text{in }\, B_R^\gamma,\\ \hat{\vec u}=0 &\quad \text{on }\, B_R\cap \set{x_1=\gamma R}, \end{aligned} \right. \] where we use the notation $\cG=D\chi\cdot \vec u+\chi g$ and \begin{multline*} \vec\cF=\chi \vec f+\chi D_\alpha \vec f_\alpha+p D\chi\\ +D_\alpha\big(A_{\alpha\beta}^0D_\beta ((1-\chi)\vec u)-(A_{\alpha\beta}^0-A_{\alpha\beta})D_\beta \vec u\big)+(\chi-1)D_\alpha(A_{\alpha\beta}D_\beta \vec u). 
\end{multline*} Let $(\hat{\vec u}_1,\hat{p}_1)\in W^{1,2}_0\big(B^+_{R/2}(y)\big)^n\times {L}^2_0\big(B^+_{R/2}(y)\big)$ satisfy \begin{equation} \label{1128.eq0} \left\{ \begin{aligned} \sL_0 \hat{\vec u}_1+D\hat{p}_1=\vec \cF &\quad \text{in }\, B_{R/2}^+(y),\\ \operatorname{div} \hat{\vec u}_1=\cG-(\cG)_{B^+_{R/2}(y)} &\quad \text{in }\, B_{R/2}^+(y),\\ \hat{\vec u}_1=0 &\quad \text{on }\, \partial B^+_{R/2}(y). \end{aligned} \right. \end{equation} Then by testing with $\hat{\vec u}_1$ in \eqref{1128.eq0}, we obtain \begin{align} \nonumber &\int_{B^+_{R/2}(y)}A_{\alpha\beta}^0D_\beta \hat{\vec u}_1\cdot D_\alpha \hat{\vec u}_1\,dx\\ \nonumber &=\int_{B^+_{R/2}(y)}\vec f\cdot (\chi \hat{\vec u}_1)-\vec f_\alpha \cdot D_\alpha(\chi \hat{\vec u}_1)+pD\chi\cdot \hat{\vec u}_1\,dx\\ \nonumber &\quad +\int_{B^+_{R/2}(y)}-A_{\alpha\beta}^0D_\beta ((1-\chi)\vec u)\cdot D_\alpha \hat{\vec u}_1+(A_{\alpha\beta}^0-A_{\alpha\beta})D_\beta \vec u\cdot D_\alpha \hat{\vec u}_1\,dx\\ \label{1201.eq1} &\quad +\int_{B^+_{R/2}(y)}-A_{\alpha\beta}D_\beta \vec u\cdot D_\alpha((\chi-1)\hat{\vec u}_1)\,dx+\hat{p}_1 (D\chi\cdot \vec u+\chi g)\,dx. \end{align} Note that \[ \abs{D\chi(x_1)}+\abs{D(1-\chi(x_1))}\le C(x_1-\gamma R)^{-1}, \quad \forall x_1>\gamma R. \] Therefore, we obtain by Lemma \ref{0323.lem1} that \begin{equation} \label{123.eq2} \norm{D(\chi \hat{\vec u}_1)}_{L^2(B^+_{R/2}(y))}+\norm{D((1-\chi) \hat{\vec u}_1)}_{L^2(B^+_{R/2}(y))}\le C\norm{D\hat{\vec u}_1}_{L^2(B^+_{R/2}(y))}, \end{equation} and hence, we also have \begin{equation} \label{123.eq2a} \norm{D\chi \cdot \hat{\vec u}_1}_{L^2(B^+_{R/2}(y))}\le C\norm{D\hat{\vec u}_1}_{L^2(B^+_{R/2}(y))}. 
\end{equation} From \eqref{123.eq2a} and H\"older's inequality, we get \begin{align} \nonumber \int_{B^+_{R/2}(y)}pD\chi \cdot \hat{\vec u}_1\,dx&\le \norm{p}_{L^2(B^+_{R/2}(y)\cap \set{x_1<2\gamma R})}\norm{D\hat{\vec u}_1}_{L^2(B^+_{R/2}(y))}\\ \label{1201.eq3} &\le C\gamma^{1/\nu'}R^{n/\nu'}\norm{p}_{L^\nu(\Omega_R)}\norm{D\hat{\vec u}_1}_{L^2(B^+_{R/2}(y))}. \end{align} Then, by applying \eqref{123.eq2}--\eqref{1201.eq3}, and the fact that (recall $R\le R_0\le 1$) \begin{equation*} \norm{\hat{\vec u}_1}_{L^2(B^+_{R/2}(y))}\le C(n)\norm{D\hat{\vec u}_1}_{L^2(B^+_{R/2}(y))} \end{equation*} to \eqref{1201.eq1}, we have \begin{equation*} \norm{D\hat{\vec u}_1}_{L^2(B^+_{R/2}(y))}\le \varepsilon\norm{\hat{p}_1}_{L^2(B_{R/2}^+(y))}+C_\varepsilon \norm{F}_{L^{2}(\Omega_R)}+C_\varepsilon \cK, \quad \forall\varepsilon>0, \end{equation*} where \begin{multline*} \cK:=\gamma^{1/\nu'}R^{n/\nu'}\norm{p}_{L^\nu(\Omega_R)} +\norm{D((1-\chi)\vec u)}_{L^2(B^+_{R/2}(y))}\\ +\norm{D\vec u}_{L^2(B^+_{R/2}(y)\cap \set{x_1<2\gamma R })}+\bignorm{(A_{\alpha\beta}^0-A_{\alpha\beta})D_\beta \vec u}_{L^2(B^+_{R/2}(y))}. \end{multline*} Similarly, we have \begin{equation*} \norm{\hat{p}_1}_{L^2(B^+_{R/2}(y))}\le C\norm{D\hat{\vec u}_1}_{L^2(B^+_{R/2}(y))}+C\norm{F}_{L^2(\Omega_R)}+C\cK. \end{equation*} Therefore, from the above two inequality, we conclude that \begin{equation} \label{1128.eq1a} \norm{\hat{p}_1}_{L^2(B^+_{R/2}(y))}+\norm{D\hat{\vec u}_1}_{L^2(B^+_{R/2}(y))}\le C\norm{F}_{L^2(\Omega_R)}+C\cK, \end{equation} where $C=C(n,\lambda, \nu)$. Now we claim that \begin{equation} \label{123.eq3} \cK\le C\gamma^{1/\nu'}R^{n/\nu'}\norm{U}_{L^\nu(\Omega_R)}. \end{equation} Observe that by H\"older's inequality and Lemma \ref{0812.lem1}, we have \begin{equation} \label{1201.eq1a} \norm{D\vec u}_{L^2(B^+_{R/2}(y)\cap \set{x_1<2\gamma R})} \le C(n,\nu)\gamma^{1/\nu'}R^{n/\nu'}\norm{D\vec u}_{L^\nu(\Omega_{R})}. 
\end{equation} We also have \begin{align} \nonumber \bignorm{(A_{\alpha\beta}^0-A_{\alpha\beta})D_\beta \vec u}_{L^2(B^+_{R/2}(y))}&\le C\left(\int_{B_{R}}\bigabs{A_{\alpha\beta}^0-A_{\alpha\beta}}\,dx\right)^{1/\nu'}\norm{D\vec u}_{L^\nu(B^+_{R/2}(y))}\\ \nonumber &\le C\gamma^{1/\nu'}R^{n/\nu'}\norm{D\vec u}_{L^\nu(\Omega_R)}, \end{align} where $C=C(n,\lambda,\nu)$. To estimate $\norm{D((1-\chi)\vec u)}_{L^2(B^+_{R/2}(y))}$, we recall that $\chi-1=0$ for $x_1\ge 2\gamma R$. For any $y'\in B'_{R}$, let $\hat{y}_1=\hat{y}_1(y')$ be the largest number such that $\hat{y}=(\hat{y}_1,y')\in \partial \Omega$. Since $\abs{\hat{y}_1}\le \gamma R$, we have \[ x_1-\hat{y}_1\le x_1+\gamma R\le 3\gamma R , \quad \forall x_1\in [\gamma R, 2\gamma R], \] and thus, we obtain \begin{equation*} \abs{D\chi(x_1)}\le C(x_1-\hat{y}_1)^{-1}, \quad \forall x_1\in [\gamma R, 2\gamma R]. \end{equation*} Therefore, we find that \begin{align} \nonumber \int_{\gamma R}^r\abs{D((1-\chi)\vec u)(x_1,y')}^2\,dx_1&\le \int_{\hat{y}_1}^r\abs{D((1-\chi)\vec u)(x_1,y')}^2\,dx_1\\ \label{1201.eq1d} &\le C\int_{\hat{y}_1}^r \abs{D\vec u(x_1,y')}^2\,dx_1, \end{align} where $r=r(y')=\min\big(2\gamma R, \sqrt{R^2-\abs{y'}^2}\big)$. We then get from \eqref{1201.eq1d} that \begin{equation*} \norm{D((1-\chi)\vec u)}_{L^2(B^+_{R/2}(y))}\le C\gamma^{1/\nu'}R^{n/\nu'}\norm{D\vec u}_{L^\nu(\Omega_{R})}, \end{equation*} where $C=C(n,\nu)$. From the above estimates, we obtain \eqref{123.eq3}, and thus, by combining \eqref{1128.eq1a} and \eqref{123.eq3}, we conclude \begin{equation} \label{123.eq3a} \norm{\hat{p}_1}_{L^2(B^+_{R/2}(y))}+\norm{D\hat{\vec u}_1}_{L^2(B^+_{R/2}(y))}\le C\left(\gamma^{1/\nu'}R^{n/\nu'}\norm{U}_{L^\nu(\Omega_R)}+\norm{F}_{L^2(\Omega_R)}\right), \end{equation} where $C=C(n,\lambda,\nu)$. Now, we are ready to show the estimate \eqref{123.eq1b}. We extend $\hat{\vec u}_1$ and $\hat{p}_1$ to be zero in $\Omega_R\setminus B^+_{R/2}(y)$. 
Let $(\vec u_1,p_1)=\big(\hat{\vec u}_1+(1-\chi)\vec u, \hat{p}_1+(1-\chi)p\big)$. Since $(1-\chi)\vec u$ vanishes for $x_1\ge 2\gamma R$, by using the second inequality in \eqref{1201.eq1d} and H\"older's inequality as in \eqref{1201.eq1a}, we see that \begin{equation*} \norm{D((1-\chi)\vec u)}_{L^2(B_{R/2})}\le C(n)\gamma^{1/\nu'}R^{n/\nu'}\norm{D\vec u}_{L^\nu(\Omega_R)}. \end{equation*} Moreover, it follows from H\"older's inequality that \begin{equation*} \norm{(1-\chi)p}_{L^2(B_{R/2})}\le C(n)\gamma^{1/\nu'}R^{n/\nu'}\norm{p}_{L^\nu(\Omega_R)}. \end{equation*} Therefore, we conclude \eqref{123.eq1b} from \eqref{123.eq3a}. Next, let us set $(\vec u_2,p_2)=(\vec u,p)-(\vec u_1,p_1)$. Then, it is easily seen that $(\vec u_2,p_2)=(0,0)$ in $\Omega_{R}\setminus B^\gamma_{R}$ and $(\vec u_2,p_2)$ satisfies \[ \left\{ \begin{aligned} \sL_0\vec u_2+Dp_2=0 &\quad \text{in }\, B^+_{R/2}(y),\\ \operatorname{div} \vec u_2=(\cG)_{B^+_{R/2}(y)}&\quad \text{in }\, B^+_{R/2}(y),\\ \vec u_2=0 &\quad \text{on }B_{R/2}(y)\cap \set{x_1=\gamma R}. \end{aligned} \right. \] By Lemma \ref{160924@lem1}, we get \[ \norm{U_2}_{L^\infty(B^+_{R/2})}\le CR^{-n/2}\left(\norm{U_2}_{L^2(\Omega_R)}+\norm{\cG}_{L^2(B^+_{R/2}(y))}\right), \] and thus, from \eqref{1201.eq1a} and \eqref{123.eq1b}, we obtain \eqref{123.eq1c}. This completes the proof of the theorem. \end{proof} Now, we recall the maximal function theorem. Let \[ \sB=\set{B_r(x):x\in \mathbb R^n,\, r\in (0,\infty)}. \] For a function $f$ on a set $\Omega\subset \mathbb R^n$, we define its maximal function $\cM(f)$ by \[ \cM(f)(x)=\sup_{B\in \sB,\, x\in B}\fint_B \abs{f(y)}1_\Omega\,dy. \] Then for $f\in L^q(\Omega)$ with $1<q\le \infty$, we have \begin{equation*} \norm{\cM(f)}_{L^q(\mathbb R^n)}\le C\norm{f}_{L^q(\Omega)}, \end{equation*} where $C=C(n,q)$. As is well known, the above inequality is due to the Hardy-Littlewood maximal function theorem. 
Hereafter, we use the notation \begin{equation*} \begin{aligned} \cA(s)&=\set{x\in \Omega: U(x)>s},\\ \cB(s)&=\bigset{x\in \Omega:\gamma^{-1/\nu'}(\cM(F^2)(x))^{1/2}+(\cM(U^\nu)(x))^{1/\nu}>s}. \end{aligned} \end{equation*} With Theorem \ref{123.thm1} in hand, we get the following corollary. \begin{corollary} \label{0126.cor1} Suppose that $(\bf{A3}\, (\gamma))$ holds with $\gamma\in (0,1/2)$, and $0\in \overline{\Omega}$. Let $2<\nu<q<\infty$ and $\nu'=2\nu/(\nu-2)$. Assume $(\vec u,p)\in W^{1,q}_0(\Omega)^n\times {L}^q_0(\Omega)$ satisfies \begin{equation*} \left\{ \begin{aligned} \sL\vec u+Dp=D_\alpha\vec f_\alpha+\vec f&\quad \text{in }\, \Omega,\\ \operatorname{div} \vec u= g &\quad \text{in }\, \Omega, \end{aligned} \right. \end{equation*} where $\vec f,\, \vec f_\alpha\in L^2(\Omega)^n$ and $g\in L^2_0(\Omega)$. Then there exists a constant $\kappa=\kappa(n,\lambda,\nu)>1$ such that the following holds: If \begin{equation} \label{1204.eq1b} \abs{\Omega_{R/32}\cap \cA(\kappa s)}\ge \gamma^{2/\nu'}\abs{\Omega_{R/32}}, \quad R\in (0,R_0], \quad s>0, \end{equation} then we have \[ \Omega_{R/32}\subset \cB(s). \] \end{corollary} \begin{proof} By dividing $U$ and $F$ by $s$, we may assume $s=1$. We prove by contradiction. Suppose that there exists a point $x\in \Omega_{R/32}=B_{R/32}(0)\cap \Omega$ such that \begin{equation} \label{1203.eq1} \gamma^{-1/\nu'}(\cM(F^2)(x))^{1/2}+(\cM(U^\nu)(x))^{1/\nu}\le 1. \end{equation} In the case when $\operatorname{dist}(0,\partial \Omega)\ge R/8$, we note that \[ x\in B_{R/32}\subset B_{R/8}\subset \Omega. 
\] Due to Theorem \ref{123.thm1} $(i)$, we can decompose $(\vec u,p)=(\vec u_1,p_1)+(\vec u_2,p_2)$ in $B_{R/8}$ and then, by \eqref{1203.eq1}, we have \begin{equation*} (U_1^2)^{1/2}_{B_{R/8}}\le C_0\big(\gamma^{1/\nu'}(U^\nu)^{1/\nu}_{B_{R/8}}+(F^2)^{1/2}_{B_{R/8}}\big)\le C_0\gamma^{1/\nu'} \end{equation*} and \begin{equation*} \norm{U_2}_{L^\infty(B_{R/32})}\le C_0\big(\gamma^{1/\nu'}(U^\nu)^{1/\nu}_{B_{R/8}}+(U^2)^{1/2}_{B_{R/8}}+(F^2)^{1/2}_{B_{R/8}}\big)\le C_0, \end{equation*} where $C_0=C_0(n,\lambda,\nu)$. From these inequalities and Chebyshev's inequality, we get \begin{align} \nonumber \bigabs{B_{R/32}\cap \cA(\kappa)}&=\bigabs{\set{x\in B_{R/32}:U(x)>\kappa}}\\ \label{126.eq1a} &\le \bigabs{\set{x\in B_{R/32}:U_1>\kappa-C_0}}\le C(n)\frac{C_0^2}{(\kappa-C_0)^2}\gamma^{2/\nu'}\abs{B_{R/32}}, \end{align} which contradicts with \eqref{1204.eq1b} if we choose $\kappa$ sufficiently large. We now consider the case $\operatorname{dist}(0,\partial \Omega)<R/8$. Let $y\in \partial \Omega$ satisfy $\abs{y}=\operatorname{dist}(0,\partial \Omega)$. Then we have \[ x\in \Omega_{R/32}\subset \Omega_{R/4}(y). \] By Theorem \ref{123.thm1} $(ii)$, we can decompose $(\vec u,p)=(\vec u_1,p_1)+(\vec u_2,p_2)$ in $\Omega_{R}(y)$ and then, by \eqref{1203.eq1}, we have \begin{equation*} (U_1^2)^{1/2}_{\Omega_{R}(y)}\le C_0\gamma^{1/\nu'} \quad \text{and}\quad \norm{U_2}_{L^\infty(\Omega_{R/4}(y))}\le C_0. \end{equation*} From this, and by following the same steps used in deriving \eqref{126.eq1a}, we get \[ \bigabs{\Omega_{R/32}\cap \cA(\kappa)}\le C(n)\frac{C_0^2}{(\kappa-C_0)^2}\gamma^{2/\nu'}\abs{\Omega_{R/32}}, \] which contradicts with \eqref{1204.eq1b} if we choose $\kappa$ sufficiently large. \end{proof} \subsection{Proof of Theorem \ref{0123.thm1}} We fix $2<\nu<q$ and denote $\nu'=2\nu/(\nu-2)$. Let $\gamma\in (0,1/2)$ be a constant to be chosen later and $\kappa=\kappa(n,\lambda,\nu)$ be the constant in Corollary \ref{0126.cor1}. 
Since \[ \abs{\cA(\kappa s)}\le C_0(\kappa s)^{-1}\norm{U}_{L^2(\Omega)} \] for all $s>0$, where $C_0=C_0(n,K_0)$, we get \begin{equation} \label{1215.eq1} \abs{\cA(\kappa s)}\le \gamma^{2/\nu'}\abs{B_{R_0/32}}, \end{equation} provided that \[ s\ge \frac{C_0}{\kappa \gamma^{2/\nu'}|B_{R_0/32}|}\norm{U}_{L^2(\Omega)}:=s_0. \] Therefore, from \eqref{1215.eq1}, Corollary \ref{0126.cor1}, and Lemma \ref{0324.lem1}, we have the following upper bound of the distribution of $U$; $$ \abs{\cA(\kappa s)}\le C_1\gamma^{2/\nu'}\abs{\cB(s)} \quad \forall s>s_0, $$ where $C_1=C_1(n)$. Using this together with the fact that $$ \abs{\cA(\kappa s)}\le (\kappa s)^{-2}\norm{U}_{L^2(\Omega)}^2, \quad \forall s>0, $$ we have \begin{align*} \norm{U}_{L^q(\Omega)}^q&=q\int_0^\infty\abs{\cA(s)}s^{q-1}\,ds=q\kappa^{q}\int_0^\infty \abs{\cA(\kappa s)}s^{q-1}\,ds\\ &= q\kappa^q\int_0^{s_0}\abs{\cA(\kappa s)}s^{q-1}\,ds+q\kappa^q\int_{s_0}^\infty \abs{\cA(\kappa s)}s^{q-1}\,ds\\ &\le C_2\gamma^{2(2-q)/\nu'}\norm{U}_{L^2(\Omega)}^q+C_3\gamma^{2/\nu'}\int_0^\infty \abs{\cB(s)}s^{q-1}\,ds, \end{align*} where $C_2=C_2(n,\lambda,K_0,q,R_0)$ and $C_3=C_3(n,\lambda,q)$. The Hardy-Littlewood maximal function theorem implies that $$ \norm{U}^q_{L^q(\Omega)}\le C_2\gamma^{2(2-q)/\nu'}\norm{U}_{L^2(\Omega)}^q+C_4\gamma^{(2-q)/\nu'}\norm{F}^q_{L^q(\Omega)}+C_4\gamma^{2/\nu'}\norm{U}_{L^q(\Omega)}^q, $$ where $C_4=C_4(n,\lambda,q)$. Notice from Lemma \ref{122.lem1} and H\"older's inequality that $$ \norm{U}_{L^2(\Omega)}^q\le C_5\norm{F}_{L^q(\Omega)}^q, $$ where $C_5=C_5(n,\lambda,K_0,q,A)$. Combining the above two estimates and taking $\gamma=\gamma(n,\lambda,q)\in (0,1/2)$ sufficiently small, we conclude \eqref{1204.eq2}. $\blacksquare$ \section{Appendix} \label{app} In this section, we provide some lemmas. \begin{lemma} \label{0323.lem1} Let $f\in W^{1,2}_0(I)$, where $I=(0, R)$. 
Then we have \begin{equation} \label{0508.eq1} \norm{x^{-1}f(x)}_{L^2(I)}\le C\norm{Df}_{L^2(I)}, \end{equation} where $C>0$ is a constant. \end{lemma} \begin{proof} We first note that \eqref{0508.eq1} holds for any $f\in C^\infty([0,R])$ satisfying $Df(0)=0$; see \cite[Lemma 7.9]{MR2835999}. Suppose that $f\in W^{1,2}_0(I)$ and $\set{f_n}$ is a sequence in $C^\infty_0([0,R])$ such that $f_n\to f$ in $W^{1,2}(I)$. Then by the Sobolev embedding theorem, $f_n\to f$ in $C([0,R])$. Since the estimate \eqref{0508.eq1} is valid for each $f_n$, we obtain by Fatou's lemma that \begin{align*} \int_0^R\bigabs{x^{-1}f(x)}^2\,dx&=\int_0^R\lim_{n\to \infty}\bigabs{x^{-1}f_n(x)}^2\,dx\\ &\le \liminf_{n\to \infty}\int_0^R\bigabs{x^{-1}f_n(x)}^2\,dx\\ &\le C\liminf_{n\to \infty}\int_0^R\abs{Df_n(x)}^2\,dx=C\int_0^R\abs{Df(x)}^2\,dx, \end{align*} which establishes \eqref{0508.eq1}. \end{proof} \begin{lemma} \label{0812.lem1} Suppose that $(\bf{A3}(\gamma))$ $(b)$ holds at $0\in \partial \Omega$ with $\gamma\in \big(0,\frac{1}{2}\big)$. Then for $R\in (0,R_0]$, we have \begin{equation} \label{123.a1} \abs{\Omega_R}\ge CR^n, \end{equation} and \begin{equation} \label{123.a1a} \abs{\Omega_R\cap \set{x:x_1<2\gamma R}}\le C\gamma\abs{\Omega_R}, \end{equation} where $C=C(n)$. \end{lemma} \begin{proof} Note that \begin{equation} \label{0410.eq1b} \abs{\Omega_R\cap \set{x:x_1<2\gamma R}}\le 2^n\gamma R^n. \end{equation} Let us fix $a\in(\frac{1}{2},1)$ and \[ \cQ=\Set{x: \abs{x_1}<aR,\, \abs{x_i}<\sqrt{\frac{1-a^2}{n-1}}R, \, i=2,\ldots,n}. \] Then we have \[ \cQ\cap \set{x:x_1>R/2}\subset \Omega_R, \] and hence, we obtain \begin{equation*} \left(a-\frac{1}{2}\right)\left(\frac{1-a^2}{n-1}\right)^{(n-1)/2}R^n=\bigabs{\cQ\cap \set{x:x_1>R/2}}\le \abs{\Omega_R}, \end{equation*} which implies \eqref{123.a1}. By combining \eqref{123.a1} and \eqref{0410.eq1b}, we get \eqref{123.a1a}. 
\end{proof} The following lemma is a result from measure theory on the ``crawling of ink spots'' which can be found in \cite{MR0563790, MR0579490}. See also \cite{MR2069724}. \begin{lemma} \label{0324.lem1} Suppose that $(\bf{A3}(\gamma))$ $(b)$ holds with $\gamma\in \big(0, \frac{1}{2}\big)$. Let $A$ and $B$ be measurable sets satisfying $A\subset B\subset \Omega$, and suppose that there exists a constant $\varepsilon\in (0,1)$ such that the following hold: \begin{enumerate}[(i)] \item $\abs{A}<\varepsilon \abs{B_{R_0/32}}$. \item For any $x\in \overline{\Omega}$ and for all $R\in (0, R_0/32]$ with $\abs{B_R(x)\cap A}\ge \varepsilon \abs{B_R}$, we have $\Omega_R(x)\subset B$. \end{enumerate} Then we get \[ \abs{A}\le C\varepsilon \abs{B}, \] where $C=C(n)$. \end{lemma} \begin{proof} We first claim that for a.e. $x\in A$, there exists $R_x\in (0, R_0/32)$ such that \begin{equation*} \abs{A\cap B_{R_x}(x)}=\varepsilon \abs{B_{R_x}} \end{equation*} and \begin{equation} \label{0324.eq1a} \abs{A\cap B_{R}(x)}< \varepsilon \abs{B_R}, \quad \forall R\in (R_x, R_0/32]. \end{equation} Note that the function $\rho=\rho(r)$ given by \[ \rho(r)=\frac{\abs{A\cap B_r(x)}}{\abs{B_r}}=\fint_{B_r(x)}1_A(y)\,dy \] is continuous on $[0, R_0]$. Since $\rho(0)=1$ and $\rho(R_0/32)<\varepsilon$, there exists $r_x\in (0, R_0/32)$ such that $\rho(r_x)=\varepsilon$. Then we get the claim by setting \[ R_x:=\max\set{r_x\in (0, R_0/32):\rho(r_x)=\varepsilon}. \] Hereafter, we denote by $$ \cU=\set{B_{R_x}(x):x\in A'}, $$ where $A'$ is the set of all points $x\in A$ such that $R_x$ exists. Then by the Vitali lemma, we have a countable subcollection $G$ such that \begin{enumerate}[(a)] \item $Q\cap Q'=\emptyset$ for any $Q, Q'\in G$ satisfying $Q\neq Q'$. \item $A'\subset \cup \set{B_{5R}(x): B_R(x)\in G}$. \item $\abs{A}=|A'|\le 5^{n}\sum_{Q\in G}\abs{Q}$. 
\end{enumerate} By the assumption (i) and \eqref{0324.eq1a}, we see that $$ \abs{A\cap B_{5R}(x)}<\varepsilon\abs{B_{5R}}=\varepsilon5^n\abs{B_R}, \quad \forall B_R(x)\in G. $$ Using this together with the assumption (ii) and Lemma \ref{0812.lem1}, we have \begin{align*} \abs{A}&=\bigabs{\cup\set{B_{5R}(x)\cap A:B_{R}(x)\in G}}\le \sum_{B_{R}(x)\in G}\abs{B_{5R}(x)\cap A}\\ &<\varepsilon 5^{n}\sum_{B_R(x)\in G}\abs{B_R(x)}\le \varepsilon C(n)\sum_{B_R(x)\in G}\abs{B_R(x)\cap \Omega}\\ &=\varepsilon C(n)\bigabs{\cup\set{B_R(x)\cap \Omega:B_R(x)\in G}}\\ &\le \varepsilon C(n)\abs{B}, \end{align*} which completes the proof. \end{proof} \begin{acknowledgment} The authors would like to express their sincere gratitude to the referee for careful reading and for many helpful comments and suggestions. The authors also thank Doyoon Kim for valuable discussions and comments. Ki-Ahm Lee was supported by the National Research Foundation of Korea (NRF) grant funded by the Korea government (MSIP) (No. 2014R1A2A2A01004618). Ki-Ahm Lee also holds a joint appointment with the Research Institute of Mathematics of Seoul National University. Jongkeun Choi was supported by BK21 PLUS SNU Mathematical Sciences Division. \end{acknowledgment} \end{document}
\begin{document} \title{Thermodynamics of the Casimir Effect\\ - Asymptotic Considerations -} \author{H. Mitter, D. Robaschik} \address{Institut f\"ur Theoretische Physik der Karl-Franzens-Universit\"at Graz, Universit\"atsplatz 5 \\ A-8020 Graz, Austria} \maketitle\abstracts{ We study the Casimir effect with different temperatures between the plates ($T$) resp. outside of them ($T'$). If we consider the inner system as the black body radiation for a special geometry, then contrary to common belief the temperature approaches a constant value for vanishing volume during isentropic processes. This means: the reduction of the degrees of freedom can not be compensated by a concentration of the energy during an adiabatic contraction of the two-plate system. Looking at the Casimir pressure, we find one unstable equilibrium point for isothermal processes with $T > T'$. For isentropic processes there is additionally one stable equilibrium point for larger values of the distances between the two plates.} \section{Introduction} \noindent The Casimir effect \cite{CAS} is one of the fundamental effects of Quantum Field Theory. It tests the importance of the zero point energy. In principle, one considers two conducting infinitely extended parallel plates at the positions $x_3=0$ and $x_3= a$. These conducting plates change the vacuum energy of Quantum Electrodynamics in such a way that a measurable attractive force between both plates can be observed \cite{EXP}. This situation does not essentially change, if a nonvanishing temperature \cite{MF} is taken into account. The thermodynamics of the Casimir effect \cite{BML} \cite{GREIN} and related problems \cite{BARTO} is well investigated.\\ Here we shall treat the different regions separately. We assume a temperature $T$ for the space between the plates and a temperature $ T' $ for the space outside the plates. 
Thereby we consider the right plate at $ x_3=a $ as movable, so that different thermodynamic processes, such as isothermal or isentropic motions, can be studied. At first we investigate the thermodynamics of the space between the two plates by setting $T'=0$. This can be viewed as the black body radiation (BBR) for a special geometry. The surprising effect is that for vanishing distance ($a\rightarrow 0$) in isentropic processes the temperature approaches a finite value, which is completely determined by the fixed entropy. This is in contrast to the expected behaviour of the standard BBR, if the known expression derived for a large volume is extrapolated to a small volume. For large values of $a$ the BBR takes the standard form. As a next topic we consider the Casimir pressure assuming that the two physical regions, i.e. the spaces between and outside the two plates, possess different temperatures. Depending on the choices of $T$ and $T'$ a different physical behaviour is possible. For $T'<T$ the external pressure is reduced in comparison with the standard case $T'=T$. Therefore we expect the existence of an equilibrium point, where the pure Casimir attraction ($T=0$ effect) and the differences of the radiation pressures compensate each other. This point is unstable, so that for isothermal processes the movable plate moves either to $a\rightarrow 0$ or to $a \rightarrow \infty$. However, an isentropic motion reduces the internal radiation pressure for growing distances, so that in this case there is an additional stable equilibrium point. \section{Thermodynamic Functions} The thermodynamic functions are already determined by different methods \cite{MF} \cite{BML}. We recalculate them by statistical mechanics including the zero-point energy and cast it in a simpler form which can be studied in detail \cite{MR}. For technical reasons the system is embedded in a large cube (side $L$). 
As space between the plates we consider the volume $L^2a$; the region outside is given by $L^2(L-a)$. All extensive thermodynamic functions are defined per area. \\ Free energy $\phi = F/L^2$: \begin{eqnarray} \label{1} \phi_{int} &=& [\frac{\hbar c \pi^2}{a^4}(-\frac{1}{720} + g(v)) +\frac{3\hbar c}{\pi^2} \frac{1}{\lambda^4} ]a ,\\ \label{2} \phi_{ext} &=& [\frac{3\hbar c}{\pi^2} \frac{1}{\lambda^4} -\frac{\hbar c \pi^6}{45}(\frac{v'}{a})^4](L-a). \end{eqnarray} Energy $e = E/L^2$: \begin{eqnarray*} e_{int} &=& [\frac{\hbar c \pi^2}{a^4}(-\frac{1}{720} + g(v) -v \partial_v g(v)) +\frac{3\hbar c}{\pi^2} \frac{1}{\lambda^4} ]a ,\\ e_{ext} &=& [\frac{3\hbar c}{\pi^2} \frac{1}{\lambda^4} +\frac{3 \hbar c \pi^6}{45}(\frac{v'}{a})^4](L-a). \end{eqnarray*} Pressure: \begin{eqnarray} \label{3} p_{int} &=& [\frac{\hbar c \pi^2}{a^4}(-\frac{1}{240} + 3g(v) -v\partial_v g(v)) -\frac{3\hbar c}{\pi^2} \frac{1}{\lambda^4} ],\\ \label{4} p_{ext} &=& [\frac{3\hbar c}{\pi^2} \frac{1}{\lambda^4} -\frac{\hbar c \pi^6}{45}(\frac{v'}{a})^4]. \end{eqnarray} Entropy $\sigma = S/(k L^2)$: \begin{eqnarray} \label{5} \sigma_{int} = -\frac{ \pi}{a^3} \partial_v g(v) a;\;\;\, \sigma_{ext} = \frac{4\pi^5}{45} (\frac{v'}{a})^3 (L-a), \end{eqnarray} $\lambda$ regularizes ($\lambda \rightarrow 0 $) the contributions from the zero-point energy. The thermodynamics is governed by the function $g(v)$. We list two equivalent expressions: \begin{eqnarray} \label{6} g(v) = -v^3[\frac{1}{2}\zeta (3) + k(\frac{1}{v})] = \frac{1}{720} -\frac{\pi^4}{45}v^4 - \frac{v}{4\pi^2}[\frac{1}{2} \zeta(3) + k(4\pi^2 v)]. \end{eqnarray} The function $k(x)$ is given by \begin{eqnarray} \label{7} k(x) = (1- x\partial_x)\sum_{n=1}^{\infty} \frac{1}{n^3}\frac{1}{\exp(nx) - 1}. \end{eqnarray} It is strongly damped for large arguments. 
$v$ is the known variable $v = a T k/(\hbar \pi c)$, the variable $v'$ contains the temperature $T'$ instead of $T$. \section{Black Body Radiation} \noindent As a first topic we consider the space between the two plates as a generalization of the usual black body radiation (BBR) for a special geometry $L \times L \times a $. Contrary to the standard treatment we include here both the internal and the external zero point energy. Thereby parameter-dependent divergent contributions compensate each other, whereas the physically irrelevant term $\sim L/\lambda^4$ can be omitted \cite{MR}. If we approximate the function $g$ for large $v$ by $g \simeq {1}/{720} - (\pi^4/45) v^4 - \zeta(3)/(8\pi^2) v $, we obtain \begin{eqnarray} \label{8} \phi_{as} &=& \frac{\pi^2 \hbar c}{a^3}[-\frac{\pi^4}{45}v^4 -\frac{\zeta(3) }{8\pi^2} v],\;\;\; \sigma_{as} = \frac{\pi}{a^2} [\frac{4\pi^4}{45}v^3 +\frac{\zeta(3) }{8\pi^2} ],\\ \label{9} p_{as}&=& \frac{\pi^2 \hbar c}{a^4} [\frac{\pi^4}{45}v^4 -\frac{\zeta(3) }{4\pi^2} v],\;\;\;\ e_{as}= \frac{\pi^2 \hbar c}{a^3} \frac{3\pi^4}{45}v^4. \end{eqnarray} These expressions contain the large-volume contributions corresponding to the standard BBR (first term) and corrections. In the other limit of small $v$, we have to use $g(v) = - v^3 \zeta(3)/2 $ and get \begin{eqnarray} \label{10} \phi_{o} &=& \frac{\pi^2 \hbar c}{a^3}[-\frac{1}{720} -\frac{\zeta(3) }{2} v^3],\;\;\; \sigma_{o} = \frac{\pi}{a^2} \frac{3 \zeta(3) }{2} v^2, \\ \label{11} p_{o}&=& \frac{\pi^2 \hbar c}{a^4} [-\frac{1}{240}],\;\;\;\ e_{o}= \frac{\pi^2 \hbar c}{a^3} [-\frac{1}{720} +\zeta(3) v^3]. \end{eqnarray} In this case the contributions of the zero point energy dominate. It is known that nondegenerate vacuum states do not contribute to the entropy, which indeed vanishes at $T=0$.\\ Let us now consider isentropic processes. This means that we fix the values of the entropy for the internal region (\ref{5}) during the complete process. 
Technically we express this fixed value according to the value of the variable $v$ either through the approximation (\ref{8}) or (\ref{10}). Large distances and/or high temperatures lead to large values of $v$, so we have to use $\sigma_{as}$. Constant entropy means \begin{eqnarray} \label{12} \sigma = {\rm const.} = \sigma_{as} = \frac{4\pi^2k^3}{45(\hbar c)^3} a T^3 +\frac{\zeta(3)}{8\pi} \frac{1}{a^2 }. \end{eqnarray} Asymptotically this is the standard BBR relation $S = L^2 \sigma_{as} = {\rm const.} \times V T^3 $, here valid for large $T $ and $V$. If we now consider smaller values of $a$, then, because of eq.(\ref{5}), also $-\partial_v g(v)$ takes smaller values. It is possible to prove \cite{MR} the inequalities $ g <0 $, $ \partial_v g(v) <0 $ and $ (\partial_v)^2 g(v) <0 $. This monotonic behaviour of $\partial_v g(v)$ leads to the conclusion that also the corresponding values of $v$ become smaller. Consequently, we have to apply the other representation (\ref{10}) for small $v$ and obtain \begin{eqnarray} \label{13} \sigma =\sigma_{as}=\sigma_{o} =\frac{k^2}{\hbar^2 c^2 \pi} \frac{3 \zeta(3) }{2} T^2. \end{eqnarray} This means that for $ a \rightarrow 0 $ the temperature does not tend to infinity, but approaches the finite value \begin{eqnarray} \label{14} T = \left(\sigma \,\, 2 \hbar^2 c^2 \pi/(3 \zeta(3) k^2) \right)^{1/2}. \end{eqnarray} This is in contrast to the expectation: if we apply the standard expression of BBR, fixed entropy implies $VT^3 = {\rm const.} $, so that the temperature tends to infinity for vanishing volume. However this standard expression for BBR, derived for a continuous frequency spectrum, is not valid for small distances. The reduction of the degrees of freedom, i.e. the transition from a continuous frequency spectrum to a discrete spectrum, is the reason for our result. 
\section{Equilibrium Points of the Casimir Pressure} \noindent The Casimir pressure results from the contributions of the internal and the external regions acting on the right movable plate. \begin{eqnarray} \label{15} P(a,T,T') = P_{ext}(T') + P_{int}(a,T) = \frac{\pi^2 \hbar c}{a^4}p(v) +\frac{\pi^2 k^4}{45 (\hbar c)^3}(T^4 - {T'}^4), \end{eqnarray} where \begin{eqnarray} p(v) = -\frac{1}{4\pi^2} v[\zeta(3) +(2 - v\partial_v)k(4\pi^2v)] =-\frac{1}{240} +3g(v) - v\partial_{v}g(v) -\frac{\pi^4}{45} v^4.\nonumber \end{eqnarray} Usually one considers the case $T=T'$, so that the Casimir pressure is prescribed by $p(v)$ alone. It is known that $P(a,T,T'=T)$ is a negative but monotonically rising function from $-\infty$ (for $ a\rightarrow 0 $) to $ \; 0\; $ (for $a\rightarrow \infty $). It is clear that the addition of a positive pressure $ \frac{\pi^2 k^4}{45(\hbar c)^3}(T^4 - {T'}^4) $ for $T>T'$ stops the Casimir attraction at a finite value of $ v$. The question is whether this equilibrium point is stable or not. The answer follows from the monotonically rising behaviour of the standard Casimir pressure. \begin{eqnarray} \label{16} \frac{d}{da}P(a,T,T') =\frac{d}{da}P(a,T,T'=T) >0. \end{eqnarray} Consequently this equilibrium point is unstable (see also \cite{MR}). \\ Next we consider the space between the two plates not for fixed temperature but as a thermodynamically closed system with fixed entropy. In the external region we assume again a fixed temperature $T'$. To solve this problem in principle, it is sufficient to discuss our system for large $v$ (by large $v$ we mean such values of $v$ for which the asymptotic approximations (\ref{8}), (\ref{9}) are valid; this region starts at $ v> 0.2 $ ). 
Using our asymptotic formulae (\ref{8}),(\ref{9}) we write the Casimir pressure as \begin{eqnarray} \label{17} P(a,v,T') = \frac{\pi^2 \hbar c}{a^4}[\frac{\pi^4}{45}v^4 -\frac{\zeta(3)}{4\pi^2 }v - \frac{\pi^4}{45}{v'}^4 ], \end{eqnarray} with $v' = aT' k/(\hbar c \pi)$ where $v$ has to be determined from the condition $ \sigma_{as}=\sigma = {\rm const.} $ or \begin{eqnarray} \label{18} \pi v^3 = [ a^2 \sigma - \zeta(3)/(8\pi)] 45/(4\pi^4). \end{eqnarray} Then we may write \begin{eqnarray} \label{19} P(a,v,T') = \frac{\pi^2 \hbar c}{a^4}[\frac{\sigma a^2} {4\pi} -\frac{9\zeta(3)}{32\pi^2 }] \{\frac{45}{4\pi^4}(\frac{\sigma a^2}{4\pi} - \frac{\zeta(3)}{8\pi^2}) \}^{3/2} -\frac{\pi^2 \hbar c}{a^4} \frac{\pi^4}{45}{v'}^4. \end{eqnarray} At first we consider the case $T'=0$. We look for the possible equilibrium points $P(a,v,T'=0) =0$. The result is $ v^3 = 45\zeta(3)/(4 \pi^6)$. This corresponds to $v=0.24$. For this value of $v$ the used approximation is not very good, but acceptable. A complete numerical estimate \cite{MR} gives the same value. Now we express the temperature $ T$ included in $v$ with the help of the equation for isentropic motions (\ref{18}) and obtain $ a^2 = 9\zeta(3)/(8 \pi \sigma)$. The instability of this point can be directly seen by looking at \begin{eqnarray} \label{20} \frac{d}{da}P(a,T,T'=0) &=& - \frac{4}{a} P(a,T,T'=0) + \frac{\pi^2 \hbar c}{a^4} [\frac{4 \pi^4}{45}v^3 -\frac{\zeta(3)}{4\pi^2 }] (\frac{dv}{da })_{\sigma} |_{P=0} \nonumber\\ &= & \frac{\pi^2 \hbar c}{a^4}\frac{3\zeta(3)}{4\pi^2} (\frac{dv}{da })_{\sigma}. \end{eqnarray} It is intuitively clear that $(\frac{dv}{da })_{\sigma}$ is positive; an explicit proof is given in \cite{MR}. So it is clear, that this point is unstable as in the isothermal case. If we consider, in eq.(\ref{17}), the variable $v= aTk/(\hbar c \pi) $ at fixed $T$, there is no further equilibrium point. 
This result for isothermal processes is, however, not valid for isentropic processes. In this case we obtain according to eq.(\ref{19}) a second trivial equilibrium point at $a \rightarrow \infty $ for vanishing external temperature ($v'=0$). Between both zeroes we have one maximum. So we conclude: For isentropic processes there must be two equilibrium points; the left one is unstable, the right one at $ a \rightarrow \infty $ corresponds to a vanishing derivative. If we now add a not too high external pressure with the help of an external temperature $T'$, then this second equilibrium point---present for isentropic processes---becomes stable. So, in principle we may observe oscillations at the position of the second equilibrium point. \section*{Acknowledgments} We would like to thank C. B. Lang and N. Pucker for their constant support and K. Scharnhorst, G. Barton and P. Kocevar for discussions on the present topic. \section*{References} \begin{thebibliography}{99} \bibitem{CAS} H. B. G. Casimir, {\em Proc. Kon. Ned. Akad. Wetenschap.} {\bf 51} 793 (1948). \bibitem{EXP} M. J. Sparnay, {\em Physica} {\bf 24},751 (1958); S. K. Lamoreaux, {\em Phys. Rev. Lett.} {\bf 78} 5 (1997); U. Mohideen, A. Roy, Preprint {\bf physics/9805038}. \bibitem{MF} M. Fierz, {\em Helv. Phys. Acta} {\bf 33} , 855 (1960); J. Mehra, {\em Physica} {\bf 37} 145 (1967). \bibitem{BML} L. S. Brown, and G. J. MacLay, {\em Phys. Rev} {\bf 184} 1272 (1969); J. Schwinger, L.L. DeRaad, K.A. Milton, {\em Annals of Physics (NY)} {\bf 115} 1 (1978); K. Scharnhorst, D. Robaschik, and E. Wieczorek, {\em Annalen d. Physik (Leipzig)} {\bf 44} 351 (1987); D. Robaschik, K. Scharnhorst, and E. Wieczorek, {\em Annals of Physics (NY)} {\bf 174} 401 (1987). \bibitem{GREIN} G. Plunien, B. M\"uller, and W. Greiner, {\em Phys. Reports} {\bf 134} 87 (1986); G. Barton, and N. S. J. Fawcett, {\em Phys. Reports} {\bf 170} 1 (1988); V. M. Mostepanenko, N. N. 
Trunov, {\em The Casimir Effect and its Application, Oxford, 1997}. \bibitem{BARTO} G. Barton, J.Phys. A : {\em Math. Gen.} {\bf 24} 5533 (1991); M. Revzen, R. Opher, and A. Mann, J. {\em Physics A : Math. Gen.} {\bf 30} 7783 (1997), {\em Europhys. Lett.} {\bf 38} 245 (1997); F. Ravndal, and D. Tollefsen {\em Phys. Rev.} {\bf 40 } 4191 (1989). \bibitem{MR} H. Mitter, D. Robaschik, to be published. \end{thebibliography} \end{document}
\begin{document} \title{Resilient Optimal Estimation Using Measurement Prior} \author{Olugbenga Moses Anubi,~\IEEEmembership{Member,~IEEE,} Charalambos Konstantinou,~\IEEEmembership{Member,~IEEE,} and~Rodney~Roberts,~\IEEEmembership{Senior Member,~IEEE} \thanks{The authors are with the Department of Electrical and Computer Engineering, FAMU-FSU College of Engineering, Tallahassee FL.} \thanks{Emails : [email protected]*, [email protected], [email protected].} } \markboth{} {Shell \MakeLowercase{Anubi \textit{et al.}}: Resilient Optimal Estimation Using Measurement Prior} \maketitle \begin{abstract} This paper considers the problem of optimal estimation for a linear system with the measurement vector subject to arbitrary corruption by an adversarial agent. This problem is relevant to cyber-physical systems where, due to the tight coupling of physics, communication and computation, a malicious agent is able to exploit multiple inherent vulnerabilities in order to inject stealthy signals into the measurement process. These malicious signals are calculated to serve the attack objectives of causing false situation awareness and/or triggering a sequence of cascading effects leading to an ultimate system failure. We assume that the attacker can only compromise a portion, but not all, of the measurement channels simultaneously. However, once a channel is compromised, the attacker is free to modify the corresponding measurement arbitrarily. Consequently, the problem is formulated as a compressive sensing problem with an additional prior-information model. The prior-information considered is a set inclusion constraint on the measurement vector. It is shown that if the prior set satisfies certain conditions, the resulting recovery error bound is much stronger. The approach is applied to the problem of resilient state estimation of a power system. For this application, Gaussian Process is used to build a prior generative probabilistic regression model from historical data. 
The resulting Gaussian Process Regression model recursively maps energy market information to \emph{iid} Gaussian distributions on the relevant system measurements. An optimization-based resilient state estimator is then developed using a re-weighted $\ell_1$-minimization scheme. The developed algorithm is evaluated through a numerical simulation example of the IEEE 14-bus system mapped to the New York Independent System Operator (NYISO) grid data. \end{abstract} \begin{IEEEkeywords} Resilient estimation, Compressive Sensing, Auxiliary models. \end{IEEEkeywords} \IEEEpeerreviewmaketitle \section{Notation}\label{s:notation} The following notions and conventions are employed throughout the paper: $\mathbb{N}$ denotes the set of natural numbers. $\mathbb{R},\mathbb{R}^m,\mathbb{R}^{m\times n}$ denote the space of real numbers, real vectors of length $m$ and real matrices of $m$ rows and $n$ columns respectively. $\mathbb{R}_+$ denotes positive real numbers. $X^\top$ denotes the transpose of the quantity $X$. Normal-face lower-case letters ($x\in\mathbb{R}$) are used to represent real scalars, bold-face lower-case letter ($\mathbf{x}\in\mathbb{R}^m$) represents vectors, normal-face upper case ($X\in\mathbb{R}^{m\times n}$) represents matrices, while calligraphic upper case letters (e.g $\mathcal{T}$) represent sets. Let $\mathcal{T}\subseteq\{1,\hdots,m\}$ then, for a matrix $X\in\mathbb{R}^{m\times n}$, $X_\mathcal{T} \in\mathbb{R}^{\abs{\mathcal{T}}\times n}$ and $X^{\mathcal{T}} \in\mathbb{R}^{m\times \abs{\mathcal{T}}}$ are the sub-matrices obtained by extracting the rows, and columns respectively, of $X$ corresponding to the indices in $\mathcal{T}$. $\mathcal{N}(X)$, $\mathcal{R}(X)$ and $\overline{\sigma}(X)$ denote the null space, range space and the largest singular value of the matrix $X$ respectively. For a vector $\mathbf{x}$, $\mathbf{x}_i$ denotes its $i$th element. 
The support of a vector $\mathbf{x}\in\mathbb{R}^m$ is denoted by $\supp(\mathbf{x})\triangleq\left\{i\hspace{1mm}:\hspace{1mm}\mathbf{x}_i\neq0\right\}$, with $\abs{\supp(\mathbf{x})}\le m$ being the number of nonzero elements of $\mathbf{x}$. $\mathcal{S}_k^m\triangleq\left\{\mathbf{x}\in\mathbb{R}^m\setminus \{0\}\hspace{1mm}:\hspace{1mm}\abs{\supp(\mathbf{x})}\le k\right\}$ denotes the set of all nonzero $k$-sparse vectors. The superscript $m$ is dropped whenever the dimension is clear from context. The $p$-norm of a vector $\mathbf{x}\in\mathbb{R}^m$ is defined as $\norm{\mathbf{x}}_p\triangleq\left(\sum\limits_{i=1}^m\abs{\mathbf{x}_i}^p\right)^{\frac{1}{p}}$. Given a vector $\mathbf{x}\in\mathbb{R}^m$, the following inequality about vector norms \begin{align*} \norm{\mathbf{x}}_q\le\norm{\mathbf{x}}_p\le m^{\left(\frac{1}{p}-\frac{1}{q}\right)}\norm{\mathbf{x}}_q,\hspace{2mm} 0<p\le q\le\infty \end{align*} is useful for some results down the line. Given a positive scalar $\delta\in\mathbb{R}_+$, a saturation function $\textsf{sat}_\delta:\mathbb{R}\mapsto[-\delta,\hspace{1mm}\delta]$ is given by \begin{align*} \textsf{sat}_\delta(x) = \left\{\begin{array}{rcl}-\delta&\text{if}&x<-\delta\\x&\text{if}&\abs{x}\le\delta\\\delta&\text{if}&x>\delta\end{array}\right. \end{align*} A best $k$th term approximation of a vector $\mathbf{e}\in\mathbb{R}^m$ is denoted by $\mathbf{e}[k] \triangleq\min\limits_{\norm{\mathbf{f}}_0=k}\norm{\mathbf{e}-\mathbf{f}}_1$ . \iffalse By $Q\succeq0$, it is meant that $Q$ is a positive semi-definite symmetric matrix, i.e $\mathbf{x}^\top Q\mathbf{x}\ge0\hspace{1mm}\forall\mathbf{x}\neq0$ and $Q\succ0$ denotes positive definiteness. 
Given $Q\succ0$, the $Q$-weighted norm is defined as $\norm{\mathbf{x}}_Q\triangleq\mathbf{x}^\top Q\mathbf{x}.$ \fi \section{Introduction} Cyber-physical systems (CPS) refer to a generation of systems with tightly-integrated communication, computational and physical capabilities that can interact with humans through many new modalities \cite{gill2008vision,baheti2011cyber}. Such systems are fundamental to the operation of various safety-critical applications (e.g smart grid, connected \& autonomous vehicles (CAV), etc). Their failure can cause irreversible damage to the underlying physical system as well as to the humans who operate it or depend on it. For example, critical infrastructure domains are composed of a multitude of CPS of various scales and at all levels. The control of CPS is enabled by the proliferation of sensing devices which allow geographically isolated physical plants to be remotely monitored. Field embedded devices, typically called remote terminal units (RTUs), deployed in large-scale, geographically-sparse CPS collect measurements related to the physical process. The measured data are sent via supervisory control and data acquisition (SCADA) systems to central master stations. At the central site, the information from RTUs is utilized to carry out necessary analysis and control, e.g., determine if a leak has occurred and the level of criticality. A critical function at the management system level is to estimate the state variables of the CPS. These state estimates are then used to adjust the control of the physical space. In power systems, for instance, once the operating state is known, estimates are utilized for energy management system application functions such as optimal flow control, automatic generation control, and contingency analysis. The results of such functions are used in order to take preventive and corrective actions as well as ensure secure and reliable operation of the CPS. 
Due to the significance of state estimation routines, it is of paramount importance that such algorithms incorporate proper mechanisms for operating resiliently in the event of malicious events \cite{mclaughlin2016cybersecurity}. Sophisticated attackers who are able to gain unauthorized access to the communication network of a CPS can modify the transmitted measurements to the central control and estimation stations \cite{liu2011false}, thereby causing a false situation awareness or triggering a cascade of events ultimately leading to a system failure. Furthermore, adversaries can hack into the RTUs or even infiltrate secondary channels of the supply chain in order to distort the measurements \cite{konstantinou2016case}. Existing work on the topic has shown that this class of \emph{false data injection attacks (FDIAs)} can bypass \emph{bad data detection (BDD)} schemes and inject errors in the resulting state estimation without being detected \cite{liu2011false, liang2017review, deng2017false, liang20172015}. Such detection methods are residual schemes traditionally based on the largest normalized residual between the obtained measurements and the predicted values from the system estimated states \cite{wu2018bad}. The impact of FDIAs, on power systems for instance, could skew the electricity markets in favor of the attacker or even result in masking the outage of lines and removing the attacked RTUs from the network \cite{liu2015impacts, kosut2010limiting}. Existing work on addressing the vulnerability of FDIAs typically rely on protecting a set of devices (and thus a set of measurements) or verifying each state variable independently. The high computational and deployment cost, as well as the associated risks of these methods, have hampered their feasibility for use in practical real-time systems \cite{liang2017review}. 
Moreover, estimation techniques developed for specific system configurations \cite{ashok2018online} often exhibit poor resiliency performance, in general, against FDIAs. Therefore, more computationally feasible, adaptive, and real-time implementable resiliency strategies are needed. The design of such estimators needs to consider adverse settings in order to reliably estimate CPS state variables. Consequently, the attack-resilient state estimation has attracted significant attention in recent literature \cite{cardenas2008research}. While there are numerous works on resilient state estimation, we focus on the ones that are optimization-based -- since our work ultimately depends on solving a convex program. One of the earliest works employing optimization \cite{fawzi2014secure} formulated the resilient estimation problem for an LTI system as a compressive sensing (CS) problem and used standard results~\cite{candes2005decoding} from the CS community to create a convex relaxation of the resulting optimization problem. Following that, a number of papers have either modified or extended the framework to include measurement noise~\cite{pajic2015attack,yong2015resilient}, time-varying attack support \cite{hu2016secure}, robustness considerations \cite{pajic2014robustness} and the distributed case \cite{kekatos2012distributed}. There are also numerous applications including but not limited to: power systems \cite{deng2017false}, UAVs \cite{fiore2017secure,hu2016secure}, energy delivery systems~\cite{mestha2017cyber}, autonomous vehicles and networked systems. In this paper, we build on our previous works on enhancing the \emph{recoverability} of resilient estimators by incorporating prior information, either in the form of attack-support estimation~\cite{anubi2018robust} or through a more general set inclusion constraint~\cite{anubi2019enhanced}. 
Here, we provide theoretical guarantees of how a certain \emph{boundedness} property of the prior information set can improve the reconstruction error bound of the resulting resilient estimator. Unlike the previous works \cite{fawzi2014secure,hu2016secure,chang2018secure} which depend on the Restricted Isometry Property (RIP) \cite{candes2005decoding}, we have derived our results using a related Nullspace Property (NSP) \cite{cohen2009compressed}. The reason for this is given in subsequent sections. Moreover, a numerical example is given in which the developed estimator is applied to the NYISO transmission grid. The prior information generates likelihood-level ellipsoid constraints on the ``true'' measurement vector via Gaussian Process Regression (GPR) mean and covariance functions of the locational marginal bus prices. This example demonstrates tremendous improvement in resiliency by using readily available auxiliary measurements to corroborate the state estimation process using the proposed scheme. The remainder of the paper is organized as follows: in Section~\ref{s:background} we provide necessary definitions and background for this work. Section~\ref{s:methodology} presents the formulation of the estimation problem as well as our proposed solution algorithm for the enhanced state estimator. Experimental details and simulation results are described in Section~\ref{s:experiments}. Our concluding remarks are discussed in Section~\ref{s:conclusions}. \section{Background}\label{s:background} Consider a linear measurement model of the form: \begin{align}\label{eqn:meas_model} \mathbf{y} = H\mathbf{x} + \mathbf{e}, \end{align} where $H\in\mathbb{R}^{m\times n}$ is a measurement/coding matrix ($m>n$) and $\mathbf{y}\in\mathbb{R}^m$ is a measurement vector corrupted by an arbitrary unknown but sparse error vector $\mathbf{e}\in\mathbb{R}^m$. By sparsity, we mean that $\norm{\mathbf{e}}_0\le q$, for a given $q\le m$. 
In the classical error correction problem \cite{berrou1993near,elias1954error}, the objective is to recover the input vector $\mathbf{x}\in\mathbb{R}^n$, given the corrupt measurement $\mathbf{y}$ and the matrix $H\in\mathbb{R}^{m\times n}$. Consequently, an optimal decoder $\mathcal{D}_0:\mathbb{R}^m\mapsto\mathbb{R}^n$ is considered, of the form: \begin{align}\label{eqn:opt_dec0_1} \mathcal{D}_0(\mathbf{y}) = \argmin\limits_{\mathbf{x}\in\mathbb{R}^n}{\norm{\mathbf{y}-H\mathbf{x}}_0}. \end{align} Evidently, the existence of such a decoder is equivalent to the uniqueness of the underlying index minimization problem. Suppose the coding matrix $H$ is full rank. Let \begin{align} H = QR = \left[\begin{array}{cc}Q_1&Q_2\end{array}\right]\left[\begin{array}{c}R_1\\0\end{array}\right], \end{align} be the QR decomposition of $H$, where $Q\in\mathbb{R}^{m\times m}$ is orthogonal, $Q_1\in\mathbb{R}^{m\times n}$, $Q_2\in\mathbb{R}^{m\times(m-n)}$, and $R_1\in\mathbb{R}^{n\times n}$ is a full-rank upper triangular matrix. Multiplying the left and right hand sides of \eqref{eqn:meas_model} by $Q_2^\top$, the transformed measurement model becomes: \begin{align} Q_2^\top\mathbf{y} = Q_2^\top\mathbf{e}. \end{align} Thus, the optimal decoder $\mathcal{D}_0:\mathbb{R}^m\mapsto\mathbb{R}^n$ is given by \begin{align}\label{eqn:opt_dec0_2} \mathcal{D}_0(\mathbf{y}) = R_1^{-1}Q_1^\top\left(\mathbf{y} - \argmin\limits_{Q_2^\top\left(\mathbf{y}-\mathbf{e}\right)=0}{\norm{\mathbf{e}}_0}\right), \end{align} which is equivalently related to the compressive sensing problem \cite{candes2005decoding}: \begin{align}\label{eqn:comp_sens} \Minimize\limits_{\mathbf{e}}{\left\|\mathbf{e}\right\|_0}\hspace{2mm}\SubjectTo\hspace{2mm}Q_2^\top(\mathbf{y}-\mathbf{e})=0. 
\end{align} Subsequently, we will consider the compressive sensing problem of the form in \eqref{eqn:comp_sens} for analysis purposes, and restrict ourselves to the decoder of the form in \eqref{eqn:opt_dec0_1} (or \eqref{eqn:opt_dec0_2}) for algorithm development. The obvious question that arises, then, is to determine if there is a unique minimizer of the above index-minimizing optimization problem. The following proposition, adapted from \cite{hayden2016sparse}, gives the condition for the existence of a unique solution to the optimization problem in \eqref{eqn:comp_sens}. \begin{proposition}[Uniqueness]\label{thm:prop_uniquess} Given $k\in\mathbb{N}$, if every $2k$ columns of $Q_2^\top$ are linearly independent and there exists at least one $p\le k$ for which $\mathcal{S}_p\cap\left(\mathcal{N}(Q_2^\top)+\mathbf{y}\right)\neq \varnothing$, then the optimization problem in \eqref{eqn:comp_sens} has a unique solution. \end{proposition} \begin{proof} It suffices to show that, for all $p\le k$, the feasible region $\mathcal{R}_p\triangleq \left\{\mathbf{e}\in\mathbb{R}^m|\norm{\mathbf{e}}_0=p, Q_2^\top\left(\mathbf{e}-\mathbf{y}\right)=0\right\}=\mathcal{S}_p\cap\left(\mathcal{N}(Q_2^\top)+\mathbf{y}\right)$ is a singleton. If this is true, then the result follows from the existence of at least one feasible point for some $p\le k$. To see that $\mathcal{R}_p$ is a singleton, let $\mathbf{e}_1,\mathbf{e}_2\in\mathcal{R}_p$, $\mathbf{e}_1\neq\mathbf{e}_2$, then $Q_2^\top\left(\mathbf{e}_1-\mathbf{e}_2\right)=0$. Since every $2k$ columns of $Q_2^\top$ are linearly independent, the last equation is true only if $\norm{\mathbf{e}_1-\mathbf{e}_2}_0>2k\Rightarrow \norm{\mathbf{e}_1}_0+\norm{\mathbf{e}_2}_0>2k\Rightarrow p>k$, a contradiction. 
Thus, $\mathbf{e}_1=\mathbf{e}_2$, implying that $\abs{\mathcal{R}_p}=1 \hspace{2mm} \forall\hspace{2mm} p\le k$. \end{proof} \begin{corollary} If there exists $p \le m$ such that $\mathcal{S}_{2p}\cap\mathcal{N}(Q_2^\top)=\varnothing$ and $\mathcal{S}_p\cap\left(\mathcal{N}(Q_2^\top)+\mathbf{y}\right)\neq \varnothing$, then the optimization problem in \eqref{eqn:comp_sens} has a unique solution. \end{corollary} \begin{proof} The statement ``every $2k$ columns of $Q_2^\top$ are linearly independent'' implies that $\mathcal{S}_{2p}\cap\mathcal{N}(Q_2^\top)=\varnothing$ for $p\le k$. Thus the result follows from Proposition~\ref{thm:prop_uniquess}. \end{proof} The optimization problem in \eqref{eqn:comp_sens}, in most instances, does not lend itself to a solution in polynomial time due to the nonconvexity associated with the index-minimization objective. As a result, it is often replaced with its convex neighbor: \begin{align}\label{eqn:comp_sens_L1} \Minimize\limits_{\mathbf{e}}{\left\|\mathbf{e}\right\|_1\hspace{2mm}\SubjectTo\hspace{2mm}Q_2^\top\left(\mathbf{y}-\mathbf{e}\right)=0}. \end{align} Naturally, questions arise about how well this convex relaxation recovers the solution to the original problem, assuming a unique solution exists. For instance, under what condition(s) will the solution of \eqref{eqn:comp_sens_L1} recover the solution of the original problem \eqref{eqn:comp_sens}? This property, called \emph{recoverability}, has been studied extensively in compressive sensing literature, largely under the umbrella of either the so-called \emph{Restricted Isometry Property} (RIP) or the \emph{Null Space Property} (NSP). While other notions have emerged in recent years, the RIP and NSP are the two most common conditions that one imposes on $Q_2^\top$ in order to guarantee recoverability. In what follows, we outline some RIP and NSP-based results that are relevant to this work. 
\subsection{RIP-based results} The RIP was introduced in \cite{candes2005decoding} to establish stable recoverability for the relaxed problem in \eqref{eqn:comp_sens_L1}. Ever since, there have been many other follow-up results and refinements to the original guarantees published by Cand\`es et al. In what follows, we provide a tiny portion of existing results, slightly modified or built upon in some cases, that are relevant to this work. \begin{definition}[RIP \cite{candes2005decoding}] A matrix $A$ has the RIP of sparsity $k$ if there exists $0<\delta<1$ such that \begin{align} \left(1-\delta\right)\norm{\mathbf{x}}_2^2\le\norm{A\mathbf{x}}_2^2\le\left(1+\delta\right)\norm{\mathbf{x}}_2^2 \end{align} for all $\mathbf{x}\in\mathcal{S}_k$. Moreover, the smallest $\delta$ for which the above inequality holds is called the \emph{restricted isometry constant}, and denoted as $\delta_k(A)$. \end{definition} The above definition essentially requires that every set of columns with cardinality less than or equal to $k$ behaves like an orthonormal system. The following theorem lists the recovery error due to the relaxed convex program above. \begin{theorem}[\cite{candes2005decoding},\cite{cai2013sparse}] Let $\mathbf{e}$ be a sparse vector satisfying $Q_2^\top\left(\mathbf{y}-\mathbf{e}\right)=0$ and $\hat{\mathbf{e}}$ be the solution of \eqref{eqn:comp_sens_L1}. If $\displaystyle \delta_{2k}(Q_2^\top)<\frac{1}{\sqrt{2}}$, then \begin{align} \norm{\hat{\mathbf{e}}-\mathbf{e}}_2\le \frac{2}{\sqrt{k}}\left(\frac{\delta_{2k}+\sqrt{\delta_{2k}\left(\frac{1}{\sqrt{2}}-\delta_{2k}\right)}}{\sqrt{2}\left(\frac{1}{\sqrt{2}}-\delta_{2k}\right)}+1\right)\norm{\mathbf{e}-\mathbf{e}[k]}_1, \end{align} where $\mathbf{e}[k]$ is the best $k$-term approximation of $\mathbf{e}$. \end{theorem} \begin{remark} If $\mathbf{e}\in\mathcal{S}_k$, then $\hat{\mathbf{e}}=\mathbf{e}$. 
Thus, if $\displaystyle \delta_{2k}(Q_2^\top)<\frac{1}{\sqrt{2}}$, the relaxed program in \eqref{eqn:comp_sens_L1} will recover any $k$-sparse vector $\mathbf{e}\in\mathcal{S}_k$ exactly! \end{remark} \begin{remark} While the RIP provides very nice theoretical guarantees, computing/numerically verifying the restricted isometry constant is NP-hard. However, for a large class of matrices, the RIP condition holds with overwhelming probability \cite{candes2006stable}. \end{remark} For any invertible matrix $U$, the matrix $UA$ shares the same nullspace as $A$ but can have dramatically different RIP constants. This, at first glance, might seem like a major drawback of RIP-based analyses, because the equivalent programs $\left\{\Minimize\limits_{\mathbf{x}}{\left\|\mathbf{x}\right\|_1}\hspace{2mm}\SubjectTo\hspace{2mm}A\mathbf{x}=\mathbf{b}\right\}$ and $\left\{\Minimize\limits_{\mathbf{x}}{\left\|\mathbf{x}\right\|_1}\hspace{2mm}\SubjectTo\hspace{2mm}UA\mathbf{x}=U\mathbf{b}\right\}$ may end up having totally different RIP-based recoverability properties. To overcome this situation, many researchers have derived their results using subspace-based analysis, which generally \emph{mods} out such transformations and provides a more uniform result. Next, we examine the \emph{nullspace} property, which has been widely used for this purpose. \subsection{NSP-based results} The term \emph{nullspace property} originates from \cite{cohen2009compressed}. It gives necessary and sufficient conditions for recoverability. Like RIP, numerical verification of the NSP is combinatorial and NP-hard. 
\begin{definition}[$\textsf{NSP}_q$,\cite{chen2012stability}] A matrix $A$ is said to satisfy the nullspace property with parameters $\gamma\in\mathbb{R}_+$ and $k\in\mathbb{N}$, denoted by $A\in\textsf{NSP}_q(k,\gamma)$, if every nonzero $\mathbf{e}\in\mathcal{N}(A)$ satisfies \begin{align*} \norm{\mathbf{e}_\mathcal{T}}_q<\gamma\norm{\mathbf{e}_{\mathcal{T}^c}}_q \end{align*} for all $\mathcal{T}\subset \left\{1,\hdots,n\right\}$ with $\abs{\mathcal{T}}\le k$. \end{definition} The following results list some recoverability results based on the NSP. \begin{theorem}[\cite{donoho2001uncertainty,gribonval2003sparse}]\label{thm:eqv_prog} The convex program in \eqref{eqn:comp_sens_L1} uniquely recovers all $k$-sparse vector $\mathbf{e}\in\mathcal{S}_k$ if and only if $Q_2^\top\in\textsf{NSP}_1(k,1)$. \end{theorem} \begin{theorem}\label{thm:NSP} Let $\mathbf{e}\in\mathbb{R}^m$ be a vector satisfying $Q_2^\top\left(\mathbf{y}-\mathbf{e}\right)=0$ and $\hat{\mathbf{e}}$ be the solution of \eqref{eqn:comp_sens_L1}. If $Q_2^\top\in\textsf{NSP}_q(k,\gamma)$ for some $0<\gamma<1$ and $q>1$, then \begin{align} \norm{\hat{\mathbf{e}}-\mathbf{e}}_1\le \frac{m}{\sqrt{2}}\left(\frac{4\left(1+\gamma\right)}{m\left(1-\gamma\right)}\right)^{\frac{1}{q}}\norm{\mathbf{e}-\mathbf{e}[k]}_1, \end{align} where $\mathbf{e}[k]$ is a best $k$-term approximation of $\mathbf{e}$. \end{theorem} \begin{proof} From the results in \cite{chen2012stability} (Theorem III.4.1), the following inequality holds: \begin{align*} \norm{\hat{\mathbf{e}}-\mathbf{e}}_q\le \frac{1}{\sqrt{2}}\left(\frac{4\left(1+\gamma\right)}{\left(1-\gamma\right)}\right)^{\frac{1}{q}}\norm{\mathbf{e}-\mathbf{e}[k]}_q. \end{align*} The result follows by using the following well-known norm inequality for $q>1$: \begin{align*} \norm{\mathbf{x}}_q\le\norm{\mathbf{x}}_1\le m^{1-\frac{1}{q}}\norm{\mathbf{x}}_q. 
\end{align*} \end{proof} \begin{remark} This result demonstrates how the choice of $q$ in the parameterized \emph{nullspace property} $\textsf{NSP}_q$ can be used to modify the error bound. It is also worth noting that the $\textsf{NSP}_q$ may be quite different for different values of $q$. A nice entity relationship diagram for $\textsf{RIP}$, $\textsf{NSP}$ and coherence is also given in Figure III.2 of \cite{chen2012stability}. It would be nice to see the resulting error bounds change with these quantities laid out on the same diagram, although not pursued for this paper. \end{remark} \begin{remark} It is noteworthy that as $q\rightarrow\infty$, the upper bound in Theorem~\ref{thm:NSP} approaches the uniform bound \begin{align} \norm{\hat{\mathbf{e}}-\mathbf{e}}_1\le \frac{m}{\sqrt{2}}\norm{\mathbf{e}-\mathbf{e}[k]}_1. \end{align} \end{remark} \begin{theorem}[maximum correctable errors] Suppose that the nonzero vector $\mathbf{e}\in\mathbb{R}^m$ satisfies \begin{align*} \norm{\mathbf{e}_\mathcal{T}}_q<\gamma\norm{\mathbf{e}_{\mathcal{T}^c}}_q,\hspace{2mm}\gamma\in(0,1), q>1 \end{align*} for all $\mathcal{T}\subset\left\{1,\hdots,m\right\}$ satisfying $\abs{\mathcal{T}}\le k$. Then \begin{align} k<\frac{\gamma^q}{1+\gamma^q}m. \end{align} \end{theorem} \begin{proof} Suppose, without loss of generality, that $\abs{\mathbf{e}_1}\ge\hdots\ge\abs{\mathbf{e}_m}$. Then, \begin{align*} \sum\limits_{i=1}^{\abs{\mathcal{T}}}{\abs{\mathbf{e}_i}^q}<\gamma^q\sum\limits_{i=\abs{\mathcal{T}}+1}^{m}{\abs{\mathbf{e}_i}^q}. \end{align*} Observe that $\abs{\mathbf{e}_{\abs{\mathcal{T}}}}>0$, otherwise the right hand side of the above inequality would be zero identically and the strict inequality in the hypothesis could not hold. 
Next, dividing through by $\abs{\mathbf{e}_{\abs{\mathcal{T}}}}^q$ and observing that \begin{align*} \frac{\abs{\mathbf{e}_i}}{\abs{\mathbf{e}_{\abs{\mathcal{T}}}}}\left\{\begin{array}{lc}\ge1&\text{ if }i\le\abs{\mathcal{T}}\\\\\le1&\text{ if }i>\abs{\mathcal{T}}\end{array}\right.. \end{align*} Thus, \begin{align*} \abs{\mathcal{T}}\le\sum\limits_{i=1}^{\abs{\mathcal{T}}}{\left(\frac{\abs{\mathbf{e}_i}}{\abs{\mathbf{e}_{\abs{\mathcal{T}}}}}\right)^q}<\gamma^q\sum\limits_{i=\abs{\mathcal{T}}+1}^{m}{\left(\frac{\abs{\mathbf{e}_i}}{\abs{\mathbf{e}_{\abs{\mathcal{T}}}}}\right)^q}\le\gamma^q\left(m-\abs{\mathcal{T}}\right). \end{align*} Rearranging the terms of $\abs{\mathcal{T}}<\gamma^q\left(m-\abs{\mathcal{T}}\right)$ gives \begin{align*} \abs{\mathcal{T}}<\frac{\gamma^q}{1+\gamma^q}m, \end{align*} which gives the desired result for all $\abs{\mathcal{T}}\le k$. \end{proof} \begin{remark} For a given $k$, the result also gives a lower bound on admissible $\gamma$ as \begin{align*} \gamma>\left(\frac{k}{m-k}\right)^{\frac{1}{q}}. \end{align*} \end{remark} The next result gives numerical sufficient conditions for $Q_2^\top\in\textsf{NSP}_1(k,1)$ \begin{theorem} Given the unitary matrix $Q\in\mathbb{R}^{m\times m}$ \begin{align*} Q = \left[\begin{array}{cc}Q_1&Q_2\end{array}\right], \end{align*} where $Q_1\in\mathbb{R}^{m\times n}$ and $Q_2\in\mathbb{R}^{m\times(m-n)}$, $n<m$ are orthogonal complements. For any integers $k<\frac{m}{2}$ and $q\ge2$, if \begin{align} \norm{{Q_1}_\mathcal{T}}_q\triangleq\sup\limits_{\mathbf{x}\neq0}{\frac{\norm{{Q_1}_\mathcal{T}\mathbf{x}}_q}{\norm{\mathbf{x}}_q}}<\frac{1}{2}k^{\frac{1}{q}-1}, \end{align} for all $\mathcal{T}\subset\{1,2,\hdots,m\}$ with $\abs{\mathcal{T}}\le k$, then $Q_2^\top\in\textsf{NSP}_1(k,1)$. 
\end{theorem} \begin{proof} First, we observe that the inequality $\norm{\mathbf{e}}_q\le\norm{\mathbf{e}}_2\le\norm{\mathbf{e}}_1\le m^{1-\frac{1}{q}}\norm{\mathbf{e}}_q$ holds for every vector $\mathbf{e}\in\mathbb{R}^m$ and integer $q\ge2$. Thus, for all $\mathcal{T}\subset\{1,2,\hdots,m\}$ with $\abs{\mathcal{T}}\le k$ and $\mathbf{x}\in\mathbb{R}^n$, \begin{align*} \norm{{Q_1}_\mathcal{T}}_q &< \frac{1}{2}k^{\frac{1}{q}-1}\Rightarrow 2k^{1-\frac{1}{q}}\norm{{Q_1}_\mathcal{T}\mathbf{x}}_q<\norm{\mathbf{x}}_q\\ &\Rightarrow 2\abs{\mathcal{T}}^{1-\frac{1}{q}}\norm{{Q_1}_\mathcal{T}\mathbf{x}}_q<\norm{\mathbf{x}}_q\\ &\Rightarrow 2\norm{{Q_1}_\mathcal{T}\mathbf{x}}_1 < \norm{\mathbf{x}}_2 = \norm{Q_1\mathbf{x}}_2\\ &\Rightarrow 2\norm{{Q_1}_\mathcal{T}\mathbf{x}}_1 < \norm{Q_1\mathbf{x}}_2 < \norm{Q_1\mathbf{x}}_1\\ &\Rightarrow 2\norm{{Q_1}_\mathcal{T}\mathbf{x}}_1 < \norm{Q_1\mathbf{x}}_1 = \norm{{Q_1}_\mathcal{T}\mathbf{x}}_1 + \norm{{Q_1}_{\mathcal{T}^c}\mathbf{x}}_1\\ &\Rightarrow \norm{{Q_1}_\mathcal{T}\mathbf{x}}_1 < \norm{{Q_1}_{\mathcal{T}^c}\mathbf{x}}_1\\ &\Rightarrow Q_2^\top\in\textsf{NSP}_1(k,1) \end{align*} \end{proof} \begin{remark} For $q=2$, the sufficient condition becomes $\overline{\sigma}({Q_1}_\mathcal{T})<\frac{1}{2\sqrt{k}}$, which imposes a limit on the amount of information any $k$ rows of the orthogonal matrix $Q_1$ can convey. In other words, this ensures there is sufficient redundancy such that if any combination of $k$ rows is deleted, the resulting system can still be used to reconstruct the state. This property is the motivation for the support refinement and row deletion scheme in \cite{anubi2018robust}. \end{remark} The following corollary gives a more specialized result based on $q=1$. \begin{corollary} Let $\mathbf{v}\in\mathbb{R}^m$ be a vector whose elements are the $\infty$-norm of the corresponding row of $Q_1$, i.e., $\mathbf{v}_i = \max\limits_{1\le j\le n}{\abs{{Q_1}_{ij}}}$. 
If \begin{align} \norm{\mathbf{v}[k]}_1<\frac{1}{2\sqrt{n}}, \end{align} then $Q_2^\top\in\textsf{NSP}_1(k,1)$. \end{corollary} \begin{proof} First, we make the following observations for all $\mathbf{x}\in\mathbb{R}^n$ \begin{align*} &\bullet\hspace{2mm}\norm{{Q_1}_\mathcal{T}\mathbf{x}}_1\le\left(\max\limits_{1\le j\le n}\left\{\norm{{Q_1}_{\mathcal{T}}^j}_1\right\}\right)\norm{\mathbf{x}}_1\\ &\phantom{\bullet\hspace{2mm}\norm{{Q_1}_\mathcal{T}\mathbf{x}}_1}\le\norm{\mathbf{v}[k]}_1\norm{\mathbf{x}}_1.\\ &\bullet\hspace{2mm}\norm{{Q_1}\mathbf{x}}_1\ge\norm{{Q_1}\mathbf{x}}_2=\norm{\mathbf{x}}_2\ge\frac{1}{\sqrt{n}}\norm{\mathbf{x}}_1\\ &\phantom{\bullet\hspace{2mm}\norm{{Q_1}\mathbf{x}}_1}\Rightarrow \frac{1}{\sqrt{n}}\norm{\mathbf{x}}_1\le\norm{{Q_1}\mathbf{x}}_1. \end{align*} Thus, if $2\norm{\mathbf{v}[k]}_1<\frac{1}{\sqrt{n}}$, then \begin{align*} &2\norm{{Q_1}_\mathcal{T}\mathbf{x}}_1\le2\norm{\mathbf{v}[k]}_1\norm{\mathbf{x}}_1<\frac{1}{\sqrt{n}}\norm{\mathbf{x}}_1\le\norm{{Q_1}\mathbf{x}}_1,\\ &\Rightarrow 2\norm{{Q_1}_\mathcal{T}\mathbf{x}}_1 < \norm{Q_1\mathbf{x}}_1 = \norm{{Q_1}_\mathcal{T}\mathbf{x}}_1 + \norm{{Q_1}_{\mathcal{T}^c}\mathbf{x}}_1\\ &\Rightarrow \norm{{Q_1}_\mathcal{T}\mathbf{x}}_1 < \norm{{Q_1}_{\mathcal{T}^c}\mathbf{x}}_1\\ &\Rightarrow Q_2^\top\in\textsf{NSP}_1(k,1) \end{align*} \end{proof} \section{Resilient Estimation with Prior Information}\label{s:methodology} Using prior information to enhance the recovery of sparse signals in compressive sensing is not a new idea \cite{friedlander2011recovering,anubi2018robust,miosso2009compressive,scarlett2012compressed}. However, vast majority of the existing literature focuses on prior information relating to the support of the sparse signal. In this paper, we consider prior information as a probability distribution over the system measurements. For cyber-physical systems, which are the primary subject of this study, such information is readily available via data-driven auxiliary models. 
In the light of model \eqref{eqn:meas_model} and the optimization problem in \eqref{eqn:comp_sens_L1}, consider the following slightly more general problem: \begin{align}\label{eqn:comp_sens_subspace} \Minimize\limits_{\mathbf{e}}{\left\|\mathbf{e}\right\|_1\hspace{2mm}\SubjectTo\hspace{2mm}\mathbf{y}-\mathbf{e}\in\mathcal{V}\cap\mathcal{X}}, \end{align} where $\mathcal{V}\subset\mathbb{R}^m$ is a linear subspace satisfying the \emph{subspace property} $\norm{\mathbf{v}_\mathcal{T}}_1\le\gamma\norm{\mathbf{v}_{\mathcal{T}^c}}_1$, $\forall \mathbf{v}\in\mathcal{V}, \abs{\mathcal{T}}\le k<m$, and $\mathcal{X}\subset\mathbb{R}^m$ is a convex set with the bounded property $\norm{\mathbf{x}}_1\le\delta$, $\forall \mathbf{x}\in\mathcal{X}$. The bounded set adds an extra layer of prior information which, as we will show next, improves the reconstruction error bound. While we have used a very simple bound here, other relevant properties may be used to encode specialized prior information, which can then lead to specialized results for the particular application. For instance, the bound could be probabilistic---determined from the ROC characteristic of a data-driven detector---or encode domain-specific relationships among the measurement channels.\\ \noindent We now have all the ingredients to state our main results: \begin{theorem}\label{thm:main_result} Consider the recovery optimization problem in \eqref{eqn:comp_sens_subspace}, where the linear subspace $\mathcal{V}$ satisfies the \emph{subspace property} $\norm{\mathbf{v}_\mathcal{T}}_1\le\gamma\norm{\mathbf{v}_{\mathcal{T}^c}}_1$, $\forall \mathbf{v}\in\mathcal{V}, \abs{\mathcal{T}}\le k<m$, and the convex set $\mathcal{X}\subset\mathbb{R}^m$ satisfies the bounded property $\norm{\mathbf{x}}_1\le\delta$, $\forall \mathbf{x}\in\mathcal{X}$. 
The reconstruction error with respect to any feasible vector $\mathbf{e}\in\mathbb{R}^m$ is bounded as: \begin{align} \norm{\hat{\mathbf{e}}-\mathbf{e}}_1\le2\textsf{sat}_\delta\left(\frac{1+\gamma}{1-\gamma}\norm{\mathbf{e}-\mathbf{e}[k]}_1\right), \end{align} where $\mathbf{e}[k]$ is the best $k$-term approximation of $\mathbf{e}$. \end{theorem} \begin{remark} This result is similar to existing recovery error bounds in the literature \cite{chen2012stability}. The main difference lies in the saturation given by the bound on the prior-information set. This bound shows up explicitly because of the way it was defined in the set. In some practical situations, such an explicit bound may not exist. It is easy to modify the result based on the new characteristic of the prior-information set. In situations where the actual vector is only known to belong to the set $\mathcal{X}$ with some probability, the inclusion constraint may be reformulated into a chance constraint with the final result inheriting the associated probabilistic guarantees. \end{remark} \begin{remark} Indeed, any $k$-sparse feasible vector $\mathbf{e}\in\mathbb{R}^m$, $\abs{\supp(\mathbf{e})}\le k<m$ will be recovered exactly by the solution to the optimization problem in \eqref{eqn:comp_sens_subspace}. Although the question of the stability of the recovery process to process noise is not pursued in this paper, we expect similar saturated error bound results as obtained above. We will demonstrate the stability numerically by including noise in the example given in subsequent sections. \end{remark} \begin{proof} Let $\mathbf{e}$ be a feasible point of the optimization problem in \eqref{eqn:comp_sens_subspace}, and $\hat{\mathbf{e}}\triangleq\mathbf{e} + \mathbf{h}, \hspace{1mm} \mathbf{h}\in\mathbb{R}^m$ be the optimal point. Given $k<m$, define the index set $\mathcal{T}\subset\left\{1,2,\hdots,m\right\}$ with $\abs{\mathcal{T}}\le k$. 
By the optimality of $\hat{\mathbf{e}}$, we have that $\norm{\mathbf{e}}_1\ge\norm{\hat{\mathbf{e}}}_1$, which implies that: \begin{align*} \norm{\mathbf{e}}_1&\ge\norm{\hat{\mathbf{e}}}_1 = \norm{\mathbf{e}+\mathbf{h}}_1 \\ &\phantom{\norm{\hat{\mathbf{e}}}_1}=\norm{\mathbf{e}_\mathcal{T}+\mathbf{h}_\mathcal{T}}_1 + \norm{\mathbf{e}_{\mathcal{T}^c}+\mathbf{h}_{\mathcal{T}^c}}_1\\ &\ge\norm{\mathbf{e}_{\mathcal{T}}}_1 - \norm{\mathbf{h}_{\mathcal{T}}}_1 + \norm{\mathbf{h}_{\mathcal{T}^c}}_1 - \norm{\mathbf{e}_{\mathcal{T}^c}}_1\\ \Longrightarrow \norm{\mathbf{h}_{\mathcal{T}^c}}_1&\le \norm{\mathbf{h}_{\mathcal{T}}}_1 + \norm{\mathbf{e}}_1 - \norm{\mathbf{e}_{\mathcal{T}}}_1 + \norm{\mathbf{e}_{\mathcal{T}^c}}_1\\ &= \norm{\mathbf{h}_\mathcal{T}}_1 + 2\norm{\mathbf{e}_{\mathcal{T}^c}}_1. \end{align*} Thus \begin{align}\label{eqn:claim1} \norm{\mathbf{h}_{\mathcal{T}^c}}_1\le\norm{\mathbf{h}_\mathcal{T}}_1 + 2\norm{\mathbf{e}_{\mathcal{T}^c}}_1. \end{align} \noindent Next, since $\mathbf{e}$ and $\hat{\mathbf{e}}$ are feasible, i.e., $\mathbf{y}-\mathbf{e},\mathbf{y}-\hat{\mathbf{e}}\in\mathcal{X}\Rightarrow\norm{\mathbf{e}-\mathbf{y}}_1\le\delta$ and $\norm{\hat{\mathbf{e}}-\mathbf{y}}_1 = \norm{\mathbf{h}+\mathbf{e}-\mathbf{y}}_1\le\delta$, it follows that \begin{align}\label{eqn:claim2} \norm{\mathbf{h}}_1 = \norm{\mathbf{h}_\mathcal{T}}_1 + \norm{\mathbf{h}_{\mathcal{T}^c}}_1&\le2\delta. \end{align} \noindent Moreover, from the feasibility of $\mathbf{e}$ and $\hat{\mathbf{e}}$, $\mathbf{e}-\mathbf{y},\hat{\mathbf{e}}-\mathbf{y}\in\mathcal{V}\Rightarrow\mathbf{h}=\hat{\mathbf{e}}-\mathbf{e}\in\mathcal{V}$. Thus, from the subspace property, it follows that \begin{align}\label{eqn:claim3} \norm{\mathbf{h}_\mathcal{T}}_1\le\gamma\norm{\mathbf{h}_{\mathcal{T}^c}}_1,\hspace{2mm}\text{for some } 0<\gamma<1. 
\end{align} Adding the inequalities in \eqref{eqn:claim1} and \eqref{eqn:claim3} gives \begin{align*} \norm{\mathbf{h}}_1&\le\left(1-\gamma\right)\norm{\mathbf{h}_\mathcal{T}}_1 + \gamma\norm{\mathbf{h}_\mathcal{T}}_1 + 2\norm{\mathbf{e}_{\mathcal{T}^c}}_1 + \gamma\norm{\mathbf{h}_{\mathcal{T}^c}}_1\\ &\le\left(1-\gamma\right)\norm{\mathbf{h}_\mathcal{T}}_1 + \gamma\norm{\mathbf{h}}_1 + 2\norm{\mathbf{e}_{\mathcal{T}^c}}_1. \end{align*} Subtracting $\gamma\norm{\mathbf{h}}_1$ from both sides and dividing by $1 - \gamma$ gives \begin{align*} \norm{\mathbf{h}}_1\le\norm{\mathbf{h}_\mathcal{T}}_1 + \frac{2}{1-\gamma}\norm{\mathbf{e}_{\mathcal{T}^c}}_1, \end{align*} so that \begin{align}\label{eqn:claim4} \norm{\mathbf{h}_{\mathcal{T}^c}}_1\le \frac{2}{1-\gamma}\norm{\mathbf{e}_{\mathcal{T}^c}}_1. \end{align} Combining \eqref{eqn:claim3} and \eqref{eqn:claim4} yields \begin{align}\label{eqn:claim5} \norm{\mathbf{h}_\mathcal{T}}_1\le\gamma\norm{\mathbf{h}_{\mathcal{T}^c}}_1 \le \frac{2\gamma}{1-\gamma}\norm{\mathbf{e}_{\mathcal{T}^c}}_1. \end{align} By adding the inequalities in \eqref{eqn:claim4} and \eqref{eqn:claim5}, it follows that \begin{align}\label{eqn:claim6} \norm{\mathbf{h}}_1=\norm{\mathbf{h}_\mathcal{T}}_1+\norm{\mathbf{h}_{\mathcal{T}^c}}_1\le \frac{2\left(1+\gamma\right)}{1-\gamma}\norm{\mathbf{e}_{\mathcal{T}^c}}_1, \end{align} which, after combining with \eqref{eqn:claim2}, yields \begin{align*} \norm{\mathbf{h}}_1&\le\min\left\{\frac{2\left(1+\gamma\right)}{1-\gamma}\norm{\mathbf{e}_{\mathcal{T}^c}}_1,2\delta\right\}\\ &\le2\min\left\{\frac{\left(1+\gamma\right)}{1-\gamma}\norm{\mathbf{e}_{\mathcal{T}^c}}_1,\delta\right\}. \end{align*} Thus, the inequality \begin{align*} \norm{\mathbf{h}}_1\le 2\textsf{sat}_\delta\left(\frac{\left(1+\gamma\right)}{1-\gamma}\norm{\mathbf{e}_{\mathcal{T}^c}}_1\right) \end{align*} holds for all index sets $\mathcal{T}\subset\left\{1,2,\hdots,m\right\}$ with $\abs{\mathcal{T}}\le k$. 
The result follows by selecting $\mathcal{T} = \supp(\mathbf{e})$. \end{proof} Now, we focus on the development of a resilient reconstruction algorithm using both a measurement model and a prior information model. Consider a concurrent model of the form: \begin{align}\label{eqn:model_based} \mathbf{y} &= H\mathbf{x} + \mathbf{e}+ \boldsymbol{\varepsilon}\\\label{eqn:data_driven} \mathbf{y} &\sim\mathcal{N}(\mu(\mathbf{z}),\Sigma(\mathbf{z}))\\\label{eqn:noise_model} \boldsymbol{\varepsilon}&\sim\mathcal{N}(\mathbf{0},\textsf{diag}(\sigma_1^2,\hdots,\sigma_m^2)) \end{align} where $H\in\mathbb{R}^{m\times n}$ is the measurement matrix, $\mathbf{x}\in\mathbb{R}^n$ is the state vector, $\mathbf{e}\in\mathbb{R}^m, \norm{\mathbf{e}}_0\le k<m$ is the attack vector, and $\boldsymbol{\varepsilon}\in\mathbb{R}^m$ is the measurement noise. The concurrent model consists of a measurement model \eqref{eqn:model_based}, a prior information (auxiliary) model \eqref{eqn:data_driven} given as a function of the auxiliary variable $\mathbf{z}\in\mathbb{R}^p$, and a noise model \eqref{eqn:noise_model}, \noindent where \begin{align*} \mu(\mathbf{z}) = \left[\begin{array}{c}\mu_1(\mathbf{z})\\\vdots\\\mu_m(\mathbf{z})\end{array}\right]\text{ and } \Sigma(\mathbf{z})= \left[\begin{array}{ccc}\Sigma_1(\mathbf{z})&&\\&\ddots&\\&&\Sigma_m(\mathbf{z})\end{array}\right] \end{align*} for some mean and covariance functions $\mu_i:\mathbb{R}^p\mapsto\mathbb{R}$ and $\Sigma_i:\mathbb{R}^p\mapsto\mathbb{R}_+$, respectively (see Section~\ref{sec:auxiliary_model} for a particular example using GPR). For a cyber-physical system, the measurement model is usually physics-based while the prior information is data-driven. The noise model is generally knowledge-based. 
One of the main advantages of using models of this form for a CPS is that the resulting blend of the generalization properties of physics-based models and the adaptive local accuracy of data-driven methods creates an additional layer of redundancy which can reveal the truth even if portions of the measurement are subject to adversarial corruption. In order to remain undetectable, any viable attack vector $\mathbf{y}_a,\norm{\mathbf{y}_a}_{\ell_0}=p\le m$ necessarily has to satisfy the condition $p(\mathbf{y}+\mathbf{y}_a|\mathbf{z},\mathcal{D})\ge p(\mathbf{y}|\mathbf{z},\mathcal{D})$. This provides an additional layer of security by: \textit{1)} requiring the attacker to have knowledge of the auxiliary model and the parameters, and \textit{2)} limiting the magnitude of possible state corruption. Let $\mathbf{y}^*$ be the true value of the measured variable; then the optimal estimation problem is cast as the optimization problem: \begin{align}\label{eqn:enh_res_est} \begin{array}{lc} \Minimize & \left\|\mathbf{y}-H\mathbf{x}-\boldsymbol{\varepsilon}\right\|_{\ell_0} \\ \SubjectTo&\\ &H\mathbf{x}\in\mathcal{Y}(\mathbf{z})\\ &\boldsymbol{\varepsilon}\in\mathcal{E}, \end{array} \end{align} where the convex sets $\mathcal{Y}(\mathbf{z})$ and $\mathcal{E}$ have the property that: \begin{align} p(\mathbf{y}^*\in\mathcal{Y}|\mathbf{z},\mathcal{D})\ge\tau\\ p(\boldsymbol{\varepsilon}^*\in\mathcal{E})\ge\tau. \end{align} \noindent The idea is essentially seeking a state vector, together with the minimum number of attacked channels and a highly likely noise vector, which completely explains the observations while having a high likelihood according to the auxiliary model prior. Ideally, one would use an index minimizing ``$0$-norm" in the objective, as done above. However, Theorem~\ref{thm:main_result} shows that the $1$-norm relaxation achieves a strong reconstruction property, provided that the range space of $H$ satisfies the subspace property. 
The optimization parameter $\tau\in(0,\hspace{2mm}1]$ controls the likelihood threshold. It can be set to a constant value or optimized with respect to some higher-level objectives. Thus, the resilient state estimation optimization problem is equivalent to: \begin{align}\label{eqn:enh_res_est2} \begin{array}{ll} \Minimize & \left\|\mathbf{y}-H\mathbf{x}-\boldsymbol{\varepsilon}\right\|_1 \\ \SubjectTo&\\ &\begin{array}{rl} \norm{H\mathbf{x}+\boldsymbol{\varepsilon}-\mu(\mathbf{z})}_{\Sigma^{-1}(\mathbf{z})}^2 & \le \chi^2_m(\tau)\\ \norm{\boldsymbol{\varepsilon}}_{\Sigma_\varepsilon^{-1}}^2 & \le\chi^2_{m}(\tau), \end{array} \end{array} \end{align} \noindent where $\Sigma_\varepsilon = \textsf{diag}(\sigma_1^2,\hdots,\sigma_m^2)$ and $\chi^2_m(\tau)$ is the quantile function for probability $\tau$ of the chi-squared distribution with $m$ degrees of freedom. The following lemma will be useful in proving the next result about the reconstruction error bound of the resulting resilient estimation based on the optimization problem in \eqref{eqn:enh_res_est2}. \begin{lemma}\label{lemma:kth_term_error_bound} Given a vector $\boldsymbol{\varepsilon}\in\mathbb{R}^m$ with $\norm{\boldsymbol{\varepsilon}}_2\le\delta$, then the following $k$th term approximation error bound \begin{align} \norm{\boldsymbol{\varepsilon} - \boldsymbol{\varepsilon}[k]}_1\le\frac{m-k}{\sqrt{m}}\delta \end{align} holds for $k<m$. 
\end{lemma} \begin{proof} Without loss of generality, suppose the elements of $\boldsymbol{\varepsilon}$ are ordered as $\abs{\boldsymbol{\varepsilon}_1}\le\abs{\boldsymbol{\varepsilon}_2}\le\hdots\le\abs{\boldsymbol{\varepsilon}_m}$. Then, since each of the $k$ largest entries is at least $\abs{\boldsymbol{\varepsilon}_{m-k}}$ and $(m-k)\abs{\boldsymbol{\varepsilon}_{m-k}}\ge\sum_{i=1}^{m-k}\abs{\boldsymbol{\varepsilon}_i}$, \begin{align*} \norm{\boldsymbol{\varepsilon} - \boldsymbol{\varepsilon}[k]}_1 &= \sum\limits_{i=1}^{m-k}{\abs{\boldsymbol{\varepsilon}_i}}\\ &\le\sum\limits_{i=1}^{m}{\abs{\boldsymbol{\varepsilon}_i}} - k\abs{\boldsymbol{\varepsilon}_{m-k}} = \norm{\boldsymbol{\varepsilon}}_1-k\abs{\boldsymbol{\varepsilon}_{m-k}}\\&\le \norm{\boldsymbol{\varepsilon}}_1-\frac{k}{m-k}(m-k)\abs{\boldsymbol{\varepsilon}_{m-k}}\\ &\le\norm{\boldsymbol{\varepsilon}}_1-\frac{k}{m-k}\sum\limits_{i=1}^{m-k}{\abs{\boldsymbol{\varepsilon}_i}}\\&\le \norm{\boldsymbol{\varepsilon}}_1-\frac{k}{m-k}\norm{\boldsymbol{\varepsilon} - \boldsymbol{\varepsilon}[k]}_1, \end{align*} from which \begin{align*} \norm{\boldsymbol{\varepsilon} - \boldsymbol{\varepsilon}[k]}_1\le\frac{m-k}{m}\norm{\boldsymbol{\varepsilon}}_1\le\frac{m-k}{\sqrt{m}}\norm{\boldsymbol{\varepsilon}}_2\le\frac{m-k}{\sqrt{m}}\delta. \end{align*} \end{proof} \begin{theorem} Consider the recovery optimization problem in \eqref{eqn:enh_res_est2}. Suppose the unknown true state $\mathbf{x}^*\in\mathbb{R}^n$ is a feasible point of the optimization problem. 
If the range space $\mathcal{R}(H)$ of $H$ satisfies the \emph{subspace property} $\norm{\mathbf{v}_\mathcal{T}}_1\le\gamma\norm{\mathbf{v}_{\mathcal{T}^c}}_1$, $\forall \mathbf{v}\in\mathcal{R}(H), \abs{\mathcal{T}}\le k<m$, then the reconstruction error can be upper bounded as: \begin{align}\nonumber \norm{\hat{\mathbf{x}} - \mathbf{x}^*}_2&\le C_1\textsf{sat}_{\delta(\tau)}\left(C_2\norm{\hat{\mathbf{e}}-\hat{\mathbf{e}}[k] }_1 + C_3\delta(\tau)\right) \\&\hspace{3cm}+ C_1\textsf{sat}_{\delta(\tau)}\left(C_3\delta(\tau)\right), \end{align} where $\hat{\mathbf{e}} = \mathbf{y}-H\hat{\mathbf{x}} - \hat{\boldsymbol{\varepsilon}}$ is the objective residual, \begin{align*} \delta(\tau) &= \overline{\Sigma}^{\frac{1}{2}}\chi_m(\tau), \hspace{1mm} C_1 = \frac{2}{\underline{{\sigma}}_H},\hspace{1mm} C_2 =\frac{1+\gamma}{1-\gamma},\hspace{1mm}\\ &C_3 = \frac{(1+\gamma)}{(1-\gamma)}\frac{(m-k)}{\sqrt{m}}\overline{\sigma}, \end{align*} $\underline{{\sigma}}_H$ is the smallest singular value of $H$, and $\overline{\sigma}$ and $\overline{\Sigma}$ are the biggest standard deviations of the auxiliary model and measurement noise statistics respectively. \end{theorem} \begin{proof} Define the sets $\mathcal{X}, \mathcal{X}_\varepsilon\subset\mathbb{R}^m$ as \begin{align*} \mathcal{X}(\mathbf{z}) &\triangleq \left\{\mathbf{y}\in\mathbb{R}^m\hspace{1mm}:\hspace{1mm}\norm{\mathbf{y}-\boldsymbol{\mu}(\mathbf{z})}_{\Sigma^{-1}(\mathbf{z})}^2\le\chi_m^2(\tau)\right\}\\ \mathcal{X}_\varepsilon&\triangleq\left\{\boldsymbol{\varepsilon}\in\mathbb{R}^m\hspace{1mm}:\hspace{1mm}\norm{\boldsymbol{\varepsilon}}_{\Sigma_{\boldsymbol{\varepsilon}}^{-1}}^2\le\chi_m^2(\tau)\right\}. 
\end{align*} Thus, the optimization problem in \eqref{eqn:enh_res_est2} can be expressed as: \begin{align}\label{eqn:enh_res_est_P}\tag{$P$} \begin{array}{ll} \Minimize & \left\|\mathbf{e}\right\|_1 \\ \SubjectTo&\\ &\begin{array}{rl} \mathbf{y}-\mathbf{e}-\boldsymbol{\varepsilon}&\in\mathcal{R}(H)\\ \mathbf{y}-\mathbf{e}&\in\mathcal{X}(\mathbf{z})\\ \boldsymbol{\varepsilon}&\in\mathcal{X}_{\boldsymbol{\varepsilon}}. \end{array} \end{array} \end{align} Also, consider the reduced problem \begin{align}\label{eqn:enh_res_est_P_hat}\tag{$\hat{P}$} \begin{array}{ll} \Minimize & \left\|\mathbf{e}\right\|_1 \\ \SubjectTo&\\ &\mathbf{y}-\mathbf{e}\in\mathcal{R}(H)\cap\mathcal{X}. \end{array} \end{align} Let \begin{itemize} \item $\mathbf{e}^*\in\mathbb{R}^m,\hspace{2mm} \norm{\mathbf{e}^*}_0=k$ and $\boldsymbol{\varepsilon}^*\in\mathbb{R}^m$ be the unknown actual attack vector and noise instance respectively, \item $\hat{\mathbf{e}}, \hat{\boldsymbol{\varepsilon}}\in\mathbb{R}^m$ be the minimal points of the optimization problem in \eqref{eqn:enh_res_est_P}, and \item $\hat{\mathbf{e}}_2\in\mathbb{R}^m$ be the solution of the reduced problem in \eqref{eqn:enh_res_est_P_hat}. \end{itemize} Using the result in Theorem~\ref{thm:main_result}, the observation that $\mathbf{e}^*+\boldsymbol{\varepsilon}^*$ and $\hat{\mathbf{e}} + \hat{\boldsymbol{\varepsilon}}$ are feasible points of \eqref{eqn:enh_res_est_P_hat} and Lemma~\ref{lemma:kth_term_error_bound}, yield: \begin{align*} \norm{\hat{\mathbf{e}}_2 - \mathbf{e}^* - \boldsymbol{\varepsilon}^*}_1&\le 2\textsf{sat}_\delta\left(\frac{1+\gamma}{1-\gamma}\norm{\boldsymbol{\varepsilon}^* - \boldsymbol{\varepsilon}[k]^*}_1\right)\\&\le2\textsf{sat}_\delta\left(\frac{(1+\gamma)(m-k)\bar{\Sigma}}{(1-\gamma)\sqrt{m}}\delta\right), \end{align*} with $\delta = \overline{\Sigma}^{\frac{1}{2}}\chi_m(\tau)$. 
Using the left-hand-side triangular inequality, the above inequality implies that: \begin{align*} &\norm{\hat{\mathbf{e}} + \hat{\boldsymbol{\varepsilon}} - \mathbf{e}^* - \boldsymbol{\varepsilon}^*}_1\le \norm{\hat{\mathbf{e}}_2 - \mathbf{e}^* - \boldsymbol{\varepsilon}^*}_1 + \norm{\hat{\mathbf{e}}_2 - \hat{\mathbf{e}} - \hat{\boldsymbol{\varepsilon}}}_1\\ &\le2\textsf{sat}_\delta\left(\frac{(1+\gamma)(m-k)\bar{\Sigma}}{(1-\gamma)\sqrt{m}}\delta\right)\\&\hspace{1cm} + 2\textsf{sat}_\delta\left(\frac{1+\gamma}{1-\gamma}\norm{\hat{\mathbf{e}}-\hat{\mathbf{e}}[k] + \hat{\boldsymbol{\varepsilon}} - \hat{\boldsymbol{\varepsilon}}[k]}_1\right)\\ &\le2\textsf{sat}_\delta\left(\frac{(1+\gamma)(m-k)\bar{\Sigma}}{(1-\gamma)\sqrt{m}}\delta\right) \\&\hspace{5mm}+ 2\textsf{sat}_\delta\left(\frac{1+\gamma}{1-\gamma}\norm{\hat{\mathbf{e}}-\hat{\mathbf{e}}[k] }_1 + \frac{(1+\gamma)(m-k)\bar{\Sigma}}{(1-\gamma)\sqrt{m}}\delta\right). \end{align*} Expressing the right-hand-side of the last inequality in the ``language" of the original problem in \eqref{eqn:enh_res_est2} yields \begin{align*} &\norm{H(\hat{\mathbf{x}} - \mathbf{x}^*)}_1\le2\textsf{sat}_\delta\left(\frac{(1+\gamma)(m-k)\bar{\Sigma}}{(1-\gamma)\sqrt{m}}\delta\right) \\&\hspace{1cm}+ 2\textsf{sat}_\delta\left(\frac{1+\gamma}{1-\gamma}\norm{\hat{\mathbf{e}}-\hat{\mathbf{e}}[k] }_1 + \frac{(1+\gamma)(m-k)\bar{\Sigma}}{(1-\gamma)\sqrt{m}}\delta\right), \end{align*} where $\hat{\mathbf{x}} - \mathbf{x}^*$ is the resulting state estimation error, which is consequently bounded as: \begin{align*} \norm{\hat{\mathbf{x}} - \mathbf{x}^*}_2\le C_1\textsf{sat}_\delta\left(C_2\norm{\hat{\mathbf{e}}-\hat{\mathbf{e}}[k] }_1 + C_3\delta\right) + C_1\textsf{sat}_\delta\left(C_3\delta\right) \end{align*} \end{proof} \section{Numerical Example: Power system state estimation with data-driven economic auxiliary model}\label{s:experiments} In this numerical simulation example, a resilient state estimation algorithm based on the 
optimization problem in \eqref{eqn:enh_res_est2} is developed and evaluated on the IEEE 14-bus test case mapped to actual data from the New York Independent System Operator (NYISO). For this application, the prior information is obtained from a GPR mapping from some energy market information to \emph{iid} Gaussian distributions on the system measurements. This example first appeared in our earlier work~\cite{anubi2019enhanced}. Interested readers are directed to that paper for more details. In what follows, we only provide an overview to strengthen the theoretical results of the previous sections. \subsection{Setup}\label{ss:setup} The IEEE 14-bus system, shown in Fig.~\ref{fig:IEEE14bus}, represents a simple approximation of the American electric power system as of February 1962. It has 14 buses, 5 generators, and 11 loads. The system has 27 state variables which are the voltage angles and voltage magnitudes of the buses, with the first bus angle chosen as the reference one. The buses/nodes of the power grid model are assumed to be supported with IIoT measurement sensors such as remote terminal units (RTUs) able to provide bus-related measurements of active and reactive power injection and flow. Simulation experiments are performed using the actual load data of New York state as provided by NYISO~\cite{NYISO_data}. Specifically, five-minute load data of NYISO for 3 months (between January and March) in 2017 and 2018 are used. Furthermore, each region of the NYISO map, shown in Fig.~\ref{fig:NYISO}, is mapped in an ascending order with every load bus of the IEEE 14 system, i.e., using the following mapping: $[2\rightarrow1,~3\rightarrow2,~4\rightarrow3,~5\rightarrow4,~6\rightarrow5,~9\rightarrow6,~10\rightarrow7,~11\rightarrow8,~12\rightarrow9,~13\rightarrow10,~14\rightarrow11] $, where the first element shows the load bus of the IEEE 14 case and the second the region of NYISO, e.g., bus 2 to region A-WEST, bus 3 to region B-GENESE, bus 4 to region C-CENTRL, etc. 
By this, we were able to create realistic attack data to validate the earlier theoretical claims. \begin{figure} \caption{IEEE 14-bus system.} \caption{NYISO map of the 11 control area load zones} \caption{IEEE 14-bus system mapped into NYISO control area load zones data.} \label{fig:IEEE14bus} \label{fig:NYISO} \end{figure} \subsection{Auxiliary model}\label{sec:auxiliary_model} From the collected NYSIO historical load and market data, we built a Gaussian Process Regression (GPR) model which maps from locational bus marginal prices to bus voltages and angle measurements. This, as shown in previous sections, provides an added layer of redundancy for boosting system resiliency to arbitrary data corruption. A Gaussian Process (GP) is a collection (possibly infinite) of continuous random variables $\mathcal{G}$, any finite subset of which are jointly Gaussian. GPR uses GPs to encode prior distributions over functions\footnote{In this case will be functions from auxiliary measurements to observed measurements.}. The priors are then updated to form posterior distributions when new data is collected. For a comprehensive introduction to GP and GPR, and their applications for learning and control, the readers are directed to \cite{rasmussen2006gaussian} and a recent survey in \cite{Liu2018Gaussian}. Consider a dataset $\mathcal{D} = \left\{\mathbf{Z},\mathbf{Y}\right\}$, where $\mathbf{Z}\in\mathbb{R}^{p\times N}$ is a matrix containing the values of the auxiliary variables column-wise, $\mathbf{Y}\in\mathbb{R}^{m\times N}$ are the corresponding sensor measurement values and $N$ is the number of datapoint in the dataset. The goal is to learn an implicit mapping $f:\mathbb{R}^p\mapsto\mathbb{R}^m$ for which \begin{align} \mathbf{y}_i = f(\mathbf{z}_i) + \boldsymbol{\varepsilon},\hspace{2mm} i=1,\hdots N, \end{align} \noindent where $\boldsymbol{\varepsilon}\sim\mathcal{N}(\mathbf{0},\textsf{diag}(\sigma_1^2,\hdots,\sigma_m^2))$. 
In theory, without any further restriction, the problem is ill-defined because there are potentially many possible functions that explains the data exactly notwithstanding the measurement noise. As a means of regularization, the class of functions for consideration is refined by the restriction $f(\mathbf{z})\sim\mathcal{GP}(m(\mathbf{z}),k(\mathbf{z},\mathbf{z}'))$ to a GP completely specified by its mean and covariance functions\footnote{Also known as kernels.} \begin{align} \mu(\mathbf{z}) &\triangleq \mathbb{E}[f(\mathbf{z})]\\ k(\mathbf{z},\mathbf{z}') &\triangleq \mathbb{E}[(f(\mathbf{z})-\mu( \mathbf{z}))(f(\mathbf{z'})-\mu( \mathbf{z}'))]. \end{align} \noindent The covariance function can then be specified apriori without an explicit probability distribution. This is where the prior (possibly knowledge-based) information is encoded in the GP. While any positive definite function may pass for a covariance function, one commonly used is the squared exponential covariance function: \begin{align} k(\mathbf{z},\mathbf{z}') = A\exp\left(-\frac{1}{2l}\norm{\mathbf{z}-\mathbf{z}'}_2^2\right), \end{align} \noindent where hyperparameters $A$ and $l$ implicitly define a smoothness-promoting prior. 
Given a query point $\mathbf{z}\in\mathbb{R}^p$ for the auxiliary variable, the posterior distribution for the $j$th sensor values is $p(y_j|\mathbf{z},\mathcal{D}) = \mathcal{N}(\mu_j(\mathbf{z}),\Sigma_j(\mathbf{z}))$, with the mean and covariance function given by \begin{align} \mu_j(\mathbf{z}) &= \mathbf{k}(\mathbf{z})^\top\left(K+\sigma_j^2 I\right)^{-1}\mathbf{Y}_j^\top,\\ \Sigma_j(\mathbf{z})&= k(\mathbf{z},\mathbf{z}) - \mathbf{k}(\mathbf{z})^\top\left(K+\sigma_j^2 I\right)^{-1}\mathbf{k}(\mathbf{z}),\hspace{2mm}j=1,\hdots,m \end{align} \noindent where $K\in\mathbb{R}^{N\times N}$ is a covariance matrix with entries $K_{ij}=k(\mathbf{z}_i,\mathbf{z}_j)$ and $\mathbf{k}(\mathbf{z})\in\mathbb{R}^N$ is a vector with entries $\mathbf{k}(\mathbf{z})_i = k(\mathbf{z},\mathbf{z}_i)$. The overall sensor values posterior distribution is given by: \begin{align} p(\mathbf{y}|\mathbf{z},\mathcal{D}) &= \prod_{j=1}^m \mathcal{N}(\mu_j(\mathbf{z}),\Sigma_j(\mathbf{z}))\\ &=\mathcal{N}(\mu(\mathbf{z}),\Sigma(\mathbf{z})), \end{align} \noindent where \begin{align*} \mu(\mathbf{z}) = \left[\begin{array}{c}\mu_1(\mathbf{z})\\\vdots\\\mu_m(\mathbf{z})\end{array}\right]\text{ and } \Sigma(\mathbf{z})= \left[\begin{array}{ccc}\Sigma_1(\mathbf{z})&&\\&\ddots&\\&&\Sigma_m(\mathbf{z})\end{array}\right] \end{align*} \subsection{Solution Algorithm} In addition to the nice reconstruction property of the $1$-norm relaxation, Iteratively re-weighted algorithms \cite{candes2008enhancing, chartrand2008iterative} have been demonstrated to be a highly effective way of approximating the solution of the nonconvex problem with successive convex problems. In particular, for the solution of the problem in \eqref{eqn:enh_res_est}, the re-weighted $1$-norm minimization scheme of \cite{candes2008enhancing} is employed to give even stronger reconstruction algorithm. 
Consider the operator $\mathcal{P}:\mathbb{R}^m\times\mathbb{R}^p\times\mathbb{R}^{m\times m}\mapsto \mathbb{R}^{n+m}$, where \begin{align} \hat{\mathbf{x}}(W),\hspace{2mm}\hat{\boldsymbol{\varepsilon}}(W) = \mathcal{P}(\mathbf{y},\mathbf{z},W) \end{align} \noindent are given by the minimizers of the convex program: \begin{align}\label{eqn:enh_res_est3} \begin{array}{lc} \Minimize & \left\|W\left(\mathbf{y}-H\mathbf{x}-\boldsymbol{\varepsilon}\right)\right\|_1 \\ \SubjectTo&\\ &\begin{array}{rl} \norm{H\mathbf{x}+\boldsymbol{\varepsilon}-\mu(\mathbf{z})}_{\Sigma^{-1}(\mathbf{z})}^2 & \le \chi^2_{n_c}(\tau)\\ \norm{\boldsymbol{\varepsilon}}_{\Sigma_\varepsilon^{-1}}^2 & \le\chi^2_{m}(\tau), \end{array} \end{array} \end{align} \noindent Using this, the algorithm for the enhance state estimator is outlined in Algorithm~\ref{alg:enh_state_est}. \begin{algorithm}[htbp] \caption{Resilient Optimal State Estimation Algorithm Using Re-weighted $1$-norm minimization}\label{alg:enh_state_est} \begin{algorithmic} \Procedure{Offline}{} \State $\mathcal{D} \gets \left\{\mathbf{Z},\mathbf{Y}\right\}$\Comment{Dataset sparsification} \State $K\gets k(\mathbf{Z},\mathbf{Z})$\Comment{Kernel matrix} \State $\Sigma_\varepsilon,A,l \gets$\Comment{Hyperparameters initialization,} \EndProcedure \Procedure{Collect Data}{} \State $\mathbf{y}\gets$\Comment{Sensor measurements at the current instant} \State $\mathbf{z}\gets$\Comment{Auxiliary measurements at the current instant} \EndProcedure \Procedure{Update Models}{} \State $H\gets$\Comment{Model-based. 
See Sub-section~\ref{ss:setup} for details} \For{$j = 1$ to $m$}\Comment{Data-driven posterior} \State $\mu_j \gets \mathbf{k}(\mathbf{z})^\top\left(K+\sigma_j^2 I\right)^{-1}\mathbf{Y}_j^\top,$\Comment{Mean} \State $\Sigma_j\gets k(\mathbf{z},\mathbf{z}) - \mathbf{k}(\mathbf{z})^\top\left(K+\sigma_j^2 I\right)^{-1}\mathbf{k}(\mathbf{z}),\hspace{2mm}$\Comment{Covariance} \EndFor \EndProcedure \Procedure{Re-weighted $1$-norm minimization}{$\mathbf{y}$,$\mathbf{z}$} \State $W\triangleq \textsf{diag}[w_1,\hdots,w_m]\gets I$ \State $l\gets0$\Comment{Iteration count} \While{\texttt{not converged and }$l\le l_{max}$} \State $\hat{\mathbf{x}}^l,\hspace{2mm}\hat{\boldsymbol{\varepsilon}}^l \gets \mathcal{P}(\mathbf{y},\mathbf{z},W)$\Comment{$\ell_1$ minimization} \State $\mathbf{r}\gets \mathbf{y}-H\hat{\mathbf{x}}^l - \hat{\boldsymbol{\varepsilon}}^l$\Comment{residual} \For{$j=1$ to $m$}\Comment{weights update} \State $w_j\gets\frac{1}{\abs{\mathbf{r}_j}+\delta}$ \EndFor $l\gets l+1$\Comment{increment counter} \EndWhile\label{euclidendwhile} \State \textbf{return} $\hat{\mathbf{x}}^l,\hat{\boldsymbol{\varepsilon}}^l$\Comment{State estimate is $\hat{\mathbf{x}}^l$} \EndProcedure \end{algorithmic} \end{algorithm} \subsection{Results} The enhanced resilient estimation algorithm in Algorithm~\ref{alg:enh_state_est} was implemented and ran for data collected every five minutes in a simulation environment. The process begins with the auxiliary measurements $\mathbf{z}=\left[\begin{array}{ccc}z_\text{lbmp}&z_\text{mcl}&z_\text{mcc}\end{array}\right]$, which are actual data downloaded from the respective nodes of the NYISO transmission grid. Here, $z_\text{lbmp}$ is the \texttt{locational bus marginal prices} (\$/MWh), $z_\text{mcl}$ is the \texttt{marginal cost loses} (\$/MWh) and $z_\text{mcc}$ is the \texttt{marginal cost congestion} (\$/MWh). 
Next, the trained GPR model is executed to give the mean $\boldsymbol{\mu}(\mathbf{z})$ and the covariance $\Sigma(\mathbf{z})$ of the data-driven auxiliary model. Two kinds of FDIA generation were used in the simulation. For the first kind, attack vectors are generated to bias selected measurements locations by 500\% of its true value along a randomly chosen direction. For the second kind, the attack vectors $\mathbf{y}_a$ are systematically generated to result in a specified bias in the state estimation at targeted state variables. \begin{figure} \caption{Simulation results for targeted sensor measurements.\\ Attack vectors are generated to bias select measurements locations by 500\% of its true value along a randomly chosen direction. Plots is the percentage of successful estimations vs. the percentage of attacked sensor nodes.} \label{fig:results_target_measurements} \end{figure} \begin{figure} \caption{Simulation results for targeted state FDIA.\\ Attack vectors are generated to bias particular state variables by 50\%. Plotted are the distribution of the \texttt{rms} \label{fig:results_target} \end{figure} Fig. \ref{fig:results_target_measurements} and Fig. \ref{fig:results_target} show the performance of the proposed algorithm, compared with other standard methods in literature, to the two kinds of FDIA described above. For the first set of results, three different state estimation algorithms are simulated against a FDIA directed at specific measurement locations. The three algorithms are: \textit{1)} standard least squares ($\hat{\mathbf{x}} = \argmin\norm{\mathbf{y}-H\mathbf{x}}_2^2$), \textit{2)} re-weighted $\ell_1$ without the auxiliary model constraint and \textit{3)} the proposed re-weighted $\ell_1$ with auxiliary model constraint. There are 109 load flow measurements in the simulation. 
Each simulated scenario, circle points in Fig.~\ref{fig:results_target_measurements}, examines 200 simulations (per state estimation method) with random combinations of sensor locations having fixed percentage (\texttt{x-axis} values) of sensor nodes under attack. For the second set of results, the attacks were created in the range space of the system Jacobian matrices. It is well known (e.g., \cite{liu2011false}) that both unconstrained methods will behave similarly under this class of attacks. Thus, we restrict our comparison only to the re-weighted $\ell_1$ algorithms -- one with auxiliary constraints and the other without. Fig.~\ref{fig:results_target} shows the simulations results for four different cases with different numbers of targeted state variables. Fig.~\ref{fig:results_target} shows two plots for each case side-by-side -- one with auxiliary constraints and the other without. Each plot contains the distributions of the maximum absolute relative error, as well as the root-mean-square (\texttt{rms}) values of the relative error for the targeted states. As can be seen from the figures, re-weighted $\ell_1$ algorithms without auxiliary constraints, even though significantly outperforms least-squares based methods in general, are not resilient against state-targeted FDIA. The proposed re-weighted $\ell_1$ with auxiliary constraints shows significant improvement for both performance indicators. Noticeable effects of the state-targeted FDIA begin to appear when 10 or more states are targeted. This requires compromising more or less 85\% of the system measurement, a feat that demands tremendous amount of resources from any malicious actor. \section{Conclusion and future Work}\label{s:conclusions} In this paper, we showed that incorporation of prior measurement information can significantly improve the resiliency of optimal state estimation algorithms. 
In particular, we proved that certain prior set inclusion constraints results in much stronger reconstruction error bound. The problem is formulated as a constrained compressive sensing problem and standard results were extended to prove the main results. In addition, numerical simulations were used to validate the theoretical claims by developing a re-weighted $\ell_1$ minimization-base resilient state estimation algorithm for power systems in which data acquired from various IIOT sensors and devices are poisoned with false data injection attacks. The particular case tested is the IEEE 14-bus system mapped to actual NYISO load data. Thus, by corroborating the state estimation with prior auxiliary model, we have demonstrated that it is possible to make it much more difficult to attack a CPS just by corrupting portion of its sensor measurements. Our future work will aim to extend the theoretical and algorithmic developments in this paper to: \begin{itemize} \item incorporate additional auxiliary information in the estimation, as well as evaluate the developed algorithms through digital real-time simulation platforms using both simulated and field data \item the dynamic case using multiple event-triggered auxiliary models \item apply the results to the distributed resilient state estimators and moving horizon estimators. \item the nonlinear case via infinite-dimensional compressive sensing in Banach space. \end{itemize} Moreover, there are interesting theoretical questions that remain open; For instance, what is the resulting stability assessments and margins of the resulting closed loop system when the resilient estimator is used as a dynamic filter, whereby the estimated states are fed into the underlying controller(s)?. An answer to these questions, and likes, will help us judge the quality of an auxiliary model required to achieve a given success rate. Finally, we aim to apply this approach to more examples of CPSs. 
\section*{Acknowledgment} Authors are grateful to the Florida State University (FSU) Council on Research and Creativity (CRC) for funding this effort through the First Year Assistant Professor Award Program (FYAP) \#043354 \ifCLASSOPTIONcaptionsoff \fi \end{document}
\begin{document} \title[Strongly damped wave equation]{A note on a strongly damped wave equation with fast growing nonlinearities} \author[] {Varga Kalantarov${}^1$, and Sergey Zelik${}^2$} \begin{abstract} A strongly damped wave equation including the displacement depending nonlinear damping term and nonlinear interaction function is considered. The main aim of the note is to show that under the standard dissipativity restrictions on the nonlinearities involved the initial boundary value problem for the considered equation is globally well-posed in the class of sufficiently regular solutions and the semigroup generated by the problem possesses a global attractor in the corresponding phase space. These results are obtained for the nonlinearities of an arbitrary polynomial growth and without the assumption that the considered problem has a global Lyapunov function. \end{abstract} \subjclass[2000]{35B40, 35B45} \keywords{strongly damped wave equations, attractors, supercritical nonlinearities} \thanks{The work of Varga Kalantarov was partially supported by the Scientific and Research Council of Turkey, grant no. 112T934. } \address{${}^1$ Department of mathematics, Ko{\c c} University, \newline\indent Rumelifeneri Yolu, Sariyer 34450\newline\indent Sariyer, Istanbul, Turkey} \email{[email protected]} \address{${}^2$ University of Surrey, Department of Mathematics, \newline Guildford, GU2 7XH, United Kingdom.} \email{[email protected]} \maketitle \section{Introduction}\label{s0} In a bounded smooth domain $\Omega\subset\mathbb R^3$, we consider the following problem: \begin{equation}\label{0.1} \begin{cases} \partial_t^2u+f(u)\partial_t u-\gamma\partial_t\Delta_x u-\Delta_x u+g(u)=h,\ \ u\big|_{\partial\Omega}=0,\\ u\big|_{t=0}=u_0,\ \ \partial_t u\big|_{t=0}=u_1. 
\end{cases} \end{equation} Here $u=u(t,x)$ is unknown function, $\Delta_x$ is the Laplacian with respect to the variable $x$, $\gamma>0$ is a given dissipation parameter, $f$ and $g$ are given nonlinearities and $h\in L^2(\Omega)$ are given external forces. We assume throughout of the paper that the nonlinearities $f$ and $g$ satisfy \begin{equation}\label{0.2} \begin{cases} 1.\ \ f,g\in C^1(R),\\ 2.\ \ -C+\alpha|u|^p\le f(u)\le C_1(1+|u|^p),\\ 3.\ \ -C+\alpha|u|^q\le g'(u)\le C(1+|u|^q), \end{cases} \end{equation} where $p,q\ge0$ and $p+q>0$. \par Strongly damped wave equations of the form \eqref{0.1} and similar equations are of a great current interest, see \cite{CaCho02,CCD1,DP,GM,Ka86,Khan2010,Khan2012,Ma,PS05,PZ06-1,YS09,We} and references therein. The most studied is the case with only one nonlinearity ($f\equiv0$), i.e. the problem of the form \begin{equation}\label{0.1g} \begin{cases} \partial_t^2u-\gamma\partial_t\Delta_x u-\Delta_x u+g(u)=h,\ \ u\big|_{\partial\Omega}=0,\\ u\big|_{t=0}=u_0,\ \ \partial_t u\big|_{t=0}=u_1. \end{cases} \end{equation} Even in this particular case, the equation has a lot of non-trivial and interesting features attracting the attention of many mathematicians, see \cite{CaCho02,CCD1,GM,Ka86,Ma,We} and references therein. For instance, it has been thought for a long time that, for the case of the solutions belonging to the so-called energy phase space, there is a critical growth exponent $q_{max}=4$ for the nonlinearity $g$ and that the properties of the solutions in the supercritical case $q>q_{max}$ are {\it principally} different from the subcritical case $q<q_{max}$. 
On the other hand, it has been shown already in \cite{We} that the problem \eqref{0.1g}, with a nonlinear term satisfying just the condition $g'(s)\geq -C, \ \forall s\in\mathbb R$, has a unique global solution belonging to the more regular phase space \begin{equation}\label{0.smspace} \mathcal E_1:=[H^2(\Omega)\cap H^1_0(\Omega)]\times H^1_0(\Omega),\ \ \xi_u(t)\in\mathcal E_1,\ \ t\ge0. \end{equation} Here and below $H^s(\Omega)$ stands for the usual Sobolev space of distributions whose derivatives up to order $s$ belong to $L^2(\Omega)$ and $H^s_0(\Omega)$ means the closure of $C_0^\infty(\Omega)$ in $H^s(\Omega)$. \par So, there is no critical growth exponent for the class of smooth solutions $\xi_u:=(u,\partial_t u)\in \mathcal E_1$. Dissipativity of the semigroup generated by problem \eqref{0.1g} in the phase space $\mathcal E_1 $ was shown in \cite{Ka88} and \cite{PZ06}; in \cite{PZ06} the regularity of the attractor of the semigroup was also established. \par The global unique solvability, dissipativity and asymptotic regularity of solutions of \eqref{0.1g} {\it without} any growth restrictions (just assuming that $g(u)$ satisfies \eqref{0.2} with arbitrary $q\in \mathbb R_+$) have been relatively recently established in \cite{KaZ08} also for the case of solutions belonging to the natural energy space \begin{equation}\label{0.enspace} \mathcal E_{f=0}:=[H^1_0(\Omega)\cap L^{q+2}(\Omega)]\times L^2(\Omega). \end{equation} Thus, despite the expectations, even on the level of energy solutions there is no critical exponent for the growth rate of $g$ and the analytic and dynamic properties (existence and uniqueness, dissipativity, asymptotic smoothing, attractors and their dimension) look very similar for the cases $q<4$ and $q>4$. This is related to the non-trivial monotonicity properties of the equation considered in the space $L^2(\Omega)\times H^{-1}(\Omega)$, see \cite{KaZ08} for more details. 
\par The alternative case when the other nonlinearity vanishes ($g\equiv0$) also leads to essential simplifications. Indeed, assuming that $h=0$ for simplicity and introducing the new variable $v(t):=\int_0^tu(s)\,ds$, we reduce \eqref{0.1} to \begin{equation*} \partial_t^2v-\gamma\partial_t\Delta_x v+F(\partial_t v)-\Delta_x v=c, \end{equation*} where $c$ depends on the initial data and $F(u):=\int_0^u f(v)\,dv$. Using, e.g., the methods of \cite{Khan08}, one can show the absence of a critical exponent for the growth rate of $f(u)$ in the energy phase space. \par The situation becomes more complicated when both nonlinearities $f$ and $g$ are present in the equation and grow sufficiently fast since the methods developed to treat the case of fast growing $g$ are hardly compatible with the methods for $f$ and vice versa. In particular, the problem of presence or absence of critical growth exponents for the non-linearities $f$ and $g$ is still open here and, to the best of our knowledge, a more or less complete theory for this equation (including existence and uniqueness, dissipativity, asymptotic regularity, attractors, etc.) is built up only for the case where $f$ and $g$ satisfy the growth restrictions $p\le 4$, $q\le4$ and the additional monotonicity restriction \begin{equation}\label{0.grad} f(u)\ge0,\ \ u\in\mathbb R, \end{equation} see \cite{Khan2011} for the details. \par The main aim of these notes is to show that problem \eqref{0.1} is globally well-posed and dissipative at least in the class of the so-called strong solutions $\xi_u\in\mathcal E_1$ {\it without} any restrictions on the growth exponents $p$ and $q$ and without the monotonicity assumption \eqref{0.grad}. To be more precise, the main result of the notes is the following theorem. \begin{theorem}\label{Th0.main} Let $h\in L^2(\Omega)$ and the nonlinearities $f$ and $g$ satisfy assumptions \eqref{0.2}. 
Then, for every $\xi_u(0)\in \mathcal E_1$, there exists a unique strong solution $\xi_u\in C(\mathbb R_+,\mathcal E_1)$ of \eqref{0.1} and the following estimate holds: \begin{equation}\label{0.4} \|\xi_u(t)\|_{\mathcal E_1}\le Q(\|\xi_u(0)\|_{\mathcal E_1})e^{-\alpha t}+Q(\|h\|_{L^2}) \end{equation} for some positive constant $\alpha$ and monotone function $Q$, where $$ \|\xi_u(t)\|^2_{\mathcal E_1}:=\|\nabla_x \partial_t u(t)\|^2_{L^2}+\|\Delta_x u(t)\|^2_{L^2}. $$ \end{theorem} The proof of this theorem is given in Section \ref{s1}. \par The dissipative estimate \eqref{0.4} is strong enough to obtain the existence of a global attractor $\mathcal A$ for the considered system in the phase space $\mathcal E_1$ and verify that its smoothness is restricted by the regularity of the data $f$, $g$ and $h$ only, see Section \ref{s2} for more details. Note also that, in contrast to the majority of papers on the subject, we do not use the monotonicity assumption \eqref{0.grad}. As a result, the equation no longer possesses a global Lyapunov function and non-trivial dynamics on the attractor becomes possible. For instance, our assumptions include the Van der Pol nonlinearities $f(u)=u^3-u$ and $g(u)=u$, so the time periodic orbits (and chaotic dynamics) become possible. Another classical example with non-trivial dynamics is the so-called FitzHugh-Nagumo system: \begin{equation} \begin{cases} \partial_t u=\Delta_x u-\phi(u)-v,\\ \partial_t v=u-v. \end{cases} \end{equation} Indeed, differentiating the first equation with respect to $t$ and removing the variable $v$ using the second equation, we obtain the equation \begin{equation} \partial_t^2 u -\partial_t\Delta_x u+\psi'(u)\partial_t u-\Delta_x u+\psi(u)=0, \end{equation} where $\psi(u)=u+\phi(u)$. So, the FitzHugh-Nagumo system is indeed a particular case of the strongly damped wave equation of the form \eqref{0.1}. 
\par Thus, relaxing the monotonicity assumption \eqref{0.grad} indeed makes the theory essentially more general and interesting. As a price to pay, we lose the control over the possible growth of weak energy solutions. Indeed, the energy equality for our problem reads \begin{equation}\label{0.energy-eq} \frac d{dt}\left(\frac 12\|\partial_t u\|^2_{L^2}+\frac12\|\nabla_x u\|^2_{L^2}+(G(u),1)-(h,u)\right)+\gamma\|\partial_t\nabla_x u\|^2_{L^2}+(f(u)\partial_t u,\partial_t u)=0 \end{equation} (here and below $(u,v)$ stands for the classical inner product in $L^2(\Omega)$ and $G(u):=\int_0^u g(v)\,dv$). We see that under the assumptions \eqref{0.2}, we have only the control $(f(u)\partial_t u,\partial_t u)\ge -C\|\partial_t u\|^2_{L^2}$ which is enough to prove the existence of weak energy solutions but is {\it not sufficient} to verify that they are globally bounded in time. Actually, we do not know how to obtain the dissipative energy estimate on the level of weak energy solutions and for this reason have to consider smoother solutions $\xi_u\in \mathcal E_1$. \section{Main estimate}\label{s1} The main aim of this section is to prove the key estimate \eqref{0.4}. We start with a slightly weaker dissipative estimate in the space $[H^2(\Omega)\cap H^1_0(\Omega)]\times L^2(\Omega)$. In what follows, to simplify the notation, we will denote by $C$ various constants that do not depend on the initial data. \begin{theorem}\label{Th1.main} Let the conditions of Theorem \ref{Th0.main} be satisfied and let $\xi_u\in C(\mathbb R_+,\mathcal E_1)$ be a strong solution of \eqref{0.1}. Then the following estimate holds: \begin{equation}\label{1.4} \|\partial_t u(t)\|_{L^2}^2+\|u(t)\|_{H^2}^2+\int_t^{t+1}\|\partial_t u(s)\|^2_{H^1}\,ds\le Q(\|\partial_t u(0)\|_{L^2}^2+\|u(0)\|_{H^2}^2)e^{-\alpha t}+Q(\|h\|_{L^2}), \end{equation} where the positive constant $\alpha$ and the monotone function $Q$ are independent of $\xi_u$. 
\end{theorem} \begin{proof} The proof of this estimate is strongly based on the new estimate obtained by multiplication of \eqref{0.1} by $v:=\partial_t u-\gamma\Delta_x u+F(u)$, where $F(u):=\int_0^uf(v)\,dv$. It is not difficult to show that under the above assumptions on the solution $\xi_u$, $v\in L^2(\Omega)$ and the multiplication is allowed. Then, after the straightforward transformations, we get \begin{multline}\label{1.5} \frac d{dt}\left(\frac12\|v\|^2_{L^2}+\frac12\|\nabla_x u\|^2_{L^2}+(G(u),1)\right)+\\+\gamma\|\Delta_x u\|^2_{L^2}+(f(u)+\gamma g'(u),|\nabla_x u|^2)+(F(u),g(u))=(h,v). \end{multline} In addition, due to \eqref{0.2} and the assumption $p+q>0$, \begin{multline} (f(u)+g'(u),|\nabla_x u|^2)+(F(u),g(u))\ge\\\ge (|f(u)|+|g'(u)|,|\nabla_x u|^2)+\frac12(|F(u)|+1,|g(u)|+1)- C(\|\nabla_x u\|^2 +\|u\|^2+1) \end{multline} and using the interpolation $\|\nabla_x u\|^2_{L^2}\le\|\Delta_x u\|_{L^2}\|u\|_{L^2}$, we have \begin{multline}\label{1.6} \frac d{dt}\left(\|v\|^2_{L^2}+\|\nabla_x u\|^2_{L^2}+2(G(u),1)\right)+\\+\gamma\|\Delta_x u\|^2_{L^2}+(|f(u)|+\gamma|g'(u)|,|\nabla_x u|^2)+(|F(u)|+1,|g(u)|+1)\le C+2\|h\|_{L^2}\|v\|_{L^2}. \end{multline} This estimate is still not enough to get the desired dissipative estimate since we do not have the positive term related with $\|v\|_{L^2}$ without the differentiation. \par At the next step, we use the energy equality \eqref{0.energy-eq} which is obtained by multiplication of \eqref{0.1} by $\partial_t u$ and which together with our assumptions on $f$ gives \begin{multline}\label{1.7} \frac{d}{dt}\left(\frac12\|\partial_t u\|^2_{L^2}+\frac12\|\nabla_x u\|^2_{L^2}+(G(u),1)\right)+\\+(|f(u)|+1,|\partial_t u|^2)+\gamma\|\partial_t\nabla_x u\|^2_{L^2}\le L(\|\partial_t u\|^2_{L^2}+\|h\|^2_{L^2}) \end{multline} for some positive constant $L$. 
\par To estimate the term in the right-hand side of \eqref{1.7}, we multiply equation \eqref{0.1} by $u$ which gives \begin{equation}\label{1.8} \|\partial_t u\|^2_{L^2}=\frac d{dt}\left((u,\partial_t u)+\frac\gamma2\|\nabla_x u\|^2_{L^2}\right)+\|\nabla_x u\|^2_{L^2}+(g(u),u)+(f(u)\partial_t u,u)-(h,u). \end{equation} Note that, due to our assumptions \eqref{0.2} on the nonlinearity $f$, for any $\beta>0$, \begin{equation} |(f(u)\partial_t u,u)|\le \beta(|f(u)|,|\partial_t u|^2)+C_\beta(|f(u)|+1,u^2+1) \end{equation} Using now the assumption that $p+q>0$, we see that $|f(u)|u^2\sim |u|^{p+2}$ and $|F(u)g(u)|\sim |u|^{p+2+q}$ as $u\to\infty$, therefore \begin{equation}\label{1.good} |(f(u)\partial_t u,u)|\le \beta(|f(u)|,|\partial_t u|^2)+C_\beta(|F(u)|+1,|g(u)|+1)+C_\beta. \end{equation} Inserting this into the right-hand side of \eqref{1.7} and fixing $\beta>0$ being small enough, we arrive at \begin{multline}\label{1.9} \frac{d}{dt}\left(\frac12\|\partial_t u\|^2_{L^2}+\frac12\|\nabla_x u\|^2_{L^2}+(G(u),1)- L((u,\partial_t u)+\frac\gamma2\|\nabla_x u\|^2_{L^2})\right)+\\+\frac12(|f(u)|+1,|\partial_t u|^2)+\gamma\|\partial_t\nabla_x u\|^2_{L^2}\le CL(|F(u)|+1,|g(u)|+1)+C(\|\nabla_x u\|^2_{L^2}+\|h\|^2_{L^2}+1). \end{multline} Taking a sum of \eqref{1.9} multiplied by a small parameter $\kappa>0$ with \eqref{1.6}, we have \begin{multline}\label{1.10} \frac d{dt}\left(\|v\|^2_{L^2}+(1+\frac\kappa2-L\frac{\kappa\gamma}2)\|\nabla_x u\|^2_{L^2}+\right.\\\left.+(2+\kappa)(G(u),1)-L\kappa(u,\partial_t u)+\frac\kappa2\|\partial_t u\|^2_{L^2}\right)+\\+ \beta\left(\|\partial_t u\|^2_{L^2}+\|\Delta_x u\|^2_{L^2}+(|f(u)|+1,|\partial_t u|^2)+\right.\\\left.+(|f(u)|+\gamma|g'(u)|,|\nabla_x u|^2)+(|F(u)|+1,|g(u)|+1)+\|\partial_t\nabla_x u\|^2_{L^2}\right)\le\\\le C(\|h\|_{L^2}\|v\|_{L^2}+\|h\|^2_{L^2}+1). \end{multline} for some positive constant $\beta$ depending on $\kappa$. 
We now note that it is possible to fix $\kappa$ being small enough that the function \begin{equation} \mathcal E_u(t):=\|v\|^2_{L^2}+(1+\frac\kappa2-L\frac{\kappa\gamma}2)\|\nabla_x u\|^2_{L^2}+(2+\kappa)(G(u),1)-L\kappa(u,\partial_t u)+\frac\kappa2\|\partial_t u\|^2_{L^2} \end{equation} will satisfy the inequalities \begin{multline}\label{1.11} \alpha\left(\|v\|^2_{L^2}+\|\partial_t u\|^2_{L^2}+\|\nabla_x u\|^2_{L^2}+(|G(u)|,1)\right)-C_1\le \mathcal E_u(t)\le\\\le C\left(1+\|v\|^2_{L^2}+\|\partial_t u\|^2_{L^2}+\|\nabla_x u\|^2_{L^2}+(|G(u)|,1)\right) \end{multline} for some positive $\alpha$. Thus, \eqref{1.10} reads \begin{multline}\label{1.12} \frac d{dt}\mathcal E_u(t)+ \beta\left(\|\partial_t u\|^2_{L^2}+\|\Delta_x u\|^2_{L^2}+(|f(u)|+1,|\nabla_x u|^2)+(|F(u)|+1,|g(u)|+1)\right)+\\+\beta(|f(u)|+1,|\partial_t u|^2)\le C(\|h\|_{L^2}\|v\|_{L^2}+\|h\|^2_{L^2}+1). \end{multline} Let now $q\ge p$. Then $$(|F(u)|+1)(|g(u)|+1)\sim|u|^{p+q+2}\ge|u|^{2p+2}\sim F(u)^2, $$ and using the obvious estimate \begin{equation} \|v\|^2_{L^2}\le C(\|\partial_t u\|^2_{L^2}+\|\Delta_x u\|^2_{L^2}+\|F(u)\|^2_{L^2}), \end{equation} we see that, in the case $q\ge p$, \eqref{1.12} implies \begin{equation}\label{1.13} \frac d{dt}\mathcal E_u(t)+\beta \mathcal E_u(t)\le C(\|h\|^2_{L^2}+1) \end{equation} for some positive $\beta$. \par It only remains to study the case $p>q$. In this case, we extract the desired $L^2$ norm of $F(u)$ from the term $(|f(u)|+1,|\nabla_x u|^2)$. Indeed \begin{equation}\label{26} (|f(u)|+1,|\nabla_x u|^2)\ge \alpha(|u|^p,|\nabla_x u|^2)=\alpha_1\|\nabla_x(|u|^{(p+2)/2})\|^2_{L^2}. \end{equation} Since $H^1_0(\Omega)$ is continuously embedded into $L^4(\Omega)$ we have $$ \alpha_1\|\nabla_x(|u|^{(p+2)/2})\|^2_{L^2}\geq \alpha_3\||u|^{p+2}\|_{L^2}, $$ and we obtain from \eqref{26} that $$ (|f(u)|+1,|\nabla_x u|^2)\ge \alpha_3\|F(u)\|_{L^2}^{\frac{p+2}{p+1}}-C $$ for some positive $\alpha_i, \ i=1,2,3$. 
Thus, \eqref{1.12} now reads \begin{equation}\label{1.14} \partial_t \mathcal E_u(t)+\beta[\mathcal E_u(t)]^{\frac{p+2}{2(p+1)}}\le C(\|h\|_{L^2}[\mathcal E_u(t)]^{1/2}+\|h\|^2_{L^2}+1). \end{equation} Since $\frac{p+2}{2(p+1)}>\frac12$, the Gronwall type inequality works in both cases and gives the dissipative estimate for $\mathcal E_u(t)$: \begin{equation}\label{1.edis} \mathcal E_u(t)\le Q(\mathcal E_u(0))e^{-\alpha t}+Q(\|h\|_{L^2}), \end{equation} where the positive constant $\alpha$ and monotone function $Q$ are independent of $t$ and $u$. Note also that, due to the maximal regularity result for the semilinear heat equation, \begin{equation}\label{1.vu} C(\|\partial_t u(t)\|^2_{L^2}+\|u(t)\|^2_{H^2})-C\le\|v(t)\|^2_{L^2}\le Q(\|\partial_t u(t)\|^2_{L^2}+ \|u(t)\|^2_{H^2}) \end{equation} for some positive $C$ and $Q$. The desired estimate \eqref{0.4} follows in a straightforward way from \eqref{1.edis} and \eqref{1.vu}. Thus, Theorem \ref{Th1.main} is proved. \end{proof} Next proposition gives the uniqueness of the strong solution of equation \eqref{0.1}. \begin{proposition}\label{Prop1.unique} Let the conditions of the Theorem \ref{Th0.main} hold and let $\xi_{u_1},\xi_{u_2}\in C(\mathbb R_+,\mathcal E_1)$ be two solutions of the problem \eqref{0.1}. Then, the following estimate holds: \begin{equation}\label{1.lip} \|\xi_{u_1}(t)-\xi_{u_2}(t)\|_{\mathcal E}\le Ce^{Kt}\|\xi_{u_1}(0)-\xi_{u_2}(0)\|_{\mathcal E}, \end{equation} where the constants $C$ and $K$ depend on the initial data and $\|\xi_u\|^2_{\mathcal E}:=\|\nabla_x u\|^2_{L^2}+\|\partial_t u\|^2_{L^2}$. \end{proposition} \begin{proof} Indeed, let $v:=u_1-u_2$. Then, this function solves \begin{equation}\label{1.dif} \partial_t^2 v+f(u_1)\partial_t v-\gamma\Delta_x\partial_t v-\Delta_x v=-[f(u_1)-f(u_2)]\partial_t u_2-[g(u_1)-g(u_2)]. 
\end{equation} Multiplying this equation by $\partial_t v$ and using the estimate \eqref{1.4} together with the embeddings $H^2(\Omega)\subset C(\Omega)$ and $H^1(\Omega)\subset L^4(\Omega)$, we have \begin{multline} \frac12\frac d{dt}\|\xi_v\|^2+\gamma\|\nabla_x\partial_t v\|^2_{L^2}+(f(u_1)\partial_t v,\partial_t v)=-\\ -([f(u_1)-f(u_2)]\partial_t u_2,\partial_t v)-(g(u_1)-g(u_2),\partial_t v)\le\\\le C\|v\|_{L^2} \|\partial_t v\|_{L^4}\|\partial_t u_2\|_{L^4}+C\|v\|_{L^2}\|\partial_t v\|_{L^2}\le\\\le \frac \gamma2\|\nabla_x \partial_t v\|^2_{L^2}+C\|\nabla_x\partial_t u_2\|^2_{L^2}\|v\|^2_{L^2}. \end{multline} Thus, we end up with the following inequality \begin{equation} \frac d{dt}\|\xi_v\|^2+\gamma\|\nabla_x\partial_t v\|^2_{L^2}\le C\|\nabla_x \partial_t u_2\|^2_{L^2} \|\xi_v\|^2_{\mathcal E} \end{equation} and the Gronwall inequality applied to this relation finishes the proof of the proposition. \end{proof} We are now ready to check the dissipativity in $\mathcal E_1$. \begin{proposition}\label{Prop1.e1dis} Let the conditions of Theorem \ref{Th0.main} be satisfied. Then, for every $\xi_u(0)\in \mathcal E_1$, there is a unique solution $\xi_u\in C(\mathbb R_+,\mathcal E_1)$ of the problem \eqref{0.1} and the following estimate holds: \begin{equation}\label{1.e1dis} \|\xi_u(t)\|_{\mathcal E_1}+\int_t^{t+1}\|\partial_t u(s)\|^2_{H^2}\,ds\le Q(\|\xi_u(0)\|_{\mathcal E_1})e^{-\alpha t}+Q(\|h\|_{L^2}), \end{equation} for some positive constant $\alpha$ and monotone function $Q$. \end{proposition} \begin{proof} We restrict ourselves to the formal derivation of the dissipative estimate \eqref{1.e1dis}. The existence of a solution as well as the justification of this derivation can be done in a standard way using, e.g., Galerkin approximations. Moreover, due to \eqref{1.4}, we only need to obtain the control over the higher norms of the derivative $\partial_t u$. To this end, we multiply equation \eqref{0.1} by $-\partial_t\Delta_x u$. 
Then, after some transformations, we get \begin{multline}\label{1.dt} \frac12\frac d{dt}\|\xi_u\|^2_{\mathcal E_1}+\|\xi_u\|^2_{\mathcal E_1}+\gamma\|\partial_t\Delta_x u\|^2_{L^2}= (f(u)\partial_t u,\partial_t\Delta_x u)+\\+(g(u),\partial_t \Delta_x u)+\|\partial_t\nabla_x u\|^2_{L^2}+\|\Delta_x u\|^2_{L^2}\le C(\|\partial_t u\|^2_{L^2}+\|\Delta_x u\|^2_{L^2})+\frac\gamma2\|\partial_t\Delta_x u\|^2_{L^2}, \end{multline} where we have implicitly used that $H^2(\Omega)\subset C(\Omega)$ and the interpolation $\|v\|_{H^1}^2\le C\|v\|_{L^2}\|v\|_{H^2}$. The obtained estimate gives \begin{equation} \frac d{dt}\|\xi_u\|^2_{\mathcal E_1}+\|\xi_u\|^2_{\mathcal E_1}+ \gamma\|\partial_t \Delta_x u\|^2_{L^2}\le C(\|\partial_t u\|^2_{L^2}+\|\Delta_x u\|^2_{L^2}) \end{equation} and the Gronwall inequality together with \eqref{1.4} finishes the proof of the proposition. \end{proof} \section{A global attractor}\label{s2} In this section, we study the long-time behavior of solutions of the problem \eqref{0.1} in terms of the associated global attractor. For the reader's convenience, we first recall the key definitions of attractor theory, see \cite{BV,tem} for more details. \par According to Proposition \ref{Prop1.e1dis}, the solution operators of the problem \eqref{0.1} generate a semigroup in the phase space $\mathcal E_1$ \begin{equation}\label{2.sem} S(t)\xi_u(0):=\xi_u(t),\ \ S(t):\mathcal E_1\to\mathcal E_1,\ \ S(t+h)=S(t)\circ S(h),\ t,h\ge0. \end{equation} Moreover, according to the estimate \eqref{1.e1dis}, the semigroup $S(t)$ is {\it dissipative} in the phase space $\mathcal E_1$, i.e., the estimate \begin{equation}\label{2.dis} \|S(t)\xi\|_{\mathcal E_1}\le Q(\|\xi\|_{\mathcal E_1})e^{-\alpha t}+Q(\|h\|_{L^2}),\ \ \xi\in\mathcal E_1 \end{equation} holds for some positive constant $\alpha$ and monotone function $Q$. \begin{definition}\label{Def2.set} Let $S(t):\mathcal E_1\to\mathcal E_1$ be a semigroup. 
A set $\mathcal B\subset\mathcal E_1$ is called an attracting set for this semigroup if for every bounded set $B\subset\mathcal E_1$ and every neighborhood $\mathcal O(\mathcal B)$ of the set $\mathcal B$, there exists $T=T(B,\mathcal O)$ such that \begin{equation} S(t)B\subset \mathcal O(\mathcal B) \end{equation} for all $t\ge T$. \end{definition} \begin{definition}\label{Def2.attr} Let $S(t):\mathcal E_1\to\mathcal E_1$ be a semigroup. A set $\mathcal A$ is called a global attractor for the semigroup $S(t)$ if \par 1. The set $\mathcal A$ is compact in $\mathcal E_1$; \par 2. The set $\mathcal A$ is strictly invariant: $S(t)\mathcal A=\mathcal A$ for all $t\ge0$; \par 3. The set $\mathcal A$ is an attracting set for the semigroup $S(t)$. \end{definition} To verify the existence of a global attractor, we will use the following version of an abstract attractor existence theorem. \begin{proposition}\label{Prop2.attr} Let $S(t):\mathcal E_1\to\mathcal E_1$ be a semigroup satisfying the following two assumptions: \par 1.\ There exists a compact attracting set $\mathcal B$ for the semigroup $S(t)$; \par 2. \ For every $t\ge0$, the map $S(t):\mathcal E_1\to\mathcal E_1$ has a closed graph in $\mathcal E_1\times\mathcal E_1$. \par Then, this semigroup possesses a global attractor $\mathcal A\subset\mathcal B$ which is generated by all complete bounded trajectories: \begin{equation} \mathcal A=\mathcal K\big|_{t=0}, \end{equation} where $\mathcal K\subset L^\infty(\mathbb R,\mathcal E_1)$ is a set of functions $u:\mathbb R\to\mathcal E_1$ such that $S(t)u(h)=u(t+h)$ for all $h\in\mathbb R$ and~$t\ge0$. \end{proposition} For the proof of this proposition, see \cite{PZ}. \par We are now ready to state and prove the main result of this section. \begin{theorem}\label{Th2.main} Suppose that the conditions of the Theorem \ref{Th0.main} are satisfied. 
Then the semigroup $S(t)$ associated with problem \eqref{0.1} possesses a global attractor $\mathcal A$ in the phase space $\mathcal E_1$. \end{theorem} \begin{proof} Indeed, the second assumption of Proposition \ref{Prop2.attr} is an immediate corollary of Proposition \ref{Prop1.unique}, so we only need to check the first one. To this end, we split a solution $u$ of equation \eqref{0.1} into a sum $u(t):=v(t)+w(t)$, where the function $v$ solves the linear problem: \begin{equation}\label{2.linear} \begin{cases} \partial_t^2v-\gamma\partial_t\Delta_x v-\Delta_x v=h,\ \ v\big|_{\partial\Omega}=0,\\ \xi_v\big|_{t=0}=\xi_u\big|_{t=0} \end{cases} \end{equation} and the remainder $w$ satisfies \begin{equation}\label{2.w} \begin{cases} \partial_t^2w-\gamma\partial_t\Delta_x w-\Delta_x w=-f(u)\partial_t u-g(u),\ \ \ \ w\big|_{\partial\Omega}=0,\\ \xi_w\big|_{t=0}=0. \end{cases} \end{equation} Moreover, without loss of generality, we may assume that $g(0)=0$. The properties of functions $v$ and $w$ are collected in the following two lemmas. \begin{lemma}\label{Lem2.conv} Let the above assumptions hold and let $H=H(x)\in H^2(\Omega)\cap H^1_0(\Omega)$ be the solution of the problem $$ -\Delta_x H=h, \ x \in \Omega; \ H\big|_{\partial\Omega}=0. $$ Then, the following estimate holds: \begin{equation}\label{2.decay} \|\xi_v(t)-\xi_H\|_{\mathcal E_1}\le C(\|\xi_u(0)\|_{\mathcal E_1}+\|h\|_{L^2})e^{-\alpha t}, \end{equation} where $\xi_H=(H,0)$ and positive constants $C$ and $\alpha$ are independent of $u$. \end{lemma} \begin{proof}[Proof of the Lemma] Indeed, introducing the new variable $\tilde v(t):=v(t)-H$, we reduce \eqref{2.linear} to the homogeneous form \begin{equation} \begin{cases} \partial_t^2 \tilde v-\gamma\partial_t\Delta_x \tilde v-\Delta_x \tilde v=0,\ \ \tilde v\big|_{\partial\Omega}=0,\\ \xi_{\tilde v}\big|_{t=0}=\xi_u\big|_{t=0}-\xi_H. 
\end{cases} \end{equation} Multiplying this equation by $\partial_t\Delta_x\tilde v+\beta\Delta_x\tilde v$, where $\beta$ is a small positive parameter, and arguing in a standard way we derive that \begin{equation} \|\xi_{\tilde v}(t)\|^2_{\mathcal E_1}\le C\|\xi_{\tilde v}(0)\|^2_{\mathcal E_1}e^{-\alpha t} \end{equation} for some positive $C$ and $\alpha$, see e.g., \cite{BV,tem} as well as the proof of Lemma \ref{Lem2.comp} below. The desired estimate \eqref{2.decay} is an immediate corollary of this estimate and Lemma \ref{Lem2.conv} is proved. \end{proof} Thus, we have proved that the $v$ component of the solution $u$ converges exponentially to a single function $H\in H^2(\Omega)$ which is independent of time and the initial data. The next lemma shows that the $w$ component is more regular. \begin{lemma}\label{Lem2.comp} Let the above assumptions hold and let \begin{equation} \mathcal E_2:=[H^3(\Omega)\cap\{u\big|_{\partial\Omega}=\Delta_x u\big|_{\partial\Omega}=0\}] \times [H^2(\Omega)\cap H^1_0(\Omega)]. \end{equation} Then the solution $w$ of problem \eqref{2.w} belongs to $\mathcal E_2$ for all $t\ge0$ and the following estimate holds: \begin{equation}\label{2.wcomp} \|\xi_w(t)\|_{\mathcal E_2}\le Q(\|\xi_u(0)\|_{\mathcal E_1})e^{-\alpha t}+Q(\|h\|_{L^2}), \end{equation} for some positive constant $\alpha$ and monotone function $Q$ which are independent of $u$. \end{lemma} \begin{proof}[Proof of the Lemma] We give below only the formal derivation of estimate \eqref{2.wcomp} which can be justified e.g., using the Galerkin approximations. First, due to the assumption $g(0)=0$, it follows from the equation \eqref{2.w} that at least formally $\Delta_x w\big|_{\partial\Omega}=0$, so we may multiply equation \eqref{2.w} by $\partial_t\Delta_x^2 w+\beta \Delta_x^2 w$ and do integration by parts. 
This gives \begin{multline}\label{2.huge} \frac d{dt}\left(\frac 12\|\xi_w\|^2_{\mathcal E_2}+\beta(\nabla_x\Delta_x w,\nabla_x \partial_t w)+ \frac{\gamma\beta}2\|\nabla_x\Delta_x w\|^2_{L^2}\right)+\beta\|\nabla_x\Delta_x w\|^2_{L^2}- \beta\|\partial_t\Delta_x w\|^2_{L^2}+\\+\gamma\|\partial_t\nabla_x\Delta_x w\|^2_{L^2}=(\nabla_x(f(u)\partial_t u+g(u)), \nabla_x(\partial_t\Delta_x w+\beta\Delta_x w)). \end{multline} Fixing $\beta>0$ small enough and using the notation $$E_2(w):=\frac 12\|\xi_w\|^2_{\mathcal E_2}+\beta(\nabla_x\Delta_x w,\nabla_x \partial_t w)+ \frac{\gamma\beta}2\|\nabla_x\Delta_x w\|^2_{L^2}$$ we see that, on the one hand, \begin{equation}\label{2.equiv} C_1\|\xi_w\|_{\mathcal E_2}^2\le E_2(w)\le C_2\|\xi_w\|^2_{\mathcal E_2} \end{equation} for some positive constants $C_1$ and $C_2$. On the other hand, the equation \eqref{2.huge} implies that \begin{equation}\label{2.gron} \frac d{dt}E_2(w)+\alpha E_2(w)\le C(\|f(u)\partial_t u\|^2_{H^1}+\|g(u)\|^2_{H^1}) \end{equation} for some positive constants $C$ and $\alpha$. Finally, using the embedding $H^2(\Omega)\subset C(\Omega)$ and growth restrictions \eqref{0.2}, we estimate the right-hand side of \eqref{2.gron} as follows: \begin{equation}\label{2.last} \|f(u)\partial_t u\|^2_{H^1}+\|g(u)\|^2_{H^1}\le C(\|u\|_{H^2}^{p+q+1}+1)(\|\partial_t\nabla_x u\|^2_{L^2}+1). \end{equation} Applying the Gronwall inequality to \eqref{2.gron} and using \eqref{2.last} and \eqref{2.equiv} together with the dissipative estimate \eqref{1.e1dis}, we derive the desired estimate \eqref{2.wcomp} and finish the proof of Lemma \ref{Lem2.comp}. \end{proof} It is not difficult now to finish the proof of the theorem. Indeed, Lemmas \ref{Lem2.conv} and \ref{Lem2.comp} show that the set \begin{equation} \mathcal B:=\xi_H+\{w\in \mathcal E_2,\ \|\xi_w\|_{\mathcal E_2}\le R\} \end{equation} will be a compact attracting set for the semigroup $S(t)$ generated by the problem \eqref{0.1} if $R$ is large enough. 
Thus, all assumptions of Proposition \ref{Prop2.attr} are verified and the theorem is proved. \end{proof} \begin{remark} As was already mentioned in the introduction, we do not know how to deduce the basic dissipative estimate for the {\it weak} solutions of the problem \eqref{0.1} in the phase space $\mathcal E$ in the case when the condition \eqref{0.grad} is violated. However, as follows from Theorem \ref{Th1.main}, we have such an estimate in the intermediate space $[H^2(\Omega)\cap H^1_0(\Omega)]\times L^2(\Omega)$ which is in a sense natural for strongly damped wave equations, see \cite{PZ06,KaZ08}. Actually, the problem is well posed in this space and the above developed attractor theory can be extended to this phase space as well. \end{remark} \end{document}
\begin{document} \centerline{\bf Simulation of an entangled state in a chain of three nuclear spins system } \vskip2pc \centerline{G.V. L\'opez and L. Lara} \centerline{Departamento de F\'{\i}sica, Universidad de Guadalajara} \centerline{Apartado Postal 4-137, 44410 Guadalajara, Jalisco, M\'exico} \vskip2pc \centerline{PACS: 03.67.Lx, 03.65.Ta} \vskip2cm \centerline{ABSTRACT} \vskip1pc\noindent We study the formation of an entangled state in a one-dimensional chain of three nuclear spins system which interact weakly through an Ising-type interaction, taking into account first and second neighbor interactions. We can get this entangled state using two pulses ($\pi/2$ and $\pi$ pulses), and we study the efficiency of getting this entangled state as a function of the ratio of the second neighbor interaction coupling constant to the first neighbor interaction coupling constant ($J'/J$). We found that for $J'/J\ge 0.04$, the entangled state is well defined. \vfil\eject\noindent {\bf 1. Introduction} \vskip0.5pc\noindent The huge interest in quantum computation and quantum information was triggered by the polynomial time solution of the prime factorization problem (Shor's algorithm [1]), the fast data base searching (Grover's algorithm [2]), error correction codes [3], robust entanglement [4], and the teleportation phenomenon [5]. Almost any quantum system with at least two quantum levels may be used, in principle, for quantum computation. It uses qubits (quantum bits) instead of bits to process information. A qubit is the superposition of any two levels of the system, called $|0\rangle$ and $|1\rangle$ states, $\Psi=C_0|0\rangle+C_1|1\rangle$ with $|C_0|^2+|C_1|^2=1$. 
The tensorial product of $L$-qubits makes up a register of length $L$, say $|x\rangle=|i_{L-1},\dots,i_0\rangle$, with $i_j=0,1$, and a quantum computer with $L$-qubits works in a $2^L$ dimensional Hilbert space, where an element of this space is of the form $\Psi=\sum C_x|x\rangle$, with $\sum|C_x|^2=1$. \vskip0.5pc\noindent Quantum computers of a few qubits [6] have been in operation and have been used to explore quantum gates, entanglement, small number Shor's factorization, small data base Grover's searching, teleportation, error corrections, and cryptography. However, to make serious computer calculations one may require a quantum computer with at least 100-qubit registers, and we think that will hopefully be achieved in the near future. One solid state quantum computer model that has been explored for physical realization and which allows analytical studies is the one made up by a one-dimensional chain of nuclear spins systems [7], where the Ising interaction among first neighbor spins ideally allows one to implement this type of computer with 1000 qubits or more [8]. One of the important phenomena we studied with this model was the entangled state formation [9]. In this paper, we consider second neighbor spin interaction in a chain of three nuclear spins system. We show that this allows us to implement an entangled state using two pulses ($\pi/2$ and $\pi$), and we determine the threshold of the second neighbor interaction coupling parameter to get a well defined entangled state. \vskip2pc \leftline{\bf 2. Equation of Motion} \vskip1pc\noindent Consider a one-dimensional chain of three equally spaced nuclear-spins system (spin one half) making an angle $\cos\theta=1/\sqrt{3}$ with respect to the z-component of the magnetic field (selected in this way to eliminate the dipole-dipole interaction between spins) and having an rf-magnetic field in the transversal plane. 
The magnetic field is given by $${\bf B}=(b\cos(\omega t+\varphi), -b \sin(\omega t+\varphi), B_o(z))\ ,\eqno(1)$$ where $b$ is the amplitude of the rf-field, $B_o(z)$ is the amplitude of the z-component of the magnetic field, $\omega$ and $\varphi$ are the angular frequency and phase of the rf-field. So, the Hamiltonian of the system is given by $$H=-\sum_{k=0}^2{\bf \mu_k}\cdot {\bf B_k}-2J\hbar\sum_{k=0}^1I_k^zI_{k+1}^z -2J'\hbar\sum_{k=0}^0I_k^zI_{k+2}^z\ ,\eqno(2)$$ where ${\bf\mu_k}$ represents the magnetic moment of the kth-nucleus which is given in terms of the nuclear spin as ${\bf\mu_k}=\hbar\gamma(I_k^x,I_k^y, I_k^z)$, where $\gamma$ is the proton gyromagnetic ratio. ${\bf B_k}$ represents the magnetic field at the location of the $kth$-spin ($z=z_k$). The second term on the right side of (2) represents the first neighbor spin interaction, and the third term represents the second neighbor spin interaction. $J$ and $J'$ are the coupling constants for these interactions. This Hamiltonian can be written in the following way $$H=H_0+W\ ,\eqno(3a)$$ where $H_0$ and $W$ are given by $$H_0=-\hbar\left\{\sum_{k=0}^2\omega_kI_k^z+2J(I_0^zI_1^z+I_1^zI_2^z)+2J'I_0^zI_2^z\right\}\eqno(3b)$$ and $$W=-{\hbar\Omega\over 2}\sum_{k=0}^2\biggl[e^{i(\omega t+\varphi)}I_k^++e^{-i(\omega t+\varphi)}I_k^-\biggr]\ ,\eqno(3c)$$ where $\omega_k=\gamma B_o(z_k)$ is the Larmor frequency of the kth-spin, $\Omega=\gamma b$ is the Rabi frequency, and $I_k^{\pm}=I_k^x\pm iI_k^y$ represents the raising operator (+) or the lowering operator (-). 
The Hamiltonian $H_0$ is diagonal on the basis $\{|i_2i_1i_0\rangle\}$, where $i_j=0,1$ (0 for the ground state and 1 for the excited state), $$H_0|i_2i_1i_0\rangle=E_{i_2i_1i_0}|i_2i_1i_0\rangle\ .\eqno(4a)$$ The eigenvalues $E_{i_2i_1i_0}$ are given by $$E_{i_2i_1i_0}=-{\hbar\over 2}\biggl\{(-1)^{i_2}\omega_2+(-1)^{i_1}\omega_1+(-1)^{i_0}\omega_0+ J[(-1)^{i_0+i_1}+(-1)^{i_1+i_2}]+(-1)^{i_0+i_2}J'\biggr\}\ .\eqno(4b)$$ The term (3c) of the Hamiltonian (3a) allows single-spin transitions among the above eigenstates by choosing the proper resonant frequency, as shown in Figure 1. In this work, we are interested in the transitions $|000\rangle\longleftrightarrow |001\rangle$ and $|001\rangle\longleftrightarrow |101\rangle$ which have the resonant frequencies $$\omega=\omega_0+J+J'\eqno(5a)$$ and $$\omega=\omega_2+J-J'\ .\eqno(5b)$$ To solve the Schr\"odinger equation $$i\hbar{\partial\Psi\over\partial t}=H\Psi\ ,\eqno(6)$$ let us propose a solution of the form $$\Psi(t)=\sum_{k=0}^7C_k(t)|k\rangle\ ,\eqno(6')$$ where we have used decimal notation for the eigenstates in (4a), $H_0|k\rangle=E_k|k\rangle$. Substituting (6') in (6), multiplying by the bra $\langle m|$, and using the orthogonality relation $\langle m|k\rangle=\delta_{mk}$, we get the following equation for the coefficients $$i\hbar\dot C_m=E_mC_m+\sum_{k=0}^7C_k\langle m|W|k\rangle\ \ m=0,\dots,7.\eqno(7)$$ Now, using the following transformation $$C_m(t)=D_m(t)e^{-iE_m t/\hbar}\ ,\eqno(8)$$ the fast oscillation term $E_mC_m$ of Eq. (7) is removed (this is equivalent to going to the interaction representation), and the following equation is obtained for the coefficients $D_m$ $$i\dot D_m={1\over\hbar}\sum_{k=0}^7W_{mk}D_ke^{i\omega_{mk}t}\ ,\eqno(9a)$$ where $W_{mk}$ denotes the matrix elements $\langle m|W|k\rangle$, and $\omega_{mk}$ are defined as $$\omega_{mk}={E_m-E_k\over\hbar}\ .\eqno(9b)$$ Eq. 
(9a) represents a set of sixteen real coupled ordinary differential equations which can be solved numerically, and where $W_{mk}$ are the elements of the matrix $$(W)=-{\hbar\Omega\over 2}\pmatrix{ 0 & z^* & z^* & 0 & z^* & 0 & 0 & 0 \cr z & 0 & 0 & z^* & 0 & z^* & 0 & 0 \cr z & 0 & 0 & z^* & 0 & 0 & z^* & 0 \cr 0 & z & z & 0 & 0 & 0 & 0 & z^*\cr z & 0 & 0 & 0 & 0 & z^* & z^* & 0 \cr 0 & z & 0 & 0 & z & 0 & 0 & z^*\cr 0 & 0 & z & 0 & z & 0 & 0 & z^*\cr 0 & 0 & 0 & z & 0 & z & z & 0 \cr}\ ,\eqno(9c)$$ where $z$ is defined as $z=e^{i(\omega t+\varphi)}$, and $z^*$ is its complex conjugate. \vskip2pc \leftline{\bf 3. Numerical Simulations} \vskip1pc\noindent We start with the ground state, $\Psi_0=|000\rangle$, of the system and apply a $\pi/2$-pulse with $\varphi=0$ and with frequency $\omega=\omega_0+J+J'$ to get the superposition state $$\Psi_1={1\over\sqrt{2}}\biggl(|000\rangle+|001\rangle\biggr)\ .\eqno(10)$$ Then, we apply a $\pi$-pulse with $\varphi=0$ and frequency $\omega=\omega_2+J-J'$ to get the entangled state $$\Psi_2={1\over\sqrt{2}}\biggl(|000\rangle-|101\rangle\biggr)\ .\eqno(11)$$ The entangled state with plus sign can be gotten using a phase $\varphi=\pi$. To solve numerically (9a), we select values for the parameters similar to those of references 8 and 9. So, in units of $2\pi\times MHz$, we set the following values $$\omega_0=100\ ,\ \omega_1=200\ ,\ \omega_2=400\ ,\ J=5\ ,\ \Omega=0.1\eqno(12)$$ The coupling constant $J'$ is chosen at least one order of magnitude less than $J$ since in the chain of spins one expects the second neighbor contribution to be at least one order of magnitude weaker than the first neighbor contribution, depending on the interspace separation of the nuclei. In all our simulations the total probability, $\sum|C_k(t)|^2$, is conserved equal to one within a precision of $10^{-6}$. 
\vskip0.5pc\noindent Figure 2 shows the behavior of $Re~ D_0$, $Im~D_0$, $Re~D_5$, and $Im~D_5$ during the two pulses ($t=\tau=\pi/2\Omega+\pi/\Omega$) for the digital initial state and with $J'=0.2$. We can see the formation of the superposition state after the first $\pi/2$-pulse and the formation of the entangled state (11) after the following $\pi$-pulse. Fig. 3 shows the behavior of the probabilities $|C_k|^2$, $k=0,\dots,7$ during the two pulses with the clear formation of the superposition state and the entangled state. Fig. 4 shows the behavior of the expected z-component of the spin, $$\langle I_0^z\rangle={1\over 2}\sum_{k=0}^7(-1)^k|C_k(t)|^2\ ,\eqno(13a)$$ $$\langle I_1^z\rangle={1\over 2}\biggl\{|C_0|^2+|C_1|^2-|C_2|^2-|C_3|^2+|C_4|^2+|C_5|^2-|C_6|^2-|C_7|^2\biggr\}\ ,\eqno(13b)$$ and $$\langle I_2^z\rangle={1\over 2}\biggl\{\sum_{k=0}^3|C_k|^2-\sum_{k=4}^7|C_k|^2\biggr\}\ ,\eqno(13c)$$ during the two pulses. As one could expect, $\langle I_0^z\rangle=\langle I_2^z\rangle=0$ at the end of the two pulses due to the formation of the entangled state (11). The expected value of the spin is rotating in the plane $(x,y)$ as is shown in Fig. 5. 
These transversal expected values are given by $$\langle I_0^x\rangle=Re\biggl(C_1^*C_0+C_3^*C_2+C_5^*C_4+C^*_7C_6\biggr)\ ,\hskip1cm\langle I_0^y\rangle=Im\biggl(\dots\biggr)\ ,\eqno(14a)$$ $$\langle I_1^x\rangle=Re\biggl(C_2^*C_0+C_3^*C_1+C_6^*C_4+C^*_7C_5\biggr)\ ,\hskip1cm\langle I_1^y\rangle=Im\biggl(\dots\biggr)\ ,\eqno(14b)$$ and $$\langle I_2^x\rangle=Re\biggl(C_4^*C_0+C_5^*C_1+C_6^*C_2+C^*_7C_3\biggr)\ ,\hskip1cm\langle I_2^y\rangle=Im\biggl(\dots\biggr)\ .\eqno(14c)$$ To determine the range of values of $J'$ (second neighbor coupling constant) for which the entangled state is well defined after the two pulses, that is, where the other resonances and non-resonant transitions are canceled, we calculate the fidelity parameter [10] for this process, $$F=\langle\Psi_{expected}|\Psi_{numerical}\rangle\ ,\eqno(15)$$ where $\Psi_{expected}$ is our state (11), and $\Psi_{numerical}$ is the resulting wave function from our simulations. Fig. 6 shows the fidelity as a function of the ratio of the second neighbor interaction constant to first neighbor interaction constant, $J'/J$. As one can see, for a value $J'/J\ge 0.04$ one gets a very well defined entangled state. This means that even with a second-neighbor interaction up to two orders of magnitude weaker than the first-neighbor interaction, we can generate an entangled state in this system. \vfil\eject \vskip3pc\noindent \leftline{\bf 4. Conclusions and Comments} \vskip0.5pc\noindent We have studied the formation of an entangled state using two pulses in a chain of three nuclear spins system with first and second neighbor Ising spin interaction. The characteristics of the entangled state were determined, and we found that the entanglement can be realized even for very weak second neighbor spin interaction ($J'/J\ge 0.04$). 
We consider that the coupling constant $J'$ may play an important role in the so called $2\pi$-method found in references [8,9] to suppress non-resonant transitions in the chain of nuclear spins system because this parameter will enter in the detuning parameter ($\Delta=(E_p-E_m)/\hbar-\omega$). \vskip5pc\noindent \leftline{\bf Acknowledgements} \vskip0.5pc\noindent This work was supported by SEP under the contract PROMEP/103.5/04/1911 and the University of Guadalajara. \vfil\eject \leftline{\bf Figure Captions} \vskip1pc\noindent Fig. 1 Energy levels and resonant frequencies of interest. \vskip0.5pc\noindent Fig. 2 Entangled state formation, (1): $Re~ D_0$, (2): $Im~D_0$, (3): $Re~D_5$, (4): $Im~D_5$ with $J'=0.2$. \vskip0.5pc\noindent Fig. 3 Probabilities for $J'=0.2$, (k): $|C_k(t)|^2$ for $k=0,\dots,7$. \vskip1pc\noindent Fig. 4 Expected values (a): $\langle I_0^z\rangle$, (b): $\langle I_1^z\rangle$, and (c): $\langle I_2^z\rangle$. \vskip0.5pc\noindent Fig. 5 For $J'=0.2$, expected values of the transversal components of the spin. \vskip0.5pc\noindent Fig. 6 Real, Imaginary parts of the Fidelity, and its modulus. \vfil\eject \leftline{\bf References} \obeylines{ 1. P.W. Shor, {\it Proc. of the 35th Annual Symposium on Foundations \quad of Computer Science}, IEEE, Computer Society Press, N.Y. 1994, 124. 2. L.K. Grover, Phys. Rev. Lett., {\bf 79} (1997) 627. \quad L.K. Grover, Science, {\bf 280} (1998) 228. 3. P.W. Shor, Phys. Rev. A {\bf 52} (1995) R2493. \quad E. Knill, R. Laflamme, and W.H. Zurek, Science, {\bf 279} (1998) 342. A. Steane, Proc. R. Soc. London Ser. A, {\bf 452} (1996) 2551. 4. F. Schmidt-Kaler, S. Gulde, M. Riebe, T. Deuschle, A. Kreuter, G. Lancaster, \quad C. Becher, J. Eschner, H. H\"affner, and R. Blatt, \quad J. Phys. B, {\bf 36} (2003) 623. \quad C.F. Roos, G.P.T. Lancaster, M. Riebe, H. H\"affner, W. H\"ansel, S. Gulde, \quad C. Becher, J. Eschner, F. Schmidt-Kaler, R. Blatt, \quad Phys. Rev. Lett. {\bf 92}, (2004) 220402. 5. M. 
Riebe, H. H\"affner, C. F. Roos, W. H\"ansel, J. Benhelm, G. P. T. Lancaster, \quad T.W. K\"orber, C. Becher, F. Schmidt-Kaler, D. F. V. James, R. Blatt, \quad Nature {\bf 429}, (2004) 734. 6. D. Boschi, S. Branca, F.D. Martini, L. Hardy, and S. Popescu \quad Phys. Rev. Lett., {\bf 80} (1998) 1121. \quad C.H. Bennett and G. Brassard, {\it Proc. IEEE International Conference on \quad Computers, Systems, and Signal Processing}, N.Y. (1984) 175. \quad I.L. Chuang, N. Gershenfeld, M.G. Kubinec, and D.W. Leung \quad Proc. R. Soc. London A, {\bf 454} (1998) 447. \quad I.L. Chuang, N. Gershenfeld, and M.G. Kubinec \quad Phys. Rev. Lett., {\bf 80} (1998) 3408. \quad I.L. Chuang, L.M.K. Vandersypen, X.L. Zhou, D.W. Leung, and S. Lloyd, \quad Nature, {\bf 393} (1998) 143. \quad P. Domokos, J.M. Raimond, M. Brune, and S. Haroche, \quad Phys. Rev. A, {\bf 52} (1995) 3554. \quad J.Q. You, Y. Nakamura, F. Nori, Phys. Rev. Lett., {\bf 91} (2002) 197902. 7. S. Lloyd, Science, {\bf 261} (1993) 1569. \quad G.P. Berman, G.D. Doolen, D.D. Holm, and V.I. Tsifrinovich \quad Phys. Lett. A, {\bf 193} (1994) 444. 8. G.P. Berman, G.D. Doolen, D.I. Kamenev, G.V. L\'opez, and V.I. Tsifrinovich \quad Phys. Rev. A, {\bf 61} (2000) 062305. 9. G.P. Berman, G.D. Doolen, G.V. L\'opez, and V.I. Tsifrinovich \quad quant-ph/9802015, quant-ph/9909032, Phys. Rev. A, {\bf 61} (2000) 062305. 10. A. Peres, Phys. Rev. A {\bf 30} (1984) 1610. } \end{document}
\begin{document} \title{Excess-noise suppression for a squeezed state propagating through random amplifying media via wave-front shaping} \begin{abstract} After propagating through a random amplifying medium, a squeezed state commonly shows excess noise above the shot-noise level. Since large noise can significantly reduce the signal-to-noise ratio, it is detrimental for precision measurement. To circumvent this problem, we propose a noise-reduction scheme using wavefront shaping. It is demonstrated that the average output quantum noise can be effectively suppressed even beyond the shot-noise limit. Both the decrease on amplification strength and the increase on input squeezing strength can give rise to a decrease in the suppressed average quantum noise. Our results not only show the feasibility of manipulating the output quantum noise of random amplifying media, but also indicate potential applications in quantum information processing in complex environments, such as, quantum imaging, quantum communication, and quantum key distribution. \end{abstract} \section{Introduction} The random medium exhibits unusual transmission properties which couples light into different channels randomly by multiple scattering. Previously, light scattering was considered harmful, since it may distort the incident wavefront and result in a speckle pattern. Later, it is shown that light scattering could also play a positive role in many applications. For instance, (1) in imaging, it can improve the resolution by overcoming the traditional diffraction limit, owing to the increased effective aperture number by multiple scattering \cite{van2011,park2014}; (2) in optical communication, it provides the possibility to increase the capacity by the raising number of scattered modes that carry the information \cite{simon2001,two2002}. 
In addition, light scattering can also be applied in other fields, such as, secure authentication \cite{goorden2014,yao2016}, high-speed random number generator \cite{argyris2010,xiang2015}, programmable optical circuit \cite{huisman2015,marcucci2020}. Therefore, light transport through random media has become an active subject from both theoretical and experimental perspectives. In particular, the random amplifying media (RAMs) have attracted considerable attention because nonlinearity or amplification provides an additional degree of freedom for coherent control of mesoscopic transport \cite{renthlei2015,liew2015}. By adjusting the amplification strength, one could conveniently manipulate the transmission properties of light which could benefit for many potential applications, such as, random laser \cite{cao2003,luan2015,churkin2015}. Recently, coherent-state light propagation through a RAM has been explored from different aspects. For example, Liew \textit{et al.} \cite{liew2015} investigated the effect of amplification on the reflection properties. It was revealed that the amplification could minimize the reflectance of the random medium by destructive interference. Burkov \textit{et al.} \cite{burkov1997} studied the correlation of scattered light. It was demonstrated that the angular correlation has a power-law decay and exhibits oscillations. Patra \textit{et al.} \cite{patra1999} analyzed the quantum noise of the scattered light. It is found that the output shows excess noise related to the transmission and reflection matrices of the medium for a coherent-state input. As a typical nonclassical state, the squeezed state is of importance since it possesses lower quantum noise in one quadrature component than that of the coherent state (\textit{or} equivalently the shot noise) \cite{walls1983,walls2007,barnett2002,lvovsky2015}. 
Therefore, the squeezed state can enhance signal-to-noise ratio \cite{caves81,yurke1986,xiao1987precision} and has been utilized in different applications ranging from quantum imaging \cite{beskrovnyy2005,sokolov2004} to gravitational wave detection \cite{aasi2013,barsotti2018,mehmet2018}. However, the squeezed state suffers from an increase in output quantum noise after propagating through a RAM \cite{patra2000} (see Fig. \ref{fig1}(a)), which is induced by spontaneous emission and multiple scattering. It is worth pointing out that for a squeezed-state input, the input quantum noise is below the shot-noise level (SNL), whereas the output noise is always increased, even above the SNL. More interestingly, compared with the coherent state, the squeezed state initially possessing lower noise will have larger noise in the output \cite{patra2000}. As is well known, the large noise leads to a decrease in the signal-to-noise ratio which is detrimental for precision detection (e.g., high-resolution imaging). Therefore, we wonder whether there exists a method to reduce the average output noise for the squeezed-state input. \begin{figure*} \caption{Quadrature fluctuation detection of beams transmitted through a random amplifying medium (a) in the absence of WFS, (b) in the presence of WFS. $\hat{a} \label{fig1} \end{figure*} Wavefront shaping (WFS) is a promising technology for optical focusing and imaging through random media \cite{vellekoop2007,vellekoop2008,popoff2010,mosk2012,hong2017,hong2018,tzang2019,blochet2019}, which paves the way in manipulating the speckle pattern in an expected manner. Experimentally, the WFS can be performed by a spatial light modulator (SLM), as shown in Fig. \ref{fig1}(b). The SLM acts as a reconfigurable matrix of pixels to imprint desired phases on the incident wavefront. 
In recent decades, it has been extensively used in various optical applications, for instance, quantum simulator \cite{prl2019}, quantum data locking \cite{lum2016}, and high-resolution imaging \cite{jang2018,chen2018}. In particular, the WFS is also a common technique in the optical authentication scheme based on scattering medium \cite{goorden2014,yao2016}. In this work, we propose a noise-reduction scheme using WFS for the case of squeezed-state light propagating through RAMs. Comparing with Ref. \cite{patra2000}, we exploit the technique of WFS to reduce the output noise. In addition, the comparison between the linear and amplifying cases is performed. It is found that the amplifying media always have larger average quantum noise than that of the linear ones regardless of WFS. Besides, unlike the linear case where the suppressed quantum fluctuation always reaches below the SNL, the reduced quantum fluctuation can be either below or above the SNL for the amplifying case. Moreover, we provide the condition for the reduced average quantum fluctuation to reach below the SNL. This paper is organized as follows: in Sec. 2, we briefly describe the model of propagation of quantized light through a RAM. Sec. 3 elucidates how the WFS suppresses the average quantum fluctuation of output modes. In Sec. 4, we compare the cases of amplifying and linear media. Sec. 5 is devoted to the conclusion of the main results. \section{Theoretical model} Fig. \ref{fig1}(a) illustrates the propagation of quantized light through a RAM. Generally, a RAM consists of randomly distributed scattering particles with amplification either in the background medium or in the particles themselves. When light propagates through a RAM, it would be multiple scattered and amplified. To quantitatively characterize the property of a RAM, three kernel factors are introduced: the transport mean free path $l$, the thickness $L$, and the amplification length $L_a$ \cite{patra1999}. 
Note that different from the linear media with only two primary parameters ($L$ and $l$) \cite{xu2017a,xu2017b,li2019,li2019b}, the RAMs require an extra amplification length $L_a = \sqrt{D \tau_a}$ to account the nonlinearity \cite{patra1999}, where $1/\tau_a$ is the amplification rate and $D = c l /3$ the diffusion constant ($c$ the velocity of light in the medium). \subsection{Propagation of quantized light through a random amplifying medium} After propagating through a RAM, the scattered mode $b$ can be expressed as \cite{fedorov2009} \begin{eqnarray} \label{inout001} \hat{a}_b = \sum_{a'=1}^{N} t_{a' b} \hat{a}_{a'}^{\rm{in}} + \sum_{b'=N+1}^{2N} r_{b' b} \hat{a}_{b'}^{\rm{in}} + \sum_{c'} v_{c' b}^{\ast} \hat{c}_{c'}^{\rm{in}\dagger}, \end{eqnarray} where $\hat{a}_b$ indicates the annihilation operator of scattered mode $b$, $\hat{a}_{a'}^{\rm{in}}$ ($\hat{a}_{b'}^{\rm{in}}$) the annihilation operators of input modes on the left-hand side (right-) of the RAM, $\hat{c}^{\rm{in}\dagger}_{c'}$ the creation operators of spontaneous emission modes inside the RAM, $t_{a' b}$ ($r_{b' b}$) the transmission (reflection) coefficients from the input modes $a'$ ($b'$) to the output mode $b$, $v_{c' b}^{\ast}$ the connection between the spontaneous emission modes and the output mode $b$, $N$ the number of transmission channels. Noticeably, the last term on the right-hand side in Eq. (\ref{inout001}) quantifies the spontaneous emission inside the RAM, with $c'$ running over ``objects'' (e.g., atoms or molecules) and the operator $\hat{c}_{c'}^{\rm{in} \dagger}$ fulfilling the commutation relation $[\hat{c}_{i}^{\rm{in}}, \hat{c}_{j}^{\rm{in} \dagger}] = \delta_{ij}$. 
Unlike the random linear medium with only transmission and reflection coefficients ($t_{a' b}$, $r_{b' b}$), the RAM involves an additional spontaneous emission coefficient ($v_{c'b}^{\ast}$), which is subject to the constraint $\sum_{a'} |t_{a' b}|^2 + \sum_{b'} |r_{b' b}|^2 - \sum_{c'}|v^{\ast}_{c' b}|^2 = 1$ (see Appendix A). The ensemble-averaged transmission, reflection, and spontaneous-emission coefficients are given by \cite{fedorov2009} \begin{eqnarray} \label{eq2a} \overline{T_{a' b}} &=& \frac{1}{N}\frac{\sin(l/L_{a})}{\sin(L/L_a)},\\ \label{eq2b} \overline{R_{b' b}} &=& \frac{1}{N}\frac{\sin[(L-l)/L_{a}]}{\sin(L/L_a)},\\% \overline{V_{b}} &=& \frac{\sin(l/L_a) + \sin[(L-l)/L_a]}{\sin(L/L_a)}-1, \label{eq2c} \end{eqnarray} where $ T_{a'b}= |t_{a'b}|^2$, $ R_{b'b}= |r_{b'b}|^2 $, $V_b = \sum_{c'} V_{c'b}= \sum_{c'}|v_{c'b}|^2$, and the overline stands for the average over the ensemble of disorder realizations. Note that $\overline{T_{a'b}}$, $\overline{R_{b'b}}$, and $\overline{V_{c'b}}$ diverge at $L/L_{a} = \pi$ which is identified as the threshold for random-laser emission. Clearly, this analysis method based on Eq. (\ref{inout001}) can only be applied below the laser threshold (i.e., $L/L_a < \pi$). If $L/L_a$ is infinitesimally small (i.e., $L/L_a \to 0$), Eqs. (\ref{eq2a})-(\ref{eq2c}) can be rewritten as $\overline{T_{a' b}} =[1/(L/l)]/N$, $\overline{R_{b' b}} =[1-1/(L/l)]/N $, and $\overline{V_{b}} =0$, respectively, which are exactly the same as the coefficients for the linear case \cite{lodahl2006b}. Evidently, this generalized model is suitable for both the amplifying and the linear cases. The quadrature operators are introduced as $\hat{x} = \hat{a}^{\dagger} + \hat{a}$ and $\hat{p} = i(\hat{a}^\dagger - \hat{a})$. According to Eq. 
(\ref{inout001}), the quadrature operators of scattered mode $b$ are then written as \begin{eqnarray} \hat{x}_b=&\sum_{a'}\sqrt{T_{a'b}}[\cos\phi_{a'b}\hat{x}_{a'}^{\rm{in}}-\sin\phi_{a'b}\hat{p}_{a'}^{\rm{in}}]\nonumber\\ &+\sum_{b'}\sqrt{R_{b' b}}[\cos \phi_{b'b}\hat{x}_{b'}^{\rm{in}}-\sin\phi_{b'b}\hat{p}_{b'}^{\rm{in}}]\nonumber\\ &+\sum_{c'}\sqrt{V_{c' b}}[\cos\phi_{c'b}\hat{x}_{c'}^{\rm{in}}-\sin\phi_{c'b}\hat{p}_{c'}^{\rm{in}}],\label{xb}\\ \hat{p}_b=& \sum_{a'}\sqrt{T_{a' b}}[\sin\phi_{a' b}\hat{x}_{a'}^{\rm{in}}+\cos\phi_{a'b}\hat{p}_{a'}^{\rm{in}}]\nonumber\\ &+\sum_{b'}\sqrt{R_{b' b}}[\sin \phi_{b' b}\hat{x}_{b'}^{\rm{in}}+\cos\phi_{b'b}\hat{p}_{b'}^{\rm{in}}]\nonumber \\ &-\sum_{c'}\sqrt{V_{c'b}}[\sin\phi_{c' b}\hat{x}_{c'}^{\rm{in}}+\cos\phi_{c'b}\hat{p}_{c'}^{\rm{in}}],\label{pb} \end{eqnarray} where we have defined $t_{a'b} = \sqrt{T_{a'b}} e^{i \phi_{a'b}}$, $r_{b'b} = \sqrt{R_{b'b}} e^{i \phi_{b'b}}$, $v_{c'b}^{\ast} = \sqrt{V_{c'b}} e^{-i \phi_{c'b}}$, $\hat{x}^{\rm{in}}_{c'} = \hat{c}^{\rm{in}\dagger}_{c'} + \hat{c}^{\rm{in}}_{c'}$, and $\hat{p}^{\rm{in}}_{c'} = i(\hat{c}^{\rm{in}\dagger}_{c'} - \hat{c}^{\rm{in}}_{c'})$. \subsection{Modified propagation via wavefront shaping} In this work we consider the situation of optical focusing through a random medium with WFS. In such a case, the expected phases, $\phi^{\rm{SLM}}_{a'}=-\phi_{a'b}$ $(a'=1,2,...,N)$ are imprinted on the incident wavefront via WFS where the output mode $b$ corresponds to the focused beam. This phase modulator exactly compensates the phase retardation in the RAM for each transmission channel which leads to a constructive interference in the output mode $b$. Correspondingly, the initial input-output relation in Eq. 
(1) is modified as \begin{eqnarray} \hat{a}_{b}^{\rm{w}} = \sum_{a'=1}^{N}{|t_{a'b }| \hat{a}_{a'}^{\rm{in}}} + \sum_{b'=N+1}^{2N}{r_{b'b } \hat{a}_{b'}^{\rm{in}}}+ \sum_{c'} v_{c' b}^{\ast} \hat{c}_{c'}^{\rm{in}\dagger}, \label{creation2} \end{eqnarray} where the superscript $w$ stands for WFS and $|t_{a'b}|$ takes the place of the original complex transmission coefficient $t_{a'b}$. By taking into account the WFS, based on Eq. (\ref{creation2}), the quadrature operators of the scattered mode $b$ now becomes \begin{eqnarray} \label{xp1} \hat{x}_b^{\rm{w}} &=& \sum_{a'}{\sqrt{T_{a'b}} \hat{x}_{a'}^{\rm{in}} } + \sum_{b'}{\sqrt{R_{b'b}} [\cos \phi_{b'b} \hat{x}_{b'}^{\rm{in}} - \sin \phi_{b'b} \hat{p}_{b'}^{\rm{in}}] } \nonumber \\ && + \sum_{c'}{\sqrt{V_{c'b}} [\cos \phi_{c'b} \hat{x}_{c'}^{\rm{in}} - \sin \phi_{c'b} \hat{p}_{c'}^{\rm{in}}] },\\ \label{xp2} \hat{p}_b^{\rm{w}} &=& \sum_{a'}{\sqrt{T_{a'b}} \hat{p}_{a'}^{\rm{in}} } + \sum_{b'}{\sqrt{R_{b'b}} [\cos \phi_{b'b} \hat{p}_{b'}^{\rm{in}} + \sin \phi_{b'b} \hat{x}_{b'}^{\rm{in}}] } \nonumber \\ &&+ \sum_{c'}{\sqrt{V_{c'b}} [\cos \phi_{c'b} \hat{p}_{c'}^{\rm{in}} + \sin \phi_{c'b} \hat{x}_{c'}^{\rm{in}}] }. \end{eqnarray} Note in passing that our scheme can be realized with a similar experimental setup as shown in Refs. \cite{qiao2017,peng2018,osnabrugge2019}. Nevertheless, those works mainly focusing on the enhanced intensity in the speckle pattern, whereas our work will concentrate on the reduced quantum noise of scattered modes. 
\section{Variance of quadrature of the scattered modes} \begin{figure*} \caption{The difference between $\overline{\langle (\Delta \hat{x} \label{fig002} \end{figure*} The variance of operator $\hat{O}$ is defined as \begin{eqnarray} \label{var20} \langle (\Delta \hat{O})^2 \rangle \equiv \langle \hat{O}^2 \rangle - \langle \hat{O} \rangle^2, \end{eqnarray} where $\hat{O} = \hat{x}_b^{\rm{w}},\hat{p}_b^{\rm{w}}$, and $\langle \hat{O} \rangle$ denotes the expectation value of $\hat{O}$. That is to say, to obtain the variances, it requires to calculate $\langle \hat{x}_b^{\rm{w}} \rangle, \langle \hat{p}_b^{\rm{w}} \rangle, \langle (\hat{x}_b^{\rm{w}})^2 \rangle$, and $\langle (\hat{p}_b^{\rm{w}})^2 \rangle$. Assuming that the light is only injected on the left-hand side of the RAM [see Fig. \ref{fig1}(b)] and the input beams of the other side are vacuum states (i.e., $\langle \hat{x}_{b'}^{\rm{in}}\rangle = \langle \hat{p}_{b'}^{\rm{in}}\rangle=0$). According to Eqs. (\ref{xp1}) and (\ref{xp2}), the expectation values of $\hat{x}_b^{\rm{w}}$ and $\hat{p}_b^{\rm{w}}$ can be obtained \begin{eqnarray} \label{var2a} \langle \hat{x}_b^{\rm{w}} \rangle &=& \sum_{a'}{\sqrt{T_{a'b}} \langle \hat{x}_{a'}^{\rm{in}} \rangle}, \\ \label{var2b} \langle \hat{p}_b^{\rm{w}} \rangle &=& \sum_{a'}{\sqrt{T_{a'b}} \langle \hat{p}_{a'}^{\rm{in}} \rangle}. \end{eqnarray} Note that $\langle \hat{x}_b^{\rm{w}} \rangle$ ($\langle \hat{p}_b^{\rm{w}} \rangle$) is only related to the transmitted modes $\langle \hat{x}_{a'}^{\rm{in}} \rangle$ ($\langle \hat{p}_{a'}^{\rm{in}} \rangle$). Similarly, from Eqs. 
(\ref{xp1}) and (\ref{xp2}), the expectation values of $(\hat{x}_b^{\rm{w}})^2$ and $(\hat{p}_b^{\rm{w}})^2$ are found to be \begin{eqnarray} \label{sqzxb2} \langle (\hat{x}_b^{\rm{w}})^2 \rangle &= & \sum_{a'a''} \sqrt{T_{a' b}T_{a'' b}} [ \langle\hat{x}_{a'}^{\rm{in}} \hat{x}_{a''}^{\rm{in}}\rangle ] \nonumber \\ &&+\sum_{b'} R_{b' b} [\cos^2 \phi_{b' b} \langle \hat{x}_{b'}^{\rm{in} 2} \rangle + \sin^2 \phi_{b' b} \langle \hat{p}_{b'}^{\rm{in} 2} \rangle \nonumber \\ &&-\cos \phi_{b' b} \sin \phi_{b' b} \langle \hat{x}_{b'}^{\rm{in}} \hat{p}_{b'}^{\rm{in}} + \hat{p}_{b'}^{\rm{in}} \hat{x}_{b'}^{\rm{in}} \rangle] \nonumber\\ &&+\sum_{c'} V_{c' b} [\cos^2 \phi_{c' b} \langle \hat{x}_{c'}^{\rm{in} 2} \rangle + \sin^2 \phi_{c' b} \langle \hat{p}_{c'}^{\rm{in} 2} \rangle \nonumber \\ &&-\cos \phi_{c' b} \sin \phi_{c' b} \langle \hat{x}_{c'}^{\rm{in}} \hat{p}_{c'}^{\rm{in}} + \hat{p}_{c'}^{\rm{in}} \hat{x}_{c'}^{\rm{in}}\rangle ], \end{eqnarray} \begin{eqnarray} \label{sqzpb2} \langle (\hat{p}_b^{\rm{w}})^2 \rangle &= & \sum_{a'a''} \sqrt{T_{a' b}T_{a'' b}} [ \langle\hat{p}_{a'}^{\rm{in}} \hat{p}_{a''}^{\rm{in}}\rangle ] \nonumber\\ &&+\sum_{b'} R_{b' b} [\cos^2 \phi_{b' b} \langle \hat{p}_{b'}^{\rm{in} 2} \rangle + \sin^2 \phi_{b' b} \langle \hat{x}_{b'}^{\rm{in} 2} \rangle \nonumber \\ &&+ \cos \phi_{b' b} \sin \phi_{b' b} \langle \hat{x}_{b'}^{\rm{in}} \hat{p}_{b'}^{\rm{in}} + \hat{p}_{b'}^{\rm{in}} \hat{x}_{b'}^{\rm{in}} \rangle] \nonumber\\ &&+ \sum_{c'} V_{c' b} [\cos^2 \phi_{c' b} \langle \hat{p}_{c'}^{\rm{in} 2} \rangle + \sin^2 \phi_{c' b} \langle \hat{x}_{c'}^{\rm{in} 2} \rangle \nonumber \\ && + \cos \phi_{c' b} \sin \phi_{c' b} \langle \hat{x}_{c'}^{\rm{in}} \hat{p}_{c'}^{\rm{in}} + \hat{p}_{c'}^{\rm{in}} \hat{x}_{c'}^{\rm{in}}\rangle ], \end{eqnarray} which are universal for arbitrary input state. Inserting Eqs. (\ref{var2a}) and (\ref{sqzxb2}) into Eq. 
(\ref{var20}) yields the variance $\langle (\Delta \hat{x}_b^{\rm{w}})^2\rangle$ as \begin{eqnarray} \label{sqzxb2b1} \langle (\Delta \hat{x}_b^{\rm{w}})^2 \rangle &=& \sum_{a'}{T_{a'b}\langle (\Delta \hat{x}_{a'}^{\rm{in}})^2 \rangle } \nonumber\\ &&+ \sum_{a'\neq a''} 2\sqrt{T_{a'b} T_{a''b}} [ {\rm{cov}} (\hat{x}_{a'}^{\rm{in}}, \hat{x}_{a''}^{\rm{in}}) ] \nonumber \\ &&+ \sum_{b'} R_{b'b}[\cos^2 \phi_{b'b}\langle (\Delta \hat{x}_{b'}^{\rm{in}})^2 \rangle \nonumber\\ &&+ \sin^2 \phi_{b'b} \langle (\Delta \hat{p}_{b'}^{\rm{in}})^2 \rangle \nonumber \\ &&- 2\cos \phi_{b'b} \sin \phi_{b'b} {\rm{cov}}(\hat{x}_{b'}^{\rm{in}}, \hat{p}_{b'}^{\rm{in}}) ] \nonumber\\ && + \sum_{c'}V_{c'b}[\cos^2 \phi_{c'b}\langle (\Delta \hat{x}_{c'}^{\rm{in}})^2 \rangle \nonumber \\ && + \sin^2 \phi_{c'b}\langle (\Delta \hat{p}_{c'}^{\rm{in}})^2 \rangle \nonumber\\ && - 2\cos \phi_{c'b} \sin \phi_{c'b} {\rm{cov}}(\hat{x}_{c'}^{\rm{in}}, \hat{p}_{c'}^{\rm{in}}) ], \end{eqnarray} where the covariance function is defined as ${\rm{cov}}(\hat{Y},\hat{Z}) \equiv \frac{1}{2} (\langle \hat{Y} \hat{Z} \rangle +\langle \hat{Z} \hat{Y} \rangle) - \langle \hat{Y}\rangle \langle \hat{Z} \rangle $. 
By averaging over the ensemble of RAMs, we obtain \begin{eqnarray} \label{mean002az} \overline {\langle (\Delta \hat{x}_b^{\rm{w}})^2 \rangle} &=& \overline{\sum_{a'} T_{a'b} \langle (\Delta \hat{x}_{a'}^{\rm{in}})^2 \rangle} \nonumber \\ && + \overline{\sum_{a'\neq a''} 2\sqrt{T_{a'b} T_{a''b}} {\rm{cov}} (\hat{x}_{a'}^{\rm{in}}, \hat{x}_{a''}^{\rm{in}}) } \nonumber \\ && + \overline{\sum_{b'} \frac{1}{2} R_{b'b} [\langle (\Delta \hat{x}_{b'}^{\rm{in}})^2 \rangle + \langle (\Delta \hat{p}_{b'}^{\rm{in}})^2 \rangle]} \nonumber\\ && + \overline{\sum_{c'} \frac{1}{2}V_{c'b} [\langle (\Delta \hat{x}_{c'}^{\rm{in}})^2 \rangle + \langle (\Delta \hat{p}_{c'}^{\rm{in}})^2 \rangle]}, \end{eqnarray} where we have used $\overline{\sin^2 \phi_{b'b} } = \overline{\sin^2 \phi_{c'b} } = \overline{\cos^2 \phi_{b'b} } =\overline{\cos^2 \phi_{c'b} } = 1/2$ and $\overline{\cos \phi_{b'b} \sin \phi_{b'b} }= \overline{\cos \phi_{c'b} \sin \phi_{c'b} } = 0$ \cite{coeff}. Consider squeezed states as input ($|\Psi^{\rm{in}} \rangle = [\hat{D}(\alpha)\hat{S}(r)|0\rangle]^{\otimes N}$), with the number of transmission channels $N$, displacement operator $\hat{D}(\alpha) = e^{\alpha \hat{a}^{\dagger} - \alpha^{\ast} \hat{a}}$, and squeezing operator $\hat{S}(r) = e^{ (r /2)(\hat{a}^{\dagger 2} - \hat{a}^{2}) }$ (complex number $\alpha$ being the amplitude and real number $r$ the squeezing parameter). One can obtain $\langle(\Delta \hat{x}_{a'}^{\rm{in}})^2 \rangle = e^{-2r}$, $\langle(\Delta \hat{x}_{b'}^{\rm{in}})^2 \rangle = \langle(\Delta \hat{p}_{b'}^{\rm{in}})^2 \rangle = \langle(\Delta \hat{x}_{c'}^{\rm{in}})^2 \rangle = \langle(\Delta \hat{p}_{c'}^{\rm{in}})^2 \rangle = 1$, ${\rm{cov}} (\hat{x}_{a'}^{\rm{in}}, \hat{x}_{a''}^{\rm{in}})|_{a' \neq a''} = 0$. Straightforwardly, Eq. 
(\ref{mean002az}) can be simplified to \begin{eqnarray} \label{sqz002az} \overline {\langle (\Delta \hat{x}_b^{\rm{w}})^2 \rangle}_{\rm{sqz}} = 2 \overline {V_{b} } + 1 - \overline {T_{b}} (1-e^{-2r}), \end{eqnarray} since $\overline {T_{ b}} +\overline {R_{b}}-\overline{V_{b}} = 1$ (see Appendix A). It is obvious that with the increase of $r$, the modified average quantum fluctuation in Eq. (\ref{sqz002az}) decreases for a given RAM. \begin{figure*} \caption{The rescaled average quantum fluctuations $R_{\theta} \label{fig003} \end{figure*} Consider $r=0$ (i.e., the coherent-state input), Eq. (\ref{sqz002az}) is then reduced to $\overline {\langle (\Delta\hat{x}_b^{\rm{w}})^2 \rangle}_{\rm{sqz}\to\rm{\rm{coh}}} = 2 \overline {V_{b} } + 1$. For convenience, one can rewrite Eq. (\ref{sqz002az}) as \begin{eqnarray} \label{sqz003} \overline {\langle (\Delta \hat{x}_b^{\rm{w}})^2 \rangle}_{\rm{sqz}}= \overline {\langle (\Delta \hat{x}_b^{\rm{w}})^2 \rangle}_{\rm{coh}} - \overline {T_{ b}} (1-e^{-2r}), \end{eqnarray} where $ \overline {\langle (\Delta \hat{x}_b^{\rm{w}})^2 \rangle}_{\rm{coh}} \equiv 2 \overline {V_{b} } + 1$. One can find that $\overline {\langle (\Delta\hat{x}_b^{\rm{w}})^2 \rangle}_{\rm{sqz}} < \overline {\langle (\Delta\hat{x}_b^{\rm{w}})^2 \rangle}_{\rm{coh}}$ always holds when $r>0$, which indicates that with WFS, the squeezed state has lower average output noise than that of the coherent state. For comparison, we calculate the average quantum fluctuation in the absence of WFS \begin{eqnarray} \label{sqz001} \overline {\langle (\Delta \hat{x}_b)^2 \rangle}_{\rm{sqz}} = \overline {\langle (\Delta \hat{x}_b)^2 \rangle}_{\rm{coh}} + \overline {T_{ b}} [\cosh (2r) - 1], \end{eqnarray} where $\overline {\langle (\Delta \hat{x}_b)^2 \rangle}_{\rm{coh}} \equiv 2 \overline {V_{ b} } + 1$ represents the average quantum fluctuation of the scattered light in the absence of WFS with the coherent-state input. The detailed derivation is presented in Appendix B. 
Comparing Eqs. (\ref{sqz003}) and (\ref{sqz001}), one can extract the difference between the average quantum fluctuations with and without WFS \begin{eqnarray} \label{sqz004} \overline {\langle (\Delta \hat{x}_b)^2 \rangle}_{\rm{sqz}} - \overline {\langle (\Delta \hat{x}_b^{\rm{w}})^2 \rangle}_{\rm{sqz}} = \overline {T_{ b}} \sinh (2r), \end{eqnarray} since $\overline {\langle (\Delta \hat{x}_b)^2 \rangle}_{\rm{coh}} = \overline {\langle (\Delta\hat{x}_b^{\rm{w}})^2 \rangle}_{\rm{coh}}$ is used. Fig. \ref{fig002}(a) [\ref{fig002}(b)] depicts the difference between $\overline{\langle (\Delta \hat{x}_b)^2 \rangle}$ and $\overline{\langle (\Delta \hat{x}_b^{\rm{w}})^2 \rangle}$ as a function of $L/L_a$ and $r$ [$L/L_a$ and $L/l$]. It is found that the difference is always larger than zero for a squeezed-state input ($r>0$) which implies that the WFS can reduce the average quantum fluctuation in the presence of a squeezed-state input. For convenience, we introduce the rescaled average quantum fluctuation, \begin{eqnarray} R_{\theta} = \overline{\langle (\Delta \theta )^2 \rangle}_{\rm{sqz}} / \overline{\langle (\Delta \theta )^2 \rangle}_{\rm{coh}}, \end{eqnarray} where $\theta = \hat{x}_b, \hat{x}_b^{\rm{w}}$. Fig. \ref{fig003} compares the rescaled average quantum fluctuations $R_{\theta}$ with and without WFS as a function of $r$ [(a), (b)] and $L/L_a$ [(c), (d)]. In Figs. \ref{fig003}(a)-\ref{fig003}(d), the curves with the triangle marks denote the situations without WFS whereas those without the triangle marks (except for the gray-dashed line) represent the cases with WFS. The gray-dashed line stands for the average output noise for the coherent-state input. As shown in Figs. \ref{fig003}(a)-\ref{fig003}(d), the purple-solid, red-dashed, and blue-dashed lines without triangle marks are always below their corresponding lines with triangle marks when $r>0$. This implies that the WFS can always reduce the average quantum noise for the squeezed-state input. 
\section{Comparison and discussion} \begin{figure*} \caption{The average quantum fluctuations $\overline{\langle (\Delta \theta)^2 \rangle} \label{fig003a} \end{figure*} \subsection{The suppressed and increased average quantum fluctuations} One important aspect neglected so far is the quadrature $\hat{p}_{b}^{\rm{w}}$. Similar to $\hat{x}_{b}^{\rm{w}}$, the average quantum fluctuation of $\hat{p}_{b}^{\rm{w}}$ can be cast into \begin{eqnarray} \overline {\langle (\Delta \hat{p}_b^{\rm{w}})^2 \rangle}_{\rm{sqz}} = \overline {\langle (\Delta \hat{p}_b^{\rm{w}})^2 \rangle}_{\rm{coh}} + \overline {T_{ b}} (e^{2r} - 1), \label{sqz002pp} \end{eqnarray} where $\overline {\langle (\Delta \hat{p}_b^{\rm{w}})^2 \rangle}_{\rm{coh}}= 2 \overline {V_{ b} } + 1$ means the average output quantum fluctuation for the coherent-state input. Meanwhile, the average quantum fluctuation of $\hat{p}_b$ in the absence of WFS is found to be \begin{eqnarray} \label{sqz001aa} \overline {\langle (\Delta \hat{p}_b)^2 \rangle}_{\rm{sqz}} = \overline {\langle (\Delta \hat{p}_b)^2 \rangle}_{\rm{coh}} + \overline {T_{ b}} [\cosh (2r) - 1], \end{eqnarray} where $\overline {\langle (\Delta \hat{p}_b)^2 \rangle}_{\rm{coh}} = 2 \overline {V_{ b} } + 1$ represents the case of the coherent-state input. Figs. \ref{fig003a}(a) and \ref{fig003a}(b) compare the average quantum fluctuations between $\hat{x}$ and $\hat{p}$ with and without WFS. Fig. \ref{fig003a}(a) plots the average quantum fluctuations as a function of $r$. The blue-dotted-dashed line with (without) triangle marks represents the case of $\hat{x}_b$ ($\hat{x}_b^{\rm{w}}$) while the red-dashed line with (without) square marks denotes the case of $\hat{p}_b$ ($\hat{p}_b^{\rm{w}}$). The gray-dashed line stands for the average quantum noise for the coherent-state input. 
It is easy to find that with the increase of $r$, $\overline{\langle (\Delta \hat{x}^{\rm{w}}_b)^2\rangle}$ decreases whereas $\overline{\langle (\Delta \hat{x}_b)^2\rangle}$, $\overline{\langle (\Delta \hat{p}_b)^2\rangle}$, and $\overline{\langle (\Delta \hat{p}^{\rm{w}}_b)^2\rangle}$ increase. In the absence of WFS, the average quantum fluctuations of $ \hat{x}_b$ and $ \hat{p}_b$ coincide with each other. Intriguingly, in the presence of WFS, the average quantum fluctuation of $ \hat{x}_b^{\rm{w}}$ is smaller than that of $ \hat{x}_b$ whereas the average quantum fluctuation of $ \hat{p}_b^{\rm{w}}$ becomes larger than that of $ \hat{p}_b$, which implies that the WFS leads to a decrease in the average quantum fluctuation of $\hat{x}$ but an increase in that of $\hat{p}$. In the absence of WFS, the squeezed light experiences random phases when propagating through the RAM. This means that both $\hat{x}_b$ and $\hat{p}_b$ will be the mixture of the squeezing, anti-squeezing, and quadrature components in various orientations of the original squeezed light, which leads to the same fluctuation for both quadratures. On the other hand, the WFS removes these random phases, which results in $\hat{x}_b^{\rm{w}}$ and $\hat{p}_b^{\rm{w}}$ retaining the original quadrature of the squeezed light, with additional noise added from the spontaneous emission. Fig. \ref{fig003a}(b) depicts the average quantum fluctuations as a function of $L/L_a$. It can be seen that as $L/L_a$ increases, $\overline{\langle (\Delta \hat{x}_b)^2\rangle}$, $\overline{\langle (\Delta \hat{x}^{\rm{w}}_b)^2\rangle}$, $\overline{\langle (\Delta \hat{p}_b)^2\rangle}$, and $\overline{\langle (\Delta \hat{p}^{\rm{w}}_b)^2\rangle}$ increase. Notably, $\overline{\langle (\Delta \hat{x}_b^{\rm{w}})^2 \rangle}$ is still below the gray-dashed line, which indicates that the squeezed state has lower average output quantum noise than that of the coherent state with WFS. 
\begin{figure*} \caption{The average output quantum fluctuations $\overline{\langle (\Delta \hat{x} \label{xr} \end{figure*} \subsection{Comparison between the amplifying and linear cases} To give insight into the effects of nonlinearity on the suppressed quantum noise, we compare the amplifying and linear situations. By setting $L_a \to \infty$ (i.e., $ L/L_a \to 0$ and $\overline{V_{b}} = 0 $, amplifying effects vanishing), based on Eqs. (\ref{sqz002az}) and (\ref{sqz001}), the average quantum fluctuations in linear cases with and without WFS can be expressed as \begin{eqnarray} \label{linear00a} \overline {\langle (\Delta \hat{x}_b^{\rm{w}})^2 \rangle}_{\rm{sqz},\rm{lin}}=&1 - \overline {T_{ b}} (1-e^{-2r}), \\ \overline {\langle (\Delta\hat{x}_b)^2 \rangle}_{\rm{sqz},\rm{lin}} =& 1 + \overline {T_{ b}} [\cosh (2r) - 1], \end{eqnarray} respectively, which is consistent with our previous work \cite{li2019b}. Fig. \ref{xr} shows the average output quantum fluctuations $\overline{\langle (\Delta \hat{x})^2 \rangle}_{\rm{sqz}}$ ($\hat{x} = \hat{x}_b, \hat{x}_b^{\rm{w}}$) versus (a) $r$ and (b) $L/l$. The blue-triangle-marked-dashed, blue-dashed, red-triangle-marked-solid, red-solid, and gray-dotted-dashed curves denote the cases of amplifying media without WFS, amplifying ones with WFS, linear ones without WFS, linear ones with WFS, and the SNL, respectively. In Fig. \ref{xr}(a), with the increasing of $r$, the average output quantum noise without WFS increases whereas the one with WFS decreases. This is due to the fact that the average output quantum noise without WFS is related to the input quantum noise, which encompasses not only the noise of squeezed quadrature ($\langle (\Delta \hat{x}_{a'}^{\rm{in}})^2 \rangle = e^{-2r}$) but also the noise of anti-squeezed quadrature ($\langle (\Delta \hat{p}_{a'}^{\rm{in}})^2 \rangle = e^{2r}$). When $r$ becomes large, the maximum noise ascends steeply, which provokes the increase of the average output quantum noise. 
By contrast, the average output quantum noise with WFS is related to the input quantum noise, which includes only the squeezed noise ($\langle (\Delta \hat{x}_{a'}^{\rm{in}})^2 \rangle = e^{-2r}$). The noise of the anti-squeezed quadrature $\langle (\Delta \hat{p}_{a'}^{\rm{in}})^2 \rangle = e^{2r}$ disappears owing to the destructive interference of quantum noise \cite{li2019b,elste2009}. With the increase of $r$, the squeezed noise ($\langle (\Delta \hat{x}_{a'}^{\rm{in}})^2 \rangle = e^{-2r}$) decreases, which gives rise to a decrease in the average output quantum noise. Fig. \ref{xr}(b) shows the average output noise as a function of $L/l$. It is obvious that, with the increase of $L/l$, the average output noise without WFS decreases, whereas the average output noise with WFS increases. From Figs. \ref{xr}(a) and \ref{xr}(b), it is found that WFS can reduce the average quantum fluctuation in both linear and amplifying cases. Nevertheless, unlike the linear case ($L/l = 0$, solid lines) where the suppressed quantum noise can always reach below the SNL, the reduced average quantum noise can be either below or above the SNL for the amplifying case. The phenomenon can be explained as follows: initially, the linear situation without WFS shows the excess noise resulting from multiple scattering. On the contrary, the amplifying case without WFS presents the excess noise induced by not only multiple scattering but also nonlinear amplification. The WFS can effectively reduce the excess noise from multiple scattering rather than amplification. Consequently, for the linear case, the excess noise is well suppressed below the SNL via WFS. Nevertheless, for the amplifying case, the excess noise can be reduced below the SNL only if the multiple scattering dominates (i.e., weak amplification strength). It is worth noting that the excess noise from amplification can be categorized into two types: one from spontaneous emission and the other one from stimulated emission. 
Although the WFS is not able to reduce the noise due to spontaneous emission, it can still effectively suppress the one from stimulated emission. Therefore, the WFS could still reduce the output average noise below the SNL under certain conditions for the amplifying case. \subsection{The condition for the suppressed quantum fluctuations to achieve below the SNL} \begin{figure} \caption{The region for the suppressed average quantum fluctuation to reach below the SNL. Parameter: $e^{-2r} \to 0$.} \label{figsnl} \end{figure} Sub-shot noise of light is among the most prominent nonclassical traits. Nevertheless, the suppressed average quantum noise cannot always reach below the SNL for the amplifying case. We shall now discuss the condition for the suppressed average quantum noise to achieve below the SNL. Assuming that the suppressed average quantum noise reaches below the SNL (i.e., $\overline {\langle (\Delta \hat{x}_b^{\rm{w}})^2 \rangle}_{\rm{sqz}} < 1$), from Eq. (\ref{sqz002az}), one has \begin{eqnarray} \label{snl002} 2 \overline {V_{ b} } -\overline {T_{ b}} (1-e^{-2r}) < 0. \end{eqnarray} Combined with Eqs. (\ref{eq2a}) and (\ref{eq2c}), Eq. (\ref{snl002}) can be rewritten as \begin{eqnarray} \sin (\frac{l}{L_a}) (1+e^{-2r}) + 2 \sin (\frac{L-l}{L_a}) - 2 \sin (\frac{L}{L_a}) <0. \end{eqnarray} The solution is found to be \begin{eqnarray} \label{solution001} L/L_a < \arcsin M_{+}, \end{eqnarray} where the detailed derivation is shown in Appendix \ref{appendixc}, $M_{+} = [mn + \sqrt{4m^2-n^2+4}]/[2(m^2 +1)]$ with $n = 1+e^{-2r}$, $m = (1 - \sqrt{1 - p^2})/p$ and $p = \sin (l/L_a)$. Fig. \ref{figsnl} intuitively illustrates the solution in Eq. (\ref{solution001}) where for simplicity we consider the situation of the large squeezing strength ($e^{-2r} \to 0$) \cite{sqzpara}. The colored region allows $\overline{ \langle (\Delta \hat{x}_b^{\rm{w}})^2 \rangle }_{\rm{sqz}}$ to reach below the SNL. It is obvious that this condition requires a weak amplification strength. 
This is because the WFS can effectively reduce the excess noise induced by multiple scattering but not amplification. \section{Conclusion} In summary, we investigate the effect of wavefront shaping on the average quantum noise of scattered modes in the random amplifying media. It is demonstrated that the wavefront shaping offers the ability to reduce the average output quantum noise for a squeezed-state input. Particularly, the wavefront shaping can effectively suppress the excess noise resulting from multiple scattering but not amplification. This reduction is due to the destructive interference of quantum noise. In addition, both the decrease in amplification strength \textit{and} the increase in the input squeezing strength can lead to a decrease in the suppressed average noise. It is found that unlike the linear media where the suppressed average quantum noise is always below the shot-noise level, the reduced average quantum noise can be either below or above the shot-noise level for the amplifying case. Moreover, we provide the condition for the suppressed noise to achieve below the shot-noise level which requires the amplification strength to be weak. Our results may have potential implications in quantum information processing, such as high-resolution imaging and optical authentication. For instance, in the authentication system based on scattering medium \cite{goorden2014,yao2016}, the most vital process involves light focusing through a random medium with the help of WFS. Our work might contribute to the design of such authentication schemes with squeezed-state input. 
\appendix \section{Derivation of the summation of transmission, reflection, and spontaneous emission coefficients} \label{derivation} The input-output relation of a random amplifying medium is given by \begin{eqnarray} \hat{a}_b^{\dagger} = \sum_{a'} t_{a' b}^{\ast} \hat{a}_{a'}^{\rm{in} \dagger} + \sum_{b'} r_{b' b}^{\ast} \hat{a}_{b'}^{\rm{in} \dagger} + \sum_{c'} v_{c' b} \hat{c}_{c'}^{\rm{in} }, \\ \nonumber \hat{a}_b = \sum_{a'} t_{a' b} \hat{a}_{a'}^{\rm{in}} + \sum_{b'} r_{b' b} \hat{a}_{b'}^{\rm{in}} + \sum_{c'} v_{c' b}^{\ast} \hat{c}_{c'}^{{\rm{in}}\dagger}, \end{eqnarray} where $t_{a'b}^{\ast}$ ($r_{b'b}^{\ast}$, $v_{c' b}^{\ast}$) is the conjugate of $t_{a'b}$ ($r_{b'b}$, $v_{c' b}$). According to the commutation relation $[\hat{a}_b, \hat{a}_b^{\dagger}] = 1$, one can easily obtain \begin{eqnarray} \label{sumsumsum} \sum_{a'} |t_{a'b}|^2 + \sum_{b'} |r_{b'b}|^2 - \sum_{c'} |v_{c'b}|^2 = 1, \end{eqnarray} where $[\hat{a}_{i}^{\rm{in}}, \hat{a}_{j}^{\rm{in}\dagger}] = \delta_{ij}$ ($i,j=a',b'$) has been used. Let $T_{b} = \sum_{a'}|t_{a'b}|^2$, $R_{b} = \sum_{b'}|r_{b'b}|^2$, and $V_{b} = \sum_{c'} |v_{c'b}|^2$. Eq. (\ref{sumsumsum}) can then be rewritten as $ T_{b} + R_{b} - V_{b} = 1$. \section{Average quantum fluctuation of the scattered light in the absence of WFS} \label{cohinput} The variance of $\hat{x}_b$ without WFS is given by \begin{eqnarray} \label{v2b} \langle (\Delta \hat{x}_b)^2\rangle = \langle \hat{x}_b^2\rangle - \langle \hat{x}_b\rangle^2. \end{eqnarray} To obtain the variance $\langle (\Delta \hat{x}_b)^2\rangle$, it is necessary to calculate $ \langle \hat{x}_b\rangle$ and $\langle \hat{x}_b^2\rangle$. In the absence of WFS, the mean value of $\hat{x}_b$ in Eq. (\ref{xb}) is found to be \begin{eqnarray} \langle \hat{x}_b \rangle =& \sum_{a'} {\sqrt{T_{a'b}} [ \cos \phi_{a'b} \langle \hat{x}_{a'}^{\rm{in}} \rangle - \sin \phi_{a'b} \langle \hat{p}_{a'}^{\rm{in}} \rangle] }. \end{eqnarray} According to Eq. 
(\ref{xb}), $\hat{x}_b^2$ can be obtained \begin{eqnarray} \label{xb0} \hat{x}_b^2 &=& \sum_{a'a''} \sqrt{T_{a' b}T_{a'' b}} [\cos \phi_{a' b} \cos \phi_{a'' b} \hat{x}_{a'}^{\rm{in}} \hat{x}_{a''}^{\rm{in}} \nonumber \\ &&+ \sin \phi_{a' b} \sin \phi_{a'' b} \hat{p}_{a'}^{\rm{in}} \hat{p}_{a''}^{\rm{in}} \nonumber \\ &&- \cos \phi_{a' b} \sin \phi_{a'' b} \hat{x}_{a'}^{\rm{in}} \hat{p}_{a''}^{\rm{in}} \nonumber \\ &&-\sin \phi_{a' b}\cos \phi_{a'' b} \hat{p}_{a'}^{\rm{in}} \hat{x}_{a''}^{\rm{in}}] \nonumber \\ &&+\sum_{b'b''} \sqrt{R_{b' b} R_{b'' b}} [\cos \phi_{b' b}\cos \phi_{b'' b} \hat{x}_{b'}^{\rm{in}} \hat{x}_{b''}^{\rm{in}} \nonumber \\ &&+ \sin \phi_{b' b} \sin \phi_{b'' b} \hat{p}_{b'}^{\rm{in}} \hat{p}_{b''}^{\rm{in}} \nonumber \\ &&- \cos \phi_{b' b} \sin \phi_{b'' b} \hat{x}_{b'}^{\rm{in}} \hat{p}_{b''}^{\rm{in}} \nonumber \\ &&- \sin \phi_{b' b} \cos \phi_{b'' b} \hat{p}_{b'}^{\rm{in}} \hat{x}_{b''}^{\rm{in}}] \nonumber \\ &&+ \sum_{c'c''} \sqrt{V_{c' b}V_{c'' b}} [\cos \phi_{c' b} \cos \phi_{c'' b} \hat{x}_{c'}^{\rm{in}} \hat{x}_{c''}^{\rm{in}} \nonumber \\ &&+ \sin \phi_{c' b} \sin \phi_{c'' b} \hat{p}_{c'}^{\rm{in}} \hat{p}_{c''}^{\rm{in}} \nonumber \\ &&- \cos \phi_{c' b} \sin \phi_{c'' b} \hat{x}_{c'}^{\rm{in}} \hat{p}_{c''}^{\rm{in}} \nonumber \\ &&- \sin \phi_{c' b} \cos \phi_{c'' b} \hat{p}_{c'}^{\rm{in}} \hat{x}_{c''}^{\rm{in}} ]\nonumber \\ &&+ \sum_{a'b'} 2\sqrt{T_{a' b}R_{b' b}} \{[\cos \phi_{a' b} \hat{x}_{a'}^{\rm{in}} - \sin \phi_{a' b} \hat{p}_{a'}^{\rm{in}}]\nonumber \\ && \times[\cos \phi_{b' b} \hat{x}_{b'}^{\rm{in}} - \sin \phi_{b' b} \hat{p}_{b'}^{\rm{in}}]\} \nonumber \\ &&+ \sum_{a'c'} 2\sqrt{T_{a' b}V_{c' b}} \{[\cos \phi_{a' b} \hat{x}_{a'}^{\rm{in}} - \sin \phi_{a' b} \hat{p}_{a'}^{\rm{in}}]\nonumber \\ && \times[\cos \phi_{c' b} \hat{x}_{c'}^{\rm{in}} - \sin \phi_{c' b} \hat{p}_{c'}^{\rm{in}}]\} \nonumber \\ &&+ \sum_{b'c'} 2\sqrt{R_{b' b}V_{c' b}}\{[\cos \phi_{b' b} \hat{x}_{b'}^{\rm{in}} - \sin \phi_{b' b} 
\hat{p}_{b'}^{\rm{in}}]\nonumber \\ &&\times[\cos \phi_{c' b} \hat{x}_{c'}^{\rm{in}} - \sin \phi_{c' b} \hat{p}_{c'}^{\rm{in}}]\}. \end{eqnarray} Then $\langle \hat{x}_b^2 \rangle$ is found to be \begin{eqnarray} \langle \hat{x}_b^2 \rangle &= & \sum_{a'a''} \sqrt{T_{a' b}T_{a'' b}} [\cos \phi_{a' b} \cos \phi_{a'' b} \langle\hat{x}_{a'}^{\rm{in}} \hat{x}_{a''}^{\rm{in}}\rangle \nonumber \\ &&+ \sin \phi_{a' b} \sin \phi_{a'' b} \langle\hat{p}_{a'}^{\rm{in}} \hat{p}_{a''}^{\rm{in}}\rangle \nonumber \\ &&- \cos \phi_{a' b} \sin \phi_{a'' b} \langle\hat{x}_{a'}^{\rm{in}} \hat{p}_{a''}^{\rm{in}} \rangle \nonumber \\ &&-\sin \phi_{a' b}\cos \phi_{a'' b} \langle\hat{p}_{a'}^{\rm{in}} \hat{x}_{a''}^{\rm{in}} \rangle] \nonumber \\ &&+\sum_{b'} R_{b' b} [\cos^2 \phi_{b' b} \langle \hat{x}_{b'}^{\rm{in} 2} \rangle + \sin^2 \phi_{b' b} \langle \hat{p}_{b'}^{\rm{in} 2} \rangle \nonumber \\ &&- \cos \phi_{b' b} \sin \phi_{b' b} \langle \hat{x}_{b'}^{\rm{in}} \hat{p}_{b'}^{\rm{in}} + \hat{p}_{b'}^{\rm{in}} \hat{x}_{b'}^{\rm{in}} \rangle] \nonumber \\ &&+ \sum_{c'} V_{c' b} [\cos^2 \phi_{c' b} \langle \hat{x}_{c'}^{\rm{in} 2} \rangle + \sin^2 \phi_{c' b} \langle \hat{p}_{c'}^{\rm{in} 2} \rangle \nonumber \\ &&- \cos \phi_{c' b} \sin \phi_{c' b} \langle \hat{x}_{c'}^{\rm{in}} \hat{p}_{c'}^{\rm{in}} + \hat{p}_{c'}^{\rm{in}} \hat{x}_{c'}^{\rm{in}}\rangle ]. 
\label{xb2} \end{eqnarray} The variance is then expressed as \begin{eqnarray} \langle (\Delta \hat{x}_{b})^2 \rangle &=& \sum_{a'}T_{a'b}[\cos^2 \phi_{a'b}\langle (\Delta \hat{x}_{a'}^{\rm{in}})^2 \rangle \nonumber \\ &&+ \sin^2 \phi_{a'b}\langle (\Delta \hat{p}_{a'}^{\rm{in}})^2 \rangle \nonumber \\ &&- 2\cos \phi_{a'b} \sin \phi_{a'b} {\rm{cov}}(\hat{x}_{a'}^{\rm{in}}, \hat{p}_{a'}^{\rm{in}})]\nonumber \\ && + \sum_{b'}R_{b'b}[\cos^2 \phi_{b'b}\langle (\Delta \hat{x}_{b'}^{\rm{in}})^2 \rangle \nonumber \\ &&+ \sin^2 \phi_{b'b}\langle (\Delta \hat{p}_{b'}^{\rm{in}})^2 \rangle \nonumber \\ &&- 2\cos \phi_{b'b} \sin \phi_{b'b} {\rm{cov}}(\hat{x}_{b'}^{\rm{in}}, \hat{p}_{b'}^{\rm{in}}) ] \nonumber \\ &&+ \sum_{c'}V_{c'b}[\cos^2 \phi_{c'b}\langle (\Delta \hat{x}_{c'}^{\rm{in}})^2 \rangle\nonumber \\ && + \sin^2 \phi_{c'b}\langle (\Delta \hat{p}_{c'}^{\rm{in}})^2 \rangle\nonumber \\ && - 2\cos \phi_{c'b} \sin \phi_{c'b} {\rm{cov}}(\hat{x}_{c'}^{\rm{in}}, \hat{p}_{c'}^{\rm{in}}) ] \nonumber \\ &&+ \sum_{a'\neq a''} \{ \sqrt{T_{a'b} T_{a''b}} [2\cos \phi_{a' b} \cos \phi_{a''b} \nonumber \\ &&\times{\rm{cov}} (\hat{x}_{a'}^{\rm{in}}, \hat{x}_{a''}^{\rm{in}}) + 2\sin \phi_{a' b} \sin \phi_{a''b} {\rm{cov}}(\hat{p}_{a'}^{\rm{in}}, \hat{p}_{a''}^{\rm{in}}) \nonumber \\ &&-2\cos \phi_{a'b} \sin \phi_{a''b} {\rm{cov}}(\hat{x}_{a'}^{\rm{in}}, \hat{p}_{a''}^{\rm{in}})] \}, \label{xb2a} \end{eqnarray} where the covariance function is defined as ${\rm{cov}}(\hat{Y},\hat{Z}) \equiv \frac{1}{2} (\langle \hat{Y} \hat{Z} \rangle +\langle \hat{Z} \hat{Y} \rangle) - \langle \hat{Y}\rangle \langle \hat{Z} \rangle $. Considering the squeezed state as input ($|\Psi^{\rm{in}} \rangle = [\hat{D}(\alpha)\hat{S}(r)|0\rangle]^{\otimes N}$), by averaging over realizations of disorder, Eq. 
(\ref{xb2a}) can be simplified as \begin{eqnarray} \overline{\langle(\Delta\hat{x}_b)^2\rangle}_{\rm{sqz}}&=&\frac{1}{2}\overline {T_{ b}} [\overline{\langle (\Delta\hat{x}_{a'}^{\rm{in} })^2 \rangle }+\overline{\langle (\Delta \hat{p}_{a'}^{\rm{in} })^2\rangle }] +\overline {R_{b}} +\overline {V_{ b} } \nonumber \\ &= & 2\overline {V_{ b} } + 1 +\overline{T_{b}} [\cosh (2r) -1]. \label{sqz001aaa} \end{eqnarray} When the input is the coherent state (i.e., $r=0$), Eq. (\ref{sqz001aaa}) can be cast into \begin{eqnarray} \overline {\langle (\Delta\hat{x}_b)^2 \rangle}_{\rm{coh}} = 2 \overline {V_{ b} } + 1 . \label{coh001aaa} \end{eqnarray} \section{Derivation of Eq. (\ref{solution001})} \label{appendixc} Inserting Eqs. (\ref{eq2a}) and (\ref{eq2c}) into (\ref{snl002}) yields \begin{eqnarray} \label{x01} \sin (\frac{l}{L_a}) (1 +e^{-2 r}) + 2 \sin (\frac{L-l}{L_a}) - 2 \sin (\frac{L}{L_a})<0. \end{eqnarray} By using trigonometric formulas, Eq. (\ref{x01}) could be expressed as \begin{eqnarray} \label{x03} [1 + e^{-2r} - 2\cos(\frac{L}{L_a})] \sin (\frac{l}{L_a}) + [\cos (\frac{l}{L_a}) - 1] 2 \sin (\frac{L}{L_a}) <0. \end{eqnarray} Eq. (\ref{x03}) could be further cast into \begin{eqnarray} \label{x05} \frac{1+e^{-2r}}{2} - \frac{1- \sqrt{1-p^2}}{p} M < \sqrt{1 - M^2}, \end{eqnarray} where we have defined $M = \sin(L/L_a)$ and $p = \sin(l/L_a)$ for simplicity. From Eq. (\ref{x05}), one can obtain \begin{eqnarray} \label{x08} (m^2+1) M^2 - m n M - (4 - n^2)/4 < 0, \end{eqnarray} where $n = 1 + e^{-2r}$ and $m = (1- \sqrt{1-p^2})/p$. It is easy to verify that there always exists a solution for Eq. (\ref{x08}) \begin{eqnarray} M_{-} < M < M_{+}, \end{eqnarray} where \begin{eqnarray} M_{\pm} = \frac{m n \pm \sqrt{ 4m^2 - n^2 + 4}}{2 (m^2 + 1)}. \end{eqnarray} It is worth pointing out that $M_{-}<0$ and $M_{+}>0$. However, in our scheme, $M= \sin (L/L_a)$ should be positive (since $L/L_a < \pi$ corresponding to the case below the laser threshold \cite{fedorov2009}). 
Therefore, the solution is found to be \begin{eqnarray} 0< M < M_{+}. \end{eqnarray} Accordingly, one can obtain \begin{eqnarray} L/L_a < \arcsin(M_{+}). \end{eqnarray} \end{document}
\begin{document} \renewcommand*{\thefootnote}{\fnsymbol{footnote}} \begin{center} \Large{\textbf{Multifractal processes: definition, properties and new examples}}\\ Danijel Grahovac$^1$\footnote{[email protected]}\\ \end{center} \begin{flushleft} \footnotesize{ $^1$ Department of Mathematics, University of Osijek, Trg Ljudevita Gaja 6, 31000 Osijek, Croatia} \end{flushleft} \textbf{Abstract: } We investigate stochastic processes possessing scale invariance properties which we refer to as multifractal processes. The examples of such processes known so far do not go much beyond the original cascade construction of Mandelbrot. We provide a new definition of the multifractal process by generalizing the definition of the self-similar process. We establish general properties of these processes and show how existing examples fit into our setting. Finally, we define a new class of examples inspired by the idea of Lamperti transformation. Namely, for any pair of infinitely divisible distribution and a stationary process one can construct a multifractal process. \section{Introduction} Multifractality may refer to a variety of properties which are usually used to describe objects possessing some type of scale invariance. The term itself has its true meaning in the analysis of local regularity properties of measures and functions. When it comes to stochastic processes, multifractality usually refers to models that exhibit nonlinear scaling of moments in time. More precisely, for some range of values of $q$ and $t$, the $q$-th absolute moment of a process $\{X(t)\}$ at $t$ can be written in the form \begin{equation}\label{momscal:motivating} \mathbb{E}|X(t)|^q = c(q) t^{\tau (q)}, \end{equation} where the so-called scaling function $q \mapsto \tau(q)$ is nonlinear. This contrasts the case of self-similar processes for which $\tau$ is a linear function. 
The property can also be based on the moments of increments of the process and can also be assumed to hold only asymptotically for $t\to 0$, in contrast to the exact scaling in \eqref{momscal:motivating}. For more details see \cite{mandelbrot1997mmar}, \cite{riedi2003multifractal} and the references therein. The processes satisfying \eqref{momscal:motivating} or some variant of it have received considerable attention in variety of applications like turbulence, finance, climatology, medical imaging, texture classification (see e.g.~\cite{mandelbrot1997mmar}, \cite{bacry2008continuous}, \cite{robert2008hydrodynamic}, \cite{duchon2012forecasting}, \cite{lovejoy2013weather}, \cite{abry2015irregularities}, \cite{pavlov2016multifractality}, \cite{kalamaras2017multifractal}, \cite{laib2018multifractal} and the references therein). The scaling of moments as in \eqref{momscal:motivating} is usually a consequence of a more general scaling property and so it is not the best candidate for a defining property. In this paper we specify multifractality as a property of the finite dimensional distributions of the process. Recall that the process $\{X(t)\}$ is self-similar if for every $a>0$ there exists $b>0$ such that \begin{equation*} \{X(at)\} \overset{d}{=} \{bX(t)\}, \end{equation*} where $\{\cdot\} \overset{d}{=} \{\cdot\}$ denotes the equality of the finite dimensional distributions of two processes. The basic idea of our approach is to generalize the definition of a self-similar process. 
This idea can be traced back to Mandelbrot (see e.g.~\cite{mandelbrot1997mmar}), who used the following property as a motivation for the scaling of moments: for a process $\{X(t), \, t \in {\mathcal{T}}\}$ there is a set $\Lambda \subset (0,\infty)$ such that for every $\lambda \in \Lambda$ there exists a positive random variable $M_{\lambda}$ independent from the process $\{X(t)\}$ such that \begin{equation}\label{mfdef:motivating} X(\lambda t) \overset{d}{=} M_\lambda X(t), \quad \forall t \in {\mathcal{T}}. \end{equation} However, to our knowledge, such generalizations of self-similarity have never been studied systematically. One can also find \eqref{mfdef:motivating} stated in the sense of equality of finite dimensional distributions, that is \begin{equation}\label{mfdef:motivatingfdd} \{X(\lambda t) \}_{t\in {\mathcal{T}}} \overset{d}{=} \{ M_\lambda X(t)\}_{t\in {\mathcal{T}}}. \end{equation} Such properties are also referred to as exact scale invariance, exact stochastic scale invariance or statistical self-similarity (see e.g.~\cite{allez2013lognormal}, \cite{bacry2003log}, \cite{barral2014exact}). A typical example of a process satisfying \eqref{mfdef:motivatingfdd} are cascade processes constructed in \cite{bacry2003log}, \cite{barral2002multifractal} and \cite{muzy2002multifractal} . A class of examples has also been given in \cite{veneziano1999basic} called independent stationary increment ratio processes, but these actually satisfy a variant of \eqref{mfdef:motivating} and not the corresponding variant of \eqref{mfdef:motivatingfdd} (see Subsection \ref{subsec:examples} for details). 
Imposing that relation \eqref{mfdef:motivating} holds in the sense of equality of finite dimensional distributions seems unnecessarily restrictive as it requires for $\lambda \in \Lambda$ the existence of a \emph{single} random variable $M_\lambda$ such that $(X(\lambda t_1), \dots, X(\lambda t_n)) =^d (M_\lambda X(t_1), \dots,\allowbreak M_\lambda X(t_n))$ for any choice of $t_1,\dots,t_n\in {\mathcal{T}}$ and $n\in {\mathbb N}$. Instead we will require only that the random factors corresponding to each time point are identically distributed (see Definition \ref{def:mf} for details). This simple generalization of \eqref{mfdef:motivatingfdd} will allow us to obtain a whole class of new examples and to build a theory that generalizes self-similarity in a natural way. In Section \ref{sec2} we start with the definition and its ramifications. We show that, in contrast to self-similar processes, the scaling property of multifractal processes cannot hold for every scale $\lambda > 0$ as it then reduces to self-similarity. Also, the equality of finite dimensional distributions as in \eqref{mfdef:motivatingfdd} cannot hold over $(0,\infty)$ except when the process is self-similar. From these facts we identify restrictions that should be imposed on the set of scales and the time sets involved. In Section \ref{sec3} we investigate general properties of multifractal processes. We show an intimate connection between scaling factors and infinitely divisible distributions. More precisely, by appropriately transforming a family of scaling factors indexed by $\lambda \in \Lambda$, one obtains a process which has one-dimensional distributions as some L\'evy process. From this relation scaling of moments as in \eqref{momscal:motivating} is a straightforward consequence. 
We also show that the cascade processes provide an example where the process obtained from the family of scaling factors is not a L\'evy process although its one-dimensional marginals correspond to some L\'evy process. In Section \ref{sec4}, using the idea of Lamperti transformation, we define a class of new multifractal processes which we call \emph{$L$-multifractals}. To any pair of L\'evy process and stationary process there corresponds one $L$-multifractal process. Properties of $L$-multifractal processes are investigated in Section \ref{sec5}. It is shown that these process do not have stationary increments in general. However, by restricting the time set and by carefully choosing the corresponding L\'evy process and stationary process, a process with second-order stationary increments may be obtained. Although most papers on multifractal processes deal with variations of \eqref{momscal:motivating}, an exception is the work of \cite{veneziano1999basic} which aims at providing a general treatment of processes satisfying a variant of \eqref{mfdef:motivatingfdd}. A close inspection of the proofs there shows that all the examples given actually satisfy a variant of \eqref{mfdef:motivating} and not \eqref{mfdef:motivatingfdd}. Moreover, the scaling factors are stated to be in one-to-one correspondence with L\'evy processes, which would exclude the case of cascade processes as we show in this paper. Let us also mention that the construction of the family of scaling factors provided there via L\'evy process necessarily involves bounded time interval and cannot be extended to $(0,\infty)$. Another line of development that generalizes the cascade construction is the multiplicative chaos theory that can be seen as a continuous analog of Mandelbrot's $\star$-scale invariance (see \cite{rhodes2014gaussian} for details). However, the random measures obtained in this way do not possess exact scale invariance in general, except when they reduce to the cascade case. 
See \cite{allez2013lognormal}, \cite{barral2013gaussian}, \cite{rhodes2014gaussian} and the references therein. In \cite{veneziano1999basic}, the processes satisfying \eqref{mfdef:motivatingfdd} are referred to as stochastically self-similar (see also \cite{gupta1990multiscaling}). Although this is a more meaningful term than multifractal, we prefer the latter as it is now widespread in the literature. \section{The definition of a multifractal process}\label{sec2} We first introduce some notation and assumptions. All the processes considered will have values in ${\mathbb R}$. In what follows, $=^d$ stands for the equality in law of two random variables, while $\{\cdot\} =^d \{\cdot\}$ denotes the equality of finite dimensional distributions of two stochastic processes. If the time set is not specified in this equality then it is assumed that it holds over the whole time set where the processes are defined. A process $\{X(t), \, t \in {\mathcal{T}}\}$ is said to be nontrivial if $X(t)$ is not a constant a.s.~for every $t\in {\mathcal{T}}$. Every process considered will be assumed to be not identically null so that there is $t\in {\mathcal{T}}$, $t\neq 0$, such that $P(X(t)\neq 0)>0$. As elaborated in the introduction, \eqref{mfdef:motivatingfdd} may be overly restrictive and it does not provide a natural generalization of self-similar processes. For this reason, we introduce the following definition of multifractality. \begin{definition}\label{def:mf} A stochastic process $X=\{X(t), \, t \in {\mathcal{T}}\}$ is \textbf{multifractal} if there exist sets $\Lambda \subset (0,\infty)$ and ${\mathcal{S}} \subset {\mathcal{T}}$ such that for every $\lambda \in \Lambda$ there exists a family of identically distributed positive random variables $\{M(\lambda,t), \, t \in \mathcal{S} \}$, independent of $\{X(t)\}$ such that \begin{equation}\label{mfdef:prop} \{ X(\lambda t) \}_{t \in {\mathcal{S}}} \overset{d}{=} \{ M(\lambda, t) X(t) \}_{t \in {\mathcal{S}}}. 
\end{equation} \end{definition} To clarify further, \eqref{mfdef:prop} means that for every choice of $t_1,\dots,t_n \in {\mathcal{S}}$, $n \in {\mathbb N}$ it holds that \begin{equation*} \left(X(\lambda t_1),\dots,X(\lambda t_n) \right) \overset{d}{=} \left( M(\lambda, t_1) X(t_1), \dots, M(\lambda, t_n) X(t_n) \right). \end{equation*} Clearly, this generalizes the equality \eqref{mfdef:motivatingfdd} where $M(\lambda,t)=M_\lambda$ does not depend on $t$. This and the case of self-similar processes inspired the condition stated in the definition that for every fixed $\lambda \in \Lambda$ \begin{equation}\label{Mstatmarg} M(\lambda, t_1) \overset{d}{=} M(\lambda, t_2), \quad \forall t_1, t_2 \in {\mathcal{S}}. \end{equation} We will refer to random field $M=\{M(\lambda,t), \, \lambda \in \Lambda, t \in {\mathcal{S}}\}$ as the \textbf{family of scaling factors}. \begin{remark} Definition \ref{def:mf} involves several sets: \begin{itemize} \item ${\mathcal{T}}$ is the time set where the process is defined, \item $\Lambda$ is a set of scales for which \eqref{mfdef:prop} holds \item ${\mathcal{S}}$ is a subset of ${\mathcal{T}}$ over which the equality of finite dimensional distributions \eqref{mfdef:prop} holds. \end{itemize} Such level of generality will prove to be important later on. We will assume that ${\mathcal{T}}$ is one of the following: ${\mathcal{T}}=(0,\infty)$, ${\mathcal{T}}=[0,\infty)$ or ${\mathcal{T}}=(0,T]$, ${\mathcal{T}}=[0,T]$ for some $T>0$. Note also that $0 \in {\mathcal{S}}$ makes the unduly restriction that $X(0)=0$ a.s.~if $M(\lambda,t)$ is not a constant a.s. \end{remark} \begin{example} Suppose $X$ is multifractal with $\Lambda=(0,\infty)$, ${\mathcal{S}}={\mathcal{T}}$ and for every $\lambda \in \Lambda$, $M(\lambda,t)$ is deterministic. Due to \eqref{Mstatmarg}, $t \mapsto M(\lambda,t)$ is constant, say $m(\lambda)$. 
The definition then reduces to the classical definition of self-similarity (see e.g.~\cite{embrechts2002selfsimilar}, \cite{pipiras2017long}). Furthermore, if $X$ is nontrivial and right continuous in law (meaning that for every $t_0>0$, $X(t) \to^d X(t_0)$ as $t \downarrow t_0$), then there exists a unique $H\in {\mathbb R}$ called the Hurst parameter such that $m(\lambda)=\lambda^H$ (see \cite[Section 8.5.]{bingham1989regular} and the references therein). We shortly say $X$ is $H$-ss. Typically ${\mathcal{T}}=(0,\infty)$ and if ${\mathcal{T}}=[0,\infty)$, then $H\geq 0$. Furthermore, $H>0$ implies $X(0)=0$ a.s.~and $H=0$ if and only if $X(t)=X(0)$ a.s.~for every $t>0$ (\cite{embrechts2002selfsimilar}). In this setting, \eqref{mfdef:motivating} corresponds to a concept of marginally self-similar process as defined in \cite[Section 8.5.]{bingham1989regular}. Adopting this terminology, a process satisfying \eqref{mfdef:prop} only for the one-dimensional distributions may be referred to as marginally multifractal. Note that this is equivalent to \eqref{mfdef:motivating} when ${\mathcal{S}}={\mathcal{T}}$. \end{example} \subsection{The set $\Lambda$} We start our analysis of Definition \ref{def:mf} by investigating how the scaling property \eqref{mfdef:prop} affects the size of the set $\Lambda$. It is known that the scaling of moments \eqref{momscal:motivating} cannot hold for every $t>0$ with $\tau$ being nonlinear (see e.g.~\cite{mandelbrot1997mmar} and \cite{muzy2013random}). In our setting this corresponds to restrictions on the set $\Lambda$. We start by showing that if the scaling is deterministic, then typically $\Lambda=(0,\infty)$ and ${\mathcal{S}}={\mathcal{T}}$. \begin{proposition}\label{prop:LamSwhendeterministic} Suppose $X=\{X(t), \, t \in {\mathcal{T}}\}$ is a multifractal process such that $M(\lambda,t)=m(\lambda)$ is deterministic for every $\lambda \in \Lambda$. 
If $\Lambda$ contains an interval, then \eqref{mfdef:prop} holds for any $\lambda\in (0,\infty)$. If additionally ${\mathcal{S}}$ contains an interval, then \eqref{mfdef:prop} holds with ${\mathcal{S}}={\mathcal{T}}$, hence $X$ is self-similar. \end{proposition} \begin{proof} Let $\Lambda'$ denote the set of $\lambda$ for which \eqref{mfdef:prop} holds. If $\lambda \in \Lambda'$, then from $\{X(t)\}_{t \in{\mathcal{S}}} \overset{d}{=} \{m(\lambda) X(t/\lambda)\}_{t \in{\mathcal{S}}}$ we have $\{X(t/\lambda)\}_{t \in{\mathcal{S}}} \overset{d}{=} \{1/m(\lambda) X(t)\}_{t \in{\mathcal{S}}}$ implying that $1/\lambda \in \Lambda'$. Furthermore, if $\lambda_1, \lambda_2 \in \Lambda'$, then since \begin{equation}\label{prop1proofmultiplciative} \{X(\lambda_1 \lambda_2 t)\}_{t \in{\mathcal{S}}} \overset{d}{=} \{m(\lambda_1) X(\lambda_2 t)\}_{t \in{\mathcal{S}}}\overset{d}{=} \{m(\lambda_1) m(\lambda_2) X(t)\}_{t \in{\mathcal{S}}}, \end{equation} we have $\lambda_1 \lambda_2 \in \Lambda'$. Hence, $\Lambda'$ is a multiplicative subgroup of $(0,\infty)$ containing $\Lambda$. Since it has positive measure it must be $\Lambda'=(0,\infty)$ (see e.g.~\cite[Corollary 1.1.4]{bingham1989regular}). Let $t \in {\mathcal{S}}$ be such that $P(X(t)\neq 0)>0$. Such $t$ exists, as if $X(t)=0$ a.s.~for every $t\in{\mathcal{S}}$, then $X$ is identically null since $X(s)\overset{d}{=} m(s/t) X(t)$ for every $s\in{\mathcal{T}}$. By using \cite[Lemma 1.1.1]{embrechts2002selfsimilar} we conclude from \eqref{prop1proofmultiplciative} that $m(\lambda_1 \lambda_2)=m(\lambda_1)m(\lambda_2)$. We now show that \eqref{mfdef:prop} can be extended to ${\mathcal{S}}'$ of the form ${\mathcal{S}}'= \alpha {\mathcal{S}}=\{\alpha s : s \in {\mathcal{S}}\}$ for any $\alpha>0$. 
Indeed, for every $\lambda \in (0,\infty)$ we have \begin{align*} \{ X(\lambda t) \}_{t \in {\mathcal{S}}'} &= \{ X(\lambda \alpha t/\alpha) \}_{t \in {\mathcal{S}}'} = \{ X(\lambda \alpha s) \}_{s \in {\mathcal{S}}} \overset{d}{=} \{ m(\lambda \alpha) X(s) \}_{s \in {\mathcal{S}}} \\ &= \{ m(\lambda) m(\alpha) X(s) \}_{s \in {\mathcal{S}}} \overset{d}{=} \{ m(\lambda) X(\alpha s) \}_{s \in {\mathcal{S}}} \overset{d}{=} \{ m(\lambda) X(t) \}_{t \in {\mathcal{S}}'}, \end{align*} which proves the claim. \end{proof} Proposition \ref{prop:LamSwhendeterministic} shows that typically, deterministic scaling may be extended to any scale $\lambda \in (0,\infty)$. The next proposition provides a sort of converse showing that the random scaling cannot hold for any scale since then it reduces to deterministic scaling. To show this, we introduce a further assumption on the process and make use of the Mellin transform (see Appendix \ref{appendix:mellin}). For the argument below, we need to assume that the domain of the Mellin transform applied to $X(t)$ does not degenerate into the imaginary axis. For the process $X=\{X(t), \, t\in {\mathcal{T}}\}$, let \begin{equation*} \overline{q} \left(X\right) = \sup \left\{ q \geq 0 : \mathbb{E}|X(t)|^q < \infty \text{ for all } t \in {\mathcal{T}} \right\}. \end{equation*} We will assume that $\overline{q}\left(X\right)>0$, so that for every $t\in {\mathcal{T}}$ the Mellin transform of $X(t)$ is defined at least for $0 \leq \mathrm{Re}\, z < \overline{q} \left(X\right)$. It is worth noting that this is a very mild assumption. \begin{proposition}\label{prop:nomfgen} Suppose $X=\{X(t), \, t \in {\mathcal{T}}\}$ is a multifractal process with $\overline{q} \left(X\right)>0$. If $\Lambda=(0,\infty)$, then $M(\lambda,t)=m(\lambda)$ is a.s.~a constant for any $\lambda \in \Lambda'$, $\Lambda'=\{\lambda \in \Lambda : \text{ there exists } t \in {\mathcal{S}} \text{ such that }\lambda t \in {\mathcal{S}}\}$. 
In particular, if ${\mathcal{S}}={\mathcal{T}}$, then $X$ is self-similar. \end{proposition} \begin{proof} Recall that we have assumed that for every process considered, there is $t\in {\mathcal{T}}$, $t\neq 0$, such that $P(X(t)\neq 0)>0$. Since $\{X(t)\}$ is multifractal, this is true for every $\lambda t$, $\lambda \in \Lambda$, $t\in{\mathcal{S}}$. There is at least one $t\in {\mathcal{S}}$ such that $P(X(t)\neq 0)>0$ as otherwise $X$ would be identically null because of $X(s)\overset{d}{=} M(s/t, t) X(t)$. Since $\Lambda=(0,\infty)$ we conclude that $P(X(t)\neq 0)>0$ for every $t\in{\mathcal{T}}$, $t\neq 0$. Let $\lambda \in \Lambda'$ and take $t \in {\mathcal{S}}$ such that $\lambda t \in {\mathcal{S}}$. Then $1/\lambda\in \Lambda$ and from $X(t)\overset{d}{=} M(1/\lambda,\lambda t) X(\lambda t)$ and $X(\lambda t)\overset{d}{=} M(\lambda,t) X(t)$ we have \begin{align*} \mathcal{M}_{|X(t)|}(z) &= \mathcal{M}_{M(1/\lambda, \lambda t)}(z) \mathcal{M}_{|X(\lambda t)|}(z), \\ \mathcal{M}_{|X(\lambda t)|}(z) &= \mathcal{M}_{M(\lambda, t)}(z) \mathcal{M}_{|X(t)|}(z), \end{align*} and \begin{equation*} \mathcal{M}_{|X(t)|}(z) = \mathcal{M}_{M(1/\lambda,\lambda t)}(z) \mathcal{M}_{M(\lambda,t)}(z) \mathcal{M}_{|X(t)|}(z). \end{equation*} Here $\mathcal{M}_X$ denotes the Mellin transform of the random variable $X$ (see Appendix \ref{appendix:mellin}). Since $P(|X(t)| > 0)>0$, for real $z \in (0,\overline{q} \left(X\right))$ we have that $\mathcal{M}_{|X(t)|}(z) =\mathbb{E}|X(t)|^z >0$ and hence \begin{equation}\label{e:proofp2:1} \mathcal{M}_{M(1/\lambda, \lambda t)}(z) \mathcal{M}_{M(\lambda,t)}(z) = 1. \end{equation} This uniquely determines the distribution (see Appendix \ref{appendix:mellin}) and if we have independent random variables $M_1\overset{d}{=}M(1/\lambda,\lambda t)$ and $M_2\overset{d}{=}M(\lambda,t)$, then \eqref{e:proofp2:1} implies that $M_1 M_2 = 1$ a.s. 
This is impossible unless $M_1$ and $M_2$ are constants a.s., so we conclude $M(\lambda,t)$ is a.s.~a constant. Due to \eqref{Mstatmarg}, $M(\lambda,t)=m(\lambda)$ for every $t\in {\mathcal{S}}$. If ${\mathcal{S}}={\mathcal{T}}$, then, as we have assumed, ${\mathcal{S}}$ is of the form $(0,\infty)$, $[0,\infty)$, $(0,T]$ or $[0,T]$. All of these imply $\Lambda'=(0,\infty)$, hence it follows that the process is self-similar. \end{proof} \begin{remark} For $\Lambda'=\Lambda$ to hold in Proposition \ref{prop:nomfgen}, it is enough that $(0,u)\subset {\mathcal{S}}$ or that $(u,\infty)\subset {\mathcal{S}}$ for some $u>0$. \end{remark} \begin{remark} Note that without further assumptions $Y_1 Z \overset{d}{=} Y_2 Z$ with $Z$ positive and independent of $Y_1$ and $Y_2$ does not necessarily imply that $Y_1\overset{d}{=} Y_2$. Indeed, in \cite[p.~506]{feller1971introduction}, one can find an example of random variables such that $\widetilde{Y}_1+\widetilde{Z} \overset{d}{=} \widetilde{Y}_2 + \widetilde{Z}$ with $\widetilde{Z}$ independent of $\widetilde{Y}_1$ and $\widetilde{Y}_2$, but $\widetilde{Y}_1$ and $\widetilde{Y}_2$ do not have the same distribution. By taking exponentials one gets the counterexample for the product (see also \cite[Exercise 1.12.]{chaumont2012exercises}). This explains the assumption $\overline{q} \left(X\right)>0$ in Proposition \ref{prop:nomfgen}. We note that this condition can be replaced with the appropriate condition on moments of negative order. More precisely, we can assume $\underline{q}\left(X\right)<0$ where \begin{equation}\label{qunderline} \underline{q} \left(X\right) = \inf \left\{ q \leq 0 : \mathbb{E}|X(t)|^q < \infty \text{ for all } t \in {\mathcal{T}} \right\}. \end{equation} \end{remark} The crucial property for the proof of Proposition \ref{prop:nomfgen} is that every $\lambda \in \Lambda$ has its inverse element $1/\lambda$ in $\Lambda$. 
To enable random scaling, one has to consider $\Lambda$ being a monoid and not a group under multiplication. Thus, we will have in general two distinct classes of multifractal processes depending on whether $\Lambda=(0,1]$ or $\Lambda=[1,\infty)$. In \cite{veneziano1999basic}, the processes defined by the variation of property \eqref{mfdef:motivatingfdd} are referred to as contraction (resp.~dilation) stochastically self-similar in the case that corresponds to our $\Lambda=(0,1]$ (resp.~$\Lambda=[1,\infty)$). \subsection{The set ${\mathcal{S}}$} To enable random scaling one has to make restrictions on the set ${\mathcal{S}}$ too. Indeed, the next proposition shows that if $\Lambda=(0,1]$ and $[1,\infty) \subset {\mathcal{S}}$, or if $\Lambda=[1,\infty)$ and $(0,1] \subset {\mathcal{S}}$, then the scaling is necessarily deterministic and reduces to self-similarity. In particular, there is no random scaling if ${\mathcal{S}}=(0,\infty)$. To prove this we will assume that the process $X$ under consideration, defined on the probability space $(\Omega, \mathcal{F}, P)$, is jointly measurable, i.e.~$(t, \omega) \mapsto X(t,\omega)$ is $\mathcal{B}({\mathcal{T}}) \times \mathcal{F}$-measurable (see also Remark \ref{rem:cauchyfuneqproof}). \begin{proposition}\label{prop:nomfgenonS} Suppose ${\mathcal{T}}=(0,\infty)$ or ${\mathcal{T}}=[0,\infty)$ and $X=\{X(t), \, t \in {\mathcal{T}}\}$ is a jointly measurable multifractal process such that $\overline{q} \left(X\right)>0$ and $\Lambda=(0,1]$ or $\Lambda=[1,\infty)$. If $\overline{\Lambda} \subset {\mathcal{S}}$, $\overline{\Lambda}:=\{1/\lambda : \lambda\in \Lambda \}$, then $X$ is self-similar. \end{proposition} \begin{proof} First note that $P(X(t)\neq 0)>0$ for every $t \in {\mathcal{S}}$. Indeed, let $s \in {\mathcal{S}}$ be such that $P(X(s)\neq 0)>0$. 
Such $s$ exists, as if $X(s)=0$ a.s.~for every $s\in{\mathcal{S}}\supset \overline{\Lambda}$, then from $X(\lambda)\overset{d}{=} M(\lambda,1) X(1)$ we would have $X(s)=0$ a.s.~for every $s>0$ and $X$ would be identically null. For arbitrary $t\in {\mathcal{S}}$, either $s/t \in \Lambda$ or $t/s \in \Lambda$. From $X(s)\overset{d}{=} M(s/t,t) X(t)$ or from $X(t)\overset{d}{=} M(t/s,s) X(s)$ we then conclude $P(X(t)\neq 0)>0$. Let $q \in (0,\overline{q} \left(X\right))$. Given $t_1, t_2 \in (0,\infty)$, take $\lambda \in \Lambda$ such that $\lambda t_1, \lambda t_1 t_2 \in \Lambda$ and hence, by the assumptions, $1/(\lambda t_1) \in {\mathcal{S}}$. Since $1/\lambda \in {\mathcal{S}}$ we have from \eqref{mfdef:prop} that \begin{equation}\label{prop:nomfgenonS:proof:eq1} \begin{aligned} X(t_1) &\overset{d}{=} M(\lambda t_1, 1/\lambda) X(1/\lambda),\\ X(1) &\overset{d}{=} M(\lambda, 1/\lambda) X(1/\lambda). \end{aligned} \end{equation} By denoting \begin{align*} f_q(\lambda) &= \mathbb{E} |M(\lambda, t) |^q, \quad \lambda \in \Lambda,\\ g_q(t) &= \frac{\mathbb{E} |X(t) |^q}{\mathbb{E} |X(1) |^q}, \quad t \in {\mathcal{T}}, \end{align*} and using $\mathbb{E}|X(1/\lambda)|^q>0$, we get from \eqref{prop:nomfgenonS:proof:eq1} that \begin{equation*} \frac{f_q(\lambda t_1)}{f_q(\lambda)} = g_q(t_1). \end{equation*} Similarly, since $\lambda t_1, \lambda t_1 t_2 \in \Lambda$ and $1/(\lambda t_1) \in {\mathcal{S}}$ we have \begin{equation*} \frac{f_q(\lambda t_1 t_2 )}{f_q(\lambda t_1)} = g_q(t_2) \quad \text{ and } \quad \frac{f_q(\lambda t_1 t_2 )}{f_q(\lambda)} = g_q(t_1 t_2). \end{equation*} We conclude that \begin{equation}\label{prop:proof:cauchyfun} g_q(t_1) g_q(t_2) = \frac{f_q(\lambda t_1)}{f_q(\lambda)} \frac{f_q(\lambda t_1 t_2 )}{f_q(\lambda t_1)} = g_q(t_1 t_2) \end{equation} for any $t_1, t_2 \in (0,\infty)$. The joint measurability and Fubini's theorem imply that $t\mapsto \mathbb{E} |X(t) |^q$, and hence $t\mapsto g_q(t)$ is measurable. 
Hence, for each $q \in (0,\overline{q} \left(X\right))$, there is $\tau(q)\in {\mathbb R}$ such that $g_q(t)=t^{\tau(q)}$ for $t\in (0,\infty)$. We now show that $q \mapsto \tau(q)$ is a linear function (see \cite{mandelbrot1997mmar}). Let $q_1,q_2 \in (0, \overline{q}(X))$, $w_1, w_2 \geq 0$, $w_1+w_2=1$ and put $q=q_1 w_1 + q_2 w_2$. From H\"older's inequality we have that \begin{equation*} \mathbb{E}|X(t)|^q \leq \left( \mathbb{E}|X(t)|^{q_1} \right)^{w_1} \left( \mathbb{E}|X(t)|^{q_2} \right)^{w_2} \end{equation*} and by taking logarithms \begin{align*} &\tau(q) \log t + \log \mathbb{E} |X(1) |^q \\ &\hspace{3em} \leq \left( w_1 \tau(q_1) + w_2 \tau(q_2) \right) \log t + w_1 \log \mathbb{E} |X(1) |^{q_1} + w_2 \log \mathbb{E} |X(1) |^{q_2}. \end{align*} Dividing by $\log t < 0$, $t<1$, and letting $t \to 0$ gives $\tau(q) \geq w_1 \tau(q_1) + w_2 \tau(q_2)$ showing that $\tau$ is concave. But if we divide by $\log t$, $t>1$, and let $t\to \infty$ we get that $\tau(q) \leq w_1 \tau(q_1) + w_2 \tau(q_2)$, hence $\tau$ must be linear. For $\lambda \in \Lambda$, we have from \eqref{mfdef:prop} \begin{equation*} \mathbb{E}|X(\lambda)|^q = \mathbb{E} |M(\lambda, 1)|^q \mathbb{E}|X(1)|^q \end{equation*} and so $\mathbb{E} |M(\lambda, 1)|^q = \lambda^{\tau(q)}$ with $\tau$ linear. In particular, the Mellin transform is $\mathcal{M}_{M(\lambda, 1)}(q) = \lambda^{\tau(q)}$ for every $q \in (0, \overline{q}(X))$. It follows that $M(\lambda, 1)$ is constant a.s. From Proposition \ref{prop:LamSwhendeterministic} we conclude that $X$ is self-similar. \end{proof} \begin{remark}\label{rem:cauchyfuneqproof} The assumption on joint measurability is used only to preclude the existence of pathological solutions of the Cauchy functional equation \eqref{prop:proof:cauchyfun}. Alternatively, one could assume e.g.~that $t\mapsto \mathbb{E} |X(t) |^q$ is continuous at a point or bounded (see e.g.~\cite{aczel1989functional}). 
\end{remark} \begin{remark} In Proposition \ref{prop:nomfgenonS} we assume ${\mathcal{T}}$ is not bounded. If ${\mathcal{T}}$ is bounded, e.g.~${\mathcal{T}}=[0,T]$, it is possible to have random scaling over the whole time domain, i.e.~${\mathcal{S}}={\mathcal{T}}$. This happens for cascade processes (see Subsection \ref{subsec:examples}). \end{remark} Note that the independence of the process and the family of scaling factors in \eqref{mfdef:prop} is crucial for the proof of Proposition \ref{prop:nomfgenonS}. Indeed, one can construct a process with ${\mathcal{T}}=(0,\infty)$ such that \eqref{mfdef:prop} holds with ${\mathcal{S}}={\mathcal{T}}$, but the scaling family is not independent of the process (see Section \ref{sec4} and Remark \ref{rem:depconstr}). \subsection{Summary} Proposition \ref{prop:nomfgenonS} shows that ${\mathcal{S}}$ must not be too large, but we also do not want it to be too small. Based on what we have proved in this section, it is not very restrictive to assume the following. \begin{assumption} For any multifractal process, we assume that \begin{enumerate}[(i)] \item either $\Lambda=(0,1]$ or $\Lambda=[1,\infty)$, unless one of the cases is specified, \item $\lambda {\mathcal{S}} = \{\lambda t : t \in {\mathcal{S}} \} \subset {\mathcal{S}}$ for every $\lambda \in \Lambda$. \end{enumerate} \end{assumption} Moreover, we will implicitly exclude self-similar processes from the discussion on multifractal processes. Note that \textit{(ii)} is satisfied as soon as $\Lambda \subset {\mathcal{S}}$. On the other hand, if \textit{(ii)} holds and $1\in {\mathcal{S}}$, then $\Lambda \subset {\mathcal{S}}$. This assumption leads to two typical classes of multifractal processes: \begin{itemize} \item $\Lambda=(0,1]$, ${\mathcal{S}}=(0,1]$ or ${\mathcal{S}}=(0,T]$, $\mathcal{T}=(0,\infty)$ or $\mathcal{T}=(0,T]$ \item $\Lambda=[1,\infty)$, ${\mathcal{S}}=[1,\infty)$ or ${\mathcal{S}}=[T,\infty)$, $\mathcal{T}=(0,\infty)$ or $\mathcal{T}=[T,\infty)$. 
\end{itemize} These two classes are closely related, as the following lemma shows by establishing a simple correspondence between them. \begin{lemma}\label{lemma:Lambdacorrespondance} Suppose $X=\{X(t), \, t\in{\mathcal{T}}\}$ is multifractal with the family of scaling factors $\{M(\lambda,t), \, \lambda \in \Lambda, t\in {\mathcal{S}}\}$. Then the process $\overline{X}=\{\overline{X}(t), t \in \overline{{\mathcal{T}}}\}$ defined by \begin{equation*} \overline{X}(t)=X(1/t), \quad t \in \overline{{\mathcal{T}}}=\{1/t : t \in {\mathcal{T}}\}, \end{equation*} is multifractal with the family of scaling factors \begin{equation*} \{\overline{M}(\lambda,t), \, \lambda \in \overline{\Lambda}, t \in \overline{{\mathcal{S}}}\}\overset{d}{=}\{M(1/\lambda,1/t), \, \lambda \in \overline{\Lambda}, t \in \overline{{\mathcal{S}}}\}, \end{equation*} where $\overline{{\mathcal{S}}}=\{1/t : t \in {\mathcal{S}}\}$ and $\overline{\Lambda}=[1,\infty)$ if $\Lambda=(0,1]$ or $\overline{\Lambda}=(0,1]$ if $\Lambda=[1,\infty)$. \end{lemma} \begin{proof} This is obvious since \begin{align*} \left\{\overline{X}(\lambda t) \right\}_{t\in \overline{{\mathcal{S}}}} &= \left\{ X\left( 1/(\lambda t) \right) \right\}_{1/t \in {\mathcal{S}}}\\ &\overset{d}{=} \left\{ M\left( 1/\lambda, 1/t \right) X \left(1/t \right) \right\}_{1/t \in {\mathcal{S}}}\\ &=\left\{ \overline{M}(\lambda,t) \overline{X}(t) \right\}_{t\in \overline{{\mathcal{S}}}}. \end{align*} \end{proof} We also note that it is possible to extend the sets $\mathcal{S}$ and $\mathcal{T}$ by scaling the time with some fixed scale $T>0$. For example, if $\{X(t), \, t \in (0,1]\}$ is multifractal with $\Lambda=(0,1]$ and $\mathcal{S}=(0,1]$, then the process $\{\widetilde{X}(t), \, t \in (0,T]\}$ defined by \begin{equation}\label{e:extendingTandS} \widetilde{X}(t)=X(t/T), \quad t \in (0,T], \end{equation} is multifractal with $\Lambda=(0,1]$ and $\mathcal{S}=(0,T]$. 
\section{Properties and examples}\label{sec3} Our first goal is to derive general properties of the family $M=\{M(\lambda, t), \, \lambda \in \Lambda, t \in {\mathcal{S}}\}$, dictated by the relation \eqref{mfdef:prop}. As in the previous section, certain regularity assumptions are needed for the proofs. \begin{proposition}\label{prop:Mproperties} If $X=\{X(t), \, t \in {\mathcal{T}}\}$ is multifractal and $\overline{q} \left(X\right)>0$, then the following holds: \begin{enumerate}[(i)] \item $M(1, t)=1$ a.s.~for every $t \in {\mathcal{S}}$. \item For every $\lambda_1, \lambda_2 \in \Lambda$ and $t\in {\mathcal{S}}$, $M(\lambda_1 \lambda_2,t) \overset{d}{=} M^{(1)} M^{(2)}$ with $M^{(1)}\overset{d}{=} M(\lambda_1, \lambda_2 t)$ and $M^{(2)}\overset{d}{=} M(\lambda_2, t)$ independent. \item Let $\lambda \in \Lambda$ and $t\in {\mathcal{S}}$. Then for every $n \in {\mathbb N}$ there exist independent identically distributed positive random variables $M^{(1)}, \dots, M^{(n)}$ such that \begin{equation*} M(\lambda, t) \overset{d}{=} M^{(1)} \cdots M^{(n)}. \end{equation*} Moreover, $M^{(1)}\overset{d}{=} M(\lambda^{1/n}, t)$. \end{enumerate} \end{proposition} \begin{proof} \textit{(i)} By the same argument as in the proof of Proposition \ref{prop:nomfgen}, for $t\in {\mathcal{S}}$ we obtain from $X(t)\overset{d}{=} M(1,t) X(t)$ that for $z \in (0,\overline{q} \left(X\right))$ \begin{equation*} \mathcal{M}_{M(1,t)}(z) = 1, \end{equation*} which implies the statement. 
\textit{(ii)} Since $\lambda_1, \lambda_2 \in \Lambda$, then $\lambda_1 \lambda_2 \in \Lambda$, $\lambda_2 t \in {\mathcal{S}}$ and from \eqref{mfdef:prop} we have that for $t\in {\mathcal{S}}$ \begin{align*} \mathcal{M}_{|X(\lambda_1 \lambda_2 t)|}(z) &= \mathcal{M}_{M(\lambda_1 \lambda_2, t)}(z) \mathcal{M}_{|X(t)|}(z), \\ \mathcal{M}_{|X(\lambda_1 \lambda_2 t)|}(z) &= \mathcal{M}_{M(\lambda_1, \lambda_2 t)}(z) \mathcal{M}_{|X(\lambda_2 t)|}(z), \\ \mathcal{M}_{|X(\lambda_2 t)|}(z) &= \mathcal{M}_{M(\lambda_2,t)}(z) \mathcal{M}_{|X(t)|}(z), \end{align*} and therefore \begin{equation*} \mathcal{M}_{M(\lambda_1 \lambda_2, t)}(z) \mathcal{M}_{|X(t)|}(z) = \mathcal{M}_{M(\lambda_1, \lambda_2 t)}(z) \mathcal{M}_{M(\lambda_2, t)}(z) \mathcal{M}_{|X(t)|}(z). \end{equation*} As in Proposition \ref{prop:nomfgen}, it follows for $z \in (0,\overline{q} \left(X\right))$ that \begin{equation*} \mathcal{M}_{M(\lambda_1 \lambda_2, t)}(z) = \mathcal{M}_{M(\lambda_1, \lambda_2 t)}(z) \mathcal{M}_{M(\lambda_2,t)}(z). \end{equation*} Hence, taking $M^{(1)}\overset{d}{=} M(\lambda_1, \lambda_2 t)$ and $M^{(2)}\overset{d}{=} M(\lambda_2, t)$ independent completes the proof. \textit{(iii)} Similarly as in \textit{(ii)}, $\lambda \in \Lambda$ implies $\lambda^{1/n} \in \Lambda$ and from \eqref{mfdef:prop} it follows for $i=0,\dots,n-1$ that \begin{equation*} \mathcal{M}_{\left| X\left( \lambda^{(n-i)/n} t\right) \right|}(z) = \mathcal{M}_{M\left(\lambda^{1/n}, \lambda^{(n-i-1)/n} t \right)}(z) \mathcal{M}_{\left| X\left( \lambda^{(n-i-1)/n} t\right) \right|}(z). \end{equation*} A successive application and \eqref{Mstatmarg} yield \begin{equation*} \mathcal{M}_{M(\lambda, t)}(z) \mathcal{M}_{|X(t)|}(z) = \left( \mathcal{M}_{M(\lambda^{1/n}, t)}(z) \right)^n \mathcal{M}_{|X(t)|}(z) \end{equation*} and by the same argument as in \textit{(ii)} taking $M^{(1)}, \dots, M^{(n)}$ independent and distributed as $M(\lambda^{1/n},t)$ gives $M(\lambda,t) \overset{d}{=} M^{(1)} \cdots M^{(n)}$. 
\end{proof} \begin{remark} The property \textit{(iii)} in Proposition \ref{prop:Mproperties} appears across the literature under various names. In \cite{veneziano1999basic} it is referred to as log-infinite divisibility (see also \cite{bacry2003log}). In a different context, the authors of \cite{hirsch2013mellin} obtain a random variable possessing the same property which is called multiplicative infinite divisibility there. Zolotarev \cite[Section 3.5]{zolotarev1986one} refers to the same property as $M$-infinite divisibility. Clearly, Proposition \ref{prop:Mproperties}\textit{(iii)} implies that for every $\lambda \in \Lambda$ and $t\in {\mathcal{S}}$, $\log M(\lambda,t)$ is infinitely divisible. \end{remark} The infinite divisibility of $\log M(\lambda,t)$ suggests an intimate relation with L\'evy processes. Recall that a L\'evy process is a process starting at zero with stationary independent increments which is stochastically continuous. Recall that the stochastic continuity of some process $\{Y(t)\}$ means that for every $t_0$, $Y(t) \to^P Y(t_0)$ as $t \to t_0$. If the same holds with convergence in distribution, then we say that the process is continuous in law. \begin{proposition}\label{prop:MtoL} Suppose $X=\{X(t), \, t \in {\mathcal{T}}\}$ is multifractal, $\overline{q} \left(X\right)>0$ and for some $t\in {\mathcal{S}}$ (and hence for every $t\in{\mathcal{S}}$), $\{M(\lambda,t), \, \lambda \in \Lambda\}$ is continuous in law. Let $\{L(s), \, s \geq 0\}$ be a L\'evy process such that \begin{equation*} L(1) \overset{d}{=} \begin{cases} \log M(e^{-1}, t),& \text{ if } \Lambda=(0,1], \\ \log M(e, t),& \text{ if } \Lambda=[1,\infty). \end{cases} \end{equation*} Then for every $s\geq 0$ \begin{equation}\label{MtoL01} L(s) \overset{d}{=} \log M(e^{-s}, t) \end{equation} if $\Lambda=(0,1]$, and if $\Lambda=[1,\infty)$ \begin{equation}\label{MtoL1infty} L(s) \overset{d}{=} \log M(e^s, t). 
\end{equation} \end{proposition} \begin{proof} Let $\mu$ denote the distribution of $L(1)$ which is infinitely divisible by Proposition \ref{prop:Mproperties}\textit{(iii)}. For every $n\in {\mathbb N}$ there is a unique probability measure $\mu^{1/n}$ such that the $n$-fold convolution denoted by $\left(\mu^{1/n}\right)^n$ is $\mu$. Moreover, $\mu^s$ is well-defined for every $s\geq 0$ and is equal to the distribution of $L(s)$ (see \cite[Chapter 7]{sato1999levy}). For $s\geq 0$, let $\mu_s$ denote the distribution of $\log M(e^{-s},t)$ if $\Lambda=(0,1]$ or the distribution of $\log M(e^s,t)$ if $\Lambda=[1,\infty)$. Clearly, $\mu_1=\mu$ and by Proposition \ref{prop:Mproperties}\textit{(iii)} for every $n\in {\mathbb N}$, $\mu_1=\left(\mu_{1/n}\right)^n$ and so $\mu_{1/n}=\mu^{1/n}$. Again by Proposition \ref{prop:Mproperties}\textit{(iii)} $\mu_{m/n}=\left(\mu_{1/n} \right)^m = \mu^{m/n}$ for every $m\in {\mathbb N}$. This proves the statement for every rational $s$. If $s$ is irrational, take $(s_n)$ to be a sequence of rational numbers such that $s_n\to s$. By the continuity in law $\mu_{s_n} \to^d \mu_s$ and so $\mu_s=\mu^s$. \end{proof} \begin{remark} One can avoid making assumptions on the family $\{M(\lambda,t), \, \lambda\in \Lambda\}$ by using conditions on the original process. Suppose $X$ is continuous in law. For some $t\in {\mathcal{S}}$ we have $P(X(t)\neq 0)>0$ and for the sequence $(\lambda_n)$ in $\Lambda$ such that $\lambda_n \to \lambda \in \Lambda$ we have $X(\lambda_n t) \to^d X(\lambda t)$ and so $M(\lambda_n,t) X(t) \to^d M(\lambda, t) X(t)$. From here we obtain for $\theta \in {\mathbb R}$ \begin{equation*} \mathbb{E} \left[ \mathbf{1}_{\{|X(t)|>0\}} e^{ i \theta \left(\log M(\lambda_n, t) + \log |X(t)| \right) } \right] \to \mathbb{E} \left[ \mathbf{1}_{\{|X(t)|>0\}} e^{ i \theta \left(\log M(\lambda, t) + \log |X(t)|\right)} \right]. 
\end{equation*} By independence it follows that \begin{align*} &\mathbb{E} \left[ e^{ i \theta \log M(\lambda_n, t) } \right] \mathbb{E} \left[ \mathbf{1}_{\{|X(t)|>0\}} e^{ i \theta \log |X(t)| } \right]\\ &\hspace{4cm} \to \mathbb{E} \left[ e^{ i \theta \log M(\lambda, t) } \right] \mathbb{E} \left[ \mathbf{1}_{\{|X(t)|>0\}} e^{ i \theta \log |X(t)| } \right]. \end{align*} Assuming additionally that the characteristic function of $\log |X(t)|$ has only isolated zeros, we can conclude that \begin{equation*} \mathbb{E} \left[ e^{ i \theta \log M(\lambda_n, t) } \right] \to \mathbb{E} \left[ e^{ i \theta \log M(\lambda, t) } \right], \end{equation*} yielding continuity in law of $\{\log M(\lambda,t), \, \lambda \in \Lambda\}$. The assumption on the moments in Propositions \ref{prop:nomfgen}, \ref{prop:nomfgenonS} and \ref{prop:Mproperties} can be replaced with the condition that the characteristic function of $\log |X(t)|$ has only isolated zeros. \end{remark} \begin{remark}\label{remark:afterprops} Notice that in the proofs of Propositions \ref{prop:LamSwhendeterministic}-\ref{prop:MtoL} only one-dimensional distributions of the multifractal process $X$ are used. Therefore it is enough to assume $X$ is marginally multifractal and hence it applies to processes satisfying only \eqref{mfdef:motivating}. \end{remark} We note that \eqref{MtoL01} and \eqref{MtoL1infty} only show that the one-dimensional marginal distributions of two processes are equal. However, $\{\log M(e^{-s}, t), \, s \geq 0\}$ (or $\{\log M(e^{s}, t), \, s \geq 0\}$) need not be a L\'evy process. For Brownian motion such examples are known under the name fake Brownian motion (see \cite{oleszkiewicz2008fake} and references therein). Actually, we will show in Subsection \ref{subsec:examples} that the well-known example of multifractal process, multiplicative cascades, provide a family of scaling factors that does not arise from some L\'evy process. 
Proposition \ref{prop:MtoL} shows that the marginal distributions of the family $\{M(\lambda,t)\}$ are completely determined by $M(e^{-1},t)$ (or $M(e,t)$). It also provides an approach for constructing a family of scaling factors with properties as in Proposition \ref{prop:Mproperties}. Indeed, for fixed $t> 0$ one could take $M(\lambda, t)=e^{ L(-\log \lambda)}$ for $\Lambda=(0,1]$ or $M(\lambda,t)=e^{ L(\log \lambda)}$ for $\Lambda=[1,\infty)$ with $\{L(s), \, s \geq 0\}$ being some L\'evy process. This idea will be further developed in Section \ref{sec4} where it is used to define a new class of multifractal processes. \subsection{Scaling of moments} The scaling of moments in the sense of relation \eqref{momscal:motivating} is a direct consequence of \eqref{mfdef:prop}. Indeed, suppose $X=\{X(t), \, t \in {\mathcal{T}}\}$ is multifractal and the assumptions of Proposition \ref{prop:MtoL} hold. Assume that $\Lambda=(0,1]$; the argument is similar in the other case. There exists a L\'evy process $\{L(s)\}$ such that $M(\lambda,t) \overset{d}{=} e^{L(-\log \lambda)}$. Let $\Psi$ denote the characteristic exponent of $L$, that is $\Psi(\theta) = \log \mathbb{E} \left[ e^{i\theta L(1)} \right]$. If we assume that $1\in {\mathcal{S}}$, then since for $t\in \Lambda$, $X(t) \overset{d}{=} M(t,1) X(1)$, it follows that for $q \in [0, \overline{q}(X))$ \begin{equation*} \mathbb{E} \left[ M(t,1)^q \right] = \mathbb{E} \left[ e^{q L(-\log t)} \right] < \infty. \end{equation*} Hence the moment generating function of $L(s)$ exists on $[0, \overline{q}(X))$ for every $s\geq 0$. Moreover, for $q \in [0, \overline{q}(X))$ \begin{equation*} \mathbb{E} \left[ e^{q L(s)} \right] = e^{s \psi(q)} \end{equation*} and by naturally extending $\Psi$ we have $\psi(q) = \Psi(-iq)$. We will refer to $\psi$ as the Laplace exponent. 
The same argument applies to moments of negative order, that is for $q \in (\underline{q}(X),0]$ where $\underline{q}(X)$ is defined in \eqref{qunderline}. This way we have proved: \begin{proposition}\label{prop:momscalgeneral} Under the assumptions of Proposition \ref{prop:MtoL}, if $1\in {\mathcal{S}}$, then for every $q \in (\underline{q}(X), \overline{q}(X))$ \begin{itemize} \item if $\Lambda=(0,1]$ \begin{equation*} \mathbb{E} |X(t)|^q = t^{-\psi(q)} \mathbb{E}|X(1)|^q, \quad t \in (0,1], \end{equation*} \item if $\Lambda=[1,\infty)$ \begin{equation*} \mathbb{E} |X(t)|^q = t^{\psi(q)} \mathbb{E}|X(1)|^q, \quad t \in [1,\infty), \end{equation*} \end{itemize} where $\psi$ is the Laplace exponent of the L\'evy process $L$ defined in Proposition \ref{prop:MtoL}. \end{proposition} We conclude that the role of the scaling function $\tau$ in \eqref{momscal:motivating} is taken by the Laplace exponent $\psi$ or $-\psi$. Since $\psi$ is the cumulant generating function of $L(1)$, it is well known that $\psi$ is convex and strictly convex if and only if $L(1)$ is non-degenerate. The strict concavity of the scaling function is a typical property characterizing multifractals that satisfy \eqref{momscal:motivating} (see e.g.~\cite{mandelbrot1997mmar}). This corresponds to our case $\Lambda=(0,1]$ when we have $\tau(q)=-\psi(q)$ which is strictly concave if $L(1)$ is non-degenerate. \begin{remark}\label{rem:noscalingofmom} The scaling of moments as in \eqref{momscal:motivating} cannot hold for every $t>0$ with $\tau$ being nonlinear. Indeed, this follows as in the proof of Proposition \ref{prop:nomfgenonS} (see also \cite{mandelbrot1997mmar}). \end{remark} Without involving moments, the scaling property may be expressed in terms of the Mellin transforms. 
Assuming $1\in {\mathcal{S}}$, it follows from \eqref{mfdef:prop} that for every $\theta \in {\mathbb R}$ we have \begin{itemize} \item if $\Lambda=(0,1]$ \begin{equation*} \mathcal{M}_{|X(t)|}(\theta i) = t^{-\Psi(\theta)} \mathcal{M}_{|X(1)|}(\theta i), \quad t \in (0,1], \end{equation*} \item if $\Lambda=[1,\infty)$ \begin{equation*} \mathcal{M}_{|X(t)|}(\theta i) = t^{\Psi(\theta)} \mathcal{M}_{|X(1)|}(\theta i), \quad t \in [1,\infty). \end{equation*} \end{itemize} \subsection{Examples}\label{subsec:examples} A prominent example of a truly multifractal process satisfying scale invariance in the sense of \eqref{mfdef:motivatingfdd} is multiplicative cascades. The cascades have been introduced by Mandelbrot \cite{mandelbrot1972} using a discrete grid-based construction. Several equivalent constructions have been proposed to obtain continuous scaling properties starting with \cite{barral2002multifractal} and followed by \cite{bacry2003log}, \cite{muzy2002multifractal} and, more recently, \cite{barral2014exact}. Let $\nu$ be an arbitrary infinitely divisible distribution and $\Psi$ its characteristic exponent, $\Psi(\theta) = \log \mathbb{E} \left[ e^{i\theta \nu} \right]$. Assume that $\theta_c= \sup \{ \theta \geq 0 : \mathbb{E} \left[ e^{\theta \nu} \right] < \infty \} > 1$ so that the Laplace exponent $\psi (\theta) = \log \mathbb{E} \left[ e^{\theta \nu} \right]$ is finite on $[0, \theta_c)$. Furthermore, assume that $\psi(1)=0$ so that $\mathbb{E} \left[ e^{\nu} \right]= 1$. Next, let $\mathcal{L}$ be an independently scattered infinitely divisible random measure on the half-plane ${\mathcal{H}} = \{ (u, v) : u \in {\mathbb R}, v \geq 0\}$ associated to $\nu$ with control measure $\mu(du,dv)=v^{-2} du dv$ (see \cite{rajput1989spectral} for details). 
In particular, for every Borel set $A \subset {\mathcal{H}}$ such that $\mu(A) < \infty$ \begin{equation}\label{exa:cascade:lambda} \mathbb{E} \exp \left\{i \theta \mathcal{L} (A) \right\} = e^{\Psi(\theta) \mu(A)}. \end{equation} Fix $T > 0$ and for $t \in {\mathbb R}$ and $l > 0$ define sets (cones) \begin{equation*} A_l(t) = \{(u,v) : v \geq l, \ -f(v)/2 < u-t \leq f(v)/2 \}, \end{equation*} where \begin{equation*} f(v)= \begin{cases} v, \ v\leq T,\\ T, \ v> T. \end{cases} \end{equation*} Now we can define a stochastic process \begin{equation*} \omega_l(t) = \mathcal{L} \left( A_l(t) \right), \quad t \in {\mathbb R}, \end{equation*} and for $l > 0$ a random measure on ${\mathbb R}$ by \begin{equation*} Q_l(dt) = e^{\omega_l(t)} dt. \end{equation*} One can show that a.s.~$Q_l$ converges weakly to a random measure $Q$, as $l\to 0$ (see \cite{barral2014exact} for details). This limiting measure $Q$ is called the log-infinitely divisible cascade and the cascade process $\{X(t),\, t \in [0, \infty)\}$ is obtained by putting $X(t) = Q([0, t])$. For $\lambda \in (0, 1]$ and $l \in (0, T]$ the process $\{\omega_l(t)\}$ satisfies the following property \begin{equation*} \{ \omega_{\lambda l}(\lambda t) \}_{t\in [0,T]} \overset{d}{=} \{ \Omega(\lambda) + \omega_l(t)\}_{t\in [0,T]}, \end{equation*} where $\Omega(\lambda)$ is independent of $\{ \omega_l(t) \}$, it does not depend on $t$ and $l$ and has the characteristic function $\mathbb{E} \left[ e^{i \theta \Omega(\lambda)} \right] = \lambda^{-\Psi(\theta)}$. From here, one easily obtains that for $\lambda \in (0, 1]$ \begin{equation}\label{cascadesscaling} \{ X(\lambda t)\}_{t \in [0,T]} \overset{d}{=} \{ W(\lambda) X(t) \}_{t \in [0,T]}, \end{equation} with $W(\lambda) = \lambda e^{\Omega(\lambda)}$, independent of $\{ X(t)\}$. 
This implies that $\{X(t)\}$ is multifractal by Definition \ref{def:mf} with ${\mathcal{T}}=[0,\infty)$, ${\mathcal{S}}=[0,T]$, $\Lambda = (0, 1]$ and the family of scaling factors $M(\lambda, t) = W(\lambda)$ not depending on $t$. Let $\{Z(s), \, s \geq 0\}$ be a process defined by $Z(s)=\log W(e^{-s})$. Then by Proposition \ref{prop:MtoL}, the one-dimensional distributions of $Z$ correspond to those of some L\'evy process $L$. Here we can actually compute that \begin{equation*} \mathbb{E} \exp \left\{ i\theta Z(s) \right\} = \mathbb{E} \exp \left\{ i\theta \log \left( e^{-s} e^{\Omega(e^{-s})} \right) \right\} = \exp \left\{ s \Psi(\theta) - i \theta s \right\}, \end{equation*} and hence $L$ can be identified with the process $\{\widetilde{L}(s)-s\}$ where $\{\widetilde{L}(s)\}$ is a L\'evy process with characteristic exponent $\Psi$. However, we will now show that $Z$ is not a L\'evy process. It is sufficient to show for arbitrary $0<s_1<s_2$ that \begin{equation}\label{exa:cascade:ZLdiff} \left( Z(s_1)+s_1, Z(s_2)+s_2 \right) \overset{d}{\neq} \left( \widetilde{L}(s_1), \widetilde{L}(s_2) \right). \end{equation} Since $\left( Z(s_1)+s_1, Z(s_2)+s_2 \right) \overset{d}{=} \left( \Omega(e^{-s_1}), \Omega(e^{-s_2}) \right)$, we put $\lambda_1=e^{-s_1}$, $\lambda_2=e^{-s_2}$ and for $t, l \in (0,T]$ we consider the characteristic function of $\left( \omega_{\lambda_1 l} (\lambda_1 t), \omega_{\lambda_2 l} (\lambda_2 t)\right)$: \begin{equation*} \mathbb{E} \exp \left\{ i \left(a_1 \omega_{\lambda_1 l}(\lambda_1 t) + a_2 \omega_{\lambda_2 l}(\lambda_2 t) \right) \right\} = \mathbb{E} \exp \left\{ i \left(a_1 \mathcal{L} (A_{\lambda_1 l}(\lambda_1 t)) + a_2 \mathcal{L} (A_{\lambda_2 l}(\lambda_2 t)) \right) \right\}.
\end{equation*} Now let \begin{align*} B_1 &= A_{\lambda_1 l}(\lambda_1 t) \backslash A_{\lambda_2 l}(\lambda_2 t),\\ B_2 &= A_{\lambda_1 l}(\lambda_1 t) \cap A_{\lambda_2 l}(\lambda_2 t),\\ B_3 &= A_{\lambda_2 l}(\lambda_2 t) \backslash A_{\lambda_1 l}(\lambda_1 t), \end{align*} and since these sets are disjoint we have by independence and \eqref{exa:cascade:lambda} that \begin{align*} &\mathbb{E} \exp \left\{ i \left(a_1 \omega_{\lambda_1 l}(\lambda_1 t) + a_2 \omega_{\lambda_2 l}(\lambda_2 t) \right) \right\}\\ &\hspace{3cm}= \mathbb{E} \exp \left\{ i \left(a_1 \mathcal{L} (B_1) + (a_1+a_2) \mathcal{L} (B_2) + a_2 \mathcal{L}(B_3) \right) \right\}\\ &\hspace{3cm}= \exp \left\{ \Psi(a_1) \mu(B_1) + \Psi(a_1+a_2) \mu(B_2) + \Psi(a_2) \mu(B_3) \right\}. \end{align*} The cascades are obtained in the limit when $l\to 0$, so we may assume $l \leq t$. A direct computation shows that \begin{align*} \mu(B_1) &= \log \frac{\lambda_1-\lambda_2}{\lambda_1} + \log \frac{t}{l} + 1,\\ \mu(B_2) &= \log \frac{1}{\lambda_1-\lambda_2} + \log \frac{T}{t},\\ \mu(B_3) &= \log \frac{\lambda_1-\lambda_2}{\lambda_2} + \log \frac{t}{l} + 1, \end{align*} and hence \begin{align*} &\mathbb{E} \exp \left\{ i \left(a_1 \omega_{\lambda_1 l}(\lambda_1 t) + a_2 \omega_{\lambda_2 l}(\lambda_2 t) \right) \right\}\\ &= \exp \left\{ \Psi(a_1) \log \lambda_1^{-1} + \Psi(a_2) \log \lambda_2^{-1} + \log (\lambda_1-\lambda_2) \left(\Psi(a_1) + \Psi(a_2) - \Psi(a_1+a_2)\right) \right\}\\ &\quad \times \exp \left\{\Psi(a_1+a_2) \left(1+\log \frac{T}{l} \right) + \left(1+ \log \frac{t}{l} \right) \left(\Psi(a_1) + \Psi(a_2) - \Psi(a_1+a_2)\right) \right\}.
\end{align*} Now we can write \begin{equation*} \left( \omega_{\lambda_1 l} (\lambda_1 t), \omega_{\lambda_2 l} (\lambda_2 t)\right) \overset{d}{=} \left( \Omega(\lambda_1), \Omega(\lambda_2) \right) + \left(\omega_l'(t), \omega_l''(t) \right), \end{equation*} where the random vectors on the right are independent and $\omega_l'(t) =^d \omega_l''(t) =^d \omega_l(t)$. This implies that the characteristic function of $\left( Z(s_1)+s_1, Z(s_2)+s_2 \right)$ is \begin{align*} \mathbb{E} &\exp \left\{ i \left(a_1 (Z(s_1)+s_1) + a_2 (Z(s_2)+s_2) \right) \right\}\\ &= \exp \left\{ \Psi(a_1) s_1 + \Psi(a_2) s_2 + \log (e^{-s_1}-e^{- s_2}) \left(\Psi(a_1) + \Psi(a_2) - \Psi(a_1+a_2)\right) \right\}. \end{align*} On the other hand, $\{\widetilde{L}(s)\}$ is a L\'evy process so that \begin{align*} \mathbb{E} \exp \left\{ i \left(a_1 \widetilde{L}(s_1) + a_2 \widetilde{L}(s_2) \right) \right\} &= \mathbb{E} \exp \left\{ i \left( (a_1+a_2) \widetilde{L}(s_1) + a_2 \widetilde{L}(s_2) - a_2 \widetilde{L}(s_1)\right) \right\}\\ &= \exp \left\{ \Psi(a_2) (s_2-s_1) + \Psi(a_1+a_2) s_1 \right\}, \end{align*} which proves \eqref{exa:cascade:ZLdiff}. \begin{remark} This fact is of some independent interest as it provides an example of a process whose one-dimensional marginal distributions are the same as those of some L\'evy process but the process itself is not a L\'evy process. This problem has been considered in the martingale setting for Brownian motion and generally for self-similar processes (see \cite{fan2015mimicking}, \cite{oleszkiewicz2008fake} and the references therein). The example obtained here will be elaborated in more detail elsewhere. \end{remark} Further examples of multifractal processes can be obtained by compounding the cascade process and some self-similar process. For Brownian motion this gives the so-called multifractal random walk (see \cite{bacry2003log}).
These models have gained considerable interest in mathematical finance since they can replicate most of the stylized facts of financial time series. The construction of the cascade process necessarily involves the so-called integral scale $T$ which is hard to interpret in finance. Several extensions have been proposed by letting $T\to\infty$, however, these do not satisfy exact scale invariance as in \eqref{mfdef:motivating} (see e.g.~\cite{duchon2012forecasting}, \cite{muzy2013random}). Note that this is in accordance with Proposition \ref{prop:nomfgenonS}. As mentioned in the introduction, in \cite{veneziano1999basic} the following variant of \eqref{mfdef:motivatingfdd} is investigated: for every $\lambda \in \Lambda$, $\{X(t)\} =^d \{M_\lambda X(\lambda t) \}$ with $M_\lambda$ independent of $\{X(t)\}$. The canonical examples provided there are the so-called processes with independent stationary increment ratios. When $\Lambda=(0,1]$, these in essence correspond to processes defined by $X(t)=e^{L(\log \left(t/T\right))}$ for $t \in [T,\infty)$, where $L$ is a L\'evy process. For $\lambda \in (0,1]$ we have \begin{align*} X(t) &= e^{L(\log \left(t/T\right)) - L(\log \left(t/T\right) + \log \lambda) + L(\log \left(t/T\right)+\log \lambda)}\\ &= e^{L(\log \left(t/T\right)) - L(\log \left(t/T\right) + \log \lambda)} X(\lambda t), \end{align*} where the two random variables on the right are independent by the independence of increments of $L$. This means that $X(t)=^d M_\lambda X(\lambda t)$ with \[M_\lambda=e^{L(\log \left(t/T\right)) - L(\log \left(t/T\right) + \log \lambda)}.\] Although the distribution of $M_\lambda$ does not depend on $t$ by the stationarity of increments of $L$, it is not the same random variable for every $t$ and the definition does not hold in the sense of equality of finite dimensional distributions. It does however satisfy our Definition \ref{def:mf}, but it cannot be extended to the interval ${\mathcal{T}}=(0,\infty)$.
In Section \ref{sec4} we will present an approach that solves this problem. We mention the last example of a process with an exact scale invariance property that we are aware of. The multifractional Brownian motion with random exponent is a process $\{Y(t)\}$ obtained by replacing the Hurst parameter of fractional Brownian motion with some stochastic process $\{S(t),\, t\in {\mathbb R}\}$ (see \cite{ayache2005multifractional} for details). Given a stationary process $\{S(t)\}$ it has been shown in \cite[Theorem 4.1]{ayache2005multifractional} using a wavelet decomposition of the process $\{Y(t), \, t\in {\mathbb R}\}$ that for any $\lambda>0$ it holds that \begin{equation*} Y(\lambda t) \overset{d}{=} \lambda^{S(t)} Y(t), \quad \forall t\in {\mathbb R}. \end{equation*} It follows from Proposition \ref{prop:nomfgen} and Remark \ref{remark:afterprops} that if $\overline{q}(Y)>0$, then $S(t)$ is constant a.s.~and hence no new examples of multifractal processes can arise in this way. To our knowledge, the list of examples of processes satisfying exact scale invariance ends here. In the next section we provide a large class of processes that are multifractal by Definition \ref{def:mf}. \section{$L$-multifractals}\label{sec4} One of the main drawbacks of the cascade construction is that one obtains a process defined only on a bounded time interval as the construction necessarily involves an integral scale $T$. Our goal here is to develop an alternative approach that would provide processes defined on unbounded time intervals and satisfying Definition \ref{def:mf}. In our definition of multifractality we made two steps away from the scaling property of cascades \eqref{cascadesscaling} that will make this possible. Firstly, we made the transition from \eqref{mfdef:motivatingfdd} to \eqref{mfdef:prop} and secondly, we allowed the scaling to hold over a set ${\mathcal{S}}$ not necessarily equal to ${\mathcal{T}}$.
In fact, we will detail here the construction of multifractal processes such that ${\mathcal{T}}=(0,\infty)$ and ${\mathcal{S}}=\Lambda=(0,1]$. Moreover, for these processes the property \eqref{mfdef:prop} will actually hold over ${\mathcal{S}}=(0,\infty)$, but the family of scaling factors and the process will not be independent in this case. As shown in Proposition \ref{prop:nomfgenonS}, it is not possible to preserve independence and random scaling together with ${\mathcal{S}}=(0,\infty)$. Using this approach, an abundance of examples of processes satisfying Definition \ref{def:mf} can be obtained. Indeed, to any L\'evy process (hence any infinitely divisible distribution) and stationary process, there corresponds one multifractal process. For this class of processes the equality \eqref{MtoL01} (or \eqref{MtoL1infty}) in Proposition \ref{prop:MtoL} will hold not only for the one-dimensional marginals, but also for the finite dimensional distributions. Because of this correspondence with L\'evy processes, we will call these processes $L$-multifractals. As shown in Subsection \ref{subsec:examples}, the cascades do not belong to this class. The method for obtaining multifractal processes is inspired by the idea of the Lamperti transformation which provides the correspondence between stationary and self-similar processes. If $\{Y(t), \, t \in {\mathbb R}\}$ is a stationary process and $H\in {\mathbb R}$, then the process $\{X(t),\, t>0\}$ defined by \begin{equation}\label{e:Lamptransclassical1} X(t)=t^H Y(\log t), \quad t>0 \end{equation} is self-similar with Hurst parameter $H$. Conversely, if $\{X(t), \, t > 0\}$ is $H$-ss, then \begin{equation}\label{e:Lamptransclassical} Y(t)=e^{-tH} X(e^t), \quad t\in {\mathbb R}, \end{equation} defines a stationary process. Our next goal is to extend this idea to the multifractal case. The results are stated only for the case $\Lambda=(0,1]$ as the other one is analogous by Lemma \ref{lemma:Lambdacorrespondance}.
Specifying $H$ in the Lamperti transformation corresponds to specifying family $\{M(\lambda,t),\allowbreak \, \lambda \in \Lambda, t\in {\mathcal{S}}\}$ in the multifractal case. First we show how to obtain such family satisfying properties given in Proposition \ref{prop:Mproperties} from an arbitrary L\'evy process. \begin{lemma}\label{lemma:Mdef} Let $L=\{L(s), \, s \geq 0\}$ be a L\'evy process. For $a \geq 0$ define a family of random variables $\{M^{(a)}(\lambda,t), \, \lambda \in (0,1], t \leq e^{a} \}$ given by \begin{equation}\label{Madef} M^{(a)}(\lambda,t) = e^{L(a-\log t - \log \lambda) - L(a-\log t)}. \end{equation} Then there exists a family $\{M(\lambda,t), \, \lambda \in (0,1], t > 0\}$ such that for every $a \geq 0$ \begin{equation}\label{MtoMa:fdd} \left\{ M(\lambda,t) \right\}_{\lambda \in (0,1], \, t \in (0, e^{a}]} \overset{d}{=} \left\{ M^{(a)}(\lambda,t) \right\}_{\lambda \in (0,1], \, t \in (0, e^{a}]} \end{equation} and satisfying the following properties: \begin{enumerate}[(i)] \item For every $\lambda \in (0,1]$, $\left\{ M(\lambda,e^u), \, u \in {\mathbb R} \right\}$ is a stationary process. \item For every $t>0$, $M(1,t)=1$ a.s. \item For $\lambda_1,\lambda_2 \in (0,1]$, $t>0$ and $a \geq 0$, $M^{(a)}(\lambda_1, \lambda_2 t)$ and $M^{(a)}(\lambda_2, t)$ are independent and \begin{equation*} M^{(a)}(\lambda_1 \lambda_2, t) = M^{(a)}(\lambda_1,\lambda_2 t) M^{(a)}(\lambda_2, t). \end{equation*} Moreover, $M(\lambda_1, \lambda_2 t)$ and $M(\lambda_2, t)$ are independent and \begin{equation*} M(\lambda_1 \lambda_2, t) \overset{d}{=} M(\lambda_1,\lambda_2 t) M(\lambda_2, t). \end{equation*} \end{enumerate} \end{lemma} \begin{proof} If we denote for $\tau\geq 0$, $\widehat{L}^{(\tau)}(s)=L(\tau+s)-L(\tau)$, then clearly $\{\widehat{L}^{(\tau)}(s)\}_{s\geq 0} \overset{d}{=} \{ L(s)\}_{s\geq 0}$. 
Taking $a \geq b \geq 0$ and so $e^{a} \geq e^{b}$ we get that \begin{equation}\label{lemma:Mdef:proof1} \begin{aligned} &\left\{ M^{(a)}(\lambda,t) \right\}_{\lambda \in (0,1], \, t \in (0, e^{b}]}\\ &= \left\{ e^{ L(a-b + b - \log t - \log \lambda) - L(a-b+b-\log t) } \right\}_{\lambda \in (0,1], \, t \in (0, e^{b}]} \\ &= \left\{ e^{ L(a-b+b- \log t - \log \lambda) - L(a-b) - \left( L(a-b+b-\log t) - L(a-b)\right) } \right\}_{\lambda \in (0,1], \, t \in (0, e^{b}]} \\ &= \left\{ e^{ \widehat{L}^{(a-b)}(b-\log t - \log \lambda) - \widehat{L}^{(a-b)}(b-\log t) } \right\}_{\lambda \in (0,1], \, t \in (0, e^{b}]} \\ &\overset{d}{=} \left\{ e^{ L(b - \log t - \log \lambda) - L(b - \log t) } \right\}_{\lambda \in (0,1], \, t \in (0, e^{b}]} \\ &= \left\{ M^{(b)}(\lambda,t) \right\}_{\lambda \in (0,1], \, t \in (0, e^{b}]}. \end{aligned} \end{equation} Given $(\lambda_1,t_1),\dots,(\lambda_n,t_n) \in (0,1] \times (0,\infty)$ we can find $a\geq 0$ such that $\max_{i=1,\dots,n}\allowbreak t_i \leq e^{a}$. Let $\mu_{(\lambda_1,t_1),\dots,(\lambda_n,t_n)}$ denote the distribution of $( M^{(a)}(\lambda_1,t_1),\allowbreak\dots, \allowbreak M^{(a)}(\lambda_n,t_n) )$, which does not depend on $a$ as shown above. The measures $\mu_{(\lambda_1,t_1),\dots,(\lambda_n,t_n)}$, $(\lambda_1,t_1),\dots,(\lambda_n,t_n)\allowbreak \in (0,1] \times (0,\infty)$, $n \in {\mathbb N}$ form a consistent family and by the Kolmogorov extension theorem there exists a two-parameter process $\{M(\lambda,t), \, \lambda \in (0,1], t > 0 \}$ with finite dimensional distributions $\mu_{(\lambda_1,t_1),\dots,(\lambda_n,t_n)}$, $(\lambda_1,t_1),\dots,(\lambda_n,t_n) \in (0,1] \times (0,\infty)$, $n \in {\mathbb N}$ such that \eqref{MtoMa:fdd} holds. We now prove properties \textit{(i)}-\textit{(iii)}. \textit{(i)} It is enough to show that for arbitrary $h>0$ and $a \geq 0$ it holds that \begin{equation*} \left\{ M(\lambda,e^{u}) \right\}_{u \leq a} \overset{d}{=} \left\{ M(\lambda,e^{u-h}) \right\}_{u \leq a}.
\end{equation*} Since $u \leq a$ implies $e^{u-h}\leq e^u\leq e^{a}$ we have \begin{align*} \left\{ M(\lambda,e^{u-h}) \right\}_{u \leq a} &\overset{d}{=} \left\{ M^{(a)}(\lambda,e^{u-h}) \right\}_{u \leq a}\\ &= \left\{ e^{L(a-u+h - \log \lambda) - L(a-u+h)} \right\}_{u \leq a}\\ &= \left\{ e^{\widehat{L}^{(h)}(a-u - \log \lambda) - \widehat{L}^{(h)}(a-u)} \right\}_{u \leq a}\\ &\overset{d}{=} \left\{ e^{L(a-u - \log \lambda) - L(a-u)} \right\}_{u \leq a}\\ &= \left\{ M^{(a)}(\lambda,e^{u}) \right\}_{u \leq a}\\ &\overset{d}{=} \left\{ M(\lambda,e^{u}) \right\}_{u \leq a}. \end{align*} \textit{(ii)} This is clear from \eqref{Madef} and \eqref{MtoMa:fdd}. \textit{(iii)} By taking $a$ such that $t \leq e^{a}$ we have \begin{align*} M(\lambda_1 \lambda_2, t) &\overset{d}{=} M^{(a)}(\lambda_1 \lambda_2,t) = e^{L(a - \log t - \log \lambda_1 - \log \lambda_2) - L(a-\log t)}\\ &= e^{L(a-\log t - \log \lambda_1 - \log \lambda_2) - L(a-\log t - \log \lambda_2) + L(a-\log t - \log \lambda_2)- L(a-\log t)}\\ &= M^{(a)}(\lambda_1, \lambda_2 t) M^{(a)}(\lambda_2, t), \end{align*} which are independent by the independence of increments of $L$. Since \begin{align*} \left( M(\lambda_1, \lambda_2 t), M(\lambda_2, t)\right) \overset{d}{=} \left( M^{(a)}(\lambda_1, \lambda_2 t), M^{(a)}(\lambda_2, t) \right), \end{align*} the statement follows. \end{proof} A family $\{M(\lambda,t), \, \lambda \in (0,1], t > 0\}$ will be said to \textbf{correspond to a L\'evy process} $L$ if for every $a\geq 0$ \eqref{MtoMa:fdd} holds with $\{M^{(a)}(\lambda,t), \, \lambda \in (0,1], t \leq e^{a} \}$ given by \eqref{Madef}. We can use the previous construction of such a family to build the multifractal process from a stationary process. The multifractal process obtained in this way will have $\{M(\lambda,t), \, \lambda \in (0,1], t \in (0,1])\}$ as the family of scaling factors. This represents a multifractal analog of the Lamperti transformation.
\begin{theorem}\label{thm:MFLamperti} Let $L=\{L(s), \, s \geq 0\}$ be a L\'evy process and $Y=\{Y(u), \, u \in {\mathbb R}\}$ a stationary process independent of $L$. For $a \geq 0$ define the process $\{X^{(a)}(t), \, t \in (0,e^{a}] \}$ by setting \begin{equation}\label{Xadef} X^{(a)}(t) = e^{L(a-\log t) - L(a)} Y(\log t). \end{equation} Then there exists a process $X=\{X(t), \, t > 0 \}$ such that for every $a \geq 0$ \begin{equation}\label{XtoXa:fdd} \left\{ X(t) \right\}_{t \in (0,e^{a}]} \overset{d}{=} \left\{ X^{(a)}(t) \right\}_{t \in (0,e^{a}]} \end{equation} and for every $\lambda \in (0,1]$ \begin{equation}\label{XisMF} \left\{ X(\lambda t) \right\}_{t\geq 0} \overset{d}{=} \left\{ M(\lambda,t) X(t) \right\}_{t\geq 0}, \end{equation} where $\{M(\lambda,t), \, \lambda \in (0,1], t > 0 \}$ is the family of scaling factors corresponding to a L\'evy process $L$. The process $X$ and the family $\{M(\lambda,t), \, \lambda \in (0,1], t \in (0,1] \}$ are independent, hence $X$ is multifractal with ${\mathcal{S}}=\Lambda=(0,1]$. \end{theorem} \begin{proof} Denoting again for $\tau\geq 0$, $\widehat{L}^{(\tau)}(s)=L(\tau+s)-L(\tau)$, we have for $a \geq b \geq 0$ \begin{align*} \left\{ X^{(a)}(t) \right\}_{t \in (0,e^{b}]} &= \left\{ e^{L(a-b+b-\log t) - L(a-b+b)} Y(\log t) \right\}_{t \in (0, e^{b}]}\\ &= \left\{ e^{L(a-b+b-\log t)-L(a-b) - \left(L(a-b+b)- L(a-b) \right)} Y(\log t) \right\}_{t \in (0, e^{b}]}\\ &= \left\{ e^{\widehat{L}^{(a-b)}(b-\log t) - \widehat{L}^{(a-b)}(b)} Y(\log t) \right\}_{t \in (0, e^{b}]}\\ &\overset{d}{=} \left\{ e^{L(b-\log t) - L(b)} Y(\log t) \right\}_{t \in (0, e^{b}]}\\ &= \left\{ X^{(b)}(t) \right\}_{t \in (0, e^{b}]}. \end{align*} As in Lemma \ref{lemma:Mdef}, this shows that finite dimensional distributions of $\{X^{(a)}(t), \, t \leq e^{a} \}$, $a\geq 0$ do not depend on $a$ and form a consistent family. An appeal to the Kolmogorov extension theorem gives the existence of $X$ satisfying \eqref{XtoXa:fdd}.
To show multifractality of $X$ it suffices to show \eqref{XisMF} holds over $t \in (0,e^{a}]$ for arbitrary $a\geq 0$. By using the notation of Lemma \ref{lemma:Mdef} and stationarity of $Y$ we have \begin{align} \left\{ X(\lambda t) \right\}_{t \in (0,e^{a}]} &\overset{d}{=} \left\{ X^{(a)}(\lambda t) \right\}_{t \in (0,e^{a}]}\nonumber \\ &= \left\{ e^{L(a-\log t - \log \lambda)-L(a)} Y(\log t + \log \lambda) \right\}_{t \in (0,e^{a}]}\nonumber \\ &= \left\{ e^{L(a-\log t - \log \lambda)-L(a-\log t)} e^{L(a-\log t)-L(a)} Y(\log t + \log \lambda) \right\}_{t \in (0,e^{a}]}\nonumber \\ &\overset{d}{=} \left\{ e^{L(a-\log t - \log \lambda)-L(a-\log t)} e^{L(a-\log t)-L(a)} Y(\log t) \right\}_{t \in (0,e^{a}]}\nonumber \\ &= \left\{ M^{(a)}(\lambda, t) X^{(a)}(t) \right\}_{t \in (0,e^{a}]} \label{thm:MFLamperti:proofline}\\ &\overset{d}{=} \left\{ M(\lambda,t) X(t) \right\}_{t \in (0,e^{a}]}.\nonumber \end{align} When $t\in(0,1]$, two factors in \eqref{thm:MFLamperti:proofline} are independent due to independence of increments of $L$ and independence of $L$ and $Y$ and hence family $\{M(\lambda,t), \, \lambda \in (0,1], t \in (0,1] \}$ can be taken independent of $\{X(t)\}$. \end{proof} A multifractal process $X$ will be said to be \textbf{$L$-multifractal} if its family of scaling factors corresponds to a L\'evy process $L$. Every process $X$ obtained as in Theorem \ref{thm:MFLamperti} is $L$-multifractal. \begin{remark}\label{rem:depconstr} Note that the equality of finite dimensional distributions \eqref{XisMF} holds over $(0,\infty)$, but the scaling family is independent from the process only over $(0,1]$, hence ${\mathcal{S}}=(0,1]$. Putting $X^{(a)}(t) = e^{L(a+\log t) - L(a)} Y(\log t)$ for $t\in(e^{-a},\infty)$ instead of \eqref{Xadef}, yields by similar arguments a process for which \eqref{XisMF} holds over $(0,\infty)$. 
The random field $M$ corresponds to the one constructed as in Lemma \ref{lemma:Mdef} but with $M^{(a)}(\lambda,t) = e^{L(a+\log t - \log \lambda) - L(a+\log t)}$, $\lambda \in (0,1]$, $t\in(e^{-a},\infty)$, replacing \eqref{Madef}. The independence does not hold in \eqref{XisMF}, hence the process $X$ is not multifractal by Definition \ref{def:mf}. \end{remark} The analog of the inverse Lamperti transformation also holds. Indeed, every $L$-multifractal corresponds to a stationary process in the sense given by the following theorem. \begin{theorem}\label{thm:MFLampertiInv} Suppose $\{X(t), \, t>0\}$ is $L$-multifractal with the scaling family $\{M(\lambda,t), \,\allowbreak \lambda \in (0,1], t > 0 \}$. Then the process $\{Y(s), \, s \geq 0 \}$ defined by \begin{equation*} Y(s) = M(e^{-s},e^s) X(e^s) \end{equation*} is stationary. \end{theorem} \begin{proof} For arbitrary $h>0$ we have \begin{align*} \left\{ Y(s+h) \right\}_{s \geq 0} &= \left\{ M(e^{-s-h},e^{s+h}) X(e^{s+h}) \right\}_{s \geq 0} \\ &\overset{d}{=} \left\{ M(e^{-s},e^{s}) M(e^{-h},e^{s+h}) X(e^{s+h}) \right\}_{s \geq 0}\\ &\overset{d}{=} \left\{ M(e^{-s},e^{s}) X(e^{s}) \right\}_{s \geq 0}\\ &\overset{d}{=} \left\{ Y(s) \right\}_{s \geq 0}. \end{align*} \end{proof} Let us mention that if in Theorems \ref{thm:MFLamperti} and \ref{thm:MFLampertiInv} $L(s)=-Hs$, $s\geq 0$, then one obtains the classical form of the Lamperti transformation. \section{Properties of $L$-multifractal processes}\label{sec5} In this section we derive several properties of $L$-multifractal processes defined in Theorem \ref{thm:MFLamperti}. \subsection{Scaling of moments} Since the process $X$ from Theorem \ref{thm:MFLamperti} is multifractal, Proposition \ref{prop:momscalgeneral} implies the scaling of moments holds. By using \eqref{Xadef}, we can actually prove more. \begin{proposition} Let $X$ be a process obtained in Theorem \ref{thm:MFLamperti} from a L\'evy process $L$ and a stationary process $Y$.
If $q\in {\mathbb R}$ is such that \begin{equation*} \mathbb{E} \left[ e^{qL(1)} \right]< \infty, \ \mathbb{E} \left[ e^{-qL(1)} \right]< \infty \ \text{ and } \ \mathbb{E}|Y(1)|^q<\infty, \end{equation*} then $\mathbb{E}|X(t)|^q<\infty$ for every $t>0$. If $\psi$ is the Laplace exponent of $L$, $\mathbb{E} \left[ e^{q L(s)} \right] = e^{s \psi(q)}$, then \begin{equation}\label{L-MF:moscal} \mathbb{E}|X(t)|^q = \begin{cases} t^{-\psi(q)} \mathbb{E}|X(1)|^q, & \text{ if } t \leq 1, \\ t^{\psi(-q)} \mathbb{E}|X(1)|^q, & \text{ if } t > 1. \end{cases} \end{equation} \end{proposition} \begin{proof} By taking $a\geq 0$ such that $t \leq e^a$ we have \begin{align*} \mathbb{E}|X(t)|^q = \mathbb{E} \left[ e^{q\left( L(a-\log t) - L(a) \right)} \right] \mathbb{E}|Y(\log t)|^q &= \begin{cases} \mathbb{E} \left[ e^{q L(-\log t)} \right] \mathbb{E}|X(1)|^q, & \text{ if } t \leq 1, \\[1ex] \mathbb{E} \left[ e^{-q L(\log t)} \right] \mathbb{E}|X(1)|^q, & \text{ if } t > 1. \end{cases}\\ &= \begin{cases} t^{-\psi(q)} \mathbb{E}|X(1)|^q, & \text{ if } t \leq 1, \\ t^{\psi(-q)} \mathbb{E}|X(1)|^q, & \text{ if } t > 1. \end{cases} \end{align*} \end{proof} It is important to note that \eqref{L-MF:moscal} does not contradict Remark \ref{rem:noscalingofmom}, since in \eqref{L-MF:moscal} we actually have $\mathbb{E}|X(t)|^q = t^{\tau(q, t)} \mathbb{E}|X(1)|^q$ with the exponent $\tau$ depending additionally on $t$: \begin{equation*} \tau(q,t) = \begin{cases} -\psi(q), & \text{ if } t \leq 1, \\ \psi(-q), & \text{ if } t > 1. 
\end{cases} \end{equation*} In terms of the Mellin transforms, we similarly obtain for $\theta \in {\mathbb R}$ \begin{equation*} \mathcal{M}_{|X(t)|}(\theta i) = \begin{cases} t^{-\Psi(\theta)} \mathcal{M}_{|X(1)|}(\theta i), & \text{ if } t \leq 1, \\ t^{\Psi(-\theta)} \mathcal{M}_{|X(1)|}(\theta i), & \text{ if } t > 1, \end{cases} \end{equation*} where $\Psi$ is the characteristic exponent of $L$, $\Psi(\theta) = \log \mathbb{E} \left[ e^{i\theta L(1)} \right]$. \subsection{Stationarity of increments} When it comes to applications like finance, turbulence and other fields, an important feature of a stochastic process used for modeling is stationarity of increments. This provides applicability of statistical methods and is often plausible to assume. However, even for self-similar processes this may be hard to achieve. In fact, as noted by \cite{barndorff1999stationary}, there is no simple characterization of marginal laws of self-similar processes with stationary increments. We will first show that, unfortunately, the process $X$ constructed in Theorem \ref{thm:MFLamperti} with finite variance cannot have stationary increments if considered on the time set $\mathcal{T}=(0,\infty)$. However, if we restrict the time set to, say $\mathcal{T}=(0,1]$, then it may be possible for $X$ to have stationary increments. We will show that by appropriately choosing the stationary process $Y$ in Theorem \ref{thm:MFLamperti} and restricting the time set, one can obtain a multifractal process with second-order stationary increments, meaning that its covariance function is the same as if it had stationary increments. Suppose $X$ is an $L$-multifractal process defined in Theorem \ref{thm:MFLamperti} with finite variance.
By taking $a\geq 0$ such that $e^a\geq t > s$, we have directly from \eqref{Xadef} that \begin{equation}\label{e:covLMFgen} \begin{aligned} &\mathbb{E} X(t)X(s) = \mathbb{E} \left[ e^{L(a-\log t) - L(a)} Y(\log t) e^{L(a-\log s) - L(a)} Y(\log s)\right]\\ &= \mathbb{E} \left[ e^{L(a-\log t) - L(a) + L(a-\log s) - L(a)}\right] \mathbb{E} \left[ Y(\log t) Y(\log s)\right]\\ &=\begin{cases} \mathbb{E} \left[ e^{-2 \left( L(a) - L(a-\log s)\right)} \right] \mathbb{E} \left[e^{-\left(L(a-\log s) - L(a-\log t)\right) }\right] \mathbb{E} \left[ Y(\log t) Y(\log s)\right], & \text{ if } t > 1, \ s>1, \\ \mathbb{E} \left[ e^{- \left( L(a) - L(a-\log t)\right)} \right] \mathbb{E} \left[ e^{ L(a-\log s) - L(a) }\right] \mathbb{E} \left[ Y(\log t) Y(\log s)\right], & \text{ if } t > 1, \ s\leq 1, \\ \mathbb{E} \left[ e^{2\left( L(a-\log t) - L(a)\right)} \right] \mathbb{E} \left[ e^{L(a-\log s) - L(a-\log t) }\right] \mathbb{E} \left[ Y(\log t) Y(\log s)\right], & \text{ if } t \leq 1, \ s\leq 1, \\ \end{cases}\\ &=\begin{cases} t^{\psi(-1)} s^{\psi(-2)-\psi(-1)} \mathbb{E} \left[ Y(\log t) Y(\log s)\right], & \text{ if } t > 1, \ s>1, \\ t^{\psi(-1)} s^{-\psi(1)} \mathbb{E} \left[ Y(\log t) Y(\log s)\right], & \text{ if } t > 1, \ s\leq 1, \\ t^{\psi(1)-\psi(2)} s^{-\psi(1)} \mathbb{E} \left[ Y(\log t) Y(\log s)\right], & \text{ if } t \leq 1, \ s\leq 1, \\ \end{cases} \end{aligned} \end{equation} where $\psi$ is the Laplace exponent of $L$. 
On the other hand, if $X$ is $L$-multifractal with stationary increments, then for $t>s$ \begin{equation}\label{e:covLMFsi} \mathbb{E} X(t)X(s) = \begin{cases} \frac{1}{2} \left( t^{\psi(-2)} + s^{\psi(-2)} - (t-s)^{\psi(-2)}\right) \mathbb{E} X(1)^2, & \text{ if } t > 1, \ s>1 \ \text{and} \ t-s>1, \\ \frac{1}{2} \left( t^{\psi(-2)} + s^{\psi(-2)} - (t-s)^{-\psi(2)}\right) \mathbb{E} X(1)^2, & \text{ if } t > 1, \ s>1 \ \text{and} \ t-s\leq 1, \\ \frac{1}{2} \left( t^{\psi(-2)} + s^{-\psi(2)} - (t-s)^{\psi(-2)}\right) \mathbb{E} X(1)^2, & \text{ if } t > 1, \ s\leq 1 \ \text{and} \ t-s>1, \\ \frac{1}{2} \left( t^{\psi(-2)} + s^{-\psi(2)} - (t-s)^{-\psi(2)}\right) \mathbb{E} X(1)^2, & \text{ if } t > 1, \ s\leq 1 \ \text{and} \ t-s\leq 1, \\ \frac{1}{2} \left( t^{-\psi(2)} + s^{-\psi(2)} - (t-s)^{-\psi(2)}\right) \mathbb{E} X(1)^2, & \text{ if } t \leq 1, \end{cases} \end{equation} which follows from \eqref{L-MF:moscal} and the following identity valid for any stationary increments process with finite variance \begin{align*} \mathbb{E} X(t)X(s) &= \frac{1}{2} \left( \mathbb{E} X(t)^2 + \mathbb{E} X(s)^2 - \mathbb{E} \left(X(t)-X(s)\right)^2 \right)\\ &= \frac{1}{2} \left( \mathbb{E} X(t)^2 + \mathbb{E} X(s)^2 - \mathbb{E} X(t-s)^2 \right). \end{align*} We now consider whether it is possible to choose $Y$ and $\psi$ in \eqref{e:covLMFgen} to obtain the covariance function \eqref{e:covLMFsi}, as if $X$ had stationary increments. Let $u, h >0$ so that $e^{u+h}>1$, $e^u >1$ and suppose that $e^{u+h}-e^u \leq 1$.
Then by equating \eqref{e:covLMFgen} and \eqref{e:covLMFsi} we have \begin{align} &\mathbb{E} Y(u+h) Y(u) = \mathbb{E} Y(\log e^{u+h}) Y(\log e^u)\nonumber\\ &\quad=\frac{1}{2} \mathbb{E} X(1)^2 \frac{e^{\psi(-2)(u+h)} + e^{\psi(-2)u} - (e^{u+h}-e^u)^{-\psi(2)}} {e^{\psi(-1)(u+h)} e^{(\psi(-2)-\psi(-1))u}}\nonumber\\ &\quad=\frac{1}{2} \mathbb{E} X(1)^2 \left( e^{(\psi(-2) - \psi(-1))h} + e^{-\psi(-1)h} - e^{-\psi(-1)h} e^{-(\psi(-2) + \psi(2))u} (e^{h}-1)^{-\psi(2)} \right).\label{e:covYudep} \end{align} Since $Y$ is stationary, \eqref{e:covYudep} must not depend on $u$, hence it should hold that $\psi(-2)=-\psi(2)$. But then $\psi$ is a convex function passing through three collinear points $(-2,-\psi(2))$, $(0,0)$ and $(2,\psi(2))$ and hence it must be linear (see e.g.~\cite[Lemma 2]{GLST2019Bernoulli}) implying that $L(1)$ is degenerate and $X$ is self-similar. To conclude, a process defined in Theorem \ref{thm:MFLamperti} with finite variance cannot have stationary increments unless it is self-similar. One can notice that the problem appears with \eqref{e:covLMFsi} having different forms for $t-s>1$ and $t-s\leq 1$. However, if we restrict the time domain of the process to $\mathcal{T}=(0,1]$, then we can obtain a multifractal process with second-order stationary increments. Consider a multifractal process from Theorem \ref{thm:MFLamperti} restricted to $\mathcal{T}=(0,1]$. In this case, $X$ can be defined as \begin{equation*} X(t)= e^{L(-\log t)} Y(\log t), \quad t \in (0,1], \end{equation*} where $L=\{L(t), \, t \geq 0\}$ is some L\'evy process and $Y=\{Y(t),\, t \in {\mathbb R}\}$ is a stationary process.
If $u<u+h<0$, then $e^{u+h}\leq 1$, $e^u \leq 1$, $e^{u+h}-e^u \leq 1$ and equating again \eqref{e:covLMFgen} and \eqref{e:covLMFsi} yields \begin{align} \mathbb{E} Y(u+h) Y(u) &= \mathbb{E} Y(\log e^{u+h}) Y(\log e^u)\nonumber\\ &=\frac{1}{2} \mathbb{E} X(1)^2 \frac{e^{-\psi(2)(u+h)} + e^{-\psi(2)u} - (e^{u+h}-e^u)^{-\psi(2)}} {e^{(\psi(1)-\psi(2))(u+h)} e^{-\psi(1)u}}\nonumber\\ &=\frac{1}{2} \mathbb{E} X(1)^2 \left( e^{-\psi(1)h} + e^{-(\psi(1)-\psi(2))h} - e^{-(\psi(1) - \psi(2))h} (e^{h}-1)^{-\psi(2)} \right)\nonumber\\ &=\frac{1}{2} \mathbb{E} X(1)^2 e^{-\psi(1)h} \left( 1 + e^{\psi(2)h} - (1 - e^{-h})^{-\psi(2)} \right),\label{e:covSolution} \end{align} which does not depend on $u$. Note that for $\psi(q)=-Hq$, $0<H<1$, we recover the covariance function of the stationary process obtained by the classical Lamperti transformation of fractional Brownian motion (see \cite{cheridito2003fractional}). In particular, for $\psi(q)=-q/2$ we get the Ornstein-Uhlenbeck (OU) process (see e.g.~\cite{samorodnitsky1994stable}). Recall that OU process $\{Y(u),\, u \in{\mathbb R}\}$ with parameter $\lambda>0$ is a stationary Gaussian process with mean zero and covariance function \begin{equation*} \mathbb{E} \left[ Y(u+h) Y(u) \right]= \mathbb{E} Y(0)^2 e^{-\lambda |h|}, \quad u, h \in {\mathbb R}. \end{equation*} Note that it is not immediately clear whether \eqref{e:covSolution} defines a covariance function of some stationary process. We will consider a simple example in the next subsection. Also note that although assuming $\mathcal{T}=(0,1]$ may seem overly restrictive, by using \eqref{e:extendingTandS} we can extend the time set to $(0,T]$ for arbitrary $T>0$. We summarize the previous discussion in the following proposition. 
\begin{proposition}\label{prop:newconstruction} Let $T>0$ and suppose $L$ is a L\'evy process with Laplace exponent $\psi$ well-defined on $[0,2]$ and \begin{equation}\label{e:cov:Y:prop} \gamma(h)=\frac{1}{2} \mathbb{E} Y(0)^2 e^{-\psi(1)h} \left( 1 + e^{\psi(2)h} - (1 - e^{-h})^{-\psi(2)} \right), \end{equation} is a covariance function of a strictly stationary process $\{Y(t), \, t \in {\mathbb R}\}$. Then the process $\{X(t), \, t \in (0,T]\}$ \begin{equation*} X(t) = e^{L\left(-\log (t/T)\right)} Y \left(\log (t/T) \right), \quad t \in (0,T], \end{equation*} is multifractal with $\Lambda=(0,1]$, $\mathcal{S}=(0,T]$ and \begin{equation}\label{e:process:cov} \mathbb{E} X(t)X(s) = \frac{1}{2} T^{\psi(2)} \mathbb{E} X(1)^2 \left( t^{-\psi(2)} + s^{-\psi(2)} - |t-s|^{-\psi(2)}\right), \quad t,s \in (0,T]. \end{equation} In particular, for any $\varepsilon>0$ the sequence $X(t_j+\varepsilon)-X(t_j)$, $j=1,\dots, \lfloor T/\varepsilon\rfloor$ is weakly stationary with \begin{equation}\label{e:increment:cov} \begin{aligned} \mathbb{E}&\left(X(t_j+\varepsilon)-X(t_j)\right) \left(X(t_i+\varepsilon)-X(t_i) \right)\\ &= \frac{1}{2} \varepsilon^{-\psi(2)} \mathbb{E} X(1)^2 \left( |j-i+1|^{-\psi(2)} + |j-i-1|^{-\psi(2)} - 2 |j-i|^{-\psi(2)} \right). \end{aligned} \end{equation} \end{proposition} Identity \eqref{e:increment:cov} is easily obtained from \eqref{e:process:cov} and it takes the form of covariances of fractional Gaussian noise (see \cite{samorodnitsky1994stable}). If $\psi(2)=-1$, then the increments are uncorrelated. Furthermore, \eqref{e:process:cov} implies that for any $t,s \in (0,T]$ \begin{equation*} \mathbb{E} \left(X(t) - X(s)\right)^2 = \frac{1}{2} T^{\psi(2)} \mathbb{E} X(1)^2 |t-s|^{-\psi(2)}. \end{equation*} If $-\psi(2)-1>0$, then by Kolmogorov's theorem (see e.g.~\cite[Theorem 2.2.8]{karatzas2012brownian}) there exists a modification of $\{X(t)\}$ which is locally H\"older continuous with exponent $\gamma$ for every $\gamma \in (0, (-\psi(2)-1)/2)$. 
\subsection{Examples} A number of processes can be constructed from Theorem \ref{thm:MFLamperti}. Given a L\'evy process $L$, one can simply take $Y(t)=1$ a.s.~to obtain a positive multifractal process which is an exponential of a L\'evy process in logarithmic time extended to the whole $(0,\infty)$. We shall consider in more detail a specific example that may be viewed as a multifractal analog of Brownian motion. Suppose the L\'evy process $L$ is Brownian motion with drift $\mu$ so that $\psi(q)=\mu q + \sigma^2 q^2/2$, $q\in {\mathbb R}$. We consider the process constructed in Proposition \ref{prop:newconstruction} and take the stationary process $Y$ to be the OU process with parameter $\lambda = \psi(1)+1$, hence \begin{equation*} \mathbb{E} \left[ Y(u+h) Y(u) \right]= \mathbb{E} Y(0)^2 e^{-(\psi(1)+1) |h|}, \quad u, h \in {\mathbb R}. \end{equation*} Note that this is exactly \eqref{e:cov:Y:prop} with $\psi(2)=-1$. Hence, the process $\{X(t), \, t \in (0,T]\}$ defined in Proposition \ref{prop:newconstruction} will have second-order stationary increments. The condition $\psi(2)=-1$ implies $\mu=-1/2-\sigma^2$ and \begin{equation*} \psi(q)=-\left( \frac{1}{2} + \sigma^2 \right) q + \frac{\sigma^2}{2} q^2. \end{equation*} The increments of $X$ are uncorrelated and \begin{equation*} \mathbb{E} X(t) X(s) = \frac{\min \{t,s\}}{T}. \end{equation*} Since the classical Lamperti transformation \eqref{e:Lamptransclassical1} of OU process yields Brownian motion, the process $X$ represents a multifractal analog of Brownian motion. The scaling function is given by $\tau(q)=-\psi(q)$ and is well defined for $q \in (-1,\infty)$ since absolute moments of order less than or equal to $-1$ are infinite for the Gaussian distribution. The scaling function is of the same form as the scaling function of multifractal random walk which is Brownian motion with time taken to be log-normal multiplicative cascade process (see \cite{bacry2003log}). 
Other properties of these processes require deeper investigation which will be addressed in future work. One of the interesting questions is whether the sample paths of these processes possess multifractal properties in the sense of the varying local regularity exponents (see e.g.~\cite{abry2015irregularities}, \cite{grahovac2014bounds}, \cite{jaffard1999multifractal} and the references therein). \appendix \section{Mellin transform}\label{appendix:mellin} Recall that the Mellin transform (or Mellin-Stieltjes transform) of a nonnegative random variable $X$ with distribution function $F$ is defined as \begin{equation*} \mathcal{M}_X(z) = \int_0^\infty x^z dF(x) \end{equation*} for $z \in {\mathbb C}$. The integral exists for all $z$ in some strip $S=\{z : \sigma_1 \leq \mathrm{Re}\, z \leq \sigma_2 \}$, $\sigma_1 \leq \sigma_2$ which contains the imaginary axis and possibly degenerates into this axis. The Mellin transform completely determines the distribution of a nonnegative random variable $X$. Furthermore, if the strip $S$ does not degenerate into the imaginary axis, it is uniquely determined by its values on the interval $(\sigma_1, \sigma_2)$. Indeed, in the case $P(X>0)=1$, by applying the change of variables it is easy to see that $\mathcal{M}_X$ can be expressed as the two-sided Laplace transform of the random variable $-\log X$ with distribution function $G$: \begin{equation*} \mathcal{M}_X(z) = \int_{-\infty}^\infty e^{-zx} dG(x). \end{equation*} Since the two-sided Laplace transform is an analytic function on $S$, so is $\mathcal{M}_X$ (see e.g.~\cite[p.~240]{widder1946laplace}). Therefore by the familiar property of analytic functions, $\mathcal{M}_X$ is uniquely determined by its values on the interval $(\sigma_1, \sigma_2)$. In the case $P(X=0)>0$, we can apply the same argument to the random variable $Y$ defined by distribution function $\widetilde{F}(x)=(F(x)-F(0))/(1-F(0)) \mathbf{1}_{\{x\geq 0\}}$ and use the fact that $\mathcal{M}_X(z)=(1-F(0)) \mathcal{M}_Y(z)$. 
Moreover, if the strip $S$ does not degenerate into the imaginary axis, inversion formulas can be obtained by exploiting the relation with the two-sided Laplace transform. The definition can be extended to include real-valued variables, however we do not pursue this question here. More details about the Mellin transform can be found in \cite{galambos2004products} and \cite{zolotarev1957mellin}. The main reason the Mellin transform proves useful is the following property: if $X$ and $Y$ are two independent nonnegative random variables and $\mathcal{M}_X$, $\mathcal{M}_Y$ are their Mellin transforms defined on strips $S_1$ and $S_2$ respectively, then the Mellin transform of the product $XY$ in the strip $S_1\cap S_2$ is \begin{equation*} \mathcal{M}_{XY} (z) = \mathcal{M}_X (z) \mathcal{M}_Y (z). \end{equation*} \end{document}
\begin{document} \title{{\Large \bf A stochastic optimal control problem governed by SPDEs via a spatial-temporal interaction operator}\thanks{This work was supported by the National Natural Science Foundation of China (11471230, 11671282).}} \author{Zhun Gou, Nan-jing Huang\footnote{Corresponding author. E-mail addresses: [email protected]; [email protected]}, Ming-hui Wang and Yao-jia Zhang\\ {\small\it Department of Mathematics, Sichuan University, Chengdu, Sichuan 610064, P.R. China}} \date{} \maketitle \begin{center} \begin{minipage}{5.5in} \noindent{\bf Abstract.} In this paper, we first introduce a new spatial-temporal interaction operator to describe the space-time dependent phenomena. Then we consider the stochastic optimal control of a new system governed by a stochastic partial differential equation with the spatial-temporal interaction operator. To solve such a stochastic optimal control problem, we derive an adjoint backward stochastic partial differential equation with spatial-temporal dependence by defining a Hamiltonian functional, and give both the sufficient and necessary (Pontryagin-Bismut-Bensoussan type) maximum principles. Moreover, the existence and uniqueness of solutions are proved for the corresponding adjoint backward stochastic partial differential equations. Finally, our results are applied to study the population growth problems with the space-time dependent phenomena. \\ \ \\ {\bf Keywords:} Stochastic partial differential equation; Spatial-temporal dependence; Spatial-temporal interaction operator; Stochastic optimal control problem; Maximum principle. \\ \ \\ {\bf 2010 Mathematics Subject Classification}: 60H10, 60J75, 91B70, 92D25, 93E20. 
\end{minipage} \end{center} \section{Introduction} \paragraph{} In the last decades, many scholars have focused on the topic of stochastic partial differential equations (SPDEs), which has many real-world applications \cite{holden1996stochastic, liu2016analysis, ma1997Adapted, mijena2016intermittence}. In this paper, we consider a stochastic optimal control problem governed by a new SPDE with a spatial-temporal interaction operator which can be used to describe the space-time dependent phenomena appearing in population growth problems. To explain the motivations of our work, we first recall some recent works concerning the stochastic optimal control problems governed by SPDEs. In 2005, {\O}ksendal \cite{Oksendal2005optimal} studied the stochastic optimal control problem governed by the SPDE, proved a sufficient maximum principle for the problem, and applied the results to solve the optimal harvesting problem described by the SPDE without the time delay. However, there are many models with past dependence in the real world, in which the optimal control problems governed by some dynamic systems with time delays have more practical applications. For example, for biological reasons, time delays occur naturally in population dynamic models \cite{Mohammed1998Stochastic, Oksendal2011optimal}. Therefore, when dealing with optimal harvesting problems of biological systems, one can be led to the optimal control problems of the systems with time delays. Motivated by this fact, {\O}ksendal et al. \cite{oksendal2012Optimal} investigated the stochastic optimal control problem governed by the delay stochastic partial differential equation (DSPDE), established both sufficient and necessary stochastic maximum principles for this problem, and illustrated their results by an application to the optimal harvesting problem from a biological system. 
Besides, we note that another area of applications is mathematical finance, where time delays in the dynamics can represent memory or inertia in the financial system (see, for example, \cite{agram2019stochastic}). Some other applications, we refer the reader to \cite{basse2018multivariate, Gopalsamy2013Stability, Kocic2010Generalized, meng2015optimal, Mokkedem2019Optimal} and the references therein. On the other hand, it is equally important to study the stochastic optimal control problem governed by dynamic system with the spatial dependence because it also has many applications in real problems such as the harvesting problems of biological systems \cite{hening2018stochastic, Schreiber2009Invasion}. To deal with the problems, Agram et al. \cite{agram2019spdes} introduced the space-averaging operator and considered a system of the SPDE with this type operator. Then they proved both sufficient and necessary stochastic maximum principles for the problem governed by such an SPDE and applied the results to solve the optimal harvesting problem for a population growth system in an environment with space-mean interactions. Following \cite{agram2019spdes}, Agram et al. \cite{Agram2019Singular} also solved a singular control problem of optimal harvesting from a fish population, of which the density is driven by the SPDE with the space-averaging operator. For some related works concerned with the optimal control problems for SPDEs, we refer the reader to \cite{bensoussan2004stochastic, Da2014Stochastic, Dumitrescu2018Stochastic, Fuhrman2016Stochastic, Hu1990Maximum, lu2015Stochastic, Wu2019Boundary}. Now, a natural question arises: can we describe both of the past dependence and the space-mean dependence in the same framework? Moreover, the question is generalized as follows: can we describe the spatial-temporal dependence of the state in the stochastic system? To this end, we construct the spatial-temporal interaction operator. 
Then, we consider the stochastic optimal control problem in which the state is governed by the new system of the SPDE with this operator in the filtered probability space $(\Omega,\mathscr{F},\mathscr{F}_t,\mathbb{P})$ satisfying the usual hypothesis. This system takes the following form: \begin{equation}\label{SDE} \begin{cases} dX(t,x)&=\left(A_xX(t,x)+b(t,x)-u(t,x)\right)dt+\sigma(t,x)dB_t+\int_{\mathbb{R}_0}\gamma(t,x,\zeta)\widetilde{N}(dt,d\zeta),\\ &\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\;(t,x)\in[0,T]\times D;\\ X(t,x)&=\xi(t,x), \qquad\qquad\qquad\qquad\qquad\qquad\,\qquad\qquad (t,x)\in(0,T]\times \partial D;\\ X(t,x)&=\eta(t,x), \qquad\qquad\qquad\qquad\qquad\qquad\,\qquad\qquad (t,x)\in[-\delta,0]\times \overline{D};\\ u(t,x)&=\beta(t,x), \qquad\qquad\qquad\qquad\qquad\qquad\qquad\,\qquad (t,x)\in[-\delta,0]\times \overline{D}, \end{cases} \end{equation} where $dX(t,x)$ is the differential with respect to $t$, $u(t,x)$ is the control process and $D\subset \mathbb{R}^d$ is an open set with $C^1$ boundary $\partial D$. Moreover, $\overline{D}=D\cup \partial D$. We have used simplified notations in equation \eqref{SDE} such that \begin{align*} b(t,x)&=b(t,x,X(t,x),\overline{X}(t,x),u(t,x),\overline{u}(t,x)),\\ \sigma(t,x)&=\sigma(t,x,X(t,x),\overline{X}(t,x),u(t,x),\overline{u}(t,x)),\\ \gamma(t,x,\zeta)&=\gamma(t,x,\zeta,X(t,x),\overline{X}(t,x),u(t,x),\overline{u}(t,x)), \end{align*} where $\overline{X}(t,x)$ denotes the space-time dependent density. Consequently, we focus on the study of the following stochastic optimal control problem which captures the spatial-temporal dependence. 
\begin{prob}\label{problem} Suppose that the performance functional associated to the control $u\in \mathcal{U}^{ad}$ takes the form \begin{equation*} J(u)=\mathbb{E}\left[\int_0^T\int_Df(t,x,X(t,x),\overline{X}(t,x),u(t,x),\overline{u}(t,x))dxdt+\int_Dg(x,X(T,x))dx\right], \end{equation*} where $X(t,x)$ is described by \eqref{SDE}, $f$ and $g$ are two given functions satisfying some mild conditions, and $\mathcal{U}^{ad}$ is the set of all admissible control processes. The problem is to find the optimal control $\widehat{u}=\widehat{u}(t,x) \in \mathcal{U}^{ad}$ such that \begin{equation}\label{prob} J(\widehat{u})=\sup \limits_{u\in\mathcal{U}^{ad}}J(u). \end{equation} \end{prob} The rest of this paper is structured as follows. The next section introduces some necessary preliminaries including the definition of the spatial-temporal interaction operator, and derives an adjoint backward stochastic partial differential equation (BSPDE) with spatial-temporal dependence by defining a Hamiltonian functional. In Section 3, the sufficient and necessary maximum principles of the related control problem are derived, respectively. In Section 4, the existence and uniqueness of solutions are obtained for the related BSPDE of the control problem with the spatial-temporal interaction operator. Finally, two examples are presented in Section 5 as applications of our main results. \section{Preliminaries} In this section, some necessary definitions and propositions are given to state \eqref{SDE} in detail. We also give several examples to show that all these definitions are well-posed. 
Now, in \eqref{SDE}, the terms $B_t$ and $\widetilde{N}(dt,d\zeta)$ denote a one-dimensional $\mathcal{F}_t$-adapted Brownian motion and a compensated Poisson random measure, respectively, such that $$ \widetilde{N}(dt,d\zeta)={N}(dt,d\zeta)-\nu(dt,d\zeta), $$ where ${N}(dt,d\zeta)$ is a Poisson random measure associated with the one-dimensional $\mathcal{M}_t$-adapted Poisson process $P_N(t)$ defined on $\mathbb{R}_0=\mathbb{R}\setminus \{0\}$ with the characteristic measure $\nu(dt,d\zeta)$. Here, $B_t$ and $P_N(t)$ are mutually independent. Moreover, $\sigma$-algebras $\mathcal{F}=(\mathcal{F}_t)_{t\geq0}$ and $\mathcal{M}=(\mathcal{M}_t)_{t\geq0}$ are right-continuous and increasing. The augmented $\sigma$-algebra $\mathscr{F}_t$ is generated by $$ \mathscr{F}_t=\sigma\left(\mathcal{F}_t\vee\mathcal{M}_t\right). $$ We extend $X(t,x)$ to the process on $[0,T]\times \mathbb{R}^d$ by setting $$ X(t,x)=0,\quad (t,x)\in [-\delta,T]\times \mathbb{R}^d\setminus \overline{D}. $$ Next, we recall some useful sets and spaces which will be used throughout this paper. \begin{defn}${}$ \begin{itemize} \item $H=L^2(D)$ is the set of all Lebesgue measurable functions $f:D\rightarrow\mathbb{R}$ such that $$ \|f\|_{H}:=\left(\int_{D}|f(x)|^2dx\right)^{\frac{1}{2}}<\infty,\quad x\in D. $$ In addition, $\langle f(x),g(x)\rangle_H=\int_{D}f(x)g(x)dx$ denotes the inner product in $H$. \item $\mathcal{R}$ denotes the set of Lebesgue measurable functions $r:\mathbb{R}_0\times D\rightarrow \mathbb{R}$. $L^2_{\nu}(H)$ is the set of all Lebesgue measurable functions $\gamma\in \mathcal{R}$ such that $$ \|\gamma\|_{L^2_{\nu}(H)}:=\left(\int_{D}\int_{\mathbb{R}_0}|\gamma(x,\zeta)|^2\nu(d\zeta) dx\right)^{\frac{1}{2}}<\infty,\quad x\in D. $$ \item $H_{T}=L^2_{\mathscr{F}}([0,T]\times \Omega,H)$ is the set of all $\mathscr{F}$-adapted processes $X(t,x)$ such that $$ \|X(t,x)\|_{H_T}:=\mathbb{E}\left(\int_{D}\int_0^T|X(t,x)|^2dtdx\right)^{\frac{1}{2}}<\infty. 
$$ \item $H^{-\delta}_{T}=L^2_{\mathscr{F}}([-\delta,T]\times \Omega,H)$ is the set of all $\mathscr{F}$-adapted processes $X(t,x)$ such that $$ \|X(t,x)\|_{H^{-\delta}_T}:=\mathbb{E}\left(\int_{D}\int_{-\delta}^T|X(t,x)|^2dtdx\right)^{\frac{1}{2}}<\infty. $$ \item $V=W^{1,2}(D)$ is a separable Hilbert space (the Sobolev space of order $1$) which is continuously, densely imbedded in $H$. Consider the topological dual of $V$ as follows: $$ V\subset H\cong H^{*}\subset V^{*}. $$ In addition, let $\langle A_xu,u\rangle_{*}$ be the duality product between $V$ and $V^{*}$, and $\|\cdot\|_V$ the norm in the Hilbert space $V$. \item $\mathcal{U}^{ad}$ is the set of all stochastic processes which take values in a convex subset $\mathcal{U}$ of $\mathbb{R}^d$ and are adapted to a given subfiltration $\mathbb{G}=(\mathcal{G}_t)_{t\geq0}$. Here, $\mathcal{G}_t\subseteq \mathscr{F}_t$ for all $t\geq0$. Moreover, $\mathcal{U}^{ad}$ is called the set of admissible control processes $u$. \end{itemize} \end{defn} \begin{defn} The adjoint operator $A_x^{*}$ of a linear operator $A_x$ on $C_0^{\infty}(\mathbb{R}^d)$ is defined by $$ \langle A_x\phi,\psi\rangle_{L^2(\mathbb{R}^d)}=\langle \phi,A_x^{*}\psi\rangle_{L^2(\mathbb{R}^d)},\quad \forall \phi,\psi\in C_0^{\infty}(\mathbb{R}^d). $$ Here, $\langle \phi_1,\phi_2\rangle_{L^2(\mathbb{R}^d)}=\int_{\mathbb{R}^d}\phi_1(x)\phi_2(x)dx$ is the inner product in $L^2(\mathbb{R}^d)$. 
If $A_x$ is the second order partial differential operator acting on $x$ given by $$ A_x\phi=\sum \limits_{i,j=1}^n \alpha_{ij}(x)\frac{\partial^2 \phi}{\partial x_i\partial x_j}+\sum\limits_{i=1}^n\beta_{i}(x)\frac{\partial \phi}{\partial x_i},\quad \forall \phi\in C^2(\mathbb{R}^d), $$ where $(\alpha_{ij}(x))_{1\leq i,j\leq n}$ is a given nonnegative definite $n\times n$ matrix with entries $\alpha_{ij}(x)\in C^2(D)\bigcap C(\overline{D})$ for all $i,j=1,2,\ldots, n$ and $\beta_{i}(x)\in C^2(D)\bigcap C(\overline{D})$ for all $i=1,2,\ldots, n$, then it is easy to show that $$ A_x^{*}\phi=\sum \limits_{i,j=1}^n \frac{\partial^2 }{\partial x_i\partial x_j}(\alpha_{ij}(x)\phi(x))-\sum\limits_{i=1}^n\frac{\partial}{\partial x_i}(\beta_{i}(x)\phi(x)),\quad \forall \phi\in C^2(\mathbb{R}^d). $$ \end{defn} We interpret $X(t,x)$ as a weak (variational) solution to \eqref{SDE}, if for $t\in[0,T]$ and all $\phi\in C_0^{\infty}(D)$, the following equation holds. \begin{align}\label{+1} \langle X(t,x),\phi\rangle_H=&\langle \beta(0,x),\phi\rangle_H+\int_0^t\langle X(s,x),A_x^{*}\phi\rangle_{*}ds+\int_0^t\langle b(s,X(s,x)),\phi\rangle_Hds\nonumber\\ &+\int_0^t\langle \sigma(s,X(s,x)),\phi\rangle_HdB_s+\int_0^t\int_{\mathbb{R}_0}\langle \gamma(s,X(s,x),\zeta),\phi\rangle_Hd\widetilde{N}(s,\zeta). \end{align} In equation \eqref{+1}, these coefficients $b$, $\sigma$ and $\gamma$ are all the simplified notations. Now, we give the definition of the spatial-temporal interaction operator. \begin{defn}\label{+3} $S$ is said to be a spatial-temporal interaction operator if it takes the following form \begin{eqnarray}\label{space-averaging} S(X(t,x))=\int_{R_{\theta}}\int_{t-\delta}^tQ(t,s,x,y)X(s,x+y)dsdy\quad (X(t,x)\in H^{-\delta}_{T}), \end{eqnarray} where $Q(t,s,x,y)$ denotes the density function such that \begin{equation}\label{+2} \int_{y-R_{\theta}}\int_{s\vee 0}^{(s+\delta)\wedge T}|Q(t,s,x,y-x)|^2 dtdx\leq M. 
\end{equation} Here the set $$ R_{\theta}=\{y\in \mathbb{R}^d;\|y\|_2<\theta\} $$ is an open ball of radius $\theta>0$ centered at $0$, where $\|\cdot\|_2$ represents the Euclid norm in $\mathbb{R}^d$. \end{defn} \begin{prop}\label{+6} For any $X(t,x)\in H^{-\delta}_{T}$, one has \begin{equation}\label{norm of S} \|S(X(t,x))\|_{H_T}\leq \sqrt{M}\|X(t,x)\|_{H^{-\delta}_{T}}. \end{equation} This implies that $S:H^{-\delta}_{T}\rightarrow H_{T}$ is a bounded linear operator. \end{prop} \begin{proof} Applying Cauchy-Schwartz's inequality and Fubini's theorem, we have \begin{align*} \|S(X(t,x))\|^2_{H_T}&=\mathbb{E}\left[\int_D\int_0^T\left[\int_{R_{\theta}}\int_{t-\delta}^tQ(t,s,x,y)X(s,x+y)dsdy\right]^2dxdt\right]\\ &\leq \mathbb{E}\left[\int_D\int_0^T\int_{R_{\theta}}\int_{t-\delta}^t|Q(t,s,x,y)|^2|X(s,x+y)|^2 dsdydxdt\right]\\ &=\mathbb{E}\left[\int_D\int_{-\delta}^T\int_{R_{\theta}}\left(\int_{s\vee 0}^{(s+\delta)\wedge T}|Q(t,s,x,y)|^2 dt\right)|X(s,x+y)|^2dydsdx\right]\\ &=\mathbb{E}\left[\int_D\int_{-\delta}^T\int_{x+R_{\theta}}\left(\int_{s\vee 0}^{(s+\delta)\wedge T}|Q(t,s,x,z-x)|^2 dt\right)|X(s,z)|^2dzdsdx\right]\\ &=\mathbb{E}\left[\int_D\int_{-\delta}^T\left(\int_{D\cap(z-R_{\theta})}\int_{s\vee 0}^{(s+\delta)\wedge T}|Q(t,s,x,z-x)|^2 dtdx\right)|X(s,z)|^2ds dz\right]\\ &\leq M\mathbb{E}\left[\int_D\int_{-\delta}^T|X(s,z)|^2dzds\right]=M\|X(t,x)\|_{H^{-\delta}_{T}}^2 \end{align*} This completes the proof. \end{proof} \begin{example}\label{example} We give examples for spatial-temporal interaction operators in the following three cases, respectively. 
\begin{enumerate}[($\romannumeral1$)] \item If we set $$ Q_0(t,s,x,y-x)=e^{-\rho_1(t-s)}e^{-\rho_2\|y-x\|_2}, $$ where $\rho_1,\rho_2$ are two positive constants, then $Q_0(t,s,x,y-x)$ clearly satisfies condition \eqref{+2} and $S_0:H^{-\delta}_{T}\rightarrow H_{T}$, $$ S_0(X(t,x))=\int_{R_{\theta}}\int_{t-\delta}^te^{-\rho_1(t-s)}e^{-\rho_2\|y\|_2}X(s,x+y)dsdy \quad (\forall X(t,x)\in H^{-\delta}_{T}) $$ becomes the spatial-temporal interaction operator. It shows that an increase in distance $\|y\|_2$ or time interval $t-s$ results in a decreasing effect for local population density. \item When there is no temporal dependence, we set $S_1:H\rightarrow H:$ $$ S_1(X(t,x))=\int_{R_{\theta}}Q_1(x,y)X(t,x+y)dy \quad (\forall X(t,x)\in H), $$ where the density function $Q_1(x,y)$ satisfies $$ \int_{y-R_{\theta}}|Q_1(x,y-x)|^2 dx\leq M. $$ For $Q_1(x,y)=\frac{1}{V(R_{\theta})}$, where $V(\cdot)$ is the Lebesgue volume in $\mathbb{R}^d$, $S_1$ reduces to the space-averaging operator proposed in \cite{agram2019spdes}. \item When there is no spatial dependence, we set $S_2:H^{-\delta}_{T}\rightarrow H_{T}$, $$ S_2(X(t,x))=\int_{t-\delta}^{t}Q_2(t,s)X(s,x) ds \quad (\forall X(t,x)\in H^{-\delta}_{T}), $$ where the density function $Q_2(t,s)$ satisfies $$ \int_{s\vee 0}^{(s+\delta)\wedge T}|Q_2(t,s)|^2 dt\leq M. $$ For $Q_2(t,s)=1$, $S_2$ reduces to the well-known moving average operator. \end{enumerate} \end{example} In the sequel, we illustrate the Fr\'{e}chet derivative for spatial-temporal interaction operators. \begin{defn} The Fr\'{e}chet derivative $\nabla_{S}F$ of a map $F:H^{-\delta}_{T}\rightarrow H_{T}$ has a dual function if $$ \mathbb{E}\left[\int_D\int_0^T\langle \nabla_SF,X\rangle(t,x) dxdt\right]=\mathbb{E}\left[\int_D\int_{-\delta}^T\nabla_S^*F(t,x)X(t,x)dxdt\right], \quad \forall X(t,x)\in H^{-\delta}_{T}. 
$$ \end{defn} \begin{example}\label{exam1} Let $F:H^{-\delta}_{T}\rightarrow H_{T}$ be a given map by setting $$ F(X)(t,x)=\langle F,X\rangle(t,x)=S(X(t,x))=\int_{R_{\theta}}\int_{t-\delta}^tQ(t,s,x,y)X(s,x+y)dsdy,(t\geq 0)\quad X(t,x)\in H^{-\delta}_{T}. $$ Since $F$ is linear, for any $X(t,x)\in H^{-\delta}_{T}$, we have $$ \langle \nabla_SF,\psi\rangle(t,x)=\langle F,\psi\rangle(t,x)=\int_{R_{\theta}}\int_{t-\delta}^tQ(t,s,x,y)X(s,x+y)dsdy $$ and so \begin{align*} &\mathbb{E}\left[\int_D\int_0^T\langle \nabla_SF,\psi\rangle dxdt\right]\\ =&\mathbb{E}\left[\int_D\int_0^T\int_{R_{\theta}}\int_{t-\delta}^tQ(t,s,x,y)X(s,x+y)dsdy dxdt\right]\\ =&\mathbb{E}\left[\int_D\int_{-\delta}^T\int_{R_{\theta}}\left(\int_{s\vee 0}^{(s+\delta)\wedge T}Q(t,s,x,y) dt\right)X(s,x+y)dydxds\right]\\ =&\mathbb{E}\left[\int_D\int_{-\delta}^T\left(\int_{D\cap(z-R_{\theta})}\int_{s\vee 0}^{(s+\delta)\wedge T}Q(t,s,x,z-x) dtdx\right)X(s,z)dsdz\right], \quad \forall \psi\in H. \end{align*} This implies that $$ \nabla^{*}_{S}F(s,z)=\int_{D\cap(z-R_{\theta})}\int_{s\vee 0}^{(s+\delta)\wedge T}Q(t,s,x,z-x) dtdx. $$ Therefore for $t\in[-\delta,T]$, $$ \nabla^{*}_{S}F(t,x)=\int_{D\cap(x-R_{\theta})}\int_{t\vee 0}^{(t+\delta)\wedge T}Q(s,t,y,x-y) dsdy =\int_{D}\int_{t}^{T}Q(s,t,y,x-y)\mathbb{I}_{x-R_{\theta}}(y)\mathbb{I}_{[0,T-\delta]}(t) dsdy. $$ \end{example} \begin{remark} For any $X=X(t,x)\in H$, we set $$ \overline{X}(t,x)=S(X(t,x)), \quad \overline{u}(t,x)=S(u(t,x)). $$ \end{remark} Now, we introduce these coefficients of SPDE \eqref{SDE} and the functions in Problem \ref{problem} in detail. 
We assume that all of these are functions in $C^1(H)$ and take the following forms: \begin{align*} b(t,x,X,S_X,u,S_u)=&b(t,x,X,S_X,u,S_u,\omega):E\rightarrow \mathbb{R};\\ \sigma(t,x,X,S_X,u,S_u)=&\sigma(t,x,X,S_X,u,S_u,\omega):E\rightarrow \mathbb{R};\\ \gamma(t,x,X,S_X,u,S_u,\zeta)=&\gamma(t,x,X,S_X,u,S_u,\zeta,\omega):E'\rightarrow \mathbb{R};\\ f(t,x,X,S_X,u,S_u)&=f(t,x,X,S_X,u,S_u,\omega):E\rightarrow \mathbb{R};\\ g(x,X(T))&=g(x,X(T),\omega):E''\rightarrow\mathbb{R}, \end{align*} where \begin{align*} E=&[-\delta,T]\times D\times\mathbb{R}\times \mathbb{R}\times \mathcal{U}^{ad}\times \mathbb{R} \times \Omega;\\ E'=&[-\delta,T]\times D\times\mathbb{R}\times \mathbb{R}\times \mathcal{U}^{ad}\times \mathbb{R} \times \mathbb{R}_0 \times \Omega;\\ E''=&D\times \mathbb{R}\times \Omega. \end{align*} Next, we define the related Hamiltonian functional. \begin{defn}\label{+4} Define the Hamiltonian functional with respect to the optimal control problem \eqref{prob} by $H:[0,T+\delta]\times D\times \mathbb{R}\times \mathscr{L}(\mathbb{R}^d)\times \mathbb{R} \times\mathcal{U}^{ad}\times \mathbb{R}\times \mathbb{R}\times \mathbb{R}\times \mathcal{R}\times \Omega\rightarrow \mathbb{R}$ as follows: \begin{align}\label{+14} H(t,x)&=H(t,x,X,S_X,u,S_u,p,q,r(\cdot))\nonumber\\ &=H(t,x,X,S_X,u,S_u,p,q,r(\cdot),\omega)\nonumber\\ &=f(t,x,X,S_X,u,S_u)+b(t,x,X,S_X,u,S_u)p+\sigma(t,x,X,S_X,u,S_u)q\nonumber\\ &\quad \mbox{}+\int_{\mathbb{R}_0}\gamma(t,x,X,S_X,u,S_u,\zeta)rd\zeta \end{align} Moreover, we suppose that functions $b$, $\sigma$, $\gamma$, $f$ and $H$ all admit bounded Fr\'{e}chet derivatives with respect to $X$, $S_X$, $u$ and $S_u$, respectively. \end{defn} We associate the following adjoint BSPDE to the Hamiltonian \eqref{+14} in the unknown processes $p(t,x),q(t,x),r(t,x,\cdot)$. 
\begin{equation}\label{+5} \begin{cases} dp(t,x)&=-\left(\frac{\partial H}{\partial X}(t,x)+A^{*}_xp(t,x)+\mathbb{E}\left[\nabla^*_{S_X}H(t,x)\Big|\mathscr{F}_t\right]\right)dt+q(t,x)dB_t+\int_{\mathbb{R}_0}r(t,x,\zeta)\widetilde{N}(dt,d\zeta),\\ &\quad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\quad\;\;(t,x)\in[0,T]\times D;\\ p(t,x)&=\frac{\partial g}{\partial X}(T,x),\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\;\quad (t,x)\in[T,T+\delta]\times \overline{D};\\ p(t,x)&=0, \qquad\qquad\qquad\qquad\qquad\quad\;\;\qquad\qquad\qquad\qquad\quad (t,x)\in[0,T)\times \partial D;\\ q(t,x)&=0, \qquad\qquad\qquad\qquad\qquad\quad\;\;\qquad\qquad\qquad\qquad\quad (t,x)\in[T,T+\delta]\times \overline{D};\\ r(t,x,\cdot)&=0, \qquad\qquad\qquad\qquad\qquad\quad\;\;\qquad\qquad\qquad\qquad\quad (t,x)\in[T,T+\delta]\times \overline{D}. \end{cases} \end{equation} \section{Maximum principles} We are now able to derive the sufficient version of the maximum principle. \subsection{A sufficient maximum principle} \begin{assumption}\label{+11} Let $\widehat{u}\in \mathcal{U}^{ad}$ be a control with corresponding solutions $\widehat{X}(t,x)$ to \eqref{SDE} and $(\widehat{p}(t,x),\widehat{q}(t,x),\widehat{r}(t,x,\cdot))$ to \eqref{+5}, respectively. Furthermore, the control and the solutions satisfy \begin{enumerate}[($\romannumeral1$)] \item $\widehat{X}\Big|_{t\in[0,T]}\in H_T$; \item $(\widehat{p},\widehat{q},\widehat{r}(\cdot))\Big|_{(t,x)\in[0,T]\times \overline{D}}\in V\times H\times L^2_{\nu}(H)$; \item $\mathbb{E}\left[\int_0^T\|\widehat{p}(t,x)\|_V^2+\|\widehat{q}(t,x)\|_H^2ds+\|\widehat{r}(t,x,\cdot)\|_{L^2_{\nu}(H)}^2dt\right]<\infty.$ \end{enumerate} \end{assumption} \begin{thm}\label{sufficient} Suppose that Assumption \ref{+11} holds. 
For arbitrary $u\in \mathcal{U}$, put \begin{align*} H(t,x)=H(t,x,\widehat{X},\widehat{S}_X,u,S_u,\widehat{p},\widehat{q},\widehat{r}(\cdot)),\quad \widehat{H}(t,x)=H(t,x,\widehat{X},\widehat{S}_X,\widehat{u},\widehat{S}_u,\widehat{p},\widehat{q},\widehat{r}(\cdot)). \end{align*} Assume that \begin{itemize} \item (Concavity) For each $t\in [0,T]$, the functions \begin{align*} (X,S_X,u,S_u)&\rightarrow H(t,x,X,S_X,u,S_u,\widehat{p},\widehat{q},\widehat{r}),\\ X(T)&\rightarrow g(x,X(T)) \end{align*} are concave a.s.. \item (Maximum condition) For each $t\in [0,T]$, $$ \mathbb{E}\left[\widehat{H}(t,x)\Big|\mathcal{G}_t\right]=\sup \limits_{u\in \mathcal{U}}\mathbb{E}\left[H(t,x)\Big|\mathcal{G}_t\right],\quad a.s.. $$ \end{itemize} Then, $\widehat{u}$ is an optimal control. \end{thm} \begin{proof} Consider \begin{equation}\label{1} J(u)-J(\widehat{u})=I_1+I_2. \end{equation} Here, \begin{align*} I_1=&\mathbb{E}\bigg[\int_0^T\int_Df(t,x,X(t,x),\overline{X}(t,x),u(t,x),\overline{u}(t,x))-f(t,x,\widehat{X}(t,x),\overline{\widehat{X}}(t,x),\widehat{u}(t,x),\overline{\widehat{u}}(t,x))dxdt\bigg],\\ I_2=&\int_D\mathbb{E}\left[g(x,X(T,x))-g(x,\widehat{X}(T,x))\right]dx. 
\end{align*} Setting $\widetilde{X}(t,x)=X(t,x)-\widehat{X}(t,x)$ and applying It\^{o}'s formula, one has \begin{align}\label{2} I_2\leq&\int_D\mathbb{E}\left[\frac{\partial\widehat{g}}{\partial X}(T,x)\widetilde{X}(T,x)\right]dx=\int_D\mathbb{E}\left[\widehat{p}(T,x)\widetilde{X}(T,x)\right]dx\\ \nonumber =&\int_D\mathbb{E}\left[\int_0^T\widehat{p}(t,x)d\widetilde{X}(t,x)+\widetilde{X}(t,x)d\widehat{p}(t,x)+\widehat{q}(t,x)\widetilde{\sigma}(t,x)dt +d\widehat{p}(t,x)d\widetilde{X}(t,x)\right]\\ \nonumber =&\int_D\mathbb{E}\Bigg\{\int_0^T\widehat{p}(t,x)\left[\widetilde{b}(t,x)+A_x\widetilde{X}(t,x)\right]-\widetilde{X}(t,x)\left[\frac{\partial\widehat{H}}{\partial X}(t,x)+A^{*}_x\widetilde{p}(t,x)+\mathbb{E}\left[\nabla^*_{S}H(t,x)\Big|\mathscr{F}_t\right]\right]\\ &+\widehat{q}(t,x)\widetilde{\sigma}(t,x)dt +\int_{\mathbb{R}_0}\widehat{r}(t,x,\zeta)\widetilde{\gamma}(t,x,\zeta)\nu(d\zeta,dt)\Bigg\}dx. \end{align} By the First Green formula \cite{Wloka1987Partial}, there exist first order boundary differential operators $A_1$ and $A_2$ such that \begin{equation}\label{3} \int_D\widehat{p}(t,x)A_x\widetilde{X}(t,x)-\widetilde{X}(t,x)A^{*}_x\widetilde{p}(t,x)dx=\int_{\partial D}\widehat{p}(t,x)A_1\widetilde{X}(t,x)-\widetilde{X}(t,x)A_2\widetilde{p}(t,x)d\mathcal{S}=0. 
\end{equation} Combining \eqref{2}, \eqref{3} and the fact that $u(t)$ is $\mathcal{G}_t$-measurable gives \begin{align*} I_2\leq& \int_D\mathbb{E}\Bigg\{\int_0^T\widehat{p}(t,x)\widetilde{b}(t,x)-\widetilde{X}(t,x)\left[\frac{\partial\widehat{H}}{\partial X}(t,x)+\mathbb{E}\left[\nabla^*_{S_X}H(t,x)\Big|\mathscr{F}_t\right]\right]+\widehat{q}(t,x)\widetilde{\sigma}(t,x)dt\\ &+\int_{\mathbb{R}_0}\widehat{r}(t,x,\zeta)\widetilde{\gamma}(t,x,\zeta)\nu(d\zeta,dt)\Bigg\}dx\\ =&-I_1+\int_D\mathbb{E}\left[\int_0^TH(t,x)-\widetilde{X}(t,x)\left[\frac{\partial\widehat{H}}{\partial X}(t,x)+\mathbb{E}\left[\nabla^*_{S_X}H(t,x)\Big|\mathscr{F}_t\right]\right]dt\right]dx\\ \leq& -I_1+\int_D\mathbb{E}\left[\int_0^T\widetilde{u}(t,x)\left[\frac{\partial\widehat{H}}{\partial u}(t,x)+\mathbb{E}\left[\nabla^*_{S_u}H(t,x)\Big|\mathscr{F}_t\right]\right]dt\right]dx\\ =& -I_1+\int_D\mathbb{E}\left[\int_0^T\widetilde{u}(t,x)\frac{\partial\widehat{H}}{\partial u}(t,x)+\widetilde{u}(t,x)\nabla^*_{S_u}H(t,x)dt\right]dx\\ =& -I_1+\int_D\mathbb{E}\left[\int_0^T\widetilde{u}(t,x)\mathbb{E}\left[\frac{\partial\widehat{H}}{\partial u}(t,x)+\nabla^*_{S_u}H(t,x)\Big|\mathcal{G}_t\right]dt\right]dx\\ \leq& -I_1, \end{align*} where the last inequality is derived by the maximum condition imposed on $H(t,x)$. This implies that $$ J(u)-J(\widehat{u})=I_1+I_2\leq 0. $$ Therefore $\widehat{u}$ becomes the optimal control. \end{proof} \subsection{A Necessary Maximum Principle} We now proceed to study the necessary version of maximum principle. \begin{assumption}\label{ass nece} For each $t_0\in[0,T]$ and all bounded $\mathcal{G}_{t_0}$-measurable random variable $\pi(x)$, the process $\vartheta(t,x)=\pi(x)\mathbb{I}_{(t_0,T]}(t)$ belongs to $\mathcal{U}^{ad}$. \end{assumption} \begin{remark} Thanking to the convex condition imposed on $\mathcal{U}^{ad}$, one has $$ u^{\epsilon}=\widehat{u}+\epsilon u\in \mathcal{U}^{ad},\;\epsilon\in[0,1] $$ for any $u,\widehat{u}\in \mathcal{U}^{ad}$. 
\end{remark} Consider the process $Z(t,x)$ obtained by differentiating $X^{\epsilon}(t,x)$ with respect to $\epsilon$ at $\epsilon=0$. Clearly, $Z(t,x)$ satisfies the following equation: \begin{equation}\label{SPDE nece} \begin{cases} dZ(t,x)=&\left[\left(\frac{\partial b}{\partial X}(t,x)+\mathbb{E}\left[\nabla^*_{S_X}b(t,x)\Big|\mathscr{F}_t\right]\right)Z(t,x) +\left(\frac{\partial b}{\partial u}(t,x)+\mathbb{E}\left[\nabla^*_{S_u}b(t,x)\Big|\mathscr{F}_t\right]\right)u(t,x)\right]dt\\ &+\left[\left(\frac{\partial \sigma}{\partial X}(t,x)+\mathbb{E}\left[\nabla^*_{S_X}\sigma(t,x)\Big|\mathscr{F}_t\right]\right)Z(t,x) +\left(\frac{\partial \sigma}{\partial u}(t,x)+\mathbb{E}\left[\nabla^*_{S_u}\sigma(t,x)\Big|\mathscr{F}_t\right]\right)u(t,x)\right]dB_t\\ &+\int_{\mathbb{R}_0}\Big[\left(\frac{\partial \gamma}{\partial X}(t,x,\zeta)+\mathbb{E}\left[\nabla^*_{S_X}\gamma(t,x,\zeta)\Big|\mathscr{F}_t\right]\right)Z(t,x) +\left(\frac{\partial \gamma}{\partial u}(t,x,\zeta)+\mathbb{E}\left[\nabla^*_{S_u}\gamma(t,x,\zeta)\Big|\mathscr{F}_t\right]\right)\\ &u(t,x)\Big]\widetilde{N}(dt,d\zeta)+A_xZ(t,x)dt,\qquad\qquad\qquad\qquad\qquad\qquad\quad(t,x)\in(0,T)\times D,\\ Z(t,x)&=0,\qquad\qquad\qquad\qquad\qquad\qquad\quad\qquad\qquad\qquad\qquad\qquad\qquad(t,x)\in[-\delta,0]\times \overline{D}. \end{cases} \end{equation} \begin{thm}\label{thm nece} Suppose that Assumptions \ref{+11} and \ref{ass nece} hold. Then the following conditions are equivalent. \begin{enumerate}[($\romannumeral1$)] \item For all bounded $u \in \mathcal{U}^{ad}$, \begin{equation}\label{nece1} 0=\frac{d}{d\epsilon}J(\widehat{u}+\epsilon u)\Big|_{\epsilon=0}. \end{equation} \item \begin{equation}\label{nece2} 0=\int_D\mathbb{E}\left[\frac{\partial H}{\partial u}(t,x)+\nabla^*_{S_u}H(t,x)\Big|\mathcal{G}_t\right]dx\Big|_{u=\widehat{u}},\quad \forall t\in[0,T]. \end{equation} \end{enumerate} \end{thm} \begin{proof} Assume that \eqref{nece1} holds. 
Then \begin{align}\label{5} 0=&\frac{d}{d\epsilon}J(\widehat{u}+\epsilon u)\Big|_{\epsilon=0} \nonumber\\ =&\mathbb{E}\left[\int_0^T\int_D\left[\left(\frac{\partial f}{\partial X}(t,x)+\mathbb{E}\left[\nabla^*_{S_X}f(t,x)\Big|\mathscr{F}_t\right]\right)Z(t,x) +\left(\frac{\partial f}{\partial u}(t,x)+\mathbb{E}\left[\nabla^*_{S_u}f(t,x)\Big|\mathscr{F}_t\right]\right)u(t,x)\right]dxdt\right]\nonumber\\ &+\mathbb{E}\left[\int_D\frac{\partial \widehat{g}}{\partial X}(T,x)Z(T,x) dx\right], \end{align} where $Z(t,x)$ is the solution to \eqref{SPDE nece}. By It\^{o}'s formula, \begin{align}\label{6} &\mathbb{E}\left[\int_D\frac{\partial \widehat{g}}{\partial X}(T,x)Z(T,x) dx\right]=\mathbb{E}\left[\int_D\widehat{p}(T,x)Z(T,x)dx\right]\nonumber\\ =&\mathbb{E}\bigg[\int_Ddx\int_0^T\widehat{p}(t,x)dZ(t,x)+Z(t,x)d\widehat{p}(t,x)+dZ(t,x)d\widehat{p}(t,x)\bigg]\nonumber\\ =&\mathbb{E}\bigg[\int_Ddx\int_0^T\widehat{p}(t,x)\Big[\left(\frac{\partial b}{\partial X}(t,x)+\mathbb{E}\left[\nabla^*_{S_X}b(t,x)\Big|\mathscr{F}_t\right]\right)Z(t,x) +\left(\frac{\partial b}{\partial u}(t,x)+\mathbb{E}\left[\nabla^*_{S_u}b(t,x)\Big|\mathscr{F}_t\right]\right)u(t,x)\nonumber\\ &+A_xZ(t,x)\Big]dt-\Big(\frac{\partial H}{\partial X}(t,x)+A^{*}_x\widehat{p}(t,x)+\mathbb{E}\left[\nabla^*_{S_X}H(t,x)\Big|\mathscr{F}_t\right]\Big)Z(t,x)dt+\widehat{q}(t,x)\Big[\Big(\frac{\partial \sigma}{\partial X}(t,x) \nonumber\\ &+\mathbb{E}\left[\nabla^*_{S_X}\sigma(t,x)\Big|\mathscr{F}_t\right]\Big)Z(t,x)+\left(\frac{\partial \sigma}{\partial u}(t,x)+\mathbb{E}\left[\nabla^*_{S_u}\sigma(t,x)\Big|\mathscr{F}_t\right]\right)u(t,x)\Big]dt+\int_{\mathbb{R}_0}\widehat{r}(t,x,\zeta)\Big[\Big(\frac{\partial \gamma}{\partial X}(t,x,\zeta) \nonumber\\ &+\mathbb{E}\left[\nabla^*_{S_X}\gamma(t,x,\zeta)\Big|\mathscr{F}_t\right]\Big)Z(t,x) +\Big(\frac{\partial \gamma}{\partial 
u}(t,x,\zeta)+\mathbb{E}\left[\nabla^*_{S_u}\gamma(t,x,\zeta)\Big|\mathscr{F}_t\right]\Big)u(t,x)\Big]\nu(dt,d\zeta)\bigg]\nonumber\\ =&-\mathbb{E}\left[\int_0^T\int_D\left[\left(\frac{\partial f}{\partial X}(t,x)+\mathbb{E}\left[\nabla^*_{S_X}f(t,x)\Big|\mathscr{F}_t\right]\right)Z(t,x) +\left(\frac{\partial f}{\partial u}(t,x)+\mathbb{E}\left[\nabla^*_{S_u}f(t,x)\Big|\mathscr{F}_t\right]\right)u(t,x)\right]dxdt\right]\nonumber\\ &+\mathbb{E}\left[\int_0^T\int_D\frac{\partial H}{\partial u}(t,x)u(t,x)+\mathbb{E}\left[\nabla^*_{S_u}H(t,x)\Big|\mathscr{F}_t\right]u(t,x)dxdt\right], \end{align} where the last step follows from the first Green formula \cite{Wloka1987Partial}. Combining \eqref{5} and \eqref{6}, one has $$ 0=\mathbb{E}\left[\int_0^T\int_D\frac{\partial H}{\partial u}(t,x)u(t,x)+\mathbb{E}\left[\nabla^*_{S_u}H(t,x)\Big|\mathscr{F}_t\right]u(t,x)dxdt\right]. $$ Now we set $u(t,x)=\pi(x)\mathbb{I}_{(t_0,T]}(t)$, where $\pi(x)$ is a bounded $\mathcal{G}_{t_0}$-measurable random variable. Then, we have \begin{align*} 0= &\int_0^T\mathbb{E}\left[\int_D\frac{\partial H}{\partial u}(t,x)\pi(x)\mathbb{I}_{(t_0,T]}(t)+\mathbb{E}\left[\nabla^*_{S_u}H(t,x)\Big|\mathscr{F}_t\right]\pi(x)\mathbb{I}_{(t_0,T]}(t)dx\right]dt\\ =&\int_{t_0}^T\mathbb{E}\left[\int_D\frac{\partial H}{\partial u}(t,x)\pi(x)+\mathbb{E}\left[\nabla^*_{S_u}H(t,x)\Big|\mathscr{F}_t\right]\pi(x)dx\right]dt\\ =&\int_{t_0}^T\mathbb{E}\left[\int_D\frac{\partial H}{\partial u}(t,x)\pi(x)+\nabla^*_{S_u}H(t,x)\pi(x)dx\right]dt. \end{align*} Differentiating with respect to $t_0$, it follows that $$ 0=\mathbb{E}\left[\int_D\frac{\partial H}{\partial u}(t_0,x)\pi(x)+\nabla^*_{S_u}H(t_0,x)\pi(x)dx\right],\quad \forall t_0\in[0,T]. $$ Since this holds for all such $\pi(x)$, we have $$ 0=\int_D\mathbb{E}\left[\frac{\partial H}{\partial u}(t_0,x)+\nabla^*_{S_u}H(t_0,x)\Big|\mathcal{G}_{t_0}\right]dx,\quad \forall t_0\in[0,T]. $$ The argument above is reversible. Thus \eqref{nece1} and \eqref{nece2} are equivalent. 
\end{proof} \section{Existence and Uniqueness} In this section, we prove the existence and uniqueness of the solution to the following general BSPDE \eqref{+5} with spatial-temporal dependence: \begin{equation}\label{BSPDE} \begin{cases} dp(t,x)&=-\left(A_xp(t,x)-\mathbb{E}[F(t)|\mathscr{F}_t]\right)dt+q(t,x)dB_t+\int_{\mathbb{R}_0}r(t,x,\zeta)\widetilde{N}(dt,d\zeta),\\ &\qquad\qquad\qquad\qquad\qquad\qquad\;\:\qquad\quad\qquad(t,x)\in[0,T]\times D;\\ p(t,x)&=\theta(t,x),\qquad\quad\qquad\qquad\qquad\quad\:\:\qquad\quad (t,x)\in[T,T+\delta]\times \overline{D};\\ p(t,x)&=\chi(t,x), \qquad\qquad\qquad\qquad\qquad\:\;\quad\qquad (t,x)\in[0,T)\times \partial D;\\ q(t,x)&=0, \qquad\qquad\qquad\qquad\qquad\quad\qquad\;\;\,\qquad (t,x)\in[T,T+\delta]\times \overline{D};\\ r(t,x,\cdot)&=0, \qquad\qquad\qquad\qquad\qquad\quad\qquad\;\;\,\qquad (t,x)\in[T,T+\delta]\times \overline{D}. \end{cases} \end{equation} Here $F=F(t):[0,T+\delta]\times \mathbb{R}\times \mathbb{R}\times \mathbb{R}\times \mathbb{R}\times \mathbb{R}\times \mathbb{R}\rightarrow \mathbb{R}$ is a functional on $C^1(H)$ as follows: $$ F(t)=F(t,p(t,x),\overline{p}(t+\delta,x),q(t,x),\overline{q}(t+\delta,x),r(t,x,\cdot),\overline{r}(t+\delta,x,\cdot)). $$ \begin{assumption}\label{Ax condition} Assume that $A_x:V \rightarrow V^{*}$ is a bounded linear operator. Moreover, there exist two constants $\alpha_1>0$ and $\alpha_2 \geq 0$ such that \begin{equation}\label{7} 2\langle A_xu,u\rangle_{*}+\alpha_1 \|u\|^2_V\leq \alpha_2\|u\|^2_H, \quad \forall u\in V. 
\end{equation} \end{assumption} \begin{assumption}\label{exi and uni} Suppose that the following assumptions hold: \begin{enumerate}[($\romannumeral1$)] \item $\theta(t,x)$ is a given $\mathscr{F}_t$-measurable process such that $$ \mathbb{E}\left[\sup \limits_{t\in[T,T+\delta]}\|\theta(t,x)\|_H^2\right]< \infty; $$ \item $F(t,0,0,0,0,0,0)\in H_{T}$; \item For any $t,p_i,\overline{p}^{\delta}_i,q_i,\overline{q}^{\delta}_i,r_i,\overline{r}^{\delta}_i\;(i=1,2)$, there is a constant $C>0$ such that \begin{align*} &|F(t,p_1,\overline{p}^{\delta}_1,q_1,\overline{q}^{\delta}_1,r_1,\overline{r}^{\delta}_1) -F(t,p_2,\overline{p}^{\delta}_2,q_2,\overline{q}^{\delta}_2,r_2,\overline{r}^{\delta}_2)|^2\\ \leq&C\left(|p_1-p_2|^2+|q_1-q_2|^2+\int_{\mathbb{R}_0}|r_1-r_2|^2\nu(d\zeta)+|\overline{p}^{\delta}_1-\overline{p}^{\delta}_2|^2+|\overline{q}^{\delta}_1-\overline{q}^{\delta}_2|^2 +\int_{\mathbb{R}_0}|\overline{r}^{\delta}_1-\overline{r}^{\delta}_2|^2\nu(d\zeta)\right). \end{align*} \end{enumerate} \end{assumption} In the sequel, we use $C$ to denote a constant large enough that all the inequalities below are satisfied. \begin{thm}\label{exi and uni thm} Under Assumptions \ref{Ax condition} and \ref{exi and uni}, BSPDE \eqref{BSPDE} has a unique solution $(p,q,r(\cdot))$ such that the restriction on $(t,x)\in [0,T]\times \overline{D}$ satisfies \begin{enumerate}[($\romannumeral1$)] \item $(p,q,r(\cdot))\Big|_{(t,x)\in[0,T]\times \overline{D}}\in V\times H\times L^2_{\nu}(H)$, \item $\mathbb{E}\left[\int_0^T\|p(t,x)\|_V^2+\|q(t,x)\|_H^2+\|r(t,x,\cdot)\|_{L^2_{\nu}(H)}^2dt\right]<\infty.$ \end{enumerate} \end{thm} \begin{proof} We decompose the proof into five steps. 
{\bf Step 1:} Assume that the driver $F(t)$ is independent of $p$ and $\overline{p}$, so that $(p,q,r(\cdot))\in V\times H\times L^2_{\nu}(H)$ satisfies \begin{equation}\label{+13} \begin{cases} dp(t,x)=&-\left(A_xp(t,x)-\mathbb{E}[F(t,q(t,x),\overline{q}(t+\delta,x),r(t,x,\cdot),\overline{r}(t+\delta,x,\cdot))|\mathscr{F}_t]\right)dt+q(t,x)dB_t\\ &+\int_{\mathbb{R}_0}r(t,x,\zeta)\widetilde{N}(dt,d\zeta),\qquad\qquad\qquad\quad\,\,\qquad(t,x)\in[0,T]\times D;\\ p(t,x)=&\zeta(t,x),\qquad\quad\qquad\qquad\qquad\quad\:\:\qquad\quad\qquad\quad\:\,(t,x)\in[T,T+\delta]\times \overline{D};\\ p(t,x)=&\theta(t,x), \qquad\qquad\qquad\qquad\qquad\:\;\quad\qquad\qquad\quad\:\, (t,x)\in[0,T)\times \partial D;\\ q(t,x)=&0, \qquad\qquad\qquad\qquad\qquad\quad\qquad\;\;\,\qquad\qquad\quad\:\, (t,x)\in[T,T+\delta]\times \overline{D};\\ r(t,x,\cdot)=&0, \qquad\qquad\qquad\qquad\qquad\quad\qquad\;\;\,\qquad\qquad\quad\:\, (t,x)\in[T,T+\delta]\times \overline{D}. \end{cases} \end{equation} We first prove the existence and uniqueness of solutions to \eqref{+13}. 
By Theorem 4.2 in \cite{oksendal2012Optimal}, it is easy to show that for each fixed $n\in \mathbb{N}$, there exists a unique solution to the following stochastic partial differential equation \begin{equation}\label{BSPDE n} \begin{cases} dp^{n+1}(t,x)=&\mathbb{E}\Big[F\left(t,q^n(t,x),\overline{q}^n(t+\delta,x),r^n(t,x,\cdot),\overline{r}^n(t+\delta,x,\cdot)\right)\Big|\mathscr{F}_t\Big]dt-A_xp^{n+1}(t,x)dt\\ &+q^{n+1}(t,x)dB_t+\int_{\mathbb{R}_0}r^{n+1}(t,x,\zeta)\widetilde{N}(dt,d\zeta),\qquad\qquad(t,x)\in[0,T]\times D;\\ p^{n+1}(t,x)=&\zeta(t,x),\qquad\quad\qquad\qquad\quad\:\:\qquad\qquad\qquad\qquad\qquad\quad\;\, (t,x)\in[T,T+\delta]\times \overline{D};\\ p^{n+1}(t,x)=&\theta(t,x), \qquad\qquad\qquad\qquad\:\;\qquad\qquad\qquad\qquad\qquad\quad\;\, (t,x)\in[0,T)\times \partial D;\\ q^{n+1}(t,x)=&0, \qquad\qquad\qquad\qquad\qquad\;\;\,\qquad\qquad\qquad\qquad\qquad\quad\;\, (t,x)\in[T,T+\delta]\times \overline{D};\\ r^{n+1}(t,x,\cdot)=&0, \qquad\qquad\qquad\qquad\qquad\;\;\,\qquad\qquad\qquad\qquad\qquad\quad\;\, (t,x)\in[T,T+\delta]\times \overline{D}. \end{cases} \end{equation} such that $$ (p^{n+1},q^{n+1},r^{n+1}(\cdot))\Big|_{(t,x)\in[0,T]\times \overline{D}}\in V\times H\times L^2_{\nu}(H). $$ Here, $q^{0}(t,x)=r^{0}(t,x,\cdot)=0$ for all $(t,x)\in [0,T+\delta]\times \overline{D}$. We now aim to show that $\{(p^n,q^n,r^n(\cdot))\}_{n\in\mathbb{N}}$ forms a Cauchy sequence. 
By arguments similar to those in Proposition \ref{+6}, we have \begin{align*} &\mathbb{E}\left[\int_t^T\|\overline{p}^{n+1}(s+\delta,x)-\overline{p}^n(s+\delta,x)\|^2_{H}ds\right]\\ =&\mathbb{E}\left[\int_D\int_t^T\int_{R_{\theta}}\int_{s}^{s+\delta}|Q(s+\delta,\varsigma,x,y)|^2|p^{n+1}(\varsigma,x+y)-p^{n}(\varsigma,x+y)|^2 d\varsigma dydxds\right]\\ \leq&\mathbb{E}\left[\int_D\int_{t}^{T+\delta}\left(\int_{D\cap(z-R_{\theta})}\int_{(\varsigma-\delta)\vee t}^{\varsigma\wedge T}|Q(s+\delta,\varsigma,x,z-x)|^2 dsdx\right)|p^{n+1}(\varsigma,z)-p^{n}(\varsigma,z)|^2d\varsigma dz\right]\\ \leq& C\mathbb{E}\left[\int_D\int_{t}^{T+\delta}|p^{n+1}(\varsigma,z)-p^{n}(\varsigma,z)|^2d\varsigma dz\right]=C\mathbb{E}\left[\int_{t}^{T}\|p^{n+1}(s,x)-p^n(s,x)\|_{H}^2ds\right]. \end{align*} Similarly, one has \begin{equation}\label{91} \mathbb{E}\left[\int_t^T\|\overline{q}^{n+1}(s+\delta,x)-\overline{q}^n(s+\delta,x)\|^2_{H}ds\right]\leq C\mathbb{E}\left[\int_{t}^{T}\|q^{n+1}(s,x)-q^n(s,x)\|_{H}^2ds\right] \end{equation} and \begin{equation}\label{92} \mathbb{E}\left[\int_t^T\|\overline{\gamma}^{n+1}(s+\delta,x,\cdot)-\overline{\gamma}^n(s+\delta,x,\cdot)\|^2_{H}ds\right]\leq C\mathbb{E}\left[\int_{t}^{T}\|\gamma^{n+1}(s,x,\cdot)-\gamma^n(s,x,\cdot)\|_{L^2_{\nu}(H)}^2ds\right]. \end{equation} For simplicity, we write \begin{align*} F^n(t)&=F(t,q^{n}(t,x),\overline{q}^{n}(t+\delta,x),r^{n}(t,x,\cdot),\overline{r}^{n}(t+\delta,x,\cdot));\\ L^n(s)&=\|q^{n}(s,x)-q^{n-1}(s,x)\|_H^2+\|r^{n}(s,x,\cdot)-r^{n-1}(s,x,\cdot)\|_{L^2_{\nu}(H)}^2. 
\end{align*} {\bf Step 2:} Applying It\^{o}'s formula to $\|p^{n+1}(t,x)-p^n(t,x)\|_H^2$, we have \begin{align}\label{pr1} &-\|p^{n+1}(t,x)-p^n(t,x)\|_H^2 \nonumber\\ =&2\int_t^T\langle p^{n+1}(s,x)-p^n(s,x),A_x(p^{n+1}(s,x)-p^n(s,x))\rangle_{*} ds \nonumber\\ &-2\int_t^T\langle p^{n+1}(s,x)-p^n(s,x),\mathbb{E}\left[F^{n}(s)-F^{n-1}(s)\Big|\mathscr{F}_s\right]\rangle_H ds+\int_t^T\|q^{n+1}(s,x)-q^n(s,x)\|_{H}^2ds \nonumber\\ &+2\int_t^T\langle p^{n+1}(s,x)-p^n(s,x),q^{n+1}(s,x)-q^n(s,x)\rangle_H dB_s+\int_t^T\|r^{n+1}(s,x,\cdot)-r^n(s,x,\cdot)\|_{L^2_{\nu}(H)}^2ds \nonumber\\ &+2\int_t^T\int_{\mathbb{R}_0}\langle p^{n+1}(s,x)-p^n(s,x),r^{n+1}(s,x,\zeta)-r^n(s,x,\zeta) \rangle_H \widetilde{N}(ds,d\zeta). \end{align} By the Lipschitz condition imposed on $F$, \eqref{91} and \eqref{92}, for any fixed constant $\rho> 0$, one has \begin{align}\label{pr2} &-2\mathbb{E}\Big[\int_t^T\langle p^{n+1}(s,x)-p^n(s,x),\mathbb{E}\left[F^{n}(s)-F^{n-1}(s)\Big|\mathscr{F}_s\right]\rangle_H ds\Big] \nonumber\\ =&-2\mathbb{E}\Big[\int_t^T\langle p^{n+1}(s,x)-p^n(s,x),F^{n}(s)-F^{n-1}(s)\rangle_H ds\Big] \nonumber\\ \leq& 2\mathbb{E}\Big[\int_t^T\|p^{n+1}(s,x)-p^n(s,x)\|_H\|F^{n}(s)-F^{n-1}(s)\|_Hds\Big] \nonumber\\ \leq& \frac{1}{\rho}\mathbb{E}\Big[\int_t^T\|p^{n+1}(s,x)-p^n(s,x)\|_H^2ds\Big]+\rho \mathbb{E}\Big[\int_t^T\|F^{n}(s)-F^{n-1}(s)\|_H^2ds\Big] \nonumber\\ \leq& \frac{1}{\rho}\mathbb{E}\Big[\int_t^T\|p^{n+1}(s,x)-p^n(s,x)\|_H^2ds\Big]+ \rho C\mathbb{E}\Big[\int_t^T\|q^{n}(s,x)-q^{n-1}(s,x)\|_H^2\nonumber\\ &+\|r^{n}(s,x,\cdot)-r^{n-1}(s,x,\cdot)\|_{L^2_{\nu}(H)}^2+\|\overline{q}^{n}(s+\delta,x)-\overline{q}^{n-1}(s+\delta,x)\|^2_{H}\nonumber\\ &+\|\overline{\gamma}^{n}(s+\delta,x,\cdot)-\overline{\gamma}^{n-1}(s+\delta,x,\cdot)\|^2_{H}ds\Big]\nonumber\\ \leq&\frac{1}{\rho}\mathbb{E}\Big[\int_t^T\|p^{n+1}(s,x)-p^n(s,x)\|_H^2ds\Big]+\rho C\mathbb{E}\left[\int_t^TL^n(s)ds\right]. 
\end{align} Taking expectation on both sides of \eqref{pr1}, and applying both \eqref{7} and \eqref{pr2}, we have \begin{align}\label{pr8} &\mathbb{E}\|p^{n+1}(t,x)-p^n(t,x)\|_H^2 \nonumber\\ \leq& \mathbb{E}\left[\int_t^T\alpha_2\|p^{n+1}(s,x)-p^n(s,x)\|_H^2-\alpha_1\|p^{n+1}(s,x)-p^n(s,x)\|_V^2ds\right]\nonumber\\ &+\frac{1}{\rho}\mathbb{E}\left[\int_t^T\|p^{n+1}(s,x)-p^n(s,x)\|_H^2ds\right]-\mathbb{E}\left[\int_t^T\|q^{n+1}(s,x)-q^n(s,x)\|_H^2ds\right]\nonumber\\ &+\rho C\mathbb{E}\left[\int_t^TL^n(s)ds\right]-\mathbb{E}\left[\int_t^T\|r^{n+1}(s,x,\cdot)-r^n(s,x,\cdot)\|_{L^2_{\nu}(H)}^2ds\right]\nonumber\\ \leq& \left(\frac{1}{\rho}+\alpha_2\right)\mathbb{E}\left[\int_t^T\|p^{n+1}(s,x)-p^n(s,x)\|_H^2ds\right]+\rho C\mathbb{E}\left[\int_t^TL^n(s)ds\right]\nonumber\\ &-\mathbb{E}\left[\int_t^TL^{n+1}(s)ds\right]-\alpha_1\mathbb{E}\left[\int_t^T\|p^{n+1}(s,x)-p^n(s,x)\|_V^2ds\right]. \end{align} We can choose a $\rho>0$ such that \begin{align}\label{pr4} &\mathbb{E}\|p^{n+1}(t,x)-p^n(t,x)\|_H^2+\mathbb{E}\left[\int_t^TL^{n+1}(s)ds\right]+\alpha_1\mathbb{E}\left[\int_t^T\|p^{n+1}(s,x)-p^n(s,x)\|_V^2ds\right]\nonumber\\ \leq&\frac{1}{2}\mathbb{E}\left[\int_t^TL^n(s)ds\right]+\alpha_3\mathbb{E}\left[\int_t^T\|p^{n+1}(s,x)-p^n(s,x)\|_H^2ds\right], \end{align} where $\alpha_3=\alpha_2+\frac{1}{\rho}$. Multiplying by $e^{\alpha_3 t}$ and integrating both sides over $[0,T]$, we have \begin{align*} &\int_0^Te^{\alpha_3 t}\mathbb{E}\left[\|p^{n+1}(t,x)-p^n(t,x)\|_H^2-\alpha_3\int_t^T\|p^{n+1}(s,x)-p^n(s,x)\|_H^2ds\right]dt\nonumber\\ &+\int_0^Te^{\alpha_3 t}\mathbb{E}\left[\int_t^T L^{n+1}(s)ds\right]dt\leq\frac{1}{2}\int_0^Te^{\alpha_3 t}\mathbb{E}\left[\int_t^T L^n(s)ds\right]dt \end{align*} and so \begin{equation}\label{10} \|p^{n+1}(t,x)-p^n(t,x)\|_{H_T}^2+\int_0^Te^{\alpha_3 t}\mathbb{E}\left[\int_t^T L^{n+1}(s)ds\right]dt\leq \frac{1}{2}\int_0^Te^{\alpha_3 t}\mathbb{E}\left[\int_t^T L^n(s)ds\right]dt. 
\end{equation} In particular, \begin{equation}\label{9} \int_0^Te^{\alpha_3 t}\mathbb{E}\left[\int_t^T L^{n+1}(s)ds\right]dt\leq \frac{1}{2}\int_0^Te^{\alpha_3 t}\mathbb{E}\left[\int_t^T L^n(s)ds\right]dt\leq C(\frac{1}{2})^n. \end{equation} {\bf Step 3: Existence} Substituting \eqref{9} into \eqref{10}, one has $$ \|p^{n+1}(t,x)-p^n(t,x)\|_{H_T}^2\leq C(\frac{1}{2})^n. $$ It follows from \eqref{pr4} that \begin{align*} \mathbb{E}\left[\int_t^T L^{n+1}(s)ds\right]&\leq C(\frac{1}{2})^n+\frac{1}{2}\mathbb{E}\left[\int_t^T L^{n}(s)ds\right]\\ &\leq C(\frac{1}{2})^n+C(\frac{1}{2})^n+\frac{1}{2^2}\mathbb{E}\left[\int_t^T L^{n-1}(s)ds\right]\\ &\leq \frac{nC}{2^n}+\frac{1}{2^n}\mathbb{E}\left[\int_t^TL^{1}(s)ds\right]\\ &\leq \frac{nC}{2^n}+\frac{C}{2^n}=\frac{(n+1)C}{2^n}. \end{align*} Letting $n\rightarrow\infty$, we have \begin{equation}\label{+7} 0=\lim \limits_{n\rightarrow\infty}\mathbb{E}\left[\int_t^T L^{n+1}(s)ds\right]. \end{equation} Substituting \eqref{+7} into \eqref{pr4} yields $$ \lim \limits_{n\rightarrow\infty}\mathbb{E}\left[\int_t^T\|p^{n+1}(s,x)-p^n(s,x)\|_V^2ds\right]=0. $$ Therefore, for $(t,x)\in[0,T]\times\overline{D}$, the sequence $\{(p^n,q^n,r^n(\cdot))\in V\times H\times L^2_{\nu}(H)\}$ converges to $(p,q,r(\cdot))\in V\times H\times L^2_{\nu}(H)$. Letting $n\rightarrow\infty$ in \eqref{BSPDE n}, we can see that the limit $(p,q,r(\cdot))=\lim \limits_{n\rightarrow\infty}(p^n,q^n,r^n(\cdot))$ is indeed the solution to \eqref{+13} on $(t,x)\in[0,T]\times\overline{D}$. {\bf Step 4: Uniqueness} Suppose that $(p,q,r(\cdot))$, $(p^{(0)},q^{(0)},r^{(0)}(\cdot))$ are two solutions to \eqref{+13}. 
By the same arguments as in {\bf Step 2}, we see that \begin{align*} &\mathbb{E}\|p(t,x)-p^{(0)}(t,x)\|_H^2+\mathbb{E}\left[\int_t^T\alpha_1\|p(s,x)-p^{(0)}(s,x)\|_V^2ds\right]+\frac{1}{2}\mathbb{E}\bigg[\int_t^T\|p(s,x)-p^{(0)}(s,x)\|_H^2\\ &+\|q(s,x)-q^{(0)}(s,x)\|_H^2+\|r(s,x,\cdot)-r^{(0)}(s,x,\cdot)\|_{L^2_{\nu}(H)}^2ds\bigg]\leq\alpha_3\mathbb{E}\left[\int_t^T\|p(s,x)-p^{(0)}(s,x)\|_H^2ds\right]. \end{align*} It follows that $$ \mathbb{E}\|p(t,x)-p^{(0)}(t,x)\|_H^2\leq \alpha_3\mathbb{E}\left[\int_t^T\|p(s,x)-p^{(0)}(s,x)\|_H^2ds\right]. $$ By Gronwall's Lemma, we know that $\mathbb{E}\|p(t,x)-p^{(0)}(t,x)\|_H^2=0$ and $p(t,x)=p^{(0)}(t,x)$ a.s. and so \begin{align*} &\mathbb{E}\left[\int_t^T\alpha_1\|p(s,x)-p^{(0)}(s,x)\|_V^2ds\right]+\frac{1}{2}\mathbb{E}\left[\int_t^T\|q(s,x)-q^{(0)}(s,x)\|_H^2ds\right] \\ &+\frac{1}{2}\mathbb{E}\left[\int_t^T\|r(s,x,\cdot)-r^{(0)}(s,x,\cdot)\|_{L^2_{\nu}(H)}^2ds\right]\leq 0, \end{align*} which implies $q(t,x)=q^{(0)}(t,x)$ and $r(t,x,\cdot)=r^{(0)}(t,x,\cdot)$ a.s. 
{\bf Step 5: General case} Consider the following iteration with general driver $F$: \begin{equation}\label{BSPDE n2} \begin{cases} dp^{n+1}(t,x)&=-A_xp^{n+1}(t,x)dt+\mathbb{E}\Big[F\left(t,p^n(t,x),\overline{p}^n(t+\delta,x),q^{n+1}(t,x),\overline{q}^{n+1}(t+\delta,x),\right.\\ &\quad\left.r^{n+1}(t,x,\cdot),\overline{r}^{n+1}(t+\delta,x,\cdot)\right)\Big|\mathscr{F}_t\Big]dt+q^{n+1}(t,x)dB_t+\int_{\mathbb{R}_0}r^{n+1}(t,x,\zeta)\widetilde{N}(dt,d\zeta),\\ &\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\;\;(t,x)\in[0,T]\times D;\\ p^{n+1}(t,x)&=\zeta(t,x),\qquad\quad\qquad\qquad\quad\:\:\qquad\qquad\qquad\qquad\qquad (t,x)\in[T,T+\delta]\times \overline{D};\\ p^{n+1}(t,x)&=\theta(t,x), \qquad\qquad\qquad\qquad\:\;\qquad\qquad\qquad\qquad\qquad (t,x)\in[0,T)\times \partial D;\\ q^{n+1}(t,x)&=0, \qquad\qquad\qquad\qquad\qquad\;\;\,\qquad\qquad\qquad\qquad\qquad (t,x)\in[T,T+\delta]\times \overline{D};\\ r^{n+1}(t,x,\cdot)&=0, \qquad\qquad\qquad\qquad\qquad\;\;\,\qquad\qquad\qquad\qquad\qquad (t,x)\in[T,T+\delta]\times \overline{D}, \end{cases} \end{equation} where $p^0(t,x)=0$. Similar to the proofs of {\bf Steps 1-2}, we can easily obtain the following inequality: \begin{align*} &\mathbb{E}\|p^{n+1}(t,x)-p^n(t,x)\|_H^2+(1-\rho C)\mathbb{E}\left[\int_t^TL^{n+1}(s)ds\right]+\alpha_1\mathbb{E}\left[\int_t^T\|p^{n+1}(s,x)-p^n(s,x)\|_V^2ds\right]\\ &-\alpha_3\mathbb{E}[\int_t^T\|p^{n+1}(s,x)-p^n(s,x)\|_H^2ds]\leq \rho C\mathbb{E}\left[\int_t^T\|p^{n}(s,x)-p^{n-1}(s,x)\|_H^2ds\right]. 
\end{align*} Choosing $\rho=\frac{1}{2C}$, we have \begin{align*} &\mathbb{E}\|p^{n+1}(t,x)-p^n(t,x)\|_H^2-\alpha_3\mathbb{E}[\int_t^T\|p^{n+1}(s,x)-p^n(s,x)\|_H^2ds]\\ \leq&\mathbb{E}\|p^{n+1}(t,x)-p^n(t,x)\|_H^2+\frac{1}{2}\mathbb{E}\left[\int_t^TL^{n+1}(s)ds\right]+\alpha_1\mathbb{E}\left[\int_t^T\|p^{n+1}(s,x)-p^n(s,x)\|_V^2ds\right]\\ &-\alpha_3\mathbb{E}[\int_t^T\|p^{n+1}(s,x)-p^n(s,x)\|_H^2ds]\\ \leq& \frac{1}{2}\mathbb{E}\left[\int_t^T\|p^{n}(s,x)-p^{n-1}(s,x)\|_H^2ds\right]. \end{align*} Multiplying by $e^{\alpha_3 t}$ and integrating both sides over $[\tau,T]$, one has \begin{align*} \mathbb{E}\left[\int_{\tau}^T\|p^{n+1}(s,x)-p^n(s,x)\|_H^2ds\right] \leq& \frac{1}{2}\int_{\tau}^Te^{\alpha_3 t}\mathbb{E}\left[\int_t^T\|p^{n}(s,x)-p^{n-1}(s,x)\|_H^2dsdt\right]\\ \leq& C\mathbb{E}\left[\int_{\tau}^T\int_t^T\|p^{n}(s,x)-p^{n-1}(s,x)\|_H^2dsdt\right]\\ \leq& C\int_{\tau}^T\mathbb{E}\left[\int_{t}^T\|p^{n}(s,x)-p^{n-1}(s,x)\|_H^2ds\right]dt. \end{align*} Note that for any $\tau\in[0,T]$, $$ \mathbb{E}\left[\int_{\tau}^T\|p^{2}(s,x)-p^1(s,x)\|_H^2ds\right]\leq C\int_{\tau}^T\mathbb{E}\left[\int_{0}^T\|p^{1}(s,x)-p^0(s,x)\|_H^2ds\right]dt\leq C^2(T-\tau). $$ Iterating the above inequality shows that \begin{equation}\label{+15} \mathbb{E}\left[\int_{\tau}^T\|p^{n+1}(s,x)-p^n(s,x)\|_H^2ds\right]\leq \frac{C^{n+1}(T-\tau)^n}{n!}. \end{equation} Using \eqref{+15} and arguments similar to those in {\bf Steps 3-4}, it can be seen that the limit $(p,q,r(\cdot))=\lim \limits_{n\rightarrow\infty}(p^n,q^n,r^n(\cdot))$ is indeed the solution to \eqref{+5} on $(t,x)\in[0,T]\times\overline{D}$. This ends the proof. \end{proof} \section{Applications} In this section, the results obtained in the previous sections are applied to stochastic population dynamics models with spatial-temporal dependence. 
As discussed in \cite{Oksendal2005optimal}, the following SPDE is a natural model for population growth: \begin{equation}\label{exam 0} \begin{cases} dX(t,x)&=\left[\frac{1}{2}\Delta X(t,x)+\alpha{X}(t,x)-u(t,x)\right]dt+\beta{X}(t,x)dB_t,\,(t,x)\in[0,T]\times D;\\ X(0,x)&=\xi(x), \qquad\qquad\qquad\qquad\qquad\quad\quad \qquad\qquad\qquad\;\,\;\,\;\; x\in \overline{D};\\ X(t,x)&=\eta(t,x)\geq0, \qquad\qquad\qquad\qquad\qquad\:\!\qquad\qquad\qquad\;\:\;\,\;\; (t,x)\in(0,T]\times \partial D.\\ \end{cases} \end{equation} Here, $X(t,x)$ is the density of a population (e.g. fish) at $(t,x)$, $u(t,x)$ is the harvesting rate at $(t,x)\in [0,T]\times \overline{D}$, and $$ \Delta=\sum \limits_{i=1}^n \frac{\partial^2}{\partial{x_i}^2} $$ is the Laplace operator. This type of equation is called the stochastic reaction-diffusion equation. We improve \eqref{exam 0} to the following system: \begin{equation}\label{exam max} \begin{cases} dX(t,x)&=\left[\frac{1}{2}\Delta X(t,x)-u(t,x)\right]dt+\left(\gamma_1(t,x)X(t,x)+\gamma_2(t,x)\overline{X}(t,x)\right)\left(\gamma_3(t,x)dt\right.\\ &\quad\left.+\gamma_4(t,x)dB_t+\int_{\mathbb{R}_0}\gamma_5(t,x,\zeta)\widetilde{N}(dt,d\zeta)\right),\quad\qquad\quad\:(t,x)\in[0,T]\times D;\\ X(t,x)&=\xi(t,x), \qquad\qquad\qquad\qquad\qquad\quad\quad \qquad\qquad\qquad\;\,\;(t,x)\in[-\delta,0]\times \overline{D};\\ X(t,x)&=\eta(t,x)\geq0, \qquad\qquad\qquad\qquad\qquad\:\!\qquad\qquad\qquad\;\:\;(t,x)\in(0,T]\times \partial D;\\ u(t,x)&=\beta(t,x)\geq0, \qquad\qquad\qquad\qquad\qquad\:\!\qquad\qquad\qquad\;\:\,(t,x)\in[-\delta,0]\times \overline{D}, \end{cases} \end{equation} where $\gamma_i(t,x)\in H_T\; (i=1,2,3,4)$, $\int_{\mathbb{R}_0}\gamma_5(t,x,\zeta)\widetilde{N}(dt,d\zeta)\in H_T$ are all given. Now, we give two examples for the stochastic optimal control problems governed by \eqref{exam max} under different performance functionals. 
\begin{example}\label{+9} We consider the following performance functional: \begin{equation}\label{per func} J_0(u)=\mathbb{E}\left[\int_D\int_0^T\log \left(u(t,x)\right)dtdx+\int_Dk(x)\log \left(X(T,x)\right)dx\right]. \end{equation} Here, $k(x)\in H$ is a nonnegative, $\mathscr{F}_T$-measurable process. Now, we aim to find $\widehat{u}(t,x)\in \mathcal{U}^{ad}$ such that $$ J_0(\widehat{u})=\sup \limits_{u\in\mathcal{U}^{ad}}J_0(u). $$ Since the Laplacian operator $\Delta$ is self-adjoint, the Hamiltonian functional associated to this problem takes the following form \begin{align*} H(t,x,S,z,u,p,q,r(\cdot))=&\log u(t,x)+[\gamma_1(t,x)\gamma_3(t,x)X(t,x)+\gamma_2(t,x)\gamma_3(t,x)\overline{X}(t,x)-u(t,x)]p(t,x)\\ &+\gamma_4(t,x)[\gamma_1(t,x)X(t,x)+\gamma_2(t,x)\overline{X}(t,x)]q(t,x)\\ &+\int_{\mathbb{R}_0}\gamma_5(t,x,\zeta)[\gamma_1(t,x)X(t,x)+\gamma_2(t,x)\overline{X}(t,x)]r(t,x,\zeta)\nu(d\zeta), \end{align*} where $(p,q,r(\cdot))$ is the unique solution to the following BSPDE: \begin{equation}\label{exam e} \begin{cases} dp(t,x)=&-\Big[\frac{1}{2}\Delta p(t,x)+\left(\gamma_1(t,x)+\gamma_2(t,x){\nabla}^{*}_{S}(t,x)\right)\left(\gamma_3(t,x)p(t,x)+\gamma_4(t,x)q(t,x)\right.\\ &\left.+\int_{\mathbb{R}_0}\gamma_5(t,x,\zeta)r(t,x,\zeta)\nu(d\zeta)\right)\Big]dt+q(t,x)dB_t+\int_{\mathbb{R}_0}r(t,x,\zeta)\widetilde{N}(dt,d\zeta),\quad(t,x)\in[0,T]\times D;\\ p(t,x)=&\frac{k(x)}{X(T,x)},\qquad\qquad\qquad\qquad\qquad\qquad\,\qquad\qquad\qquad\qquad\quad\:\: (t,x)\in[T,T+\delta]\times \overline{D};\\ p(t,x)=&0, \qquad\qquad\qquad\qquad\qquad\quad\;\;\qquad\qquad\qquad\qquad\qquad\: \qquad (t,x)\in[0,T)\times \partial D;\\ q(t,x)=&0, \qquad\qquad\qquad\qquad\qquad\quad\;\;\qquad\qquad\qquad\qquad\qquad\: \qquad (t,x)\in[T,T+\delta]\times \overline{D};\\ r(t,x,\cdot)=&0, \qquad\qquad\qquad\qquad\qquad\quad\qquad\qquad\qquad\qquad\qquad\quad\;\;\;\quad (t,x)\in[T,T+\delta]\times \overline{D}. 
\end{cases} \end{equation} Here ${\nabla}^{*}_{S}$ has been given in Example \ref{exam1}. By Theorems \ref{sufficient} and \ref{thm nece}, the optimal control $\widehat{u}$ satisfies $$ \widehat{u}(t,x)=\frac{1}{\widehat{p}(t,x)}, $$ where $(\widehat{p},\widehat{q},\widehat{r}(\cdot))$ is the unique solution to \eqref{exam e} for $u=\widehat{u}$ and $X=\widehat{X}$. \end{example} \begin{example}\label{+10} We modify \eqref{per func} to the following performance functional: \begin{equation}\label{per func2} J_1(u)=\mathbb{E}\left[\frac{1}{\beta}\int_D\int_0^Tu^{\beta}(t,x)dtdx+\int_Dk(x)X(T,x)dx\right]. \end{equation} Here, $\beta \in(0,1)$ is a constant and $k(x)\in H$ is a nonnegative, $\mathscr{F}_T$-measurable process. Now, we aim to find $\widehat{u}(t,x)\in \mathcal{U}^{ad}$ such that $$ J_1(\widehat{u})=\sup \limits_{u\in\mathcal{U}^{ad}}J_1(u). $$ The associated Hamiltonian functional in this example takes the following form \begin{align*} H(t,x,S,z,u,p,q,r(\cdot))=&\frac{1}{\beta} u^{\beta}(t,x)+[\gamma_1(t,x)\gamma_3(t,x)X(t,x)+\gamma_2(t,x)\gamma_3(t,x)\overline{X}(t,x)-u(t,x)]p(t,x)\\ &+\gamma_4(t,x)[\gamma_1(t,x)X(t,x)+\gamma_2(t,x)\overline{X}(t,x)]q(t,x)\\ &+\int_{\mathbb{R}_0}\gamma_5(t,x,\zeta)[\gamma_1(t,x)X(t,x)+\gamma_2(t,x)\overline{X}(t,x)]r(t,x,\zeta)\nu(d\zeta), \end{align*} where $(p,q,r(\cdot))$ is the unique solution to the following BSPDE: \begin{equation}\label{exam eq2} \begin{cases} dp(t,x)=&-\Big[\frac{1}{2}\Delta p(t,x)+\left(\gamma_1(t,x)+\gamma_2(t,x){\nabla}^{*}_{S}(t,x)\right)\left(\gamma_3(t,x)p(t,x)+\gamma_4(t,x)q(t,x)\right.\\ &\left.+\int_{\mathbb{R}_0}\gamma_5(t,x,\zeta)r(t,x,\zeta)\nu(d\zeta)\right)\Big]dt+q(t,x)dB_t+\int_{\mathbb{R}_0}r(t,x,\zeta)\widetilde{N}(dt,d\zeta),\quad(t,x)\in[0,T]\times D;\\ p(t,x)&=k(x),\qquad\qquad\qquad\qquad\qquad\qquad\,\qquad\qquad\qquad\qquad\quad\, (t,x)\in[T,T+\delta]\times \overline{D};\\ p(t,x)&=0, \qquad\qquad\qquad\qquad\qquad\quad\;\;\qquad\qquad\qquad\qquad\qquad\quad\, (t,x)\in[0,T)\times \partial D;\\ q(t,x)&=0, 
\qquad\qquad\qquad\qquad\qquad\quad\;\;\qquad\qquad\qquad\qquad\qquad\quad\, (t,x)\in[T,T+\delta]\times \overline{D};\\ r(t,x,\cdot)&=0, \qquad\qquad\qquad\qquad\qquad\quad\;\;\qquad\qquad\qquad\qquad\qquad\quad\, (t,x)\in[T,T+\delta]\times \overline{D}. \end{cases} \end{equation} Here ${\nabla}^{*}_{S}$ has been given in Example \ref{exam1}. By Theorems \ref{sufficient} and \ref{thm nece}, the optimal control $\widehat{u}$ satisfies $$ \widehat{u}(t,x)=(\widehat{p}(t,x))^{\frac{1}{\beta-1}}, $$ where $(\widehat{p},\widehat{q},\widehat{r}(\cdot))$ is the unique solution to \eqref{exam eq2} for $u=\widehat{u}$ and $X=\widehat{X}$. \end{example} \begin{remark} \begin{enumerate}[($\romannumeral1$)]\mbox{} \item If we take $\gamma_1(t,x)=1$, $\gamma_2(t,x)=\gamma_5(t,x,\zeta)=0$, $\gamma_3(t,x)=\gamma_3$, $\gamma_4(t,x)=\gamma_4$ and $k(x)=k>0$ in \eqref{exam max}, Example \ref{+10} reduces to Example 3.1 in \cite{Oksendal2005optimal}. \item If we take $\delta=0$, $\gamma_1(t,x)=0$, $\gamma_i(t,x)=\gamma_i\;(i=2,3,4)$, $\gamma_5(t,x,\zeta)=\gamma_5(\zeta)$, $\overline{X}(t,x)=S_1({X}(t,x))$ and $Q_1(x,y)=\frac{1}{V(R_{\theta})}$ in \eqref{exam max}, where $S_1$, $Q_1$ are represented in Example \ref{example}, Example \ref{+10} reduces to Optimal Harvesting (II) in \cite{agram2019spdes}. In addition, if $k(x)=1$, then Example \ref{+9} reduces to Optimal Harvesting (I) in \cite{agram2019spdes}. \end{enumerate} \end{remark} \end{document}